Diffstat (limited to 'arch/ia64/kernel/sys_ia64.c')
-rw-r--r--  arch/ia64/kernel/sys_ia64.c  183
1 file changed, 183 insertions, 0 deletions
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
new file mode 100644
index 000000000..41e33f84c
--- /dev/null
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -0,0 +1,183 @@
+/*
+ * This file contains various system calls that have different calling
+ * conventions on different platforms.
+ *
+ * Copyright (C) 1999-2000, 2002-2003, 2005 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <linux/shm.h>
+#include <linux/file.h> /* doh, must come after sched.h... */
+#include <linux/smp.h>
+#include <linux/syscalls.h>
+#include <linux/highuid.h>
+#include <linux/hugetlb.h>
+
+#include <asm/shmparam.h>
+#include <asm/uaccess.h>
+
+unsigned long
+arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ long map_shared = (flags & MAP_SHARED);
+ unsigned long align_mask = 0;
+ struct mm_struct *mm = current->mm;
+ struct vm_unmapped_area_info info;
+
+ if (len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+
+ /* handle fixed mapping: prevent overlap with huge pages */
+ if (flags & MAP_FIXED) {
+ if (is_hugepage_only_range(mm, addr, len))
+ return -EINVAL;
+ return addr;
+ }
+
+#ifdef CONFIG_HUGETLB_PAGE
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+#endif
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+ if (map_shared && (TASK_SIZE > 0xfffffffful))
+ /*
+ * For 64-bit tasks, align shared segments to 1MB to avoid potential
+ * performance penalty due to virtual aliasing (see ASDM). For 32-bit
+ * tasks, we prefer to avoid exhausting the address space too quickly by
+ * limiting alignment to a single page.
+ */
+ align_mask = PAGE_MASK & (SHMLBA - 1);
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = addr;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = align_mask;
+ info.align_offset = 0;
+ return vm_unmapped_area(&info);
+}
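
Not part of the patch above: the align_mask arithmetic is the interesting bit, asking vm_unmapped_area() for SHMLBA (1 MB on ia64) alignment of shared mappings, expressed at page granularity. A minimal user-space sketch of the same expression, assuming 16 KB pages and a 1 MB SHMLBA (typical ia64 values, not something this patch defines):

/* Sketch only, not part of the patch: reproduces the align_mask
 * computation with assumed constants (16 KB pages, 1 MB SHMLBA). */
#include <stdio.h>

int main(void)
{
	unsigned long page_size  = 1UL << 14;         /* assumed PAGE_SIZE */
	unsigned long page_mask  = ~(page_size - 1);  /* PAGE_MASK */
	unsigned long shmlba     = 1UL << 20;         /* assumed SHMLBA */

	/* Same expression as in arch_get_unmapped_area() above. */
	unsigned long align_mask = page_mask & (shmlba - 1);

	/* vm_unmapped_area() requires these bits to be clear in the address
	 * it picks, i.e. 1 MB alignment expressed at page granularity. */
	printf("align_mask = 0x%lx\n", align_mask);
	return 0;
}

With those assumed values the mask comes out to 0xfc000, so candidate addresses land on 1 MB boundaries, while 32-bit tasks (which skip the branch) keep plain page alignment.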
+
+asmlinkage long
+ia64_getpriority (int which, int who)
+{
+ long prio;
+
+ prio = sys_getpriority(which, who);
+ if (prio >= 0) {
+ force_successful_syscall_return();
+ prio = 20 - prio;
+ }
+ return prio;
+}
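
The generic sys_getpriority() reports the nice value with a bias of 20 so the result is never negative and cannot be confused with an error; the wrapper above undoes that bias (after marking the syscall successful) so ia64 userspace sees the raw nice value. A tiny worked example of the round trip, with a hypothetical biased_priority() standing in for the generic syscall:

#include <assert.h>

/* Hypothetical stand-in for what sys_getpriority() reports. */
static long biased_priority(int nice)
{
	return 20 - nice;
}

int main(void)
{
	int nice = 5;                        /* example nice value */
	long prio = biased_priority(nice);   /* generic syscall reports 15 */
	assert(20 - prio == nice);           /* the wrapper's 20 - prio restores 5 */
	return 0;
}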
+
+/* XXX obsolete, but leave it here until the old libc is gone... */
+asmlinkage unsigned long
+sys_getpagesize (void)
+{
+ return PAGE_SIZE;
+}
+
+asmlinkage unsigned long
+ia64_brk (unsigned long brk)
+{
+ unsigned long retval = sys_brk(brk);
+ force_successful_syscall_return();
+ return retval;
+}
+
+/*
+ * On IA-64, we return the two file descriptors in ret0 and ret1 (r8
+ * and r9) as this is faster than doing a copy_to_user().
+ */
+asmlinkage long
+sys_ia64_pipe (void)
+{
+ struct pt_regs *regs = task_pt_regs(current);
+ int fd[2];
+ int retval;
+
+ retval = do_pipe_flags(fd, 0);
+ if (retval)
+ goto out;
+ retval = fd[0];
+ regs->r9 = fd[1];
+ out:
+ return retval;
+}
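
Because the two descriptors come back in r8 (the ordinary return value) and r9, the userspace stub can fill in the caller's fd array itself instead of the kernel doing a copy_to_user(). A rough sketch of such a stub, where __ia64_raw_pipe() and __ia64_last_r9() are hypothetical placeholders for however the C library issues the syscall and captures r9 (they are not real kernel or libc interfaces):

/* Sketch only; both extern helpers are hypothetical placeholders. */
extern long __ia64_raw_pipe(void);   /* issues the pipe syscall, returns r8 */
extern long __ia64_last_r9(void);    /* retrieves r9 from the last syscall */

int sketch_pipe(int fildes[2])
{
	long r8 = __ia64_raw_pipe();
	if (r8 < 0)
		return -1;                       /* errno handling omitted */
	fildes[0] = (int) r8;                    /* first fd: plain return value */
	fildes[1] = (int) __ia64_last_r9();      /* second fd: picked out of r9 */
	return 0;
}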
+
+int ia64_mmap_check(unsigned long addr, unsigned long len,
+ unsigned long flags)
+{
+ unsigned long roff;
+
+ /*
+ * Don't permit mappings into unmapped space, the virtual page table
+ * of a region, or across a region boundary. Note: RGN_MAP_LIMIT is
+ * equal to 2^n-PAGE_SIZE (for some integer n <= 61) and len > 0.
+ */
+ roff = REGION_OFFSET(addr);
+ if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len)))
+ return -EINVAL;
+ return 0;
+}
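
The check is written as roff > RGN_MAP_LIMIT - len rather than roff + len > RGN_MAP_LIMIT so the comparison cannot wrap around for huge len values; len > RGN_MAP_LIMIT has already been rejected, so the subtraction is safe. A small self-contained illustration with assumed stand-in numbers:

/* Sketch only: rgn_map_limit is an assumed stand-in value, not the
 * real ia64 constant; the shape of the check is what matters. */
#include <stdio.h>

int main(void)
{
	unsigned long rgn_map_limit = 1UL << 20;     /* assumed limit */
	unsigned long roff = rgn_map_limit - 4096;   /* offset near the region end */
	unsigned long len  = 8192;                   /* would cross the boundary */

	if (len > rgn_map_limit || roff > rgn_map_limit - len)
		printf("rejected: mapping would cross the region boundary\n");
	else
		printf("accepted\n");
	return 0;
}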
+
+/*
+ * mmap2() is like mmap() except that the offset is expressed in units
+ * of PAGE_SIZE (instead of bytes). This allows mmap2() to map (pieces
+ * of) files that are larger than the address space of the CPU.
+ */
+asmlinkage unsigned long
+sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
+{
+ addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+ if (!IS_ERR((void *) addr))
+ force_successful_syscall_return();
+ return addr;
+}
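
Passing the offset in units of PAGE_SIZE instead of bytes lets a caller reach byte offsets far larger than a 32-bit byte offset could express, which is the point of the comment above. A quick illustration, assuming 16 KB pages (a common ia64 configuration, not something this patch fixes):

/* Sketch only: page_shift is an assumed value for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 14;           /* 16 KB pages, assumed */
	unsigned long pgoff = 0x300000UL;        /* example page offset */

	/* The kernel converts the page-unit offset back to bytes internally. */
	unsigned long long byte_off = (unsigned long long) pgoff << page_shift;
	printf("pgoff 0x%lx -> byte offset 0x%llx (%llu GiB)\n",
	       pgoff, byte_off, byte_off >> 30);
	return 0;
}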
+
+asmlinkage unsigned long
+sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, long off)
+{
+ if (offset_in_page(off) != 0)
+ return -EINVAL;
+
+ addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ if (!IS_ERR((void *) addr))
+ force_successful_syscall_return();
+ return addr;
+}
+
+asmlinkage unsigned long
+ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags,
+ unsigned long new_addr)
+{
+ addr = sys_mremap(addr, old_len, new_len, flags, new_addr);
+ if (!IS_ERR((void *) addr))
+ force_successful_syscall_return();
+ return addr;
+}
+
+#ifndef CONFIG_PCI
+
+asmlinkage long
+sys_pciconfig_read (unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len,
+ void *buf)
+{
+ return -ENOSYS;
+}
+
+asmlinkage long
+sys_pciconfig_write (unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len,
+ void *buf)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_PCI */