author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-01-20 14:01:31 -0300
---|---|---
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-01-20 14:01:31 -0300
commit | b4b7ff4b08e691656c9d77c758fc355833128ac0 (patch) |
tree | 82fcb00e6b918026dc9f2d1f05ed8eee83874cc0 /arch/parisc |
parent | 35acfa0fc609f2a2cd95cef4a6a9c3a5c38f1778 (diff) |
Linux-libre 4.4-gnu (tag: pck-4.4-gnu)
Diffstat (limited to 'arch/parisc')
31 files changed, 495 insertions, 214 deletions
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index c36546959..729f89163 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -108,6 +108,9 @@ config PGTABLE_LEVELS
 	default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
 	default 2
 
+config SYS_SUPPORTS_HUGETLBFS
+	def_bool y if PA20
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 2536965d0..1d109990a 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -67,7 +67,7 @@ static __inline__ void atomic_set(atomic_t *v, int i)
 
 static __inline__ int atomic_read(const atomic_t *v)
 {
-	return ACCESS_ONCE((v)->counter);
+	return READ_ONCE((v)->counter);
 }
 
 /* exported interface */
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 47f11c707..3d0e17bcc 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -7,20 +7,12 @@
 
 /*
- * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
- * 32-byte cachelines.  The default configuration is not for SMP anyway,
- * so if you're building for SMP, you should select the appropriate
- * processor type.  There is a potential livelock danger when running
- * a machine with this value set too small, but it's more probable you'll
- * just ruin performance.
+ * PA 2.0 processors have 64 and 128-byte L2 cachelines; PA 1.1 processors
+ * have 32-byte cachelines.  The L1 length appears to be 16 bytes but this
+ * is not clearly documented.
  */
-#ifdef CONFIG_PA20
-#define L1_CACHE_BYTES 64
-#define L1_CACHE_SHIFT 6
-#else
-#define L1_CACHE_BYTES 32
-#define L1_CACHE_SHIFT 5
-#endif
+#define L1_CACHE_BYTES 16
+#define L1_CACHE_SHIFT 4
 
 #ifndef __ASSEMBLY__
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index ec2df4bab..845272ce9 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -156,7 +156,6 @@ static inline void __kunmap_atomic(void *addr)
 
 #define kmap_atomic_prot(page, prot)	kmap_atomic(page)
 #define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
-#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
 
 #endif /* _PARISC_CACHEFLUSH_H */
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 94710cfc1..0448a2c8e 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -206,10 +206,10 @@ struct compat_ipc64_perm {
 
 struct compat_semid64_ds {
 	struct compat_ipc64_perm sem_perm;
-	compat_time_t sem_otime;
 	unsigned int __unused1;
-	compat_time_t sem_ctime;
+	compat_time_t sem_otime;
 	unsigned int __unused2;
+	compat_time_t sem_ctime;
 	compat_ulong_t sem_nsems;
 	compat_ulong_t __unused3;
 	compat_ulong_t __unused4;
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
new file mode 100644
index 000000000..7d56a9ccb
--- /dev/null
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -0,0 +1,85 @@
+#ifndef _ASM_PARISC64_HUGETLB_H
+#define _ASM_PARISC64_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					 unsigned long addr,
+					 unsigned long len) {
+	return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(struct file *file,
+			unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+					  unsigned long addr, unsigned long end,
+					  unsigned long floor,
+					  unsigned long ceiling)
+{
+	free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	pte_t old_pte = *ptep;
+	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	int changed = !pte_same(*ptep, pte);
+	if (changed) {
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+		flush_tlb_page(vma, addr);
+	}
+	return changed;
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#endif /* _ASM_PARISC64_HUGETLB_H */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 60d5d174d..80e742a1c 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -145,11 +145,22 @@ extern int npmem_ranges;
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SHIFT		22	/* 4MB (is this fixed?) */
+#define HPAGE_SHIFT		PMD_SHIFT /* fixed for transparent huge pages */
 #define HPAGE_SIZE      ((1UL) << HPAGE_SHIFT)
 #define HPAGE_MASK      (~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT	20 /* 20 = 1MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
+#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT	22 /* 22 = 4MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
+#else
+# define REAL_HPAGE_SHIFT	24 /* 24 = 16MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
 #endif
+#endif /* CONFIG_HUGETLB_PAGE */
 
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 3edbb9fc9..f2fd327dc 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 					PxD_FLAG_VALID |
 					PxD_FLAG_ATTACHED)
 			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
-		/* The first pmd entry also is marked with _PAGE_GATEWAY as
+		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
 		 * a signal that this pmd may not be freed */
 		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
 #endif
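The prepare_hugepage_range() helper in the new hugetlb.h above is pure mask arithmetic: a range is accepted only if both its start and its length are multiples of the huge page size. A minimal user-space sketch of the same check — the 2 MB HPAGE_SHIFT here is illustrative, since on parisc HPAGE_SHIFT follows PMD_SHIFT:

```c
#include <stdio.h>

/* illustrative values; the kernel derives these from PMD_SHIFT */
#define HPAGE_SHIFT 21
#define HPAGE_SIZE  (1UL << HPAGE_SHIFT)
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))

static int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)		/* length not a multiple of HPAGE_SIZE */
		return -1;
	if (addr & ~HPAGE_MASK)		/* start not huge-page aligned */
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", prepare_hugepage_range(0x40000000UL, 2 * HPAGE_SIZE)); /* 0: ok */
	printf("%d\n", prepare_hugepage_range(0x40001000UL, HPAGE_SIZE));     /* -1: misaligned */
	return 0;
}
```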
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index f93c4a4e6..291cee28c 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
 
 /* This is the size of the initially mapped kernel memory */
-#define KERNEL_INITIAL_ORDER	24	/* 0 to 1<<24 = 16MB */
+#ifdef CONFIG_64BIT
+#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
+#else
+#define KERNEL_INITIAL_ORDER	24	/* 1<<24 = 16MB */
+#endif
 #define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)
 
 #if CONFIG_PGTABLE_LEVELS == 3
@@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define _PAGE_NO_CACHE_BIT	24   /* (0x080) Uncached Page (U bit) */
 #define _PAGE_ACCESSED_BIT	23   /* (0x100) Software: Page Accessed */
 #define _PAGE_PRESENT_BIT	22   /* (0x200) Software: translation valid */
-/* bit 21 was formerly the FLUSH bit but is now unused */
+#define _PAGE_HPAGE_BIT		21   /* (0x400) Software: Huge Page */
 #define _PAGE_USER_BIT		20   /* (0x800) Software: User accessible page */
 
 /* N.B. The bits are defined in terms of a 32 bit word above, so the */
@@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define _PAGE_NO_CACHE	(1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
 #define _PAGE_ACCESSED	(1 << xlate_pabit(_PAGE_ACCESSED_BIT))
 #define _PAGE_PRESENT	(1 << xlate_pabit(_PAGE_PRESENT_BIT))
+#define _PAGE_HUGE	(1 << xlate_pabit(_PAGE_HPAGE_BIT))
 #define _PAGE_USER	(1 << xlate_pabit(_PAGE_USER_BIT))
 
 #define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
@@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
 #define PxD_FLAG_MASK     (0xf)
 #define PxD_FLAG_SHIFT    (4)
-#define PxD_VALUE_SHIFT   (8)	/* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
+#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
 
 #ifndef __ASSEMBLY__
@@ -363,6 +368,19 @@ static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return
 static inline pte_t pte_mkspecial(pte_t pte) {	return pte; }
 
 /*
+ * Huge pte definitions.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define pte_huge(pte)   (pte_val(pte) & _PAGE_HUGE)
+#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
+			 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
+#else
+#define pte_huge(pte)   (0)
+#define pte_mkhuge(pte) (pte)
+#endif
+
+
+/*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
@@ -410,8 +428,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 /* Find an entry in the second-level page table.. */
 
 #if CONFIG_PGTABLE_LEVELS == 3
+#define pmd_index(addr)         (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 #define pmd_offset(dir,address) \
-((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
+((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
 #else
 #define pmd_offset(dir,addr) ((pmd_t *) dir)
 #endif
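The new _PAGE_HPAGE_BIT uses PA-RISC's MSB-first bit numbering; xlate_pabit(), defined earlier in pgtable.h (not shown in this hunk) as `(31 - x)`, converts it to a conventional shift. A quick check that bit 21 really is the 0x400 noted in the comment:

```c
#include <assert.h>

/* from arch/parisc/include/asm/pgtable.h; PA numbers bits from the MSB */
#define xlate_pabit(x)   (31 - (x))
#define _PAGE_HPAGE_BIT  21
#define _PAGE_HUGE       (1 << xlate_pabit(_PAGE_HPAGE_BIT))

int main(void)
{
	assert(_PAGE_HUGE == 0x400);	/* matches the "(0x400)" comment in the diff */
	return 0;
}
```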
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 54adb60c0..7e759ecb1 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack);
  */
 typedef unsigned int elf_caddr_t;
 
-#define start_thread_som(regs, new_pc, new_sp) do {	\
-	unsigned long *sp = (unsigned long *)new_sp;	\
-	__u32 spaceid = (__u32)current->mm->context;	\
-	unsigned long pc = (unsigned long)new_pc;	\
-	/* offset pc for priv. level */			\
-	pc |= 3;					\
-							\
-	regs->iasq[0] = spaceid;			\
-	regs->iasq[1] = spaceid;			\
-	regs->iaoq[0] = pc;				\
-	regs->iaoq[1] = pc + 4;				\
-	regs->sr[2] = LINUX_GATEWAY_SPACE;		\
-	regs->sr[3] = 0xffff;				\
-	regs->sr[4] = spaceid;				\
-	regs->sr[5] = spaceid;				\
-	regs->sr[6] = spaceid;				\
-	regs->sr[7] = spaceid;				\
-	regs->gr[ 0] = USER_PSW;			\
-	regs->gr[30] = ((new_sp)+63)&~63;		\
-	regs->gr[31] = pc;				\
-							\
-	get_user(regs->gr[26],&sp[0]);			\
-	get_user(regs->gr[25],&sp[-1]);			\
-	get_user(regs->gr[24],&sp[-2]);			\
-	get_user(regs->gr[23],&sp[-3]);			\
-} while(0)
-
 /* The ELF abi wants things done a "wee bit" differently than
  * som does.  Supporting this behavior here avoids
  * having our own version of create_elf_tables.
diff --git a/arch/parisc/include/uapi/asm/ipcbuf.h b/arch/parisc/include/uapi/asm/ipcbuf.h
index bd956c425..790c4119f 100644
--- a/arch/parisc/include/uapi/asm/ipcbuf.h
+++ b/arch/parisc/include/uapi/asm/ipcbuf.h
@@ -1,6 +1,9 @@
 #ifndef __PARISC_IPCBUF_H__
 #define __PARISC_IPCBUF_H__
 
+#include <asm/bitsperlong.h>
+#include <linux/posix_types.h>
+
 /*
  * The ipc64_perm structure for PA-RISC is almost identical to
  * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the kernel.
@@ -10,16 +13,18 @@
 
 struct ipc64_perm
 {
-	key_t		key;
-	uid_t		uid;
-	gid_t		gid;
-	uid_t		cuid;
-	gid_t		cgid;
+	__kernel_key_t		key;
+	__kernel_uid_t		uid;
+	__kernel_gid_t		gid;
+	__kernel_uid_t		cuid;
+	__kernel_gid_t		cgid;
+#if __BITS_PER_LONG != 64
 	unsigned short int	__pad1;
-	mode_t			mode;
+#endif
+	__kernel_mode_t		mode;
 	unsigned short int	__pad2;
 	unsigned short int	seq;
-	unsigned int		__pad3;
+	unsigned int		__pad3;
 	unsigned long long int __unused1;
 	unsigned long long int __unused2;
 };
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index 294d251ca..dd4d1876a 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -31,6 +31,9 @@
 
 #define MCL_CURRENT	1		/* lock all current mappings */
 #define MCL_FUTURE	2		/* lock all future mappings */
+#define MCL_ONFAULT	4		/* lock all pages that are faulted in */
+
+#define MLOCK_ONFAULT	0x01		/* Lock pages in range after they are faulted in, do not prefault */
 
 #define MADV_NORMAL     0               /* no further special treatment */
 #define MADV_RANDOM     1               /* expect random page references */
@@ -46,16 +49,6 @@
 #define MADV_DONTFORK	10		/* don't inherit across fork */
 #define MADV_DOFORK	11		/* do inherit across fork */
 
-/* The range 12-64 is reserved for page size specification. */
-#define MADV_4K_PAGES   12              /* Use 4K pages  */
-#define MADV_16K_PAGES  14              /* Use 16K pages */
-#define MADV_64K_PAGES  16              /* Use 64K pages */
-#define MADV_256K_PAGES 18              /* Use 256K pages */
-#define MADV_1M_PAGES   20              /* Use 1 Megabyte pages */
-#define MADV_4M_PAGES   22              /* Use 4 Megabyte pages */
-#define MADV_16M_PAGES  24              /* Use 16 Megabyte pages */
-#define MADV_64M_PAGES  26              /* Use 64 Megabyte pages */
-
 #define MADV_MERGEABLE   65		/* KSM may merge identical pages */
 #define MADV_UNMERGEABLE 66		/* KSM may not merge identical pages */
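MCL_ONFAULT and MLOCK_ONFAULT above pair with the mlock2() syscall wired up in unistd.h and syscall_table.S further down. A usage sketch invoked via syscall(2) — SYS_mlock2 needs reasonably recent libc headers, and on parisc the number is __NR_Linux + 345 per the unistd.h hunk below:

```c
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MLOCK_ONFAULT
#define MLOCK_ONFAULT 0x01	/* mirrors the uapi value added above */
#endif

int main(void)
{
	size_t len = 4 << 20;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* lock pages only as they fault in, instead of prefaulting all 4 MB */
	if (syscall(SYS_mlock2, buf, len, MLOCK_ONFAULT) != 0)
		perror("mlock2");

	memset(buf, 1, len);	/* each fault locks the page it brings in */

	munmap(buf, len);
	return 0;
}
```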
diff --git a/arch/parisc/include/uapi/asm/msgbuf.h b/arch/parisc/include/uapi/asm/msgbuf.h
index 342138983..2e83ac758 100644
--- a/arch/parisc/include/uapi/asm/msgbuf.h
+++ b/arch/parisc/include/uapi/asm/msgbuf.h
@@ -27,13 +27,13 @@ struct msqid64_ds {
 	unsigned int   __pad3;
 #endif
 	__kernel_time_t msg_ctime;	/* last change time */
-	unsigned int  msg_cbytes;	/* current number of bytes on queue */
-	unsigned int  msg_qnum;		/* number of messages in queue */
-	unsigned int  msg_qbytes;	/* max number of bytes on queue */
+	unsigned long msg_cbytes;	/* current number of bytes on queue */
+	unsigned long msg_qnum;		/* number of messages in queue */
+	unsigned long msg_qbytes;	/* max number of bytes on queue */
 	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
 	__kernel_pid_t msg_lrpid;	/* last receive pid */
-	unsigned int  __unused1;
-	unsigned int  __unused2;
+	unsigned long __unused1;
+	unsigned long __unused2;
 };
 
 #endif /* _PARISC_MSGBUF_H */
diff --git a/arch/parisc/include/uapi/asm/posix_types.h b/arch/parisc/include/uapi/asm/posix_types.h
index b9344256f..f3b5f70b9 100644
--- a/arch/parisc/include/uapi/asm/posix_types.h
+++ b/arch/parisc/include/uapi/asm/posix_types.h
@@ -7,8 +7,10 @@
  * assume GCC is being used.
  */
 
+#ifndef __LP64__
 typedef unsigned short		__kernel_mode_t;
 #define __kernel_mode_t __kernel_mode_t
+#endif
 
 typedef unsigned short		__kernel_ipc_pid_t;
 #define __kernel_ipc_pid_t __kernel_ipc_pid_t
diff --git a/arch/parisc/include/uapi/asm/sembuf.h b/arch/parisc/include/uapi/asm/sembuf.h
index f01d89e30..c20971bf5 100644
--- a/arch/parisc/include/uapi/asm/sembuf.h
+++ b/arch/parisc/include/uapi/asm/sembuf.h
@@ -23,9 +23,9 @@ struct semid64_ds {
 	unsigned int	__pad2;
 #endif
 	__kernel_time_t	sem_ctime;	/* last change time */
-	unsigned int	sem_nsems;	/* no. of semaphores in array */
-	unsigned int	__unused1;
-	unsigned int	__unused2;
+	unsigned long	sem_nsems;	/* no. of semaphores in array */
+	unsigned long	__unused1;
+	unsigned long	__unused2;
 };
 
 #endif /* _PARISC_SEMBUF_H */
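The conditional __pad1 in ipcbuf.h and the 64-bit __kernel_mode_t change in posix_types.h are two halves of one layout fix: both ABIs must yield the 48-byte ipc64_perm that mem_init() asserts at the end of this commit (the 64-bit mode_t widens to 32 bits and absorbs the 16 bits that __pad1 supplies on 32-bit). A sketch with fixed-width stand-ins for the __kernel_* types:

```c
#include <assert.h>
#include <stdint.h>

/* 64-bit flavour: no __pad1, 32-bit mode */
struct ipc64_perm_64 {
	int32_t  key;
	uint32_t uid, gid, cuid, cgid;
	uint32_t mode;
	uint16_t pad2, seq;
	uint32_t pad3;
	uint64_t unused1, unused2;
};

/* 32-bit flavour: 16-bit pad + 16-bit mode in the same slot */
struct ipc64_perm_32 {
	int32_t  key;
	uint32_t uid, gid, cuid, cgid;
	uint16_t pad1;
	uint16_t mode;
	uint16_t pad2, seq;
	uint32_t pad3;
	uint64_t unused1, unused2;
};

int main(void)
{
	/* both layouts come out at the 48 bytes mem_init() checks */
	static_assert(sizeof(struct ipc64_perm_64) == 48, "64-bit layout");
	static_assert(sizeof(struct ipc64_perm_32) == 48, "32-bit layout");
	return 0;
}
```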
diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h
index 8496c3856..750e13e77 100644
--- a/arch/parisc/include/uapi/asm/shmbuf.h
+++ b/arch/parisc/include/uapi/asm/shmbuf.h
@@ -30,12 +30,12 @@ struct shmid64_ds {
 #if __BITS_PER_LONG != 64
 	unsigned int		__pad4;
 #endif
-	size_t			shm_segsz;	/* size of segment (bytes) */
+	__kernel_size_t		shm_segsz;	/* size of segment (bytes) */
 	__kernel_pid_t		shm_cpid;	/* pid of creator */
 	__kernel_pid_t		shm_lpid;	/* pid of last operator */
-	unsigned int		shm_nattch;	/* no. of current attaches */
-	unsigned int		__unused1;
-	unsigned int		__unused2;
+	unsigned long		shm_nattch;	/* no. of current attaches */
+	unsigned long		__unused1;
+	unsigned long		__unused2;
 };
 
 struct shminfo64 {
diff --git a/arch/parisc/include/uapi/asm/stat.h b/arch/parisc/include/uapi/asm/stat.h
index b606b366d..3310d2a49 100644
--- a/arch/parisc/include/uapi/asm/stat.h
+++ b/arch/parisc/include/uapi/asm/stat.h
@@ -36,37 +36,6 @@ struct stat {
 
 #define STAT_HAVE_NSEC
 
-struct hpux_stat64 {
-	unsigned int	st_dev;		/* dev_t is 32 bits on parisc */
-	unsigned int	st_ino;		/* 32 bits */
-	unsigned short	st_mode;	/* 16 bits */
-	unsigned short	st_nlink;	/* 16 bits */
-	unsigned short	st_reserved1;	/* old st_uid */
-	unsigned short	st_reserved2;	/* old st_gid */
-	unsigned int	st_rdev;
-	signed long long st_size;
-	signed int	st_atime;
-	unsigned int	st_spare1;
-	signed int	st_mtime;
-	unsigned int	st_spare2;
-	signed int	st_ctime;
-	unsigned int	st_spare3;
-	int		st_blksize;
-	unsigned long long st_blocks;
-	unsigned int	__unused1;	/* ACL stuff */
-	unsigned int	__unused2;	/* network */
-	unsigned int	__unused3;	/* network */
-	unsigned int	__unused4;	/* cnodes */
-	unsigned short	__unused5;	/* netsite */
-	short		st_fstype;
-	unsigned int	st_realdev;
-	unsigned short	st_basemode;
-	unsigned short	st_spareshort;
-	unsigned int	st_uid;
-	unsigned int	st_gid;
-	unsigned int	st_spare4[3];
-};
-
 /* This is the struct that 32-bit userspace applications are expecting.
  * How 64-bit apps are going to be compiled, I have no idea.  But at least
  * this way, we don't have a wrapper in the kernel.
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 2e639d760..35bdccbb2 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -358,8 +358,11 @@
 #define __NR_memfd_create	(__NR_Linux + 340)
 #define __NR_bpf		(__NR_Linux + 341)
 #define __NR_execveat		(__NR_Linux + 342)
+#define __NR_membarrier		(__NR_Linux + 343)
+#define __NR_userfaultfd	(__NR_Linux + 344)
+#define __NR_mlock2		(__NR_Linux + 345)
 
-#define __NR_Linux_syscalls	(__NR_execveat + 1)
+#define __NR_Linux_syscalls	(__NR_mlock2 + 1)
 
 
 #define __IGNORE_select		/* newselect */
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 59001cea1..d2f62570a 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -290,6 +290,14 @@ int main(void)
 	DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
 	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
 	BLANK();
+	/* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
+	 * and kernel data on physical huge pages */
+#ifdef CONFIG_HUGETLB_PAGE
+	DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#else
+	DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
+#endif
+	BLANK();
 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
 	DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
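The HUGEPAGE_SIZE constant defined in asm-offsets.c reaches vmlinux.lds.S through the usual asm-offsets trick: DEFINE() emits a marker line into the generated assembly, which Kbuild scrapes into asm-offsets.h. A reduced sketch of the mechanism as it looked in include/linux/kbuild.h of this era (GNU C inline asm; the HUGEPAGE_SIZE value is illustrative):

```c
/* Each DEFINE() plants "->SYMBOL value" in the compiler's .s output;
 * a sed script turns those markers into #define lines. */
#define DEFINE(sym, val) \
	__asm__ volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() __asm__ volatile("\n->" : : )

void emit_offsets(void)
{
	DEFINE(HUGEPAGE_SIZE, 1UL << 20);	/* 1 MB, as with REAL_HPAGE_SHIFT = 20 */
	BLANK();
}
```

Compiling this with `gcc -S` and grepping the output for `->` shows the marker lines the build system consumes.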
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index c5ef4081b..623496c11 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -502,21 +502,38 @@
 	STREG		\pte,0(\ptp)
 	.endm
 
+	/* We have (depending on the page size):
+	 * - 38 to 52-bit Physical Page Number
+	 * - 12 to 26-bit page offset
+	 */
 	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
 	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
-	#define PAGE_ADD_SHIFT	(PAGE_SHIFT-12)
+	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
+	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
 
 	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-	.macro		convert_for_tlb_insert20 pte
+	.macro		convert_for_tlb_insert20 pte,tmp
+#ifdef CONFIG_HUGETLB_PAGE
+	copy		\pte,\tmp
+	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+
+	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
+				(63-58)+PAGE_ADD_SHIFT,\pte
+	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
+	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
+				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
+#else /* Huge pages disabled */
 	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
 				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
 	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
 				(63-58)+PAGE_ADD_SHIFT,\pte
+#endif
 	.endm
 
 	/* Convert the pte and prot to tlb insertion values.  How
 	 * this happens is quite subtle, read below */
-	.macro		make_insert_tlb	spc,pte,prot
+	.macro		make_insert_tlb	spc,pte,prot,tmp
 	space_to_prot	\spc \prot	/* create prot id from space */
 	/* The following is the real subtlety.  This is depositing
 	 * T <-> _PAGE_REFTRAP
@@ -553,7 +570,7 @@
 	depdi		1,12,1,\prot
 
 	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-	convert_for_tlb_insert20 \pte
+	convert_for_tlb_insert20 \pte \tmp
 	.endm
 
 	/* Identical macro to make_insert_tlb above, except it
@@ -646,17 +663,12 @@
 
 	/*
-	 * Align fault_vector_20 on 4K boundary so that both
-	 * fault_vector_11 and fault_vector_20 are on the
-	 * same page. This is only necessary as long as we
-	 * write protect the kernel text, which we may stop
-	 * doing once we use large page translations to cover
-	 * the static part of the kernel address space.
+	 * Fault_vectors are architecturally required to be aligned on a 2K
+	 * boundary
 	 */
 
 	.text
-
-	.align 4096
+	.align 2048
 
 ENTRY(fault_vector_20)
 	/* First vector is invalid (0) */
@@ -1147,7 +1159,7 @@ dtlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt          pte,prot
 
@@ -1173,7 +1185,7 @@ nadtlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt          pte,prot
 
@@ -1267,7 +1279,7 @@ dtlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1295,7 +1307,7 @@ nadtlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1404,7 +1416,7 @@ itlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	iitlbt          pte,prot
 
@@ -1428,7 +1440,7 @@ naitlb_miss_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	iitlbt          pte,prot
 
@@ -1514,7 +1526,7 @@ itlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1534,7 +1546,7 @@ naitlb_miss_20:
 	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
 	update_accessed	ptp,pte,t0,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
 
@@ -1566,7 +1578,7 @@ dbit_trap_20w:
 	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	idtlbt          pte,prot
 
@@ -1610,7 +1622,7 @@ dbit_trap_20:
 	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
 	update_dirty	ptp,pte,t1
 
-	make_insert_tlb	spc,pte,prot
+	make_insert_tlb	spc,pte,prot,t1
 
 	f_extend	pte,t1
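convert_for_tlb_insert20 now takes a scratch register so it can test _PAGE_HPAGE_BIT and deposit the wider huge-page size encoding. A rough C model of the data flow — the encoding values, the software-bit position, and the stand-in PFN shift below are placeholders, not the architectural ones:

```c
#include <stdint.h>

#define PAGE_SHIFT           12
#define REAL_HPAGE_SHIFT     20
#define PAGE_ADD_SHIFT       (PAGE_SHIFT - 12)
#define PAGE_ADD_HUGE_SHIFT  (REAL_HPAGE_SHIFT - 12)
#define PFN_SHIFT            12		/* stand-in for ASM_PFN_PTE_SHIFT */
#define _PAGE_HUGE           (1UL << 10)	/* illustrative bit position */
#define ENC_DEFAULT          0x00UL		/* placeholder 4k encoding */
#define ENC_HUGE             0x1aUL		/* placeholder 1M encoding */

static uint64_t convert_for_tlb_insert(uint64_t pte)
{
	int huge = (pte & _PAGE_HUGE) != 0;
	int add_shift = huge ? PAGE_ADD_HUGE_SHIFT : PAGE_ADD_SHIFT;
	int enc_width = (63 - 58) + add_shift;	/* deposit width, as in the macro */

	/* extrd,u: isolate the PFN and shift it into page-address position */
	uint64_t val = (pte >> PFN_SHIFT) << PAGE_SHIFT;

	/* depdi: overwrite the low enc_width bits with the size encoding */
	val &= ~((1ULL << enc_width) - 1);
	val |= huge ? ENC_HUGE : ENC_DEFAULT;
	return val;
}
```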
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index e7d64527a..75aa0db9f 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -69,7 +69,7 @@ $bss_loop:
 	stw,ma          %arg2,4(%r1)
 	stw,ma          %arg3,4(%r1)
 
-	/* Initialize startup VM. Just map first 8/16 MB of memory */
+	/* Initialize startup VM. Just map first 16/32 MB of memory */
 	load32          PA(swapper_pg_dir),%r4
 	mtctl           %r4,%cr24       /* Initialize kernel root pointer */
 	mtctl           %r4,%cr25       /* Initialize user root pointer */
@@ -107,7 +107,7 @@ $bss_loop:
 	/* Now initialize the PTEs themselves.  We use RWX for
 	 * everything ... it will get remapped correctly later */
 	ldo             0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
-	ldi             (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
+	load32          (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
 	load32          PA(pg0),%r1
 
 $pgt_fill_loop:
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 64f2764a8..c99f3dde4 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
 }
 
 
-void __init pcibios_init_bus(struct pci_bus *bus)
-{
-	struct pci_dev *dev = bus->self;
-	unsigned short bridge_ctl;
-
-	/* We deal only with pci controllers and pci-pci bridges. */
-	if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
-		return;
-
-	/* PCI-PCI bridge - set the cache line and default latency
-	   (32) for primary and secondary buses. */
-	pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
-
-	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
-	bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
-	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
-}
-
 /*
  * pcibios align resources() is called every time generic PCI code
  * wants to generate a new address.  The process of looking for
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 72a3c658a..f7ea626e2 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p)
 	printk(KERN_INFO "The 32-bit Kernel has started...\n");
 #endif
 
-	printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024));
+	printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
+		(int)(PAGE_SIZE / 1024));
+#ifdef CONFIG_HUGETLB_PAGE
+	printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
+		 1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
+#else
+	printk(KERN_CONT "disabled");
+#endif
+	printk(KERN_CONT ".\n");
+
 
 	pdc_console_init();
 
@@ -377,6 +386,7 @@ arch_initcall(parisc_init);
 void start_parisc(void)
 {
 	extern void start_kernel(void);
+	extern void early_trap_init(void);
 
 	int ret, cpunum;
 	struct pdc_coproc_cfg coproc_cfg;
@@ -397,6 +407,8 @@ void start_parisc(void)
 		panic("must have an fpu to boot linux");
 	}
 
+	early_trap_init(); /* initialize checksum of fault_vector */
+
 	start_kernel();
 	// not reached
 }
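The head.S hunk above swaps ldi for load32 when loading the PFN count because ldi (an ldo form) carries a 14-bit signed immediate, range -8192..8191: with KERNEL_INITIAL_ORDER raised to 25 on 64-bit, the count becomes 8192 and no longer fits, while load32 synthesizes the value from two instructions. The arithmetic:

```c
#include <assert.h>

int main(void)
{
	int order_32 = 24, order_64 = 25, page_shift = 12;

	assert((1 << (order_32 - page_shift)) == 4096);	/* fits ldi's im14 field */
	assert((1 << (order_64 - page_shift)) == 8192);	/* one past im14's max */
	return 0;
}
```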
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index dc1ea796f..2264f68f3 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -435,6 +435,55 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall)
 		regs->gr[28]);
 }
 
+/*
+ * Check how the syscall number gets loaded into %r20 within
+ * the delay branch in userspace and adjust as needed.
+ */
+static void check_syscallno_in_delay_branch(struct pt_regs *regs)
+{
+	u32 opcode, source_reg;
+	u32 __user *uaddr;
+	int err;
+
+	/* Usually we don't have to restore %r20 (the system call number)
+	 * because it gets loaded in the delay slot of the branch external
+	 * instruction via the ldi instruction.
+	 * In some cases a register-to-register copy instruction might have
+	 * been used instead, in which case we need to copy the syscall
+	 * number into the source register before returning to userspace.
+	 */
+
+	/* A syscall is just a branch, so all we have to do is fiddle the
+	 * return pointer so that the ble instruction gets executed again.
+	 */
+	regs->gr[31] -= 8; /* delayed branching */
+
+	/* Get assembler opcode of code in delay branch */
+	uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4);
+	err = get_user(opcode, uaddr);
+	if (err)
+		return;
+
+	/* Check if delay branch uses "ldi int,%r20" */
+	if ((opcode & 0xffff0000) == 0x34140000)
+		return;	/* everything ok, just return */
+
+	/* Check if delay branch uses "nop" */
+	if (opcode == INSN_NOP)
+		return;
+
+	/* Check if delay branch uses "copy %rX,%r20" */
+	if ((opcode & 0xffe0ffff) == 0x08000254) {
+		source_reg = (opcode >> 16) & 31;
+		regs->gr[source_reg] = regs->gr[20];
+		return;
+	}
+
+	pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n",
+		current->comm, task_pid_nr(current), opcode);
+}
+
 static inline void
 syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
 {
@@ -457,10 +506,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
 		}
 		/* fallthrough */
 	case -ERESTARTNOINTR:
-		/* A syscall is just a branch, so all
-		 * we have to do is fiddle the return pointer.
-		 */
-		regs->gr[31] -= 8; /* delayed branching */
+		check_syscallno_in_delay_branch(regs);
 		break;
 	}
 }
@@ -510,15 +556,9 @@ insert_restart_trampoline(struct pt_regs *regs)
 	}
 	case -ERESTARTNOHAND:
 	case -ERESTARTSYS:
-	case -ERESTARTNOINTR: {
-		/* Hooray for delayed branching.  We don't
-		 * have to restore %r20 (the system call
-		 * number) because it gets loaded in the delay
-		 * slot of the branch external instruction.
-		 */
-		regs->gr[31] -= 8;
+	case -ERESTARTNOINTR:
+		check_syscallno_in_delay_branch(regs);
 		return;
-	}
 	default:
 		break;
 	}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 0b8d26d3b..3fbd7252a 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -369,7 +369,7 @@ tracesys_exit:
 	ldo     -16(%r30),%r29		/* Reference param save area */
 #endif
 	ldo     TASK_REGS(%r1),%r26
-	bl	do_syscall_trace_exit,%r2
+	BL	do_syscall_trace_exit,%r2
 	STREG   %r28,TASK_PT_GR28(%r1)  /* save return value now */
 	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
 	LDREG	TI_TASK(%r1), %r1
@@ -390,7 +390,7 @@ tracesys_sigexit:
 #ifdef CONFIG_64BIT
 	ldo     -16(%r30),%r29		/* Reference param save area */
 #endif
-	bl	do_syscall_trace_exit,%r2
+	BL	do_syscall_trace_exit,%r2
 	ldo     TASK_REGS(%r1),%r26
 
 	ldil	L%syscall_exit_rfi,%r1
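check_syscallno_in_delay_branch() classifies whatever instruction sits in the ble delay slot purely by opcode masks. A standalone sketch using the masks from the hunk above — INSN_NOP is parisc's canonical `or %r0,%r0,%r0` nop, and the sample opcodes are constructed from those same masks, so treat the immediate bits as illustrative:

```c
#include <stdio.h>
#include <stdint.h>

#define INSN_NOP 0x08000240u	/* or %r0,%r0,%r0 */

static void classify(uint32_t opcode)
{
	if ((opcode & 0xffff0000u) == 0x34140000u)
		printf("ldi imm,%%r20   (nothing to fix)\n");
	else if (opcode == INSN_NOP)
		printf("nop             (nothing to fix)\n");
	else if ((opcode & 0xffe0ffffu) == 0x08000254u)
		printf("copy %%r%u,%%r20 (restore source register)\n",
		       (unsigned)((opcode >> 16) & 31));
	else
		printf("unexpected opcode 0x%08x\n", opcode);
}

int main(void)
{
	classify(0x34140100u);	/* matches the ldi mask; immediate bits illustrative */
	classify(INSN_NOP);
	classify(0x081a0254u);	/* copy %r26,%r20, per the mask/shift above */
	classify(0xdeadbeefu);	/* anything else gets reported */
	return 0;
}
```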
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 8eefb12d1..d4ffcfbc9 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -438,6 +438,9 @@
 	ENTRY_SAME(memfd_create)	/* 340 */
 	ENTRY_SAME(bpf)
 	ENTRY_COMP(execveat)
+	ENTRY_SAME(membarrier)
+	ENTRY_SAME(userfaultfd)
+	ENTRY_SAME(mlock2)		/* 345 */
 
 	.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index b99b39f1d..553b09855 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 }
 
 
-int __init check_ivt(void *iva)
+void __init initialize_ivt(const void *iva)
 {
 	extern u32 os_hpmc_size;
 	extern const u32 os_hpmc[];
@@ -818,8 +818,8 @@
 	u32 *hpmcp;
 	u32 length;
 
-	if (strcmp((char *)iva, "cows can fly"))
-		return -1;
+	if (strcmp((const char *)iva, "cows can fly"))
+		panic("IVT invalid");
 
 	ivap = (u32 *)iva;
 
@@ -839,28 +839,23 @@
 		check += ivap[i];
 
 	ivap[5] = -check;
-
-	return 0;
 }
 
-#ifndef CONFIG_64BIT
-extern const void fault_vector_11;
-#endif
-extern const void fault_vector_20;
 
-void __init trap_init(void)
+/* early_trap_init() is called before we set up kernel mappings and
+ * write-protect the kernel */
+void __init early_trap_init(void)
 {
-	void *iva;
+	extern const void fault_vector_20;
 
-	if (boot_cpu_data.cpu_type >= pcxu)
-		iva = (void *) &fault_vector_20;
-	else
-#ifdef CONFIG_64BIT
-		panic("Can't boot 64-bit OS on PA1.1 processor!");
-#else
-		iva = (void *) &fault_vector_11;
+#ifndef CONFIG_64BIT
+	extern const void fault_vector_11;
+	initialize_ivt(&fault_vector_11);
 #endif
 
-	if (check_ivt(iva))
-		panic("IVT invalid");
+	initialize_ivt(&fault_vector_20);
+}
+
+void __init trap_init(void)
+{
 }
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 0dacc5ca5..308f29081 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -60,7 +60,7 @@ SECTIONS
 		EXIT_DATA
 	}
 	PERCPU_SECTION(8)
-	. = ALIGN(PAGE_SIZE);
+	. = ALIGN(HUGEPAGE_SIZE);
 	__init_end = .;
 	/* freed after init ends here */
 
@@ -116,7 +116,7 @@ SECTIONS
 	 * that we can properly leave these
 	 * as writable
 	 */
-	. = ALIGN(PAGE_SIZE);
+	. = ALIGN(HUGEPAGE_SIZE);
 	data_start = .;
 
 	EXCEPTION_TABLE(8)
@@ -135,8 +135,11 @@ SECTIONS
 	_edata = .;
 
 	/* BSS */
-	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
+	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
+
+	/* bootmap is allocated in setup_bootmem() directly behind bss. */
 
+	. = ALIGN(HUGEPAGE_SIZE);
 	_end = . ;
 
 	STABS_DEBUG
diff --git a/arch/parisc/mm/Makefile b/arch/parisc/mm/Makefile
index 758ceefb3..134393de6 100644
--- a/arch/parisc/mm/Makefile
+++ b/arch/parisc/mm/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-y	 := init.o fault.o ioremap.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
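initialize_ivt() preserves the fault vector's self-checksum property: after slot 5 is patched, the IVT words sum to zero modulo 2^32. A sketch of the scheme over an 8-word table — the real code checksums the actual IVT/HPMC region, and zeroing slot 5 before summing is this sketch's simplification:

```c
#include <assert.h>
#include <stdint.h>

static void set_ivt_checksum(uint32_t *ivap, unsigned len)
{
	uint32_t check = 0;
	unsigned i;

	ivap[5] = 0;			/* slot 5 is the checksum word */
	for (i = 0; i < len; i++)
		check += ivap[i];
	ivap[5] = -check;		/* words now sum to 0 (mod 2^32) */
}

int main(void)
{
	uint32_t ivt[8] = { 1, 2, 3, 4, 5, 0, 7, 8 };
	uint32_t sum = 0;
	unsigned i;

	set_ivt_checksum(ivt, 8);
	for (i = 0; i < 8; i++)
		sum += ivt[i];
	assert(sum == 0);
	return 0;
}
```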
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
new file mode 100644
index 000000000..f6fdc77a7
--- /dev/null
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -0,0 +1,161 @@
+/*
+ * PARISC64 Huge TLB page support.
+ *
+ * This parisc implementation is heavily based on the SPARC and x86 code.
+ *
+ * Copyright (C) 2015 Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/sysctl.h>
+
+#include <asm/mman.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct hstate *h = hstate_file(file);
+
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		if (prepare_hugepage_range(file, addr, len))
+			return -EINVAL;
+
+	if (addr)
+		addr = ALIGN(addr, huge_page_size(h));
+
+	/* we need to make sure the colouring is OK */
+	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+			unsigned long addr, unsigned long sz)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte = NULL;
+
+	/* We must align the address, because our caller will run
+	 * set_huge_pte_at() on whatever we return, which writes out
+	 * all of the sub-ptes for the hugepage range.  So we have
+	 * to give it the first such sub-pte.
+	 */
+	addr &= HPAGE_MASK;
+
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (pud) {
+		pmd = pmd_alloc(mm, pud, addr);
+		if (pmd)
+			pte = pte_alloc_map(mm, NULL, pmd, addr);
+	}
+	return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte = NULL;
+
+	addr &= HPAGE_MASK;
+
+	pgd = pgd_offset(mm, addr);
+	if (!pgd_none(*pgd)) {
+		pud = pud_offset(pgd, addr);
+		if (!pud_none(*pud)) {
+			pmd = pmd_offset(pud, addr);
+			if (!pmd_none(*pmd))
+				pte = pte_offset_map(pmd, addr);
+		}
+	}
+	return pte;
+}
+
+/* Purge data and instruction TLB entries.  Must be called holding
+ * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
+ * machines since the purge must be broadcast to all CPUs.
+ */
+static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
+{
+	int i;
+
+	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
+	 * Linux standard huge pages (e.g. 2 MB) */
+	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);
+
+	addr &= HPAGE_MASK;
+	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
+
+	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
+		mtsp(mm->context, 1);
+		pdtlb(addr);
+		if (unlikely(split_tlb))
+			pitlb(addr);
+		addr += (1UL << REAL_HPAGE_SHIFT);
+	}
+}
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t entry)
+{
+	unsigned long addr_start;
+	int i;
+
+	addr &= HPAGE_MASK;
+	addr_start = addr;
+
+	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+		/* Directly write pte entry.  We could call set_pte_at(mm, addr, ptep, entry)
+		 * instead, but then we get double locking on pa_tlb_lock. */
+		*ptep = entry;
+		ptep++;
+
+		/* Drop the PAGE_SIZE/non-huge tlb entry */
+		purge_tlb_entries(mm, addr);
+
+		addr += PAGE_SIZE;
+		pte_val(entry) += PAGE_SIZE;
+	}
+
+	purge_tlb_entries_huge(mm, addr_start);
+}
+
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep)
+{
+	pte_t entry;
+
+	entry = *ptep;
+	set_huge_pte_at(mm, addr, ptep, __pte(0));
+
+	return entry;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+	return 0;
+}
+
+int pud_huge(pud_t pud)
+{
+	return 0;
+}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index c229427fa..1b366c477 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -23,6 +23,7 @@
 #include <linux/unistd.h>
 #include <linux/nodemask.h>	/* for node_online_map */
 #include <linux/pagemap.h>	/* for release_pages and page_cache_release */
+#include <linux/compat.h>
 
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -30,6 +31,7 @@
 #include <asm/pdc_chassis.h>
 #include <asm/mmzone.h>
 #include <asm/sections.h>
+#include <asm/msgbuf.h>
 
 extern int  data_start;
 extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
@@ -407,15 +409,11 @@ static void __init map_pages(unsigned long start_vaddr,
 	unsigned long vaddr;
 	unsigned long ro_start;
 	unsigned long ro_end;
-	unsigned long fv_addr;
-	unsigned long gw_addr;
-	extern const unsigned long fault_vector_20;
-	extern void * const linux_gateway_page;
+	unsigned long kernel_end;
 
 	ro_start = __pa((unsigned long)_text);
 	ro_end   = __pa((unsigned long)&data_start);
-	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
-	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+	kernel_end  = __pa((unsigned long)&_end);
 
 	end_paddr = start_paddr + size;
 
@@ -473,24 +471,25 @@ static void __init map_pages(unsigned long start_vaddr,
 			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 				pte_t pte;
 
-				/*
-				 * Map the fault vector writable so we can
-				 * write the HPMC checksum.
-				 */
 				if (force)
 					pte =  __mk_pte(address, pgprot);
-				else if (parisc_text_address(vaddr) &&
-					 address != fv_addr)
+				else if (parisc_text_address(vaddr)) {
 					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+					if (address >= ro_start && address < kernel_end)
+						pte = pte_mkhuge(pte);
+				}
 				else
 #if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-				if (address >= ro_start && address < ro_end
-							&& address != fv_addr
-							&& address != gw_addr)
-					pte = __mk_pte(address, PAGE_KERNEL_RO);
-				else
+				if (address >= ro_start && address < ro_end) {
+					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+					pte = pte_mkhuge(pte);
+				} else
 #endif
+				{
 					pte = __mk_pte(address, pgprot);
+					if (address >= ro_start && address < kernel_end)
+						pte = pte_mkhuge(pte);
+				}
 
 				if (address >= end_paddr) {
 					if (force)
@@ -534,15 +533,12 @@ void free_initmem(void)
 	/* force the kernel to see the new TLB entries */
 	__flush_tlb_range(0, init_begin, init_end);
 
-	/* Attempt to catch anyone trying to execute code here
-	 * by filling the page with BRK insns.
-	 */
-	memset((void *)init_begin, 0x00, init_end - init_begin);
+
 	/* finally dump all the instructions which were cached, since the
 	 * pages are no-longer executable */
 	flush_icache_range(init_begin, init_end);
 
-	free_initmem_default(-1);
+	free_initmem_default(POISON_FREE_INITMEM);
 
 	/* set up a new led state on systems shipped LED State panel */
 	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
@@ -590,6 +586,20 @@ unsigned long pcxl_dma_start __read_mostly;
 
 void __init mem_init(void)
 {
+	/* Do sanity checks on IPC (compat) structures */
+	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
+#ifndef CONFIG_64BIT
+	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
+	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
+	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
+#endif
+#ifdef CONFIG_COMPAT
+	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
+	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
+	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
+	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
+#endif
+
 	/* Do sanity checks on page table constants */
 	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
 	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
@@ -712,8 +722,8 @@ static void __init pagetable_init(void)
 		unsigned long size;
 
 		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
-		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
 		size = pmem_ranges[range].pages << PAGE_SHIFT;
+		end_paddr = start_paddr + size;
 
 		map_pages((unsigned long)__va(start_paddr), start_paddr,
 			  size, PAGE_KERNEL, 0);
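The two loop bounds in hugetlbpage.c fall straight out of the shift constants. For the 64-bit/4 kB configuration (REAL_HPAGE_SHIFT = 20, and HPAGE_SHIFT = PMD_SHIFT, which works out to 21 with 512 eight-byte PTEs per 4 kB page — our derivation, not stated in the diff), one 2 MB Linux huge page is written as 512 sub-PTEs and purged as two 1 MB hardware TLB entries:

```c
#include <stdio.h>

int main(void)
{
	int PAGE_SHIFT = 12, HPAGE_SHIFT = 21, REAL_HPAGE_SHIFT = 20;

	/* loop bound in set_huge_pte_at(): 1 << HUGETLB_PAGE_ORDER */
	printf("sub-ptes written per huge page: %d\n",
	       1 << (HPAGE_SHIFT - PAGE_SHIFT));		/* 512 */

	/* loop bound in purge_tlb_entries_huge() */
	printf("hw TLB purges per huge page:    %d\n",
	       1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT));		/* 2 */
	return 0;
}
```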