path: root/arch/powerpc/mm
author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
commit     8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be (patch)
tree       e9891aa6c295060d065adffd610c4f49ecf884f3  /arch/powerpc/mm
parent     a71852147516bc1cb5b0b3cbd13639bfd4022dc8 (diff)
Linux-libre 4.3.2-gnu
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c    2
-rw-r--r--  arch/powerpc/mm/hash_low_64.S      4
-rw-r--r--  arch/powerpc/mm/hash_native_64.c  23
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c   12
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c      8
-rw-r--r--  arch/powerpc/mm/mem.c             18
-rw-r--r--  arch/powerpc/mm/numa.c            16
-rw-r--r--  arch/powerpc/mm/pgtable_64.c      10
-rw-r--r--  arch/powerpc/mm/slb.c             24
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S     10
10 files changed, 65 insertions, 62 deletions
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 9c90e66cf..354ba3c09 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -112,7 +112,7 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
tsize = __ilog2(size) - 10;
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
if ((flags & _PAGE_NO_CACHE) == 0)
flags |= _PAGE_COHERENT;
#endif
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 463174a4a..3b49e3295 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -701,7 +701,7 @@ htab_pte_insert_failure:
#endif /* CONFIG_PPC_64K_PAGES */
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
/*****************************************************************************
* *
@@ -993,7 +993,7 @@ ht64_pte_insert_failure:
b ht64_bail
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
/*****************************************************************************
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 13befa35d..c8822af10 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -582,13 +582,21 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
* be when they isi), and we are the only one left. We rely on our kernel
* mapping being 0xC0's and the hardware ignoring those two real bits.
*
+ * This must be called with interrupts disabled.
+ *
+ * Taking the native_tlbie_lock is unsafe here due to the possibility of
+ * lockdep being on. On pre POWER5 hardware, not taking the lock could
+ * cause deadlock. POWER5 and newer not taking the lock is fine. This only
+ * gets called during boot before secondary CPUs have come up and during
+ * crashdump and all bets are off anyway.
+ *
* TODO: add batching support when enabled. remember, no dynamic memory here,
* athough there is the control page available...
*/
static void native_hpte_clear(void)
{
unsigned long vpn = 0;
- unsigned long slot, slots, flags;
+ unsigned long slot, slots;
struct hash_pte *hptep = htab_address;
unsigned long hpte_v;
unsigned long pteg_count;
@@ -596,13 +604,6 @@ static void native_hpte_clear(void)
pteg_count = htab_hash_mask + 1;
- local_irq_save(flags);
-
- /* we take the tlbie lock and hold it. Some hardware will
- * deadlock if we try to tlbie from two processors at once.
- */
- raw_spin_lock(&native_tlbie_lock);
-
slots = pteg_count * HPTES_PER_GROUP;
for (slot = 0; slot < slots; slot++, hptep++) {
@@ -614,8 +615,8 @@ static void native_hpte_clear(void)
hpte_v = be64_to_cpu(hptep->v);
/*
- * Call __tlbie() here rather than tlbie() since we
- * already hold the native_tlbie_lock.
+ * Call __tlbie() here rather than tlbie() since we can't take the
+ * native_tlbie_lock.
*/
if (hpte_v & HPTE_V_VALID) {
hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
@@ -625,8 +626,6 @@ static void native_hpte_clear(void)
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
- raw_spin_unlock(&native_tlbie_lock);
- local_irq_restore(flags);
}
/*
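
The comment added above states that native_hpte_clear() must now be called with interrupts already disabled, since the local_irq_save()/native_tlbie_lock handling was pulled out of the function. A minimal sketch of what a caller would look like under that contract (the caller name and context are hypothetical, not part of this patch):

	static void example_shutdown_path(void)
	{
		unsigned long flags;

		/* Per the new comment: interrupts must be off, and this only
		 * runs at boot (before secondary CPUs come up) or during a
		 * crashdump, so skipping native_tlbie_lock is acceptable. */
		local_irq_save(flags);
		native_hpte_clear();
		local_irq_restore(flags);
	}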
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5ec987f65..aee701713 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -640,7 +640,7 @@ extern u32 ht64_call_hpte_updatepp[];
static void __init htab_finish_init(void)
{
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
patch_branch(ht64_call_hpte_insert1,
ppc_function_entry(ppc_md.hpte_insert),
BRANCH_SET_LINK);
@@ -653,7 +653,7 @@ static void __init htab_finish_init(void)
patch_branch(ht64_call_hpte_updatepp,
ppc_function_entry(ppc_md.hpte_updatepp),
BRANCH_SET_LINK);
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
patch_branch(htab_call_hpte_insert1,
ppc_function_entry(ppc_md.hpte_insert),
@@ -1151,12 +1151,12 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
check_paca_psize(ea, mm, psize, user_region);
#endif /* CONFIG_PPC_64K_PAGES */
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
if (psize == MMU_PAGE_64K)
rc = __hash_page_64K(ea, access, vsid, ptep, trap,
flags, ssize);
else
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
{
int spp = subpage_protection(mm, ea);
if (access & spp)
@@ -1264,12 +1264,12 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
update_flags |= HPTE_LOCAL_UPDATE;
/* Hash it in */
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
if (mm->context.user_psize == MMU_PAGE_64K)
rc = __hash_page_64K(ea, access, vsid, ptep, trap,
update_flags, ssize);
else
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
ssize, subpage_protection(mm, ea));
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index bb0bd7025..06c14523b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -808,14 +808,6 @@ static int __init add_huge_page_size(unsigned long long size)
if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
return -EINVAL;
-#ifdef CONFIG_SPU_FS_64K_LS
- /* Disable support for 64K huge pages when 64K SPU local store
- * support is enabled as the current implementation conflicts.
- */
- if (shift == PAGE_SHIFT_64K)
- return -EINVAL;
-#endif /* CONFIG_SPU_FS_64K_LS */
-
BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
/* Return if huge page size has already been setup */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0f11819d8..22d94c3e6 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -113,7 +113,7 @@ int memory_add_physaddr_to_nid(u64 start)
}
#endif
-int arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
struct pglist_data *pgdata;
struct zone *zone;
@@ -128,7 +128,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
/* this should work for most non-highmem platforms */
zone = pgdata->node_zones +
- zone_for_memory(nid, start, size, 0);
+ zone_for_memory(nid, start, size, 0, for_device);
return __add_pages(nid, zone, start_pfn, nr_pages);
}
@@ -414,17 +414,17 @@ void flush_dcache_icache_page(struct page *page)
return;
}
#endif
-#ifdef CONFIG_BOOKE
- {
+#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
+ /* On 8xx there is no need to kmap since highmem is not supported */
+ __flush_dcache_icache(page_address(page));
+#else
+ if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
void *start = kmap_atomic(page);
__flush_dcache_icache(start);
kunmap_atomic(start);
+ } else {
+ __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
}
-#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
- /* On 8xx there is no need to kmap since highmem is not supported */
- __flush_dcache_icache(page_address(page));
-#else
- __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);
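
The reworked flush_dcache_icache_page() above chooses its flush path by asking whether the page could be in highmem: Book3E always takes the kmap_atomic() route, as does any 32-bit configuration whose phys_addr_t is wider than a pointer; otherwise the page is flushed by physical address. A tiny user-space sketch of just that width test, using a stand-in type name rather than the kernel's:

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-in for phys_addr_t on a 32-bit platform with 36-bit
	 * physical addressing, where it is 64 bits wide. */
	typedef uint64_t fake_phys_addr_t;

	int main(void)
	{
		/* Prints 1 when the physical address space is wider than the
		 * virtual one (highmem possible, so kmap first), 0 otherwise. */
		printf("%d\n", (int)(sizeof(fake_phys_addr_t) > sizeof(void *)));
		return 0;
	}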
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 5e80621d9..8b9502ada 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -225,7 +225,7 @@ static void initialize_distance_lookup_table(int nid,
for (i = 0; i < distance_ref_points_depth; i++) {
const __be32 *entry;
- entry = &associativity[be32_to_cpu(distance_ref_points[i])];
+ entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
distance_lookup_table[nid][i] = of_read_number(entry, 1);
}
}
@@ -248,8 +248,12 @@ static int associativity_to_nid(const __be32 *associativity)
nid = -1;
if (nid > 0 &&
- of_read_number(associativity, 1) >= distance_ref_points_depth)
- initialize_distance_lookup_table(nid, associativity);
+ of_read_number(associativity, 1) >= distance_ref_points_depth) {
+ /*
+ * Skip the length field and send start of associativity array
+ */
+ initialize_distance_lookup_table(nid, associativity + 1);
+ }
out:
return nid;
@@ -507,6 +511,12 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
if (nid == 0xffff || nid >= MAX_NUMNODES)
nid = default_nid;
+
+ if (nid > 0) {
+ index = drmem->aa_index * aa->array_sz;
+ initialize_distance_lookup_table(nid,
+ &aa->arrays[index]);
+ }
}
return nid;
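
Both numa.c hunks above rely on the same device-tree layout: an ibm,associativity property starts with a count cell followed by the domain IDs, and ibm,associativity-reference-points holds 1-based indexes into that domain list. A small sketch of the indexing the patched code ends up doing (all values invented for illustration):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Made-up ibm,associativity property: count cell (4), then IDs. */
		uint32_t associativity[] = { 4, 0, 2, 5, 7 };
		/* Made-up reference point: 1-based index into the ID list. */
		uint32_t ref_point = 2;

		/* Skip the count cell ("associativity + 1" in the patch), then
		 * convert the 1-based reference point to a C index ("- 1"). */
		const uint32_t *domains = associativity + 1;
		printf("distance entry = %u\n", domains[ref_point - 1]); /* 2 */
		return 0;
	}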
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 876232d64..e92cb2146 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -149,17 +149,7 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
#endif /* !CONFIG_PPC_MMU_NOHASH */
}
-#ifdef CONFIG_PPC_BOOK3E_64
- /*
- * With hardware tablewalk, a sync is needed to ensure that
- * subsequent accesses see the PTE we just wrote. Unlike userspace
- * mappings, we can't tolerate spurious faults, so make sure
- * the new PTE will be seen the first time.
- */
- mb();
-#else
smp_wmb();
-#endif
return 0;
}
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6e450ca66..8a32a2be3 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -41,9 +41,9 @@ static void slb_allocate(unsigned long ea)
(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
- unsigned long slot)
+ unsigned long entry)
{
- return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
+ return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | entry;
}
static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
@@ -249,11 +249,24 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
static inline void patch_slb_encoding(unsigned int *insn_addr,
unsigned int immed)
{
- int insn = (*insn_addr & 0xffff0000) | immed;
+
+ /*
+ * This function patches either an li or a cmpldi instruction with
+ * a new immediate value. This relies on the fact that both li
+ * (which is actually addi) and cmpldi both take a 16-bit immediate
+ * value, and it is situated in the same location in the instruction,
+ * ie. bits 16-31 (Big endian bit order) or the lower 16 bits.
+ * The signedness of the immediate operand differs between the two
+ * instructions however this code is only ever patching a small value,
+ * much less than 1 << 15, so we can get away with it.
+ * To patch the value we read the existing instruction, clear the
+ * immediate value, and or in our new value, then write the instruction
+ * back.
+ */
+ unsigned int insn = (*insn_addr & 0xffff0000) | immed;
patch_instruction(insn_addr, insn);
}
-extern u32 slb_compare_rr_to_size[];
extern u32 slb_miss_kernel_load_linear[];
extern u32 slb_miss_kernel_load_io[];
extern u32 slb_compare_rr_to_size[];
@@ -309,12 +322,11 @@ void slb_initialize(void)
lflags = SLB_VSID_KERNEL | linear_llp;
vflags = SLB_VSID_KERNEL | vmalloc_llp;
- /* Invalidate the entire SLB (even slot 0) & all the ERATS */
+ /* Invalidate the entire SLB (even entry 0) & all the ERATS */
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-
create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
/* For the boot cpu, we're running on the stack in init_thread_union,
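
The long comment added to patch_slb_encoding() above explains that both li and cmpldi keep their immediate operand in the low 16 bits of the instruction word, so a new value can be spliced in by masking and OR-ing. A standalone sketch of that splice with an invented instruction word (the actual patched instructions live in the SLB miss handler and are not shown in this hunk):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* 0x39400000 encodes "li r10,0" (addi r10,0,0); the low 16 bits
		 * are the immediate field shared with cmpldi. */
		uint32_t insn  = 0x39400000;
		uint32_t immed = 64;	/* small positive value, well below 1 << 15 */

		uint32_t patched = (insn & 0xffff0000) | (immed & 0xffff);
		printf("0x%08x\n", patched);	/* 0x39400040, i.e. "li r10,64" */
		return 0;
	}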
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 765b41988..e4185581c 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -308,11 +308,11 @@ BEGIN_FTR_SECTION /* CPU_FTR_SMT */
*
* MAS6:IND should be already set based on MAS4
*/
-1: lbarx r15,0,r11
lhz r10,PACAPACAINDEX(r13)
- cmpdi r15,0
- cmpdi cr1,r15,1 /* set cr1.eq = 0 for non-recursive */
addi r10,r10,1
+ crclr cr1*4+eq /* set cr1.eq = 0 for non-recursive */
+1: lbarx r15,0,r11
+ cmpdi r15,0
bne 2f
stbcx. r10,0,r11
bne 1b
@@ -320,9 +320,9 @@ BEGIN_FTR_SECTION /* CPU_FTR_SMT */
.subsection 1
2: cmpd cr1,r15,r10 /* recursive lock due to mcheck/crit/etc? */
beq cr1,3b /* unlock will happen if cr1.eq = 0 */
- lbz r15,0(r11)
+10: lbz r15,0(r11)
cmpdi r15,0
- bne 2b
+ bne 10b
b 1b
.previous