Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Pull sparc fixes from David Miller:
 "Several fixes here, mostly having to do with either build errors or
  memory corruptions depending upon whether you have THP enabled or not"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc: remove unused wp_works_ok macro
  sparc32: Export vac_cache_size to fix build error
  sparc64: Fix memory corruption when THP is enabled
  sparc64: Fix kernel panic due to erroneous #ifdef surrounding pmd_write()
  arch/sparc: Avoid DCTI Couples
  sparc64: kern_addr_valid regression
  sparc64: Add support for 2G hugepages
  sparc64: Fix size check in huge_pte_alloc
commit 8b65bb57d8

20 changed files with 47 additions and 25 deletions
@@ -17,6 +17,7 @@

 #define HPAGE_SHIFT 23
 #define REAL_HPAGE_SHIFT 22
+#define HPAGE_2GB_SHIFT 31
 #define HPAGE_256MB_SHIFT 28
 #define HPAGE_64K_SHIFT 16
 #define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT)
@@ -27,7 +28,7 @@
 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
-#define HUGE_MAX_HSTATE 3
+#define HUGE_MAX_HSTATE 4
 #endif

 #ifndef __ASSEMBLY__
@@ -679,6 +679,14 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
         return pte_pfn(pte);
 }

+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
+{
+        pte_t pte = __pte(pmd_val(pmd));
+
+        return pte_write(pte);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline unsigned long pmd_dirty(pmd_t pmd)
 {
@@ -694,13 +702,6 @@ static inline unsigned long pmd_young(pmd_t pmd)
         return pte_young(pte);
 }

-static inline unsigned long pmd_write(pmd_t pmd)
-{
-        pte_t pte = __pte(pmd_val(pmd));
-
-        return pte_write(pte);
-}
-
 static inline unsigned long pmd_trans_huge(pmd_t pmd)
 {
         pte_t pte = __pte(pmd_val(pmd));
@@ -18,12 +18,6 @@
 #include <asm/signal.h>
 #include <asm/page.h>

-/*
- * The sparc has no problems with write protection
- */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
 /* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
  * That one page is used to protect kernel from intruders, so that
  * we can make our access_ok test faster
@@ -18,10 +18,6 @@
 #include <asm/ptrace.h>
 #include <asm/page.h>

-/* The sparc has no problems with write protection */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
 /*
  * User lives in his very own context, and cannot reference us. Note
  * that TASK_SIZE is a misnomer, it really gives maximum user virtual
@@ -96,6 +96,7 @@ sparc64_boot:
         andn    %g1, PSTATE_AM, %g1
         wrpr    %g1, 0x0, %pstate
         ba,a,pt %xcc, 1f
+         nop

         .globl  prom_finddev_name, prom_chosen_path, prom_root_node
         .globl  prom_getprop_name, prom_mmu_name, prom_peer_name
@@ -613,6 +614,7 @@ niagara_tlb_fixup:
          nop

         ba,a,pt %xcc, 80f
+         nop
 niagara4_patch:
         call    niagara4_patch_copyops
          nop
@@ -622,6 +624,7 @@ niagara4_patch:
          nop

         ba,a,pt %xcc, 80f
+         nop

 niagara2_patch:
         call    niagara2_patch_copyops
@@ -632,6 +635,7 @@ niagara2_patch:
          nop

         ba,a,pt %xcc, 80f
+         nop

 niagara_patch:
         call    niagara_patch_copyops
@@ -82,6 +82,7 @@ do_stdfmna:
         call    handle_stdfmna
          add    %sp, PTREGS_OFF, %o0
         ba,a,pt %xcc, rtrap
+         nop
         .size   do_stdfmna,.-do_stdfmna

         .type   breakpoint_trap,#function
@@ -237,6 +237,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
         bne,pt  %xcc, user_rtt_fill_32bit
          wrpr   %g1, %cwp
         ba,a,pt %xcc, user_rtt_fill_64bit
+         nop

 user_rtt_fill_fixup_dax:
         ba,pt   %xcc, user_rtt_fill_fixup_common
@@ -86,6 +86,7 @@ __spitfire_cee_trap_continue:
          rd     %pc, %g7

         ba,a,pt %xcc, 2f
+         nop

 1:      ba,pt   %xcc, etrap_irq
          rd     %pc, %g7
@@ -352,6 +352,7 @@ sun4v_mna:
         call    sun4v_do_mna
          add    %sp, PTREGS_OFF, %o0
         ba,a,pt %xcc, rtrap
+         nop

         /* Privileged Action. */
 sun4v_privact:
@@ -92,6 +92,7 @@ user_rtt_fill_fixup_common:
         call    sun4v_data_access_exception
          nop
         ba,a,pt %xcc, rtrap
+         nop

 1:      call    spitfire_data_access_exception
          nop
@@ -152,6 +152,8 @@ fill_fixup_dax:
         call    sun4v_data_access_exception
          nop
         ba,a,pt %xcc, rtrap
+         nop
 1:      call    spitfire_data_access_exception
          nop
         ba,a,pt %xcc, rtrap
+         nop
@@ -326,11 +326,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
         blu     170f
          nop
         ba,a,pt %xcc, 180f
+         nop

 4:      /* 32 <= low bits < 48 */
         blu     150f
          nop
         ba,a,pt %xcc, 160f
+         nop
 5:      /* 0 < low bits < 32 */
         blu,a   6f
          cmp    %g2, 8
@@ -338,6 +340,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
         blu     130f
          nop
         ba,a,pt %xcc, 140f
+         nop
 6:      /* 0 < low bits < 16 */
         bgeu    120f
          nop
@@ -475,6 +478,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
         brz,pt  %o2, 85f
          sub    %o0, %o1, GLOBAL_SPARE
         ba,a,pt %XCC, 90f
+         nop

         .align  64
 75:     /* 16 < len <= 64 */
@@ -530,4 +530,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
         bne,pt  %icc, 1b
          EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
         ba,a,pt %icc, .Lexit
+         nop
         .size   FUNC_NAME, .-FUNC_NAME
@@ -102,4 +102,5 @@ NG4bzero:
         bne,pt  %icc, 1b
          add    %o0, 0x30, %o0
         ba,a,pt %icc, .Lpostloop
+         nop
         .size   NG4bzero,.-NG4bzero
@@ -394,6 +394,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */
         brz,pt  %i2, 85f
          sub    %o0, %i1, %i3
         ba,a,pt %XCC, 90f
+         nop

         .align  64
 70:     /* 16 < len <= 64 */
@@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
         pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

         switch (shift) {
+        case HPAGE_2GB_SHIFT:
+                hugepage_size = _PAGE_SZ2GB_4V;
+                pte_val(entry) |= _PAGE_PMD_HUGE;
+                break;
         case HPAGE_256MB_SHIFT:
                 hugepage_size = _PAGE_SZ256MB_4V;
                 pte_val(entry) |= _PAGE_PMD_HUGE;
@@ -183,6 +187,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
         unsigned int shift;

         switch (tte_szbits) {
+        case _PAGE_SZ2GB_4V:
+                shift = HPAGE_2GB_SHIFT;
+                break;
         case _PAGE_SZ256MB_4V:
                 shift = HPAGE_256MB_SHIFT;
                 break;
@@ -261,7 +268,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
         if (!pmd)
                 return NULL;

-        if (sz == PMD_SHIFT)
+        if (sz >= PMD_SIZE)
                 pte = (pte_t *)pmd;
         else
                 pte = pte_alloc_map(mm, pmd, addr);
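The huge_pte_alloc() hunk above compares a byte size against PMD_SIZE rather than against the PMD_SHIFT shift count. A minimal standalone sketch of that decision, using assumed sparc64-style constants (PAGE_SHIFT, PMD_SHIFT and the sample sizes below are illustrative, not taken from this diff):

#include <stdio.h>

/* Illustrative values only: 8K base pages, 1024 PMD entries per page. */
#define PAGE_SHIFT 13UL
#define PMD_SHIFT  (PAGE_SHIFT + 10UL)   /* 23 */
#define PMD_SIZE   (1UL << PMD_SHIFT)    /* 8 MB */

/* Mirrors the corrected check: PMD-sized or larger huge pages are mapped
 * at the PMD level, smaller ones (e.g. 64K) still need a PTE page. */
static const char *alloc_level(unsigned long sz)
{
        return (sz >= PMD_SIZE) ? "pmd" : "pte";
}

int main(void)
{
        unsigned long sizes[] = { 1UL << 16, 1UL << 23, 1UL << 28, 1UL << 31 };

        for (int i = 0; i < 4; i++)
                printf("%lu bytes -> %s\n", sizes[i], alloc_level(sizes[i]));
        /* The old test, sz == PMD_SHIFT, compared a byte count to a shift
         * count and so never matched any real huge page size. */
        return 0;
}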
@@ -337,6 +337,10 @@ static int __init setup_hugepagesz(char *string)
         hugepage_shift = ilog2(hugepage_size);

         switch (hugepage_shift) {
+        case HPAGE_2GB_SHIFT:
+                hv_pgsz_mask = HV_PGSZ_MASK_2GB;
+                hv_pgsz_idx = HV_PGSZ_IDX_2GB;
+                break;
         case HPAGE_256MB_SHIFT:
                 hv_pgsz_mask = HV_PGSZ_MASK_256MB;
                 hv_pgsz_idx = HV_PGSZ_IDX_256MB;
@@ -1563,7 +1567,7 @@ bool kern_addr_valid(unsigned long addr)
         if ((long)addr < 0L) {
                 unsigned long pa = __pa(addr);

-                if ((addr >> max_phys_bits) != 0UL)
+                if ((pa >> max_phys_bits) != 0UL)
                         return false;

                 return pfn_valid(pa >> PAGE_SHIFT);
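In the kern_addr_valid() hunk, the range check is applied to the physical address rather than to the sign-extended kernel virtual address. A small sketch of the arithmetic with assumed values (max_phys_bits and both example addresses are illustrative, not from this diff):

#include <stdio.h>

int main(void)
{
        unsigned long long max_phys_bits = 49;              /* assumed value */
        unsigned long long addr = 0xfffff80000012345ULL;    /* upper-half kernel VA */
        unsigned long long pa   = 0x0000000000012345ULL;    /* a physical address below the limit */

        /* A sign-extended kernel virtual address always has bits set above
         * max_phys_bits, so shifting addr rejects every kernel mapping: */
        printf("addr >> max_phys_bits = %#llx\n", addr >> max_phys_bits);

        /* The physical address is what actually has to fit below the limit: */
        printf("pa   >> max_phys_bits = %#llx\n", pa >> max_phys_bits);
        return 0;
}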
@@ -54,6 +54,7 @@
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
 int vac_line_size;

 extern struct resource sparc_iomap;
@@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
                 if (pte_val(*pte) & _PAGE_VALID) {
                         bool exec = pte_exec(*pte);

-                        tlb_batch_add_one(mm, vaddr, exec, false);
+                        tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
                 }
                 pte++;
                 vaddr += PAGE_SIZE;
@@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                         pte_t orig_pte = __pte(pmd_val(orig));
                         bool exec = pte_exec(orig_pte);

-                        tlb_batch_add_one(mm, addr, exec, true);
-                        tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
-                                          true);
+                        tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
+                        tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+                                          REAL_HPAGE_SHIFT);
                 } else {
                         tlb_batch_pmd_scan(mm, addr, orig);
                 }
@@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb)

         spin_lock_irqsave(&mm->context.lock, flags);

-        if (tb->hugepage_shift < HPAGE_SHIFT) {
+        if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
                 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
                 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
                 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,

         spin_lock_irqsave(&mm->context.lock, flags);

-        if (hugepage_shift < HPAGE_SHIFT) {
+        if (hugepage_shift < REAL_HPAGE_SHIFT) {
                 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
                 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
                 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
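The tlb.c and tsb.c hunks replace a boolean huge flag with an explicit page-size shift, and the flush path compares that shift against REAL_HPAGE_SHIFT to pick a TSB. A rough sketch of that selection, where tsb_for_shift() is a hypothetical helper and only the REAL_HPAGE_SHIFT comparison mirrors the diff:

#include <stdio.h>

#define PAGE_SHIFT       13   /* assumed sparc64 base page shift */
#define REAL_HPAGE_SHIFT 22   /* matches the header hunk near the top */

/* Hypothetical helper: batches of entries smaller than a real huge page are
 * flushed from the base TSB, real-huge-page-sized ones from the huge TSB. */
static const char *tsb_for_shift(unsigned int hugepage_shift)
{
        return (hugepage_shift < REAL_HPAGE_SHIFT) ? "MM_TSB_BASE" : "MM_TSB_HUGE";
}

int main(void)
{
        printf("shift %d -> %s\n", PAGE_SHIFT, tsb_for_shift(PAGE_SHIFT));
        printf("shift %d -> %s\n", REAL_HPAGE_SHIFT, tsb_for_shift(REAL_HPAGE_SHIFT));
        return 0;
}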