Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull ARM64 patches from Catalin Marinas:

- Build fix with DMA_CMA enabled
- Introduction of PTE_WRITE to distinguish between writable-but-clean and truly read-only pages (a rough model is sketched after the commit list below)
- FIQ enable/disable clean-up (FIQs are not used on arm64)
- CPU resume fix for the per-cpu offset restore
- Code comment typo fixes

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mm: Introduce PTE_WRITE
  arm64: mm: Remove PTE_BIT_FUNC macro
  arm64: FIQs are unused
  arm64: mm: fix the function name in comment of cpu_do_switch_mm
  arm64: fix build error if DMA_CMA is enabled
  arm64: kernel: fix per-cpu offset restore on resume
  arm64: mm: fix the function name in comment of __flush_dcache_area
  arm64: mm: use ubfm for dcache_line_size
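A rough stand-alone model of the new PTE_WRITE semantics, as applied by set_pte_at() in the diff below. This is an illustration only, not kernel code: the PTE_DIRTY/PTE_WRITE bit positions follow the hunks in this merge, while the PTE_RDONLY position is an assumption made for the example.

/* model.c - clean-but-writable pages stay hardware read-only so the
 * first write faults and can be marked dirty; only dirty+writable
 * pages are mapped hardware-writable. Illustrative bit values. */
#include <stdint.h>
#include <stdio.h>

#define PTE_DIRTY  (1ULL << 55)
#define PTE_WRITE  (1ULL << 57)  /* new software bit from this series */
#define PTE_RDONLY (1ULL << 7)   /* hardware read-only bit (assumed position) */

static uint64_t fixup_rdonly(uint64_t pte)
{
        if ((pte & PTE_DIRTY) && (pte & PTE_WRITE))
                pte &= ~PTE_RDONLY;   /* dirty and writable: allow hardware writes */
        else
                pte |= PTE_RDONLY;    /* clean or read-only: fault on first write */
        return pte;
}

int main(void)
{
        printf("clean+writable  -> RDONLY=%d\n",
               (int)!!(fixup_rdonly(PTE_WRITE) & PTE_RDONLY));
        printf("dirty+writable  -> RDONLY=%d\n",
               (int)!!(fixup_rdonly(PTE_WRITE | PTE_DIRTY) & PTE_RDONLY));
        printf("truly read-only -> RDONLY=%d\n",
               (int)!!(fixup_rdonly(0) & PTE_RDONLY));
        return 0;
}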
commit deb2a1d29b
8 changed files with 74 additions and 43 deletions
@@ -18,7 +18,6 @@
 #ifdef CONFIG_DMA_CMA
 
 #include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
 
 static inline void
 dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
@@ -28,7 +28,7 @@
 #define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
 #define PTE_DIRTY (_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
-                /* bit 57 for PMD_SECT_SPLITTING */
+#define PTE_WRITE (_AT(pteval_t, 1) << 57)
 #define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
 /*
@@ -67,15 +67,15 @@ extern pgprot_t pgprot_default;
 
 #define _MOD_PROT(p, b) __pgprot_modify(p, 0, b)
 
-#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE)
 
 #define PAGE_HYP _MOD_PROT(pgprot_default, PTE_HYP)
 #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
@@ -83,13 +83,13 @@ extern pgprot_t pgprot_default;
 #define PAGE_S2 __pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
 
-#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
 
 #endif /* __ASSEMBLY__ */
 
@@ -140,22 +140,53 @@ extern struct page *empty_zero_page;
 #define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
 #define pte_young(pte) (pte_val(pte) & PTE_AF)
 #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
+#define pte_write(pte) (pte_val(pte) & PTE_WRITE)
 #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
 
 #define pte_valid_user(pte) \
        ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+       pte_val(pte) &= ~PTE_WRITE;
+       return pte;
+}
 
-PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY);
-PTE_BIT_FUNC(mkwrite, &= ~PTE_RDONLY);
-PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
-PTE_BIT_FUNC(mkold, &= ~PTE_AF);
-PTE_BIT_FUNC(mkyoung, |= PTE_AF);
-PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL);
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+       pte_val(pte) |= PTE_WRITE;
+       return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+       pte_val(pte) &= ~PTE_DIRTY;
+       return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+       pte_val(pte) |= PTE_DIRTY;
+       return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+       pte_val(pte) &= ~PTE_AF;
+       return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+       pte_val(pte) |= PTE_AF;
+       return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       pte_val(pte) |= PTE_SPECIAL;
+       return pte;
+}
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
@@ -170,8 +201,10 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
        if (pte_valid_user(pte)) {
                if (pte_exec(pte))
                        __sync_icache_dcache(pte, addr);
-               if (!pte_dirty(pte))
-                       pte = pte_wrprotect(pte);
+               if (pte_dirty(pte) && pte_write(pte))
+                       pte_val(pte) &= ~PTE_RDONLY;
+               else
+                       pte_val(pte) |= PTE_RDONLY;
        }
 
        set_pte(ptep, pte);
@@ -345,7 +378,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-                             PTE_PROT_NONE | PTE_VALID;
+                             PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
 }
@@ -85,11 +85,6 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 EXPORT_SYMBOL_GPL(arm_pm_restart);
 
-void arch_cpu_idle_prepare(void)
-{
-       local_fiq_enable();
-}
-
 /*
  * This is our default idle handler.
  */
@@ -138,7 +133,6 @@ void machine_restart(char *cmd)
 
        /* Disable interrupts first */
        local_irq_disable();
-       local_fiq_disable();
 
        /* Now call the architecture specific reboot code. */
        if (arm_pm_restart)
@@ -161,7 +161,6 @@ asmlinkage void secondary_start_kernel(void)
        complete(&cpu_running);
 
        local_irq_enable();
-       local_fiq_enable();
        local_async_enable();
 
        /*
@@ -495,7 +494,6 @@ static void ipi_cpu_stop(unsigned int cpu)
 
        set_cpu_online(cpu, false);
 
-       local_fiq_disable();
        local_irq_disable();
 
        while (1)
@@ -1,3 +1,4 @@
+#include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
@@ -89,6 +90,13 @@ int cpu_suspend(unsigned long arg)
        if (ret == 0) {
                cpu_switch_mm(mm->pgd, mm);
                flush_tlb_all();
+
+               /*
+                * Restore per-cpu offset before any kernel
+                * subsystem relying on it has a chance to run.
+                */
+               set_my_cpu_offset(per_cpu_offset(cpu));
+
                /*
                 * Restore HW breakpoint registers to sane values
                 * before debug exceptions are possibly reenabled
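The comment in the hunk above is the whole story: per-cpu accesses on the resumed CPU resolve through the per-CPU base offset, so touching a per-cpu variable before set_my_cpu_offset() runs would land in another CPU's copy. A toy user-space model of that hazard (hypothetical names, not kernel code):

/* percpu_model.c - a "cpu offset" variable stands in for the per-CPU
 * offset register; until it is restored, accesses hit slot 0. */
#include <stdio.h>

#define NR_CPUS 4

static long percpu_counter[NR_CPUS];  /* one copy per CPU */
static long my_cpu_offset;            /* reset value is 0 */

static long *this_cpu(void)
{
        return (long *)((char *)percpu_counter + my_cpu_offset);
}

int main(void)
{
        int cpu = 2;

        /* Before the offset is restored on resume: lands on CPU 0's copy. */
        (*this_cpu())++;

        /* After the equivalent of set_my_cpu_offset(per_cpu_offset(cpu)):
         * the resuming CPU's own copy is used. */
        my_cpu_offset = cpu * sizeof(long);
        (*this_cpu())++;

        printf("cpu0=%ld cpu%d=%ld\n", percpu_counter[0], cpu, percpu_counter[cpu]);
        return 0;
}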
@@ -146,7 +146,7 @@ ENDPROC(flush_icache_range)
 ENDPROC(__flush_cache_user_range)
 
 /*
- *     __flush_kern_dcache_page(kaddr)
+ *     __flush_dcache_area(kaddr, size)
  *
  *     Ensure that the data held in the page kaddr is written back to the
  *     page in question.
@@ -38,8 +38,7 @@
  */
        .macro  dcache_line_size, reg, tmp
        mrs     \tmp, ctr_el0                   // read CTR
-       lsr     \tmp, \tmp, #16
-       and     \tmp, \tmp, #0xf                // cache line size encoding
+       ubfm    \tmp, \tmp, #16, #19            // cache line size encoding
        mov     \reg, #4                        // bytes per word
        lsl     \reg, \reg, \tmp                // actual cache line size
        .endm
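For reference, CTR_EL0 bits [19:16] (DminLine) hold log2 of the smallest data cache line length in 4-byte words, so the macro computes 4 << CTR_EL0[19:16]; the single ubfm (unsigned bit-field extract) replaces the lsr/and pair. A small C sketch of the same computation, for illustration only:

/* dcache_line.c - C equivalent of the dcache_line_size macro above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t dcache_line_size_bytes(uint64_t ctr_el0)
{
        uint64_t dminline = (ctr_el0 >> 16) & 0xf;  /* ubfm \tmp, \tmp, #16, #19 */
        return 4u << dminline;                      /* mov #4 ; lsl */
}

int main(void)
{
        /* DminLine = 4 -> 16 words -> 64-byte cache lines (a common value). */
        uint64_t ctr = (uint64_t)4 << 16;
        printf("%llu\n", (unsigned long long)dcache_line_size_bytes(ctr));
        return 0;
}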
@@ -150,7 +150,7 @@ ENDPROC(cpu_do_resume)
 #endif
 
 /*
- *     cpu_switch_mm(pgd_phys, tsk)
+ *     cpu_do_switch_mm(pgd_phys, tsk)
  *
  *     Set the translation table base pointer to be pgd_phys.
  *