Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc: Remove IOMMU_VMERGE config option
  powerpc: Fix swiotlb to respect the boot option
  powerpc: Do not call printk when CONFIG_PRINTK is not defined
  powerpc: Use correct ccr bit for syscall error status
  powerpc/fsl-booke: Get coherent bit from PTE
  powerpc/85xx: Make sure lwarx hint isn't set on ppc32
commit 95c46afe60
8 changed files with 17 additions and 40 deletions
arch/powerpc/Kconfig
@@ -313,19 +313,6 @@ config 8XX_MINIMAL_FPEMU
 
 	  It is recommended that you build a soft-float userspace instead.
 
-config IOMMU_VMERGE
-	bool "Enable IOMMU virtual merging"
-	depends on PPC64
-	default y
-	help
-	  Cause IO segments sent to a device for DMA to be merged virtually
-	  by the IOMMU when they happen to have been allocated contiguously.
-	  This doesn't add pressure to the IOMMU allocator. However, some
-	  drivers don't support getting large merged segments coming back
-	  from *_map_sg().
-
-	  Most drivers don't have this problem; it is safe to say Y here.
-
 config IOMMU_HELPER
 	def_bool PPC64
 
arch/powerpc/include/asm/ppc-opcode.h
@@ -25,7 +25,7 @@
 #define PPC_INST_LDARX		0x7c0000a8
 #define PPC_INST_LSWI		0x7c0004aa
 #define PPC_INST_LSWX		0x7c00042a
-#define PPC_INST_LWARX		0x7c000029
+#define PPC_INST_LWARX		0x7c000028
 #define PPC_INST_LWSYNC		0x7c2004ac
 #define PPC_INST_LXVD2X		0x7c000698
 #define PPC_INST_MCRXR		0x7c000400
@@ -62,8 +62,8 @@
 #define __PPC_T_TLB(t)	(((t) & 0x3) << 21)
 #define __PPC_WC(w)	(((w) & 0x3) << 21)
 /*
- * Only use the larx hint bit on 64bit CPUs. Once we verify it doesn't have
- * any side effects on all 32bit processors, we can do this all the time.
+ * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
+ * larx with EH set as an illegal instruction.
  */
 #ifdef CONFIG_PPC64
 #define __PPC_EH(eh)	(((eh) & 0x1) << 0)
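The hunk above works because the EH exclusive-access hint is encoded in the least-significant bit of the lwarx opcode: 0x7c000029 has the hint hard-wired on, while 0x7c000028 leaves it clear so that __PPC_EH() alone decides whether it gets set, and only on CONFIG_PPC64. A standalone C sketch of that composition; the ppc_lwarx_opcode() helper and the #else fallback for __PPC_EH() are illustrative assumptions, not lines from this diff:

#include <stdint.h>
#include <stdio.h>

#define PPC_INST_LWARX	0x7c000028	/* base lwarx encoding, EH bit clear */

/* Mirrors the conditional hint macro: only 64-bit builds may set the
 * exclusive-access hint, since e500v1/v2 trap on a larx with EH = 1. */
#ifdef CONFIG_PPC64
#define __PPC_EH(eh)	(((eh) & 0x1) << 0)
#else
#define __PPC_EH(eh)	0
#endif

/* Hypothetical helper: build an lwarx encoding with or without the hint. */
static uint32_t ppc_lwarx_opcode(int hint)
{
	return PPC_INST_LWARX | __PPC_EH(hint);
}

int main(void)
{
	/* Prints 0x7c000028 here (32-bit rules); 0x7c000029 under CONFIG_PPC64. */
	printf("lwarx encoding: 0x%08x\n", ppc_lwarx_opcode(1));
	return 0;
}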
arch/powerpc/include/asm/syscall.h
@@ -30,7 +30,7 @@ static inline void syscall_rollback(struct task_struct *task,
 static inline long syscall_get_error(struct task_struct *task,
 				     struct pt_regs *regs)
 {
-	return (regs->ccr & 0x1000) ? -regs->gpr[3] : 0;
+	return (regs->ccr & 0x10000000) ? -regs->gpr[3] : 0;
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,
@@ -44,10 +44,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
 					    int error, long val)
 {
 	if (error) {
-		regs->ccr |= 0x1000L;
+		regs->ccr |= 0x10000000L;
 		regs->gpr[3] = -error;
 	} else {
-		regs->ccr &= ~0x1000L;
+		regs->ccr &= ~0x10000000L;
 		regs->gpr[3] = val;
 	}
 }
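The ccr change matters because the PowerPC syscall convention reports failure through the summary-overflow bit of CR0, which sits at mask 0x10000000 in the 32-bit ccr image; 0x1000 lands in a lower CR field and so never reflected the error. A self-contained sketch of the corrected check, using a cut-down pt_regs stand-in and a local CCR_SO name that are not part of the kernel headers:

#include <stdio.h>

/* Cut-down stand-in for the kernel's pt_regs, for illustration only. */
struct pt_regs {
	long gpr[32];		/* r3 carries the syscall result */
	unsigned long ccr;	/* condition register image */
};

#define CCR_SO	0x10000000UL	/* CR0.SO, set on syscall error */

static long syscall_get_error(struct pt_regs *regs)
{
	return (regs->ccr & CCR_SO) ? -regs->gpr[3] : 0;
}

int main(void)
{
	struct pt_regs regs = { .ccr = CCR_SO };

	regs.gpr[3] = 13;	/* e.g. EACCES left in r3 by the failed syscall */
	printf("error = %ld\n", syscall_get_error(&regs));	/* -13 */
	return 0;
}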
arch/powerpc/kernel/head_fsl_booke.S
@@ -746,9 +746,6 @@ finish_tlb_load:
 	rlwimi	r12, r11, 32-19, 27, 31	/* extract WIMGE from pte */
 #else
 	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
 #endif
-#ifdef CONFIG_SMP
-	ori	r12, r12, MAS2_M
-#endif
 	mtspr	SPRN_MAS2, r12
 
@@ -887,13 +884,17 @@ KernelSPE:
 	lwz	r3,_MSR(r1)
 	oris	r3,r3,MSR_SPE@h
 	stw	r3,_MSR(r1)	/* enable use of SPE after return */
+#ifdef CONFIG_PRINTK
 	lis	r3,87f@h
 	ori	r3,r3,87f@l
 	mr	r4,r2		/* current */
 	lwz	r5,_NIP(r1)
 	bl	printk
+#endif
 	b	ret_from_except
+#ifdef CONFIG_PRINTK
 87:	.string	"SPE used in kernel (task=%p, pc=%x) \n"
+#endif
 	.align	4,0
 
 #endif /* CONFIG_SPE */
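With the unconditional "ori r12, r12, MAS2_M" gone, the coherency attribute has to travel with the WIMGE bits that the rlwimi instructions copy from the PTE into MAS2. A rough C model of what "rlwimi r12, r11, 26, 27, 31" does to those low five bits; the pte and mas2 values below are made up purely to show the bit movement:

#include <stdint.h>
#include <stdio.h>

/* C model of "rlwimi rA, rS, sh, 27, 31": rotate rS left by sh and insert
 * the low five bits of the result into rA, leaving the rest of rA alone. */
static uint32_t rlwimi_27_31(uint32_t ra, uint32_t rs, unsigned int sh)
{
	uint32_t rotated = (rs << sh) | (rs >> (32 - sh));
	uint32_t mask = 0x0000001f;	/* IBM bits 27..31 = the five low bits */

	return (ra & ~mask) | (rotated & mask);
}

int main(void)
{
	uint32_t pte  = 0x00000180;	/* made-up PTE image */
	uint32_t mas2 = 0xc0000000;	/* made-up MAS2 with only EPN bits set */

	/* The 32-bit-PTE case from the hunk uses a shift of 26. */
	printf("MAS2 = 0x%08x\n", rlwimi_27_31(mas2, pte, 26));
	return 0;
}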
arch/powerpc/kernel/iommu.c
@@ -42,12 +42,7 @@
 
 #define DBG(...)
 
-#ifdef CONFIG_IOMMU_VMERGE
-static int novmerge = 0;
-#else
-static int novmerge = 1;
-#endif
-
+static int novmerge;
 static int protect4gb = 1;
 
 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
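Since file-scope objects without an initializer are zero-initialized in C, the replacement "static int novmerge;" starts out as 0, i.e. virtual merging stays enabled by default now that the Kconfig switch is gone. A trivial standalone illustration of that default:

#include <stdio.h>

/* No initializer: guaranteed to start as 0, like the kernel's variable. */
static int novmerge;

int main(void)
{
	printf("virtual merging %s by default\n",
	       novmerge ? "disabled" : "enabled");
	return 0;
}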
arch/powerpc/kernel/setup_32.c
@@ -39,7 +39,6 @@
 #include <asm/serial.h>
 #include <asm/udbg.h>
 #include <asm/mmu_context.h>
-#include <asm/swiotlb.h>
 
 #include "setup.h"
 
@@ -343,11 +342,6 @@ void __init setup_arch(char **cmdline_p)
 	ppc_md.setup_arch();
 	if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
 
-#ifdef CONFIG_SWIOTLB
-	if (ppc_swiotlb_enable)
-		swiotlb_init(1);
-#endif
-
 	paging_init();
 
 	/* Initialize the MMU context management stuff */
arch/powerpc/kernel/setup_64.c
@@ -61,7 +61,6 @@
 #include <asm/xmon.h>
 #include <asm/udbg.h>
 #include <asm/kexec.h>
-#include <asm/swiotlb.h>
 #include <asm/mmu_context.h>
 
 #include "setup.h"
@@ -541,11 +540,6 @@ void __init setup_arch(char **cmdline_p)
 	if (ppc_md.setup_arch)
 		ppc_md.setup_arch();
 
-#ifdef CONFIG_SWIOTLB
-	if (ppc_swiotlb_enable)
-		swiotlb_init(1);
-#endif
-
 	paging_init();
 
 	/* Initialize the MMU context management stuff */
arch/powerpc/mm/mem.c
@@ -48,6 +48,7 @@
 #include <asm/sparsemem.h>
 #include <asm/vdso.h>
 #include <asm/fixmap.h>
+#include <asm/swiotlb.h>
 
 #include "mmu_decl.h"
 
@@ -320,6 +321,11 @@ void __init mem_init(void)
 	struct page *page;
 	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
 
+#ifdef CONFIG_SWIOTLB
+	if (ppc_swiotlb_enable)
+		swiotlb_init(1);
+#endif
+
 	num_physpages = lmb.memory.size >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
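After this series the guarded call appears only in mem_init(), so both the 32- and 64-bit setup paths defer the swiotlb bring-up to one place and honour the ppc_swiotlb_enable flag. A condensed sketch of that gating pattern; the flag, the CONFIG_SWIOTLB define and the swiotlb_init() stub below are placeholders, since the real ones live elsewhere in the tree:

#include <stdio.h>

#define CONFIG_SWIOTLB 1	/* pretend the option is enabled for this sketch */

/* Placeholder for the arch flag; the kernel sets the real one elsewhere. */
static int ppc_swiotlb_enable = 1;

/* Placeholder for the real bounce-buffer bring-up. */
static void swiotlb_init(int verbose)
{
	printf("swiotlb initialised (verbose=%d)\n", verbose);
}

static void mem_init(void)
{
#ifdef CONFIG_SWIOTLB
	if (ppc_swiotlb_enable)
		swiotlb_init(1);
#endif
	/* ... rest of mem_init() ... */
}

int main(void)
{
	mem_init();
	return 0;
}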