A small L3 cache index disable fix from Srivatsa Bhat which unifies the
way the code checks for already disabled indices.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.12 (GNU/Linux)
 
 iQIcBAABAgAGBQJPkEFNAAoJEBLB8Bhh3lVKEigP/1hrNM0oE3IQHejNYkZ2kbj0
 DL4WkALiHCgWJVcr/lk9PvTaY7aIAo8HFgvbUKF4PcBvnFZvu6rckJPIsWgsE1wD
 +OHDaO27vyYKMaA9ImvqYneB8hcl528EDlZ7ssfTepQXPyx99SoTYc9ToT1+znVp
 qQUC+d67MTLl/eHL+i6rbQfYdVaXGaVTuAIpzQn3oTqUDLrmXKe+60oTgnC0zgSW
 kQ63Vo8MHi9CpzCe0JhZwvK3d8sPzrBktNisaEbdHe+0Gk5zvcEiPzE2nIyvLXjz
 cf0QoQFQHzniCdFQoYjzLafT3aItBsN1v29gOrydPT6LxMZ8wO5k+8Suu1NcyI9R
 RLJR61wKwSzi4JQ/1+LAqoDQHrldATKPCM74BLYiNTi8OGeqda+10COJQmLFITzM
 9mn9fg/ZPg8V+z+0e2zObYcFVRtUCIs+XRWIzjhuPICdRRnwoNT+b9HyrLZ5JY1X
 BH3nHon0W4Bm5jEBhOb02XLdzBMtF3vRM4mNYCzpoS/tDFRszyOuV63FXkPurVbe
 hT9ZKhDZ63YV8ycwx2jqZgZAFuySi719kG3aUMDNMJYbUEi6D0QRKH9ngE6JAvwH
 rw1hX65sNX9jbBOAvcDTq2780SpV3dpO1TywLip9uSWIk6DYyEVHbr8AdWwnLlfb
 fdq9y8V/3+NC9aTNf/e3
 =VwUL
 -----END PGP SIGNATURE-----

Merge tag 'l3-fix-for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp into x86/urgent

A small L3 cache index disable fix from Srivatsa Bhat which unifies the
way the code checks for already disabled indices.

( Pulling it into v3.4 despite the v3.5 tag - the fix is small and we better
  keep the same code across kernel versions for such user facing interfaces. )

Signed-off-by: Ingo Molnar <mingo@kernel.org>

commit cd32b1616b
Author: Ingo Molnar
Date:   2012-04-25 12:24:16 +02:00

56 changed files with 227 additions and 311 deletions


@ -43,7 +43,9 @@ ALC680
ALC882/883/885/888/889
======================
N/A
acer-aspire-4930g Acer Aspire 4930G/5930G/6530G/6930G/7730G
acer-aspire-8930g Acer Aspire 8330G/6935G
acer-aspire Acer Aspire others
ALC861/660
==========


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*


@ -77,6 +77,8 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
} else if (atag->hdr.tag == ATAG_MEM) {
if (memcount >= sizeof(mem_reg_property)/4)
continue;
if (!atag->u.mem.size)
continue;
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
} else if (atag->hdr.tag == ATAG_INITRD2) {


@ -273,7 +273,7 @@ restart: adr r0, LC0
add r0, r0, #0x100
mov r1, r6
sub r2, sp, r6
blne atags_to_fdt
bleq atags_to_fdt
ldmfd sp!, {r0-r3, ip, lr}
sub sp, sp, #0x10000


@ -55,7 +55,6 @@
#interrupt-cells = <2>;
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
interrupt-parent;
reg = <0xfffff000 0x200>;
};


@ -56,7 +56,6 @@
#interrupt-cells = <2>;
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
interrupt-parent;
reg = <0xfffff000 0x200>;
};


@ -54,7 +54,6 @@
#interrupt-cells = <2>;
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
interrupt-parent;
reg = <0xfffff000 0x200>;
};


@ -24,7 +24,6 @@
#interrupt-cells = <3>;
#address-cells = <1>;
interrupt-controller;
interrupt-parent;
reg = <0xa0411000 0x1000>,
<0xa0410100 0x100>;
};


@ -89,7 +89,6 @@
#size-cells = <0>;
#address-cells = <1>;
interrupt-controller;
interrupt-parent;
reg = <0xfff11000 0x1000>,
<0xfff10100 0x100>;
};


@ -427,19 +427,18 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent)
/*
* Handle each interrupt in a single VIC. Returns non-zero if we've
* handled at least one interrupt. This does a single read of the
* status register and handles all interrupts in order from LSB first.
* handled at least one interrupt. This reads the status register
* before handling each interrupt, which is necessary given that
* handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
*/
static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
{
u32 stat, irq;
int handled = 0;
stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
while (stat) {
while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
irq = ffs(stat) - 1;
handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
stat &= ~(1 << irq);
handled = 1;
}
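
A minimal stand-alone sketch of the re-read pattern above, for illustration only: read_status() and dispatch() are hypothetical stand-ins for readl_relaxed() on VIC_IRQ_STATUS and for handle_IRQ(), and the simulated "late" bit plays the role of an interrupt raised while an earlier one is still being handled.

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_status = 0x00000005;        /* IRQs 0 and 2 pending */
static int dispatched;

static uint32_t read_status(void)
{
	return hw_status;
}

static void dispatch(unsigned int irq)
{
	printf("handling IRQ %u\n", irq);
	hw_status &= ~(1u << irq);             /* the handler acks its source */
	if (dispatched++ == 0)
		hw_status |= 1u << 7;          /* a new IRQ arrives mid-handling */
}

static int handle_pending(void)
{
	uint32_t stat;
	int handled = 0;

	/* Re-read the status on every pass so interrupts raised while a
	 * previous one was being handled are not missed. */
	while ((stat = read_status()) != 0) {
		unsigned int irq = (unsigned int)__builtin_ctz(stat);  /* ffs(stat) - 1 */
		dispatch(irq);
		handled = 1;
	}
	return handled;
}

int main(void)
{
	printf("handled=%d\n", handle_pending());  /* handles IRQs 0, 2, then the late 7 */
	return 0;
}

With the old read-once variant, the late IRQ would only be noticed on a later invocation of the handler.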


@ -14,7 +14,7 @@
#define JUMP_LABEL_NOP "nop"
#endif
static __always_inline bool arch_static_branch(struct jump_label_key *key)
static __always_inline bool arch_static_branch(struct static_key *key)
{
asm goto("1:\n\t"
JUMP_LABEL_NOP "\n\t"


@ -523,7 +523,21 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size)
*/
size -= start & ~PAGE_MASK;
bank->start = PAGE_ALIGN(start);
bank->size = size & PAGE_MASK;
#ifndef CONFIG_LPAE
if (bank->start + size < bank->start) {
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
"32-bit physical address space\n", (long long)start);
/*
* To ensure bank->start + bank->size is representable in
* 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
* This means we lose a page after masking.
*/
size = ULONG_MAX - bank->start;
}
#endif
bank->size = size & PAGE_MASK;
/*
* Check whether this memory region has non-zero size or
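
The #ifndef CONFIG_LPAE block added above guards against a memory bank whose start plus size wraps past 4 GiB on a 32-bit build. A self-contained sketch of that wrap check, using uint32_t and made-up bank values in place of the kernel's types:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t start = 0xf0000000u;   /* bank start */
	uint32_t size  = 0x20000000u;   /* 512 MiB: the end would cross 4 GiB */

	if (start + size < start) {     /* unsigned wrap-around == overflow */
		printf("Truncating memory at %#" PRIx32 " to fit in 32-bit address space\n",
		       start);
		/* Clamp to the type's maximum rather than to 4 GiB; after page
		 * masking this costs the final page, as the hunk's comment notes. */
		size = UINT32_MAX - start;
	}
	printf("usable size: %#" PRIx32 " bytes (%" PRIu32 " MiB)\n", size, size >> 20);
	return 0;
}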


@ -118,10 +118,14 @@ static int twd_cpufreq_transition(struct notifier_block *nb,
* The twd clock events must be reprogrammed to account for the new
* frequency. The timer is local to a cpu, so cross-call to the
* changing cpu.
*
* Only wait for it to finish, if the cpu is active to avoid
* deadlock when cpu1 is spinning on while(!cpu_active(cpu1)) during
* booting of that cpu.
*/
if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
smp_call_function_single(freqs->cpu, twd_update_frequency,
NULL, 1);
NULL, cpu_active(freqs->cpu));
return NOTIFY_OK;
}


@ -368,6 +368,7 @@ comment "Flattened Device Tree based board for EXYNOS SoCs"
config MACH_EXYNOS4_DT
bool "Samsung Exynos4 Machine using device tree"
depends on ARCH_EXYNOS4
select CPU_EXYNOS4210
select USE_OF
select ARM_AMBA
@ -380,6 +381,7 @@ config MACH_EXYNOS4_DT
config MACH_EXYNOS5_DT
bool "SAMSUNG EXYNOS5 Machine using device tree"
depends on ARCH_EXYNOS5
select SOC_EXYNOS5250
select USE_OF
select ARM_AMBA


@ -212,6 +212,8 @@
#define IRQ_MFC EXYNOS4_IRQ_MFC
#define IRQ_SDO EXYNOS4_IRQ_SDO
#define IRQ_I2S0 EXYNOS4_IRQ_I2S0
#define IRQ_ADC EXYNOS4_IRQ_ADC0
#define IRQ_TC EXYNOS4_IRQ_PEN0


@ -89,6 +89,10 @@
#define EXYNOS4_PA_MDMA1 0x12840000
#define EXYNOS4_PA_PDMA0 0x12680000
#define EXYNOS4_PA_PDMA1 0x12690000
#define EXYNOS5_PA_MDMA0 0x10800000
#define EXYNOS5_PA_MDMA1 0x11C10000
#define EXYNOS5_PA_PDMA0 0x121A0000
#define EXYNOS5_PA_PDMA1 0x121B0000
#define EXYNOS4_PA_SYSMMU_MDMA 0x10A40000
#define EXYNOS4_PA_SYSMMU_SSS 0x10A50000


@ -255,9 +255,15 @@
/* For EXYNOS5250 */
#define EXYNOS5_APLL_LOCK EXYNOS_CLKREG(0x00000)
#define EXYNOS5_APLL_CON0 EXYNOS_CLKREG(0x00100)
#define EXYNOS5_CLKSRC_CPU EXYNOS_CLKREG(0x00200)
#define EXYNOS5_CLKMUX_STATCPU EXYNOS_CLKREG(0x00400)
#define EXYNOS5_CLKDIV_CPU0 EXYNOS_CLKREG(0x00500)
#define EXYNOS5_CLKDIV_CPU1 EXYNOS_CLKREG(0x00504)
#define EXYNOS5_CLKDIV_STATCPU0 EXYNOS_CLKREG(0x00600)
#define EXYNOS5_CLKDIV_STATCPU1 EXYNOS_CLKREG(0x00604)
#define EXYNOS5_MPLL_CON0 EXYNOS_CLKREG(0x04100)
#define EXYNOS5_CLKSRC_CORE1 EXYNOS_CLKREG(0x04204)


@ -45,7 +45,7 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
"exynos4210-uart.3", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.2", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL),
{},
};


@ -307,49 +307,7 @@ static struct i2c_board_info i2c1_devs[] __initdata = {
};
/* TSP */
static u8 mxt_init_vals[] = {
/* MXT_GEN_COMMAND(6) */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* MXT_GEN_POWER(7) */
0x20, 0xff, 0x32,
/* MXT_GEN_ACQUIRE(8) */
0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23,
/* MXT_TOUCH_MULTI(9) */
0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00,
0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00,
/* MXT_TOUCH_KEYARRAY(15) */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
0x00,
/* MXT_SPT_GPIOPWM(19) */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* MXT_PROCI_GRIPFACE(20) */
0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04,
0x0f, 0x0a,
/* MXT_PROCG_NOISE(22) */
0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00,
0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03,
/* MXT_TOUCH_PROXIMITY(23) */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00,
/* MXT_PROCI_ONETOUCH(24) */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* MXT_SPT_SELFTEST(25) */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
/* MXT_PROCI_TWOTOUCH(27) */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* MXT_SPT_CTECONFIG(28) */
0x00, 0x00, 0x02, 0x08, 0x10, 0x00,
};
static struct mxt_platform_data mxt_platform_data = {
.config = mxt_init_vals,
.config_length = ARRAY_SIZE(mxt_init_vals),
.x_line = 18,
.y_line = 11,
.x_size = 1024,
@ -571,7 +529,7 @@ static struct regulator_init_data __initdata max8997_ldo7_data = {
static struct regulator_init_data __initdata max8997_ldo8_data = {
.constraints = {
.name = "VUSB/VDAC_3.3V_C210",
.name = "VUSB+VDAC_3.3V_C210",
.min_uV = 3300000,
.max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
@ -1347,6 +1305,7 @@ static struct platform_device *nuri_devices[] __initdata = {
static void __init nuri_map_io(void)
{
clk_xusbxti.rate = 24000000;
exynos_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(nuri_uartcfgs, ARRAY_SIZE(nuri_uartcfgs));
@ -1379,7 +1338,6 @@ static void __init nuri_machine_init(void)
nuri_camera_init();
nuri_ehci_init();
clk_xusbxti.rate = 24000000;
/* Last */
platform_add_devices(nuri_devices, ARRAY_SIZE(nuri_devices));


@ -29,6 +29,7 @@
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/iic.h>
@ -1057,6 +1058,7 @@ static struct platform_device *universal_devices[] __initdata = {
static void __init universal_map_io(void)
{
clk_xusbxti.rate = 24000000;
exynos_init_io(NULL, 0);
s3c24xx_init_clocks(24000000);
s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));


@ -86,9 +86,6 @@ static void __init halibut_init(void)
static void __init halibut_fixup(struct tag *tags, char **cmdline,
struct meminfo *mi)
{
mi->nr_banks=1;
mi->bank[0].start = PHYS_OFFSET;
mi->bank[0].size = (101*1024*1024);
}
static void __init halibut_map_io(void)


@ -12,6 +12,7 @@
#include <asm/io.h>
#include <asm/mach-types.h>
#include <asm/system_info.h>
#include <mach/msm_fb.h>
#include <mach/vreg.h>


@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/clkdev.h>
#include <asm/system_info.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>


@ -121,7 +121,7 @@ int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2)
* and unknown state. This function should be called early to
* wait on the ARM9.
*/
void __init proc_comm_boot_wait(void)
void __devinit proc_comm_boot_wait(void)
{
void __iomem *base = MSM_SHARED_RAM_BASE;


@ -165,83 +165,3 @@ int omap2_select_table_rate(struct clk *clk, unsigned long rate)
return 0;
}
#ifdef CONFIG_CPU_FREQ
/*
* Walk PRCM rate table and fillout cpufreq freq_table
* XXX This should be replaced by an OPP layer in the near future
*/
static struct cpufreq_frequency_table *freq_table;
void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
{
const struct prcm_config *prcm;
int i = 0;
int tbl_sz = 0;
if (!cpu_is_omap24xx())
return;
for (prcm = rate_table; prcm->mpu_speed; prcm++) {
if (!(prcm->flags & cpu_mask))
continue;
if (prcm->xtal_speed != sclk->rate)
continue;
/* don't put bypass rates in table */
if (prcm->dpll_speed == prcm->xtal_speed)
continue;
tbl_sz++;
}
/*
* XXX Ensure that we're doing what CPUFreq expects for this error
* case and the following one
*/
if (tbl_sz == 0) {
pr_warning("%s: no matching entries in rate_table\n",
__func__);
return;
}
/* Include the CPUFREQ_TABLE_END terminator entry */
tbl_sz++;
freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * tbl_sz,
GFP_ATOMIC);
if (!freq_table) {
pr_err("%s: could not kzalloc frequency table\n", __func__);
return;
}
for (prcm = rate_table; prcm->mpu_speed; prcm++) {
if (!(prcm->flags & cpu_mask))
continue;
if (prcm->xtal_speed != sclk->rate)
continue;
/* don't put bypass rates in table */
if (prcm->dpll_speed == prcm->xtal_speed)
continue;
freq_table[i].index = i;
freq_table[i].frequency = prcm->mpu_speed / 1000;
i++;
}
freq_table[i].index = i;
freq_table[i].frequency = CPUFREQ_TABLE_END;
*table = &freq_table[0];
}
void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
{
if (!cpu_is_omap24xx())
return;
kfree(freq_table);
}
#endif


@ -536,10 +536,5 @@ struct clk_functions omap2_clk_functions = {
.clk_set_rate = omap2_clk_set_rate,
.clk_set_parent = omap2_clk_set_parent,
.clk_disable_unused = omap2_clk_disable_unused,
#ifdef CONFIG_CPU_FREQ
/* These will be removed when the OPP code is integrated */
.clk_init_cpufreq_table = omap2_clk_init_cpufreq_table,
.clk_exit_cpufreq_table = omap2_clk_exit_cpufreq_table,
#endif
};


@ -146,14 +146,6 @@ extern const struct clksel_rate gpt_sys_rates[];
extern const struct clksel_rate gfx_l3_rates[];
extern const struct clksel_rate dsp_ick_rates[];
#if defined(CONFIG_ARCH_OMAP2) && defined(CONFIG_CPU_FREQ)
extern void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
extern void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
#else
#define omap2_clk_init_cpufreq_table 0
#define omap2_clk_exit_cpufreq_table 0
#endif
extern const struct clkops clkops_omap2_iclk_dflt_wait;
extern const struct clkops clkops_omap2_iclk_dflt;
extern const struct clkops clkops_omap2_iclk_idle_only;


@ -33,8 +33,6 @@
#include <mach/irqs.h>
#include <mach/dma.h>
static u64 dma_dmamask = DMA_BIT_MASK(32);
static u8 pdma0_peri[] = {
DMACH_UART0_RX,
DMACH_UART0_TX,


@ -484,8 +484,8 @@ static struct wm8994_pdata wm8994_platform_data = {
.gpio_defaults[8] = 0x0100,
.gpio_defaults[9] = 0x0100,
.gpio_defaults[10] = 0x0100,
.ldo[0] = { S5PV210_MP03(6), NULL, &wm8994_ldo1_data }, /* XM0FRNB_2 */
.ldo[1] = { 0, NULL, &wm8994_ldo2_data },
.ldo[0] = { S5PV210_MP03(6), &wm8994_ldo1_data }, /* XM0FRNB_2 */
.ldo[1] = { 0, &wm8994_ldo2_data },
};
/* GPIO I2C PMIC */


@ -674,8 +674,8 @@ static struct wm8994_pdata wm8994_platform_data = {
.gpio_defaults[8] = 0x0100,
.gpio_defaults[9] = 0x0100,
.gpio_defaults[10] = 0x0100,
.ldo[0] = { S5PV210_MP03(6), NULL, &wm8994_ldo1_data }, /* XM0FRNB_2 */
.ldo[1] = { 0, NULL, &wm8994_ldo2_data },
.ldo[0] = { S5PV210_MP03(6), &wm8994_ldo1_data }, /* XM0FRNB_2 */
.ldo[1] = { 0, &wm8994_ldo2_data },
};
/* GPIO I2C PMIC */


@ -723,7 +723,7 @@ config CPU_HIGH_VECTOR
bool "Select the High exception vector"
help
Say Y here to select high exception vector(0xFFFF0000~).
The exception vector can be vary depending on the platform
The exception vector can vary depending on the platform
design in nommu mode. If your platform needs to select
high exception vector, say Y.
Otherwise or if you are unsure, say N, and the low exception


@ -320,7 +320,7 @@ retry:
*/
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,


@ -13,6 +13,7 @@
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include "mm.h"
@ -39,6 +40,7 @@ void __init sanity_check_meminfo(void)
*/
void __init paging_init(struct machine_desc *mdesc)
{
early_trap_init((void *)CONFIG_VECTORS_BASE);
bootmem_init();
}


@ -254,6 +254,18 @@ __v7_setup:
ldr r6, =NMRR @ NMRR
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
#ifndef CONFIG_ARM_THUMBEE
mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
and r0, r0, #(0xf << 12) @ ThumbEE enabled field
teq r0, #(1 << 12) @ check if ThumbEE is present
bne 1f
mov r5, #0
mcr p14, 6, r5, c1, c0, 0 @ Initialize TEEHBR to 0
mrc p14, 6, r0, c0, c0, 0 @ load TEECR
orr r0, r0, #1 @ set the 1st bit in order to
mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access
1:
#endif
adr r5, v7_crval
ldmia r5, {r5, r6}


@ -398,32 +398,6 @@ struct clk dummy_ck = {
.ops = &clkops_null,
};
#ifdef CONFIG_CPU_FREQ
void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
{
unsigned long flags;
if (!arch_clock || !arch_clock->clk_init_cpufreq_table)
return;
spin_lock_irqsave(&clockfw_lock, flags);
arch_clock->clk_init_cpufreq_table(table);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
{
unsigned long flags;
if (!arch_clock || !arch_clock->clk_exit_cpufreq_table)
return;
spin_lock_irqsave(&clockfw_lock, flags);
arch_clock->clk_exit_cpufreq_table(table);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
#endif
/*
*
*/


@ -272,8 +272,6 @@ struct clk {
#endif
};
struct cpufreq_frequency_table;
struct clk_functions {
int (*clk_enable)(struct clk *clk);
void (*clk_disable)(struct clk *clk);
@ -283,10 +281,6 @@ struct clk_functions {
void (*clk_allow_idle)(struct clk *clk);
void (*clk_deny_idle)(struct clk *clk);
void (*clk_disable_unused)(struct clk *clk);
#ifdef CONFIG_CPU_FREQ
void (*clk_init_cpufreq_table)(struct cpufreq_frequency_table **);
void (*clk_exit_cpufreq_table)(struct cpufreq_frequency_table **);
#endif
};
extern int mpurate;
@ -301,10 +295,6 @@ extern void recalculate_root_clocks(void);
extern unsigned long followparent_recalc(struct clk *clk);
extern void clk_enable_init_clocks(void);
unsigned long omap_fixed_divisor_recalc(struct clk *clk);
#ifdef CONFIG_CPU_FREQ
extern void clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
extern void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
#endif
extern struct clk *omap_clk_get_by_name(const char *name);
extern int omap_clk_enable_autoidle_all(void);
extern int omap_clk_disable_autoidle_all(void);


@ -302,6 +302,7 @@ comment "Power management"
config SAMSUNG_PM_DEBUG
bool "S3C2410 PM Suspend debug"
depends on PM
select DEBUG_LL
help
Say Y here if you want verbose debugging from the PM Suspend and
Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>


@ -433,14 +433,14 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
/* check if @slot is already used or the index is already disabled */
ret = amd_get_l3_disable_slot(nb, slot);
if (ret >= 0)
return -EINVAL;
return -EEXIST;
if (index > nb->l3_cache.indices)
return -EINVAL;
/* check whether the other slot has disabled the same index already */
if (index == amd_get_l3_disable_slot(nb, !slot))
return -EINVAL;
return -EEXIST;
amd_l3_disable_index(nb, cpu, slot, index);
@ -468,8 +468,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
if (err) {
if (err == -EEXIST)
printk(KERN_WARNING "L3 disable slot %d in use!\n",
slot);
pr_warning("L3 slot %d in use/index already disabled!\n",
slot);
return err;
}
return count;
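
The hunk above is the fix described in the merge message: both "slot already in use" and "index already disabled by the other slot" now return -EEXIST, and the warning text covers both cases. A minimal user-space sketch of that unified check, with hypothetical names and a fixed two-slot table standing in for the northbridge state:

#include <errno.h>
#include <stdio.h>

static int disabled_index[2] = { -1, -1 };        /* -1 == slot unused */

static int get_disable_slot(unsigned int slot)
{
	return disabled_index[slot];              /* >= 0 if already in use */
}

static int set_disable_slot(unsigned int slot, int index, int max_index)
{
	if (get_disable_slot(slot) >= 0)
		return -EEXIST;                   /* this slot is already taken */
	if (index > max_index)
		return -EINVAL;                   /* index out of range */
	if (index == get_disable_slot(!slot))
		return -EEXIST;                   /* other slot already disabled it */
	disabled_index[slot] = index;
	return 0;
}

int main(void)
{
	printf("%d\n", set_disable_slot(0, 3, 15));   /* 0: ok */
	printf("%d\n", set_disable_slot(1, 3, 15));   /* -EEXIST: index 3 already disabled */
	printf("%d\n", set_disable_slot(1, 5, 15));   /* 0: ok */
	return 0;
}

A caller, like the sysfs store handler in the hunk, can then tell bad input (-EINVAL) apart from an already-disabled index (-EEXIST) when deciding what to report.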


@ -57,7 +57,7 @@ static inline unsigned long count_bytes(unsigned long mask)
* hit it), 'max' is the address space maximum (and we return
* -EFAULT if we hit it).
*/
static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, long max)
static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
{
long res = 0;
@ -100,7 +100,7 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
* too? If so, that's ok - we got as much as the user asked for.
*/
if (res >= count)
return count;
return res;
/*
* Nope: we hit the address space limit, and we still had more


@ -247,8 +247,7 @@ static int amba_pm_restore(struct device *dev)
/*
* Hooks to provide runtime PM of the pclk (bus clock). It is safe to
* enable/disable the bus clock at runtime PM suspend/resume as this
* does not result in loss of context. However, disabling vcore power
* would do, so we leave that to the driver.
* does not result in loss of context.
*/
static int amba_pm_runtime_suspend(struct device *dev)
{
@ -354,39 +353,6 @@ static void amba_put_disable_pclk(struct amba_device *pcdev)
clk_put(pclk);
}
static int amba_get_enable_vcore(struct amba_device *pcdev)
{
struct regulator *vcore = regulator_get(&pcdev->dev, "vcore");
int ret;
pcdev->vcore = vcore;
if (IS_ERR(vcore)) {
/* It is OK not to supply a vcore regulator */
if (PTR_ERR(vcore) == -ENODEV)
return 0;
return PTR_ERR(vcore);
}
ret = regulator_enable(vcore);
if (ret) {
regulator_put(vcore);
pcdev->vcore = ERR_PTR(-ENODEV);
}
return ret;
}
static void amba_put_disable_vcore(struct amba_device *pcdev)
{
struct regulator *vcore = pcdev->vcore;
if (!IS_ERR(vcore)) {
regulator_disable(vcore);
regulator_put(vcore);
}
}
/*
* These are the device model conversion veneers; they convert the
* device model structures to our more specific structures.
@ -399,10 +365,6 @@ static int amba_probe(struct device *dev)
int ret;
do {
ret = amba_get_enable_vcore(pcdev);
if (ret)
break;
ret = amba_get_enable_pclk(pcdev);
if (ret)
break;
@ -420,7 +382,6 @@ static int amba_probe(struct device *dev)
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
amba_put_disable_vcore(pcdev);
} while (0);
return ret;
@ -442,7 +403,6 @@ static int amba_remove(struct device *dev)
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
amba_put_disable_vcore(pcdev);
return ret;
}


@ -2788,6 +2788,7 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
.constraints = {
.name = "db8500-vape",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
.always_on = true,
},
.consumer_supplies = db8500_vape_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),


@ -967,16 +967,47 @@ pci_save_state(struct pci_dev *dev)
return 0;
}
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
u32 saved_val, int retry)
{
u32 val;
pci_read_config_dword(pdev, offset, &val);
if (val == saved_val)
return;
for (;;) {
dev_dbg(&pdev->dev, "restoring config space at offset "
"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
pci_write_config_dword(pdev, offset, saved_val);
if (retry-- <= 0)
return;
pci_read_config_dword(pdev, offset, &val);
if (val == saved_val)
return;
mdelay(1);
}
}
static void pci_restore_config_space(struct pci_dev *pdev, int start, int end,
int retry)
{
int index;
for (index = end; index >= start; index--)
pci_restore_config_dword(pdev, 4 * index,
pdev->saved_config_space[index],
retry);
}
/**
* pci_restore_state - Restore the saved state of a PCI device
* @dev: - PCI device that we're dealing with
*/
void pci_restore_state(struct pci_dev *dev)
{
int i;
u32 val;
int tries;
if (!dev->state_saved)
return;
@ -984,24 +1015,14 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_pcie_state(dev);
pci_restore_ats_state(dev);
pci_restore_config_space(dev, 10, 15, 0);
/*
* The Base Address register should be programmed before the command
* register(s)
*/
for (i = 15; i >= 0; i--) {
pci_read_config_dword(dev, i * 4, &val);
tries = 10;
while (tries && val != dev->saved_config_space[i]) {
dev_dbg(&dev->dev, "restoring config "
"space at offset %#x (was %#x, writing %#x)\n",
i, val, (int)dev->saved_config_space[i]);
pci_write_config_dword(dev,i * 4,
dev->saved_config_space[i]);
pci_read_config_dword(dev, i * 4, &val);
mdelay(10);
tries--;
}
}
pci_restore_config_space(dev, 4, 9, 10);
pci_restore_config_space(dev, 0, 3, 0);
pci_restore_pcix_state(dev);
pci_restore_msi_state(dev);
pci_restore_iov_state(dev);
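
The rewrite above factors the write/read-back/bounded-retry logic into pci_restore_config_dword() and restores the config space in descending ranges, so the BARs (dwords 4-9, the only range given a retry budget) are programmed before the command register in dwords 0-3. A self-contained sketch of the bounded-retry idea, with a simulated config dword and hypothetical names standing in for the config accessors:

#include <stdio.h>

static unsigned int fake_dword;        /* one simulated config dword */
static int writes_ignored = 2;         /* the device "misses" the first writes */

static unsigned int cfg_read(void)
{
	return fake_dword;
}

static void cfg_write(unsigned int val)
{
	if (writes_ignored-- <= 0)     /* simulate a slow-to-settle device */
		fake_dword = val;
}

static void restore_dword(unsigned int saved, int retry)
{
	if (cfg_read() == saved)
		return;                /* already correct, nothing to do */
	for (;;) {
		printf("restoring (was %#x, writing %#x)\n", cfg_read(), saved);
		cfg_write(saved);
		if (retry-- <= 0)      /* give up once the retry budget is spent */
			return;
		if (cfg_read() == saved)
			return;
	}
}

int main(void)
{
	restore_dword(0x8086beefu, 10);
	printf("final value: %#x\n", cfg_read());
	return 0;
}

With a retry budget of 0, as used for the other ranges in the hunk, the helper writes once and does not re-check.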


@ -835,7 +835,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
scsi_eh_restore_cmnd(scmd, &ses);
if (sdrv->eh_action)
if (sdrv && sdrv->eh_action)
rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
return rtn;


@ -2195,7 +2195,6 @@ static int pl022_runtime_suspend(struct device *dev)
struct pl022 *pl022 = dev_get_drvdata(dev);
clk_disable(pl022->clk);
amba_vcore_disable(pl022->adev);
return 0;
}
@ -2204,7 +2203,6 @@ static int pl022_runtime_resume(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
amba_vcore_enable(pl022->adev);
clk_enable(pl022->clk);
return 0;


@ -420,7 +420,7 @@ static void mddi_resume(struct msm_mddi_client_data *cdata)
mddi_set_auto_hibernate(&mddi->client_data, 1);
}
static int __init mddi_get_client_caps(struct mddi_info *mddi)
static int __devinit mddi_get_client_caps(struct mddi_info *mddi)
{
int i, j;
@ -622,9 +622,9 @@ uint32_t mddi_remote_read(struct msm_mddi_client_data *cdata, uint32_t reg)
static struct mddi_info mddi_info[2];
static int __init mddi_clk_setup(struct platform_device *pdev,
struct mddi_info *mddi,
unsigned long clk_rate)
static int __devinit mddi_clk_setup(struct platform_device *pdev,
struct mddi_info *mddi,
unsigned long clk_rate)
{
int ret;


@ -30,7 +30,6 @@ struct amba_device {
struct device dev;
struct resource res;
struct clk *pclk;
struct regulator *vcore;
u64 dma_mask;
unsigned int periphid;
unsigned int irq[AMBA_NR_IRQS];
@ -75,12 +74,6 @@ void amba_release_regions(struct amba_device *);
#define amba_pclk_disable(d) \
do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
#define amba_vcore_enable(d) \
(IS_ERR((d)->vcore) ? 0 : regulator_enable((d)->vcore))
#define amba_vcore_disable(d) \
do { if (!IS_ERR((d)->vcore)) regulator_disable((d)->vcore); } while (0)
/* Some drivers don't use the struct amba_device */
#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)


@ -3,15 +3,11 @@
#include <linux/compiler.h>
#undef NULL
#if defined(__cplusplus)
#define NULL 0
#else
#define NULL ((void *)0)
#endif
#ifdef __KERNEL__
#undef NULL
#define NULL ((void *)0)
enum {
false = 0,
true = 1


@ -134,6 +134,9 @@ struct scsi_cmnd {
static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
{
if (!cmd->request->rq_disk)
return NULL;
return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
}


@ -3398,8 +3398,10 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
for (;;) {
badness = fill_and_eval_dacs(codec, fill_hardwired,
fill_mio_first);
if (badness < 0)
if (badness < 0) {
kfree(best_cfg);
return badness;
}
debug_badness("==> lo_type=%d, wired=%d, mio=%d, badness=0x%x\n",
cfg->line_out_type, fill_hardwired, fill_mio_first,
badness);
@ -3434,7 +3436,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
fill_hardwired = true;
continue;
}
}
if (cfg->hp_outs > 0 &&
cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
cfg->speaker_outs = cfg->line_outs;
@ -3448,7 +3450,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
cfg->line_out_type = AUTO_PIN_HP_OUT;
fill_hardwired = true;
continue;
}
}
break;
}
@ -4423,7 +4425,7 @@ static int alc_parse_auto_config(struct hda_codec *codec,
static int alc880_parse_auto_config(struct hda_codec *codec)
{
static const hda_nid_t alc880_ignore[] = { 0x1d, 0 };
static const hda_nid_t alc880_ssids[] = { 0x15, 0x1b, 0x14, 0 };
static const hda_nid_t alc880_ssids[] = { 0x15, 0x1b, 0x14, 0 };
return alc_parse_auto_config(codec, alc880_ignore, alc880_ssids);
}
@ -5269,7 +5271,9 @@ static const struct alc_fixup alc882_fixups[] = {
{ 0x16, 0x99130111 }, /* CLFE speaker */
{ 0x17, 0x99130112 }, /* surround speaker */
{ }
}
},
.chained = true,
.chain_id = ALC882_FIXUP_GPIO1,
},
[ALC882_FIXUP_ACER_ASPIRE_8930G] = {
.type = ALC_FIXUP_PINS,
@ -5312,7 +5316,9 @@ static const struct alc_fixup alc882_fixups[] = {
{ 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
{ 0x20, AC_VERB_SET_PROC_COEF, 0x3050 },
{ }
}
},
.chained = true,
.chain_id = ALC882_FIXUP_GPIO1,
},
[ALC885_FIXUP_MACPRO_GPIO] = {
.type = ALC_FIXUP_FUNC,
@ -5359,6 +5365,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
ALC882_FIXUP_ACER_ASPIRE_4930G),
SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
SND_PCI_QUIRK(0x1025, 0x026b, "Acer Aspire 8940G", ALC882_FIXUP_ACER_ASPIRE_8930G),
SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736),
SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
@ -5384,6 +5391,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
@ -5399,6 +5407,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
{}
};
static const struct alc_model_fixup alc882_fixup_models[] = {
{.id = ALC882_FIXUP_ACER_ASPIRE_4930G, .name = "acer-aspire-4930g"},
{.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"},
{.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"},
{}
};
/*
* BIOS auto configuration
*/
@ -5439,7 +5454,8 @@ static int patch_alc882(struct hda_codec *codec)
if (err < 0)
goto error;
alc_pick_fixup(codec, NULL, alc882_fixup_tbl, alc882_fixups);
alc_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl,
alc882_fixups);
alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
alc_auto_parse_customize_define(codec);
@ -6079,7 +6095,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
* Basically the device should work as is without the fixup table.
* If BIOS doesn't give a proper info, enable the corresponding
* fixup entry.
*/
*/
SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A",
ALC269_FIXUP_AMIC),
SND_PCI_QUIRK(0x1043, 0x1013, "ASUS N61Da", ALC269_FIXUP_AMIC),
@ -6296,7 +6312,7 @@ static void alc_fixup_no_jack_detect(struct hda_codec *codec,
{
if (action == ALC_FIXUP_ACT_PRE_PROBE)
codec->no_jack_detect = 1;
}
}
static const struct alc_fixup alc861_fixups[] = {
[ALC861_FIXUP_FSC_AMILO_PI1505] = {
@ -6714,7 +6730,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
* Basically the device should work as is without the fixup table.
* If BIOS doesn't give a proper info, enable the corresponding
* fixup entry.
*/
*/
SND_PCI_QUIRK(0x1043, 0x1000, "ASUS N50Vm", ALC662_FIXUP_ASUS_MODE1),
SND_PCI_QUIRK(0x1043, 0x1092, "ASUS NB", ALC662_FIXUP_ASUS_MODE3),
SND_PCI_QUIRK(0x1043, 0x1173, "ASUS K73Jn", ALC662_FIXUP_ASUS_MODE1),


@ -42,6 +42,7 @@
#include "util/debug.h"
#include <assert.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
@ -59,6 +60,7 @@
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/mman.h>
#include <linux/unistd.h>
@ -162,12 +164,40 @@ static void __zero_source_counters(struct hist_entry *he)
symbol__annotate_zero_histograms(sym);
}
static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
struct utsname uts;
int err = uname(&uts);
ui__warning("Out of bounds address found:\n\n"
"Addr: %" PRIx64 "\n"
"DSO: %s %c\n"
"Map: %" PRIx64 "-%" PRIx64 "\n"
"Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
"Arch: %s\n"
"Kernel: %s\n"
"Tools: %s\n\n"
"Not all samples will be on the annotation output.\n\n"
"Please report to linux-kernel@vger.kernel.org\n",
ip, map->dso->long_name, dso__symtab_origin(map->dso),
map->start, map->end, sym->start, sym->end,
sym->binding == STB_GLOBAL ? 'g' :
sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
err ? "[unknown]" : uts.machine,
err ? "[unknown]" : uts.release, perf_version_string);
if (use_browser <= 0)
sleep(5);
map->erange_warned = true;
}
static void perf_top__record_precise_ip(struct perf_top *top,
struct hist_entry *he,
int counter, u64 ip)
{
struct annotation *notes;
struct symbol *sym;
int err;
if (he == NULL || he->ms.sym == NULL ||
((top->sym_filter_entry == NULL ||
@ -189,9 +219,12 @@ static void perf_top__record_precise_ip(struct perf_top *top,
}
ip = he->ms.map->map_ip(he->ms.map, ip);
symbol__inc_addr_samples(sym, he->ms.map, counter, ip);
err = symbol__inc_addr_samples(sym, he->ms.map, counter, ip);
pthread_mutex_unlock(&notes->lock);
if (err == -ERANGE && !he->ms.map->erange_warned)
ui__warn_map_erange(he->ms.map, sym, ip);
}
static void perf_top__show_details(struct perf_top *top)
@ -615,6 +648,7 @@ process_hotkey:
/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
"intel_idle",
"default_idle",
"native_safe_halt",
"cpu_idle",


@ -64,8 +64,8 @@ int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
if (addr > sym->end)
return 0;
if (addr < sym->start || addr > sym->end)
return -ERANGE;
offset = addr - sym->start;
h = annotation__histogram(notes, evidx);
@ -561,16 +561,12 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
struct objdump_line *pos;
int len = sym->end - sym->start;
int len = sym->end - sym->start, offset;
h->sum = 0;
list_for_each_entry(pos, &notes->src->source, node) {
if (pos->offset != -1 && pos->offset < len) {
h->addr[pos->offset] = h->addr[pos->offset] * 7 / 8;
h->sum += h->addr[pos->offset];
}
for (offset = 0; offset < len; ++offset) {
h->addr[offset] = h->addr[offset] * 7 / 8;
h->sum += h->addr[offset];
}
}


@ -256,6 +256,18 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
if (!cmp) {
he->period += period;
++he->nr_events;
/* If the map of an existing hist_entry has
* become out-of-date due to an exec() or
* similar, update it. Otherwise we will
* mis-adjust symbol addresses when computing
* the history counter to increment.
*/
if (he->ms.map != entry->ms.map) {
he->ms.map = entry->ms.map;
if (he->ms.map)
he->ms.map->referenced = true;
}
goto out;
}


@ -38,6 +38,7 @@ void map__init(struct map *self, enum map_type type,
RB_CLEAR_NODE(&self->rb_node);
self->groups = NULL;
self->referenced = false;
self->erange_warned = false;
}
struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,


@ -33,6 +33,7 @@ struct map {
u64 end;
u8 /* enum map_type */ type;
bool referenced;
bool erange_warned;
u32 priv;
u64 pgoff;


@ -826,8 +826,16 @@ static struct machine *
{
const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
return perf_session__find_machine(session, event->ip.pid);
if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
u32 pid;
if (event->header.type == PERF_RECORD_MMAP)
pid = event->mmap.pid;
else
pid = event->ip.pid;
return perf_session__find_machine(session, pid);
}
return perf_session__find_host_machine(session);
}
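
The change above reads the guest pid from event->mmap.pid for PERF_RECORD_MMAP events and from event->ip.pid otherwise. A small sketch of that pick-the-field-by-record-type idea, using hypothetical record types rather than the real perf event layout:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum rec_type { REC_SAMPLE, REC_MMAP };

struct record {
	enum rec_type type;
	union {
		struct { uint32_t pid; uint64_t ip; } sample;
		struct { uint32_t pid; uint64_t start, len; } mmap;
	} u;
};

static uint32_t record_pid(const struct record *r)
{
	/* the pid lives in a different member depending on the record type */
	return r->type == REC_MMAP ? r->u.mmap.pid : r->u.sample.pid;
}

int main(void)
{
	struct record r = {
		.type = REC_MMAP,
		.u.mmap = { .pid = 42, .start = 0x1000, .len = 0x2000 },
	};
	printf("pid=%" PRIu32 "\n", record_pid(&r));
	return 0;
}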


@ -125,6 +125,9 @@ static int callchain__count_rows(struct rb_root *chain)
static bool map_symbol__toggle_fold(struct map_symbol *self)
{
if (!self)
return false;
if (!self->has_children)
return false;