Merge branches 'acpi-pm', 'acpi-processor' and 'acpi-video'
* acpi-pm: ACPI / PM: Fix PM initialization for devices that are not present * acpi-processor: ACPI / processor: Rename acpi_(un)map_lsapic() to acpi_(un)map_cpu() ACPI / processor: Convert apic_id to phys_id to make it arch agnostic * acpi-video: ACPI / video: Add disable_native_backlight quirk for Dell XPS15 L521X
This commit is contained in:
commit
794c3a0a93
189 changed files with 1999 additions and 2246 deletions
|
@ -10,12 +10,13 @@ Optional properties:
|
|||
Each button (key) is represented as a sub-node of "gpio-keys":
|
||||
Subnode properties:
|
||||
|
||||
- gpios: OF device-tree gpio specification.
|
||||
- interrupts: the interrupt line for that input.
|
||||
- label: Descriptive name of the key.
|
||||
- linux,code: Keycode to emit.
|
||||
|
||||
Required mutual exclusive subnode-properties:
|
||||
- gpios: OF device-tree gpio specification.
|
||||
- interrupts: the interrupt line for that input
|
||||
Note that either "interrupts" or "gpios" properties can be omitted, but not
|
||||
both at the same time. Specifying both properties is allowed.
|
||||
|
||||
Optional subnode-properties:
|
||||
- linux,input-type: Specify event type this button/key generates.
|
||||
|
@ -23,6 +24,9 @@ Optional subnode-properties:
|
|||
- debounce-interval: Debouncing interval time in milliseconds.
|
||||
If not specified defaults to 5.
|
||||
- gpio-key,wakeup: Boolean, button can wake-up the system.
|
||||
- linux,can-disable: Boolean, indicates that button is connected
|
||||
to dedicated (not shared) interrupt which can be disabled to
|
||||
suppress events from the button.
|
||||
|
||||
Example nodes:
|
||||
|
||||
|
|
|
@ -8,6 +8,8 @@ Optional properties:
|
|||
- debounce-interval : Debouncing interval time in milliseconds
|
||||
- st,scan-count : Scanning cycles elapsed before key data is updated
|
||||
- st,no-autorepeat : If specified device will not autorepeat
|
||||
- keypad,num-rows : See ./matrix-keymap.txt
|
||||
- keypad,num-columns : See ./matrix-keymap.txt
|
||||
|
||||
Example:
|
||||
|
||||
|
|
2
Makefile
2
Makefile
|
@ -1,7 +1,7 @@
|
|||
VERSION = 3
|
||||
PATCHLEVEL = 19
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc2
|
||||
EXTRAVERSION = -rc3
|
||||
NAME = Diseased Newt
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
|
|
@ -203,27 +203,3 @@
|
|||
compatible = "linux,spdif-dir";
|
||||
};
|
||||
};
|
||||
|
||||
&pinctrl {
|
||||
/*
|
||||
* These pins might be muxed as I2S by
|
||||
* the bootloader, but it conflicts
|
||||
* with the real I2S pins that are
|
||||
* muxed using i2s_pins. We must mux
|
||||
* those pins to a function other than
|
||||
* I2S.
|
||||
*/
|
||||
pinctrl-0 = <&hog_pins1 &hog_pins2>;
|
||||
pinctrl-names = "default";
|
||||
|
||||
hog_pins1: hog-pins1 {
|
||||
marvell,pins = "mpp6", "mpp8", "mpp10",
|
||||
"mpp12", "mpp13";
|
||||
marvell,function = "gpio";
|
||||
};
|
||||
|
||||
hog_pins2: hog-pins2 {
|
||||
marvell,pins = "mpp5", "mpp7", "mpp9";
|
||||
marvell,function = "gpo";
|
||||
};
|
||||
};
|
||||
|
|
|
@ -338,6 +338,7 @@ CONFIG_USB=y
|
|||
CONFIG_USB_XHCI_HCD=y
|
||||
CONFIG_USB_XHCI_MVEBU=y
|
||||
CONFIG_USB_EHCI_HCD=y
|
||||
CONFIG_USB_EHCI_EXYNOS=y
|
||||
CONFIG_USB_EHCI_TEGRA=y
|
||||
CONFIG_USB_EHCI_HCD_STI=y
|
||||
CONFIG_USB_EHCI_HCD_PLATFORM=y
|
||||
|
|
|
@ -1046,6 +1046,15 @@ static int c_show(struct seq_file *m, void *v)
|
|||
seq_printf(m, "model name\t: %s rev %d (%s)\n",
|
||||
cpu_name, cpuid & 15, elf_platform);
|
||||
|
||||
#if defined(CONFIG_SMP)
|
||||
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
|
||||
per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
|
||||
(per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
|
||||
#else
|
||||
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
|
||||
loops_per_jiffy / (500000/HZ),
|
||||
(loops_per_jiffy / (5000/HZ)) % 100);
|
||||
#endif
|
||||
/* dump out the processor features */
|
||||
seq_puts(m, "Features\t: ");
|
||||
|
||||
|
|
|
@ -387,6 +387,18 @@ asmlinkage void secondary_start_kernel(void)
|
|||
|
||||
void __init smp_cpus_done(unsigned int max_cpus)
|
||||
{
|
||||
int cpu;
|
||||
unsigned long bogosum = 0;
|
||||
|
||||
for_each_online_cpu(cpu)
|
||||
bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
|
||||
|
||||
printk(KERN_INFO "SMP: Total of %d processors activated "
|
||||
"(%lu.%02lu BogoMIPS).\n",
|
||||
num_online_cpus(),
|
||||
bogosum / (500000/HZ),
|
||||
(bogosum / (5000/HZ)) % 100);
|
||||
|
||||
hyp_mode_check();
|
||||
}
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
|
||||
|
||||
|
||||
#define NR_syscalls 318 /* length of syscall table */
|
||||
#define NR_syscalls 319 /* length of syscall table */
|
||||
|
||||
/*
|
||||
* The following defines stop scripts/checksyscalls.sh from complaining about
|
||||
|
|
|
@ -331,5 +331,6 @@
|
|||
#define __NR_getrandom 1339
|
||||
#define __NR_memfd_create 1340
|
||||
#define __NR_bpf 1341
|
||||
#define __NR_execveat 1342
|
||||
|
||||
#endif /* _UAPI_ASM_IA64_UNISTD_H */
|
||||
|
|
|
@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
|
|||
}
|
||||
|
||||
/* wrapper to silence section mismatch warning */
|
||||
int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
|
||||
int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
|
||||
{
|
||||
return _acpi_map_lsapic(handle, physid, pcpu);
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_map_lsapic);
|
||||
EXPORT_SYMBOL(acpi_map_cpu);
|
||||
|
||||
int acpi_unmap_lsapic(int cpu)
|
||||
int acpi_unmap_cpu(int cpu)
|
||||
{
|
||||
ia64_cpu_to_sapicid[cpu] = -1;
|
||||
set_cpu_present(cpu, false);
|
||||
|
@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu)
|
|||
|
||||
return (0);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(acpi_unmap_lsapic);
|
||||
EXPORT_SYMBOL(acpi_unmap_cpu);
|
||||
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
|
||||
|
||||
#ifdef CONFIG_ACPI_NUMA
|
||||
|
|
|
@ -1779,6 +1779,7 @@ sys_call_table:
|
|||
data8 sys_getrandom
|
||||
data8 sys_memfd_create // 1340
|
||||
data8 sys_bpf
|
||||
data8 sys_execveat
|
||||
|
||||
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
|
||||
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
|
||||
|
|
|
@ -72,6 +72,7 @@ void __init setup_cpuinfo(void)
|
|||
cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
|
||||
cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
|
||||
cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
|
||||
cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu");
|
||||
|
||||
if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
|
||||
err_cpu("DIV");
|
||||
|
|
|
@ -365,30 +365,14 @@ ENTRY(ret_from_interrupt)
|
|||
GET_THREAD_INFO r1
|
||||
ldw r4, TI_PREEMPT_COUNT(r1)
|
||||
bne r4, r0, restore_all
|
||||
|
||||
need_resched:
|
||||
ldw r4, TI_FLAGS(r1) /* ? Need resched set */
|
||||
BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
|
||||
ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
|
||||
andi r10, r4, ESTATUS_EPIE
|
||||
beq r10, r0, restore_all
|
||||
movia r4, PREEMPT_ACTIVE
|
||||
stw r4, TI_PREEMPT_COUNT(r1)
|
||||
rdctl r10, status /* enable intrs again */
|
||||
ori r10, r10 ,STATUS_PIE
|
||||
wrctl status, r10
|
||||
PUSH r1
|
||||
call schedule
|
||||
POP r1
|
||||
mov r4, r0
|
||||
stw r4, TI_PREEMPT_COUNT(r1)
|
||||
rdctl r10, status /* disable intrs */
|
||||
andi r10, r10, %lo(~STATUS_PIE)
|
||||
wrctl status, r10
|
||||
br need_resched
|
||||
#else
|
||||
br restore_all
|
||||
call preempt_schedule_irq
|
||||
#endif
|
||||
br restore_all
|
||||
|
||||
/***********************************************************************
|
||||
* A few syscall wrappers
|
||||
|
|
|
@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size);
|
|||
extern void reserve_crashkernel(void);
|
||||
extern void machine_kexec_mask_interrupts(void);
|
||||
|
||||
static inline bool kdump_in_progress(void)
|
||||
{
|
||||
return crashing_cpu >= 0;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_KEXEC */
|
||||
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
|
||||
|
||||
|
@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline bool kdump_in_progress(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_KEXEC */
|
||||
#endif /* ! __ASSEMBLY__ */
|
||||
#endif /* __KERNEL__ */
|
||||
|
|
|
@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp)
|
|||
SYSCALL_SPU(getrandom)
|
||||
SYSCALL_SPU(memfd_create)
|
||||
SYSCALL_SPU(bpf)
|
||||
COMPAT_SYS(execveat)
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <uapi/asm/unistd.h>
|
||||
|
||||
|
||||
#define __NR_syscalls 362
|
||||
#define __NR_syscalls 363
|
||||
|
||||
#define __NR__exit __NR_exit
|
||||
#define NR_syscalls __NR_syscalls
|
||||
|
|
|
@ -384,5 +384,6 @@
|
|||
#define __NR_getrandom 359
|
||||
#define __NR_memfd_create 360
|
||||
#define __NR_bpf 361
|
||||
#define __NR_execveat 362
|
||||
|
||||
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
|
||||
|
|
|
@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image)
|
|||
* using debugger IPI.
|
||||
*/
|
||||
|
||||
if (crashing_cpu == -1)
|
||||
if (!kdump_in_progress())
|
||||
kexec_prepare_cpus();
|
||||
|
||||
pr_debug("kexec: Starting switchover sequence.\n");
|
||||
|
|
|
@ -700,6 +700,7 @@ void start_secondary(void *unused)
|
|||
smp_store_cpu_info(cpu);
|
||||
set_dec(tb_ticks_per_jiffy);
|
||||
preempt_disable();
|
||||
cpu_callin_map[cpu] = 1;
|
||||
|
||||
if (smp_ops->setup_cpu)
|
||||
smp_ops->setup_cpu(cpu);
|
||||
|
@ -738,14 +739,6 @@ void start_secondary(void *unused)
|
|||
notify_cpu_starting(cpu);
|
||||
set_cpu_online(cpu, true);
|
||||
|
||||
/*
|
||||
* CPU must be marked active and online before we signal back to the
|
||||
* master, because the scheduler needs to see the cpu_online and
|
||||
* cpu_active bits set.
|
||||
*/
|
||||
smp_wmb();
|
||||
cpu_callin_map[cpu] = 1;
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
cpu_startup_entry(CPUHP_ONLINE);
|
||||
|
|
|
@ -43,6 +43,7 @@
|
|||
#include <asm/trace.h>
|
||||
#include <asm/firmware.h>
|
||||
#include <asm/plpar_wrappers.h>
|
||||
#include <asm/kexec.h>
|
||||
#include <asm/fadump.h>
|
||||
|
||||
#include "pseries.h"
|
||||
|
@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void)
|
|||
* out to the user, but at least this will stop us from
|
||||
* continuing on further and creating an even more
|
||||
* difficult to debug situation.
|
||||
*
|
||||
* There is a known problem when kdump'ing, if cpus are offline
|
||||
* the above call will fail. Rather than panicking again, keep
|
||||
* going and hope the kdump kernel is also little endian, which
|
||||
* it usually is.
|
||||
*/
|
||||
if (rc)
|
||||
if (rc && !kdump_in_progress())
|
||||
panic("Could not enable big endian exceptions");
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -3,6 +3,7 @@ config UML
|
|||
default y
|
||||
select HAVE_ARCH_AUDITSYSCALL
|
||||
select HAVE_UID16
|
||||
select HAVE_FUTEX_CMPXCHG if FUTEX
|
||||
select GENERIC_IRQ_SHOW
|
||||
select GENERIC_CPU_DEVICES
|
||||
select GENERIC_IO
|
||||
|
|
|
@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
|
|||
}
|
||||
|
||||
/* wrapper to silence section mismatch warning */
|
||||
int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
|
||||
int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
|
||||
{
|
||||
return _acpi_map_lsapic(handle, physid, pcpu);
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_map_lsapic);
|
||||
EXPORT_SYMBOL(acpi_map_cpu);
|
||||
|
||||
int acpi_unmap_lsapic(int cpu)
|
||||
int acpi_unmap_cpu(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_ACPI_NUMA
|
||||
set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
|
||||
|
@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu)
|
|||
|
||||
return (0);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(acpi_unmap_lsapic);
|
||||
EXPORT_SYMBOL(acpi_unmap_cpu);
|
||||
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
|
||||
|
||||
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
|
||||
|
|
|
@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
|
|||
|
||||
extern asmlinkage void sys_ni_syscall(void);
|
||||
|
||||
const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
|
||||
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
|
||||
/*
|
||||
* Smells like a compiler bug -- it doesn't work
|
||||
* when the & below is removed.
|
||||
|
|
|
@ -47,7 +47,7 @@ typedef void (*sys_call_ptr_t)(void);
|
|||
|
||||
extern void sys_ni_syscall(void);
|
||||
|
||||
const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
|
||||
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
|
||||
/*
|
||||
* Smells like a compiler bug -- it doesn't work
|
||||
* when the & below is removed.
|
||||
|
|
|
@ -455,6 +455,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
|
|||
{
|
||||
struct af_alg_completion *completion = req->data;
|
||||
|
||||
if (err == -EINPROGRESS)
|
||||
return;
|
||||
|
||||
completion->err = err;
|
||||
complete(&completion->completion);
|
||||
}
|
||||
|
|
|
@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
|
|||
acpi_status status;
|
||||
int ret;
|
||||
|
||||
if (pr->apic_id == -1)
|
||||
if (pr->phys_id == -1)
|
||||
return -ENODEV;
|
||||
|
||||
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
|
||||
|
@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
|
|||
cpu_maps_update_begin();
|
||||
cpu_hotplug_begin();
|
||||
|
||||
ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
|
||||
ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = arch_register_cpu(pr->id);
|
||||
if (ret) {
|
||||
acpi_unmap_lsapic(pr->id);
|
||||
acpi_unmap_cpu(pr->id);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
|
|||
union acpi_object object = { 0 };
|
||||
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
|
||||
struct acpi_processor *pr = acpi_driver_data(device);
|
||||
int apic_id, cpu_index, device_declaration = 0;
|
||||
int phys_id, cpu_index, device_declaration = 0;
|
||||
acpi_status status = AE_OK;
|
||||
static int cpu0_initialized;
|
||||
unsigned long long value;
|
||||
|
@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device)
|
|||
pr->acpi_id = value;
|
||||
}
|
||||
|
||||
apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
|
||||
if (apic_id < 0)
|
||||
acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
|
||||
pr->apic_id = apic_id;
|
||||
phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
|
||||
if (phys_id < 0)
|
||||
acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
|
||||
pr->phys_id = phys_id;
|
||||
|
||||
cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
|
||||
cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
|
||||
if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
|
||||
cpu0_initialized = 1;
|
||||
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
|
||||
/*
|
||||
* Handle UP system running SMP kernel, with no CPU
|
||||
* entry in MADT
|
||||
*/
|
||||
if ((cpu_index == -1) && (num_online_cpus() == 1))
|
||||
cpu_index = 0;
|
||||
}
|
||||
|
@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device)
|
|||
|
||||
/* Remove the CPU. */
|
||||
arch_unregister_cpu(pr->id);
|
||||
acpi_unmap_lsapic(pr->id);
|
||||
acpi_unmap_cpu(pr->id);
|
||||
|
||||
cpu_hotplug_done();
|
||||
cpu_maps_update_done();
|
||||
|
|
|
@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id)
|
|||
unsigned long madt_end, entry;
|
||||
static struct acpi_table_madt *madt;
|
||||
static int read_madt;
|
||||
int apic_id = -1;
|
||||
int phys_id = -1; /* CPU hardware ID */
|
||||
|
||||
if (!read_madt) {
|
||||
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
|
||||
|
@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id)
|
|||
}
|
||||
|
||||
if (!madt)
|
||||
return apic_id;
|
||||
return phys_id;
|
||||
|
||||
entry = (unsigned long)madt;
|
||||
madt_end = entry + madt->header.length;
|
||||
|
@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id)
|
|||
struct acpi_subtable_header *header =
|
||||
(struct acpi_subtable_header *)entry;
|
||||
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
|
||||
if (!map_lapic_id(header, acpi_id, &apic_id))
|
||||
if (!map_lapic_id(header, acpi_id, &phys_id))
|
||||
break;
|
||||
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
|
||||
if (!map_x2apic_id(header, type, acpi_id, &apic_id))
|
||||
if (!map_x2apic_id(header, type, acpi_id, &phys_id))
|
||||
break;
|
||||
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
|
||||
if (!map_lsapic_id(header, type, acpi_id, &apic_id))
|
||||
if (!map_lsapic_id(header, type, acpi_id, &phys_id))
|
||||
break;
|
||||
}
|
||||
entry += header->length;
|
||||
}
|
||||
return apic_id;
|
||||
return phys_id;
|
||||
}
|
||||
|
||||
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
|
||||
|
@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
|
|||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
union acpi_object *obj;
|
||||
struct acpi_subtable_header *header;
|
||||
int apic_id = -1;
|
||||
int phys_id = -1;
|
||||
|
||||
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
|
||||
goto exit;
|
||||
|
@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
|
|||
|
||||
header = (struct acpi_subtable_header *)obj->buffer.pointer;
|
||||
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
|
||||
map_lapic_id(header, acpi_id, &apic_id);
|
||||
map_lapic_id(header, acpi_id, &phys_id);
|
||||
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
|
||||
map_lsapic_id(header, type, acpi_id, &apic_id);
|
||||
map_lsapic_id(header, type, acpi_id, &phys_id);
|
||||
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
|
||||
map_x2apic_id(header, type, acpi_id, &apic_id);
|
||||
map_x2apic_id(header, type, acpi_id, &phys_id);
|
||||
|
||||
exit:
|
||||
kfree(buffer.pointer);
|
||||
return apic_id;
|
||||
return phys_id;
|
||||
}
|
||||
|
||||
int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
|
||||
int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
|
||||
{
|
||||
int apic_id;
|
||||
int phys_id;
|
||||
|
||||
apic_id = map_mat_entry(handle, type, acpi_id);
|
||||
if (apic_id == -1)
|
||||
apic_id = map_madt_entry(type, acpi_id);
|
||||
phys_id = map_mat_entry(handle, type, acpi_id);
|
||||
if (phys_id == -1)
|
||||
phys_id = map_madt_entry(type, acpi_id);
|
||||
|
||||
return apic_id;
|
||||
return phys_id;
|
||||
}
|
||||
|
||||
int acpi_map_cpuid(int apic_id, u32 acpi_id)
|
||||
int acpi_map_cpuid(int phys_id, u32 acpi_id)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
int i;
|
||||
#endif
|
||||
|
||||
if (apic_id == -1) {
|
||||
if (phys_id == -1) {
|
||||
/*
|
||||
* On UP processor, there is no _MAT or MADT table.
|
||||
* So above apic_id is always set to -1.
|
||||
* So above phys_id is always set to -1.
|
||||
*
|
||||
* BIOS may define multiple CPU handles even for UP processor.
|
||||
* For example,
|
||||
|
@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
|
|||
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
|
||||
* }
|
||||
*
|
||||
* Ignores apic_id and always returns 0 for the processor
|
||||
* Ignores phys_id and always returns 0 for the processor
|
||||
* handle with acpi id 0 if nr_cpu_ids is 1.
|
||||
* This should be the case if SMP tables are not found.
|
||||
* Return -1 for other CPU's handle.
|
||||
|
@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
|
|||
if (nr_cpu_ids <= 1 && acpi_id == 0)
|
||||
return acpi_id;
|
||||
else
|
||||
return apic_id;
|
||||
return phys_id;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
for_each_possible_cpu(i) {
|
||||
if (cpu_physical_id(i) == apic_id)
|
||||
if (cpu_physical_id(i) == phys_id)
|
||||
return i;
|
||||
}
|
||||
#else
|
||||
/* In UP kernel, only processor 0 is valid */
|
||||
if (apic_id == 0)
|
||||
return apic_id;
|
||||
if (phys_id == 0)
|
||||
return phys_id;
|
||||
#endif
|
||||
return -1;
|
||||
}
|
||||
|
||||
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
|
||||
{
|
||||
int apic_id;
|
||||
int phys_id;
|
||||
|
||||
apic_id = acpi_get_apicid(handle, type, acpi_id);
|
||||
phys_id = acpi_get_phys_id(handle, type, acpi_id);
|
||||
|
||||
return acpi_map_cpuid(apic_id, acpi_id);
|
||||
return acpi_map_cpuid(phys_id, acpi_id);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
|
||||
|
|
|
@ -985,8 +985,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
|
|||
state->flags = 0;
|
||||
switch (cx->type) {
|
||||
case ACPI_STATE_C1:
|
||||
if (cx->entry_method != ACPI_CSTATE_FFH)
|
||||
state->flags |= CPUIDLE_FLAG_TIME_INVALID;
|
||||
|
||||
state->enter = acpi_idle_enter_c1;
|
||||
state->enter_dead = acpi_idle_play_dead;
|
||||
|
|
|
@ -505,6 +505,33 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
|
|||
DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
.callback = video_disable_native_backlight,
|
||||
.ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = video_disable_native_backlight,
|
||||
.ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
/* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
|
||||
.callback = video_disable_native_backlight,
|
||||
.ident = "Dell XPS15 L521X",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
|
||||
},
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
|
|
|
@ -2088,7 +2088,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
|
|||
* Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
|
||||
* on failure.
|
||||
*/
|
||||
static struct generic_pm_domain *of_genpd_get_from_provider(
|
||||
struct generic_pm_domain *of_genpd_get_from_provider(
|
||||
struct of_phandle_args *genpdspec)
|
||||
{
|
||||
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
|
||||
|
@ -2108,6 +2108,7 @@ static struct generic_pm_domain *of_genpd_get_from_provider(
|
|||
|
||||
return genpd;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
|
||||
|
||||
/**
|
||||
* genpd_dev_pm_detach - Detach a device from its PM domain.
|
||||
|
|
|
@ -108,6 +108,14 @@ static LIST_HEAD(dev_opp_list);
|
|||
/* Lock to allow exclusive modification to the device and opp lists */
|
||||
static DEFINE_MUTEX(dev_opp_list_lock);
|
||||
|
||||
#define opp_rcu_lockdep_assert() \
|
||||
do { \
|
||||
rcu_lockdep_assert(rcu_read_lock_held() || \
|
||||
lockdep_is_held(&dev_opp_list_lock), \
|
||||
"Missing rcu_read_lock() or " \
|
||||
"dev_opp_list_lock protection"); \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* find_device_opp() - find device_opp struct using device pointer
|
||||
* @dev: device pointer used to lookup device OPPs
|
||||
|
@ -208,9 +216,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
|
|||
* This function returns the number of available opps if there are any,
|
||||
* else returns 0 if none or the corresponding error value.
|
||||
*
|
||||
* Locking: This function must be called under rcu_read_lock(). This function
|
||||
* internally references two RCU protected structures: device_opp and opp which
|
||||
* are safe as long as we are under a common RCU locked section.
|
||||
* Locking: This function takes rcu_read_lock().
|
||||
*/
|
||||
int dev_pm_opp_get_opp_count(struct device *dev)
|
||||
{
|
||||
|
@ -218,11 +224,14 @@ int dev_pm_opp_get_opp_count(struct device *dev)
|
|||
struct dev_pm_opp *temp_opp;
|
||||
int count = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
dev_opp = find_device_opp(dev);
|
||||
if (IS_ERR(dev_opp)) {
|
||||
int r = PTR_ERR(dev_opp);
|
||||
dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
|
||||
return r;
|
||||
count = PTR_ERR(dev_opp);
|
||||
dev_err(dev, "%s: device OPP not found (%d)\n",
|
||||
__func__, count);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
|
||||
|
@ -230,6 +239,8 @@ int dev_pm_opp_get_opp_count(struct device *dev)
|
|||
count++;
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
rcu_read_unlock();
|
||||
return count;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
|
||||
|
@ -267,6 +278,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
|
|||
struct device_opp *dev_opp;
|
||||
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
|
||||
|
||||
opp_rcu_lockdep_assert();
|
||||
|
||||
dev_opp = find_device_opp(dev);
|
||||
if (IS_ERR(dev_opp)) {
|
||||
int r = PTR_ERR(dev_opp);
|
||||
|
@ -313,6 +326,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
|
|||
struct device_opp *dev_opp;
|
||||
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
|
||||
|
||||
opp_rcu_lockdep_assert();
|
||||
|
||||
if (!dev || !freq) {
|
||||
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
@ -361,6 +376,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
|
|||
struct device_opp *dev_opp;
|
||||
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
|
||||
|
||||
opp_rcu_lockdep_assert();
|
||||
|
||||
if (!dev || !freq) {
|
||||
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
@ -783,9 +800,15 @@ void of_free_opp_table(struct device *dev)
|
|||
|
||||
/* Check for existing list for 'dev' */
|
||||
dev_opp = find_device_opp(dev);
|
||||
if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
|
||||
PTR_ERR(dev_opp)))
|
||||
if (IS_ERR(dev_opp)) {
|
||||
int error = PTR_ERR(dev_opp);
|
||||
if (error != -ENODEV)
|
||||
WARN(1, "%s: dev_opp: %d\n",
|
||||
IS_ERR_OR_NULL(dev) ?
|
||||
"Invalid device" : dev_name(dev),
|
||||
error);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Hold our list modification lock here */
|
||||
mutex_lock(&dev_opp_list_lock);
|
||||
|
|
|
@ -462,7 +462,7 @@ static void __init arch_counter_register(unsigned type)
|
|||
|
||||
/* Register the CP15 based counter if we have one */
|
||||
if (type & ARCH_CP15_TIMER) {
|
||||
if (arch_timer_use_virtual)
|
||||
if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
|
||||
arch_timer_read_counter = arch_counter_get_cntvct;
|
||||
else
|
||||
arch_timer_read_counter = arch_counter_get_cntpct;
|
||||
|
|
|
@ -211,6 +211,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
|
|||
/* OPPs might be populated at runtime, don't check for error here */
|
||||
of_init_opp_table(cpu_dev);
|
||||
|
||||
/*
|
||||
* But we need OPP table to function so if it is not there let's
|
||||
* give platform code chance to provide it for us.
|
||||
*/
|
||||
ret = dev_pm_opp_get_opp_count(cpu_dev);
|
||||
if (ret <= 0) {
|
||||
pr_debug("OPP table is not ready, deferring probe\n");
|
||||
ret = -EPROBE_DEFER;
|
||||
goto out_free_opp;
|
||||
}
|
||||
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv) {
|
||||
ret = -ENOMEM;
|
||||
|
|
|
@ -2028,6 +2028,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
|
|||
/* Don't start any governor operations if we are entering suspend */
|
||||
if (cpufreq_suspended)
|
||||
return 0;
|
||||
/*
|
||||
* Governor might not be initiated here if ACPI _PPC changed
|
||||
* notification happened, so check it.
|
||||
*/
|
||||
if (!policy->governor)
|
||||
return -EINVAL;
|
||||
|
||||
if (policy->governor->max_transition_latency &&
|
||||
policy->cpuinfo.transition_latency >
|
||||
|
|
|
@ -79,12 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
|
|||
|
||||
last_state = &ldev->states[last_idx];
|
||||
|
||||
if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
|
||||
last_residency = cpuidle_get_last_residency(dev) - \
|
||||
drv->states[last_idx].exit_latency;
|
||||
}
|
||||
else
|
||||
last_residency = last_state->threshold.promotion_time + 1;
|
||||
last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
|
||||
|
||||
/* consider promotion */
|
||||
if (last_idx < drv->state_count - 1 &&
|
||||
|
|
|
@ -396,8 +396,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
|
|||
* power state and occurrence of the wakeup event.
|
||||
*
|
||||
* If the entered idle state didn't support residency measurements,
|
||||
* we are basically lost in the dark how much time passed.
|
||||
* As a compromise, assume we slept for the whole expected time.
|
||||
* we use them anyway if they are short, and if long,
|
||||
* truncate to the whole expected time.
|
||||
*
|
||||
* Any measured amount of time will include the exit latency.
|
||||
* Since we are interested in when the wakeup begun, not when it
|
||||
|
@ -405,23 +405,18 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
|
|||
* the measured amount of time is less than the exit latency,
|
||||
* assume the state was never reached and the exit latency is 0.
|
||||
*/
|
||||
if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
|
||||
/* Use timer value as is */
|
||||
|
||||
/* measured value */
|
||||
measured_us = cpuidle_get_last_residency(dev);
|
||||
|
||||
/* Deduct exit latency */
|
||||
if (measured_us > target->exit_latency)
|
||||
measured_us -= target->exit_latency;
|
||||
|
||||
/* Make sure our coefficients do not exceed unity */
|
||||
if (measured_us > data->next_timer_us)
|
||||
measured_us = data->next_timer_us;
|
||||
|
||||
} else {
|
||||
/* Use measured value */
|
||||
measured_us = cpuidle_get_last_residency(dev);
|
||||
|
||||
/* Deduct exit latency */
|
||||
if (measured_us > target->exit_latency)
|
||||
measured_us -= target->exit_latency;
|
||||
|
||||
/* Make sure our coefficients do not exceed unity */
|
||||
if (measured_us > data->next_timer_us)
|
||||
measured_us = data->next_timer_us;
|
||||
}
|
||||
|
||||
/* Update our correction ratio */
|
||||
new_factor = data->correction_factor[data->bucket];
|
||||
new_factor -= new_factor / DECAY;
|
||||
|
|
|
@ -28,6 +28,13 @@
|
|||
#include <linux/cdev.h>
|
||||
#include "input-compat.h"
|
||||
|
||||
enum evdev_clock_type {
|
||||
EV_CLK_REAL = 0,
|
||||
EV_CLK_MONO,
|
||||
EV_CLK_BOOT,
|
||||
EV_CLK_MAX
|
||||
};
|
||||
|
||||
struct evdev {
|
||||
int open;
|
||||
struct input_handle handle;
|
||||
|
@ -49,12 +56,32 @@ struct evdev_client {
|
|||
struct fasync_struct *fasync;
|
||||
struct evdev *evdev;
|
||||
struct list_head node;
|
||||
int clkid;
|
||||
int clk_type;
|
||||
bool revoked;
|
||||
unsigned int bufsize;
|
||||
struct input_event buffer[];
|
||||
};
|
||||
|
||||
static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
|
||||
{
|
||||
switch (clkid) {
|
||||
|
||||
case CLOCK_REALTIME:
|
||||
client->clk_type = EV_CLK_REAL;
|
||||
break;
|
||||
case CLOCK_MONOTONIC:
|
||||
client->clk_type = EV_CLK_MONO;
|
||||
break;
|
||||
case CLOCK_BOOTTIME:
|
||||
client->clk_type = EV_CLK_BOOT;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* flush queued events of type @type, caller must hold client->buffer_lock */
|
||||
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
|
||||
{
|
||||
|
@ -108,8 +135,11 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
|
|||
struct input_event ev;
|
||||
ktime_t time;
|
||||
|
||||
time = (client->clkid == CLOCK_MONOTONIC) ?
|
||||
ktime_get() : ktime_get_real();
|
||||
time = client->clk_type == EV_CLK_REAL ?
|
||||
ktime_get_real() :
|
||||
client->clk_type == EV_CLK_MONO ?
|
||||
ktime_get() :
|
||||
ktime_get_boottime();
|
||||
|
||||
ev.time = ktime_to_timeval(time);
|
||||
ev.type = EV_SYN;
|
||||
|
@ -159,7 +189,7 @@ static void __pass_event(struct evdev_client *client,
|
|||
|
||||
static void evdev_pass_values(struct evdev_client *client,
|
||||
const struct input_value *vals, unsigned int count,
|
||||
ktime_t mono, ktime_t real)
|
||||
ktime_t *ev_time)
|
||||
{
|
||||
struct evdev *evdev = client->evdev;
|
||||
const struct input_value *v;
|
||||
|
@ -169,8 +199,7 @@ static void evdev_pass_values(struct evdev_client *client,
|
|||
if (client->revoked)
|
||||
return;
|
||||
|
||||
event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
|
||||
mono : real);
|
||||
event.time = ktime_to_timeval(ev_time[client->clk_type]);
|
||||
|
||||
/* Interrupts are disabled, just acquire the lock. */
|
||||
spin_lock(&client->buffer_lock);
|
||||
|
@ -198,21 +227,22 @@ static void evdev_events(struct input_handle *handle,
|
|||
{
|
||||
struct evdev *evdev = handle->private;
|
||||
struct evdev_client *client;
|
||||
ktime_t time_mono, time_real;
|
||||
ktime_t ev_time[EV_CLK_MAX];
|
||||
|
||||
time_mono = ktime_get();
|
||||
time_real = ktime_mono_to_real(time_mono);
|
||||
ev_time[EV_CLK_MONO] = ktime_get();
|
||||
ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
|
||||
ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
|
||||
TK_OFFS_BOOT);
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
client = rcu_dereference(evdev->grab);
|
||||
|
||||
if (client)
|
||||
evdev_pass_values(client, vals, count, time_mono, time_real);
|
||||
evdev_pass_values(client, vals, count, ev_time);
|
||||
else
|
||||
list_for_each_entry_rcu(client, &evdev->client_list, node)
|
||||
evdev_pass_values(client, vals, count,
|
||||
time_mono, time_real);
|
||||
evdev_pass_values(client, vals, count, ev_time);
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
@ -877,10 +907,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
|
|||
case EVIOCSCLOCKID:
|
||||
if (copy_from_user(&i, p, sizeof(unsigned int)))
|
||||
return -EFAULT;
|
||||
if (i != CLOCK_MONOTONIC && i != CLOCK_REALTIME)
|
||||
return -EINVAL;
|
||||
client->clkid = i;
|
||||
return 0;
|
||||
|
||||
return evdev_set_clk_type(client, i);
|
||||
|
||||
case EVIOCGKEYCODE:
|
||||
return evdev_handle_get_keycode(dev, p);
|
||||
|
|
|
@ -1974,18 +1974,22 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
|
|||
|
||||
events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
|
||||
|
||||
for (i = 0; i < ABS_CNT; i++) {
|
||||
if (test_bit(i, dev->absbit)) {
|
||||
if (input_is_mt_axis(i))
|
||||
events += mt_slots;
|
||||
else
|
||||
events++;
|
||||
if (test_bit(EV_ABS, dev->evbit)) {
|
||||
for (i = 0; i < ABS_CNT; i++) {
|
||||
if (test_bit(i, dev->absbit)) {
|
||||
if (input_is_mt_axis(i))
|
||||
events += mt_slots;
|
||||
else
|
||||
events++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < REL_CNT; i++)
|
||||
if (test_bit(i, dev->relbit))
|
||||
events++;
|
||||
if (test_bit(EV_REL, dev->evbit)) {
|
||||
for (i = 0; i < REL_CNT; i++)
|
||||
if (test_bit(i, dev->relbit))
|
||||
events++;
|
||||
}
|
||||
|
||||
/* Make room for KEY and MSC events */
|
||||
events += 7;
|
||||
|
|
|
@ -559,6 +559,7 @@ config KEYBOARD_SH_KEYSC
|
|||
config KEYBOARD_STMPE
|
||||
tristate "STMPE keypad support"
|
||||
depends on MFD_STMPE
|
||||
depends on OF
|
||||
select INPUT_MATRIXKMAP
|
||||
help
|
||||
Say Y here if you want to use the keypad controller on STMPE I/O
|
||||
|
|
|
@ -35,9 +35,13 @@
|
|||
struct gpio_button_data {
|
||||
const struct gpio_keys_button *button;
|
||||
struct input_dev *input;
|
||||
struct timer_list timer;
|
||||
struct work_struct work;
|
||||
unsigned int timer_debounce; /* in msecs */
|
||||
|
||||
struct timer_list release_timer;
|
||||
unsigned int release_delay; /* in msecs, for IRQ-only buttons */
|
||||
|
||||
struct delayed_work work;
|
||||
unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
|
||||
|
||||
unsigned int irq;
|
||||
spinlock_t lock;
|
||||
bool disabled;
|
||||
|
@ -116,11 +120,14 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
|
|||
{
|
||||
if (!bdata->disabled) {
|
||||
/*
|
||||
* Disable IRQ and possible debouncing timer.
|
||||
* Disable IRQ and associated timer/work structure.
|
||||
*/
|
||||
disable_irq(bdata->irq);
|
||||
if (bdata->timer_debounce)
|
||||
del_timer_sync(&bdata->timer);
|
||||
|
||||
if (gpio_is_valid(bdata->button->gpio))
|
||||
cancel_delayed_work_sync(&bdata->work);
|
||||
else
|
||||
del_timer_sync(&bdata->release_timer);
|
||||
|
||||
bdata->disabled = true;
|
||||
}
|
||||
|
@ -343,7 +350,7 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
|
|||
static void gpio_keys_gpio_work_func(struct work_struct *work)
|
||||
{
|
||||
struct gpio_button_data *bdata =
|
||||
container_of(work, struct gpio_button_data, work);
|
||||
container_of(work, struct gpio_button_data, work.work);
|
||||
|
||||
gpio_keys_gpio_report_event(bdata);
|
||||
|
||||
|
@ -351,13 +358,6 @@ static void gpio_keys_gpio_work_func(struct work_struct *work)
|
|||
pm_relax(bdata->input->dev.parent);
|
||||
}
|
||||
|
||||
static void gpio_keys_gpio_timer(unsigned long _data)
|
||||
{
|
||||
struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
|
||||
|
||||
schedule_work(&bdata->work);
|
||||
}
|
||||
|
||||
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
|
||||
{
|
||||
struct gpio_button_data *bdata = dev_id;
|
||||
|
@ -366,11 +366,10 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
|
|||
|
||||
if (bdata->button->wakeup)
|
||||
pm_stay_awake(bdata->input->dev.parent);
|
||||
if (bdata->timer_debounce)
|
||||
mod_timer(&bdata->timer,
|
||||
jiffies + msecs_to_jiffies(bdata->timer_debounce));
|
||||
else
|
||||
schedule_work(&bdata->work);
|
||||
|
||||
mod_delayed_work(system_wq,
|
||||
&bdata->work,
|
||||
msecs_to_jiffies(bdata->software_debounce));
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -408,7 +407,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
|
|||
input_event(input, EV_KEY, button->code, 1);
|
||||
input_sync(input);
|
||||
|
||||
if (!bdata->timer_debounce) {
|
||||
if (!bdata->release_delay) {
|
||||
input_event(input, EV_KEY, button->code, 0);
|
||||
input_sync(input);
|
||||
goto out;
|
||||
|
@ -417,9 +416,9 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
|
|||
bdata->key_pressed = true;
|
||||
}
|
||||
|
||||
if (bdata->timer_debounce)
|
||||
mod_timer(&bdata->timer,
|
||||
jiffies + msecs_to_jiffies(bdata->timer_debounce));
|
||||
if (bdata->release_delay)
|
||||
mod_timer(&bdata->release_timer,
|
||||
jiffies + msecs_to_jiffies(bdata->release_delay));
|
||||
out:
|
||||
spin_unlock_irqrestore(&bdata->lock, flags);
|
||||
return IRQ_HANDLED;
|
||||
|
@ -429,10 +428,10 @@ static void gpio_keys_quiesce_key(void *data)
|
|||
{
|
||||
struct gpio_button_data *bdata = data;
|
||||
|
||||
if (bdata->timer_debounce)
|
||||
del_timer_sync(&bdata->timer);
|
||||
|
||||
cancel_work_sync(&bdata->work);
|
||||
if (gpio_is_valid(bdata->button->gpio))
|
||||
cancel_delayed_work_sync(&bdata->work);
|
||||
else
|
||||
del_timer_sync(&bdata->release_timer);
|
||||
}
|
||||
|
||||
static int gpio_keys_setup_key(struct platform_device *pdev,
|
||||
|
@ -466,23 +465,25 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
|
|||
button->debounce_interval * 1000);
|
||||
/* use timer if gpiolib doesn't provide debounce */
|
||||
if (error < 0)
|
||||
bdata->timer_debounce =
|
||||
bdata->software_debounce =
|
||||
button->debounce_interval;
|
||||
}
|
||||
|
||||
irq = gpio_to_irq(button->gpio);
|
||||
if (irq < 0) {
|
||||
error = irq;
|
||||
dev_err(dev,
|
||||
"Unable to get irq number for GPIO %d, error %d\n",
|
||||
button->gpio, error);
|
||||
return error;
|
||||
if (button->irq) {
|
||||
bdata->irq = button->irq;
|
||||
} else {
|
||||
irq = gpio_to_irq(button->gpio);
|
||||
if (irq < 0) {
|
||||
error = irq;
|
||||
dev_err(dev,
|
||||
"Unable to get irq number for GPIO %d, error %d\n",
|
||||
button->gpio, error);
|
||||
return error;
|
||||
}
|
||||
bdata->irq = irq;
|
||||
}
|
||||
bdata->irq = irq;
|
||||
|
||||
INIT_WORK(&bdata->work, gpio_keys_gpio_work_func);
|
||||
setup_timer(&bdata->timer,
|
||||
gpio_keys_gpio_timer, (unsigned long)bdata);
|
||||
INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func);
|
||||
|
||||
isr = gpio_keys_gpio_isr;
|
||||
irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
|
||||
|
@ -499,8 +500,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
bdata->timer_debounce = button->debounce_interval;
|
||||
setup_timer(&bdata->timer,
|
||||
bdata->release_delay = button->debounce_interval;
|
||||
setup_timer(&bdata->release_timer,
|
||||
gpio_keys_irq_timer, (unsigned long)bdata);
|
||||
|
||||
isr = gpio_keys_irq_isr;
|
||||
|
@ -510,7 +511,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
|
|||
input_set_capability(input, button->type ?: EV_KEY, button->code);
|
||||
|
||||
/*
|
||||
* Install custom action to cancel debounce timer and
|
||||
* Install custom action to cancel release timer and
|
||||
* workqueue item.
|
||||
*/
|
||||
error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata);
|
||||
|
@ -618,33 +619,30 @@ gpio_keys_get_devtree_pdata(struct device *dev)
|
|||
|
||||
i = 0;
|
||||
for_each_child_of_node(node, pp) {
|
||||
int gpio = -1;
|
||||
enum of_gpio_flags flags;
|
||||
|
||||
button = &pdata->buttons[i++];
|
||||
|
||||
if (!of_find_property(pp, "gpios", NULL)) {
|
||||
button->irq = irq_of_parse_and_map(pp, 0);
|
||||
if (button->irq == 0) {
|
||||
i--;
|
||||
pdata->nbuttons--;
|
||||
dev_warn(dev, "Found button without gpios or irqs\n");
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
gpio = of_get_gpio_flags(pp, 0, &flags);
|
||||
if (gpio < 0) {
|
||||
error = gpio;
|
||||
button->gpio = of_get_gpio_flags(pp, 0, &flags);
|
||||
if (button->gpio < 0) {
|
||||
error = button->gpio;
|
||||
if (error != -ENOENT) {
|
||||
if (error != -EPROBE_DEFER)
|
||||
dev_err(dev,
|
||||
"Failed to get gpio flags, error: %d\n",
|
||||
error);
|
||||
return ERR_PTR(error);
|
||||
}
|
||||
} else {
|
||||
button->active_low = flags & OF_GPIO_ACTIVE_LOW;
|
||||
}
|
||||
|
||||
button->gpio = gpio;
|
||||
button->active_low = flags & OF_GPIO_ACTIVE_LOW;
|
||||
button->irq = irq_of_parse_and_map(pp, 0);
|
||||
|
||||
if (!gpio_is_valid(button->gpio) && !button->irq) {
|
||||
dev_err(dev, "Found button without gpios or irqs\n");
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
if (of_property_read_u32(pp, "linux,code", &button->code)) {
|
||||
dev_err(dev, "Button without keycode: 0x%x\n",
|
||||
|
@ -659,6 +657,8 @@ gpio_keys_get_devtree_pdata(struct device *dev)
|
|||
|
||||
button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
|
||||
|
||||
button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
|
||||
|
||||
if (of_property_read_u32(pp, "debounce-interval",
|
||||
&button->debounce_interval))
|
||||
button->debounce_interval = 5;
|
||||
|
|
|
@ -473,7 +473,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
|
|||
if (error)
|
||||
goto bail1;
|
||||
|
||||
init_completion(&dev->cmd_done);
|
||||
reinit_completion(&dev->cmd_done);
|
||||
serio_write(serio, 0);
|
||||
serio_write(serio, 0);
|
||||
serio_write(serio, HIL_PKT_CMD >> 8);
|
||||
|
@ -482,7 +482,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
|
|||
if (error)
|
||||
goto bail1;
|
||||
|
||||
init_completion(&dev->cmd_done);
|
||||
reinit_completion(&dev->cmd_done);
|
||||
serio_write(serio, 0);
|
||||
serio_write(serio, 0);
|
||||
serio_write(serio, HIL_PKT_CMD >> 8);
|
||||
|
@ -491,7 +491,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
|
|||
if (error)
|
||||
goto bail1;
|
||||
|
||||
init_completion(&dev->cmd_done);
|
||||
reinit_completion(&dev->cmd_done);
|
||||
serio_write(serio, 0);
|
||||
serio_write(serio, 0);
|
||||
serio_write(serio, HIL_PKT_CMD >> 8);
|
||||
|
|
|
@ -45,13 +45,14 @@
|
|||
#define STMPE_KEYPAD_MAX_ROWS 8
|
||||
#define STMPE_KEYPAD_MAX_COLS 8
|
||||
#define STMPE_KEYPAD_ROW_SHIFT 3
|
||||
#define STMPE_KEYPAD_KEYMAP_SIZE \
|
||||
#define STMPE_KEYPAD_KEYMAP_MAX_SIZE \
|
||||
(STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS)
|
||||
|
||||
/**
|
||||
* struct stmpe_keypad_variant - model-specific attributes
|
||||
* @auto_increment: whether the KPC_DATA_BYTE register address
|
||||
* auto-increments on multiple read
|
||||
* @set_pullup: whether the pins need to have their pull-ups set
|
||||
* @num_data: number of data bytes
|
||||
* @num_normal_data: number of normal keys' data bytes
|
||||
* @max_cols: maximum number of columns supported
|
||||
|
@ -61,6 +62,7 @@
|
|||
*/
|
||||
struct stmpe_keypad_variant {
|
||||
bool auto_increment;
|
||||
bool set_pullup;
|
||||
int num_data;
|
||||
int num_normal_data;
|
||||
int max_cols;
|
||||
|
@ -81,6 +83,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
|
|||
},
|
||||
[STMPE2401] = {
|
||||
.auto_increment = false,
|
||||
.set_pullup = true,
|
||||
.num_data = 3,
|
||||
.num_normal_data = 2,
|
||||
.max_cols = 8,
|
||||
|
@ -90,6 +93,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
|
|||
},
|
||||
[STMPE2403] = {
|
||||
.auto_increment = true,
|
||||
.set_pullup = true,
|
||||
.num_data = 5,
|
||||
.num_normal_data = 3,
|
||||
.max_cols = 8,
|
||||
|
@ -99,16 +103,30 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
|
|||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* struct stmpe_keypad - STMPE keypad state container
|
||||
* @stmpe: pointer to parent STMPE device
|
||||
* @input: spawned input device
|
||||
* @variant: STMPE variant
|
||||
* @debounce_ms: debounce interval, in ms. Maximum is
|
||||
* %STMPE_KEYPAD_MAX_DEBOUNCE.
|
||||
* @scan_count: number of key scanning cycles to confirm key data.
|
||||
* Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
|
||||
* @no_autorepeat: disable key autorepeat
|
||||
* @rows: bitmask for the rows
|
||||
* @cols: bitmask for the columns
|
||||
* @keymap: the keymap
|
||||
*/
|
||||
struct stmpe_keypad {
|
||||
struct stmpe *stmpe;
|
||||
struct input_dev *input;
|
||||
const struct stmpe_keypad_variant *variant;
|
||||
const struct stmpe_keypad_platform_data *plat;
|
||||
|
||||
unsigned int debounce_ms;
|
||||
unsigned int scan_count;
|
||||
bool no_autorepeat;
|
||||
unsigned int rows;
|
||||
unsigned int cols;
|
||||
|
||||
unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
|
||||
unsigned short keymap[STMPE_KEYPAD_KEYMAP_MAX_SIZE];
|
||||
};
|
||||
|
||||
static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
|
||||
|
@ -171,7 +189,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
|
|||
unsigned int col_gpios = variant->col_gpios;
|
||||
unsigned int row_gpios = variant->row_gpios;
|
||||
struct stmpe *stmpe = keypad->stmpe;
|
||||
u8 pureg = stmpe->regs[STMPE_IDX_GPPUR_LSB];
|
||||
unsigned int pins = 0;
|
||||
unsigned int pu_pins = 0;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
/*
|
||||
|
@ -188,8 +209,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
|
|||
for (i = 0; i < variant->max_cols; i++) {
|
||||
int num = __ffs(col_gpios);
|
||||
|
||||
if (keypad->cols & (1 << i))
|
||||
if (keypad->cols & (1 << i)) {
|
||||
pins |= 1 << num;
|
||||
pu_pins |= 1 << num;
|
||||
}
|
||||
|
||||
col_gpios &= ~(1 << num);
|
||||
}
|
||||
|
@ -203,20 +226,43 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
|
|||
row_gpios &= ~(1 << num);
|
||||
}
|
||||
|
||||
return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
|
||||
ret = stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* On STMPE24xx, set pin bias to pull-up on all keypad input
|
||||
* pins (columns), this incidentally happen to be maximum 8 pins
|
||||
* and placed at GPIO0-7 so only the LSB of the pull up register
|
||||
* ever needs to be written.
|
||||
*/
|
||||
if (variant->set_pullup) {
|
||||
u8 val;
|
||||
|
||||
ret = stmpe_reg_read(stmpe, pureg);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Do not touch unused pins, may be used for GPIO */
|
||||
val = ret & ~pu_pins;
|
||||
val |= pu_pins;
|
||||
|
||||
ret = stmpe_reg_write(stmpe, pureg, val);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
|
||||
{
|
||||
const struct stmpe_keypad_platform_data *plat = keypad->plat;
|
||||
const struct stmpe_keypad_variant *variant = keypad->variant;
|
||||
struct stmpe *stmpe = keypad->stmpe;
|
||||
int ret;
|
||||
|
||||
if (plat->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
|
||||
if (keypad->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
|
||||
return -EINVAL;
|
||||
|
||||
if (plat->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
|
||||
if (keypad->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
|
||||
return -EINVAL;
|
||||
|
||||
ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
|
||||
|
@ -245,7 +291,7 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
|
|||
|
||||
ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB,
|
||||
STMPE_KPC_CTRL_MSB_SCAN_COUNT,
|
||||
plat->scan_count << 4);
|
||||
keypad->scan_count << 4);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -253,17 +299,18 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
|
|||
STMPE_KPC_CTRL_LSB_SCAN |
|
||||
STMPE_KPC_CTRL_LSB_DEBOUNCE,
|
||||
STMPE_KPC_CTRL_LSB_SCAN |
|
||||
(plat->debounce_ms << 1));
|
||||
(keypad->debounce_ms << 1));
|
||||
}
|
||||
|
||||
static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
|
||||
static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad,
|
||||
u32 used_rows, u32 used_cols)
|
||||
{
|
||||
int row, col;
|
||||
|
||||
for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) {
|
||||
for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) {
|
||||
for (row = 0; row < used_rows; row++) {
|
||||
for (col = 0; col < used_cols; col++) {
|
||||
int code = MATRIX_SCAN_CODE(row, col,
|
||||
STMPE_KEYPAD_ROW_SHIFT);
|
||||
STMPE_KEYPAD_ROW_SHIFT);
|
||||
if (keypad->keymap[code] != KEY_RESERVED) {
|
||||
keypad->rows |= 1 << row;
|
||||
keypad->cols |= 1 << col;
|
||||
|
@ -272,51 +319,17 @@ static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
|
|||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
static const struct stmpe_keypad_platform_data *
|
||||
stmpe_keypad_of_probe(struct device *dev)
|
||||
{
|
||||
struct device_node *np = dev->of_node;
|
||||
struct stmpe_keypad_platform_data *plat;
|
||||
|
||||
if (!np)
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
|
||||
if (!plat)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
|
||||
of_property_read_u32(np, "st,scan-count", &plat->scan_count);
|
||||
|
||||
plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
|
||||
|
||||
return plat;
|
||||
}
|
||||
#else
|
||||
static inline const struct stmpe_keypad_platform_data *
|
||||
stmpe_keypad_of_probe(struct device *dev)
|
||||
{
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int stmpe_keypad_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
|
||||
const struct stmpe_keypad_platform_data *plat;
|
||||
struct device_node *np = pdev->dev.of_node;
|
||||
struct stmpe_keypad *keypad;
|
||||
struct input_dev *input;
|
||||
u32 rows;
|
||||
u32 cols;
|
||||
int error;
|
||||
int irq;
|
||||
|
||||
plat = stmpe->pdata->keypad;
|
||||
if (!plat) {
|
||||
plat = stmpe_keypad_of_probe(&pdev->dev);
|
||||
if (IS_ERR(plat))
|
||||
return PTR_ERR(plat);
|
||||
}
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
@ -326,6 +339,13 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
|
|||
if (!keypad)
|
||||
return -ENOMEM;
|
||||
|
||||
keypad->stmpe = stmpe;
|
||||
keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
|
||||
|
||||
of_property_read_u32(np, "debounce-interval", &keypad->debounce_ms);
|
||||
of_property_read_u32(np, "st,scan-count", &keypad->scan_count);
|
||||
keypad->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
|
||||
|
||||
input = devm_input_allocate_device(&pdev->dev);
|
||||
if (!input)
|
||||
return -ENOMEM;
|
||||
|
@ -334,23 +354,22 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
|
|||
input->id.bustype = BUS_I2C;
|
||||
input->dev.parent = &pdev->dev;
|
||||
|
||||
error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
|
||||
STMPE_KEYPAD_MAX_ROWS,
|
||||
STMPE_KEYPAD_MAX_COLS,
|
||||
error = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
error = matrix_keypad_build_keymap(NULL, NULL, rows, cols,
|
||||
keypad->keymap, input);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
input_set_capability(input, EV_MSC, MSC_SCAN);
|
||||
if (!plat->no_autorepeat)
|
||||
if (!keypad->no_autorepeat)
|
||||
__set_bit(EV_REP, input->evbit);
|
||||
|
||||
stmpe_keypad_fill_used_pins(keypad);
|
||||
stmpe_keypad_fill_used_pins(keypad, rows, cols);
|
||||
|
||||
keypad->stmpe = stmpe;
|
||||
keypad->plat = plat;
|
||||
keypad->input = input;
|
||||
keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
|
||||
|
||||
error = stmpe_keypad_chip_init(keypad);
|
||||
if (error < 0)
|
||||
|
|
|
@@ -881,6 +881,34 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
unsigned char *pkt,
unsigned char pkt_id)
{
/*
* packet-fmt b7 b6 b5 b4 b3 b2 b1 b0
* Byte0 TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0
* Byte0 NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0
* Byte1 Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
* Byte2 X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
* Byte3 X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0
* Byte4 TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
* Byte4 MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1
* Byte4 NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0
* Byte5 TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
* Byte5 MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0
* L: Left button
* R / M: Non-clickpads: Right / Middle button
* Clickpads: When > 2 fingers are down, and some fingers
* are in the button area, then the 2 coordinates reported
* are for fingers outside the button area and these report
* extra fingers being present in the right / left button
* area. Note these fingers are not added to the F field!
* so if a TWO packet is received and R = 1 then there are
* 3 fingers down, etc.
* TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt
* 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
* otherwise byte 0 bit 4 must be set and byte 0/4/5 are
* in NEW fmt
* F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
*/

mt[0].x = ((pkt[2] & 0x80) << 4);
mt[0].x |= ((pkt[2] & 0x3F) << 5);
mt[0].x |= ((pkt[3] & 0x30) >> 1);

@@ -919,18 +947,21 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,

static int alps_get_mt_count(struct input_mt_pos *mt)
{
int i;
int i, fingers = 0;

for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++)
/* empty */;
for (i = 0; i < MAX_TOUCHES; i++) {
if (mt[i].x != 0 || mt[i].y != 0)
fingers++;
}

return i;
return fingers;
}

static int alps_decode_packet_v7(struct alps_fields *f,
unsigned char *p,
struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
unsigned char pkt_id;

pkt_id = alps_get_packet_id_v7(p);

@@ -938,19 +969,52 @@ static int alps_decode_packet_v7(struct alps_fields *f,
return 0;
if (pkt_id == V7_PACKET_ID_UNKNOWN)
return -1;
/*
* NEW packets are sent to indicate a discontinuity in the finger
* coordinate reporting. Specifically a finger may have moved from
* slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for
* us.
*
* NEW packets have 3 problems:
* 1) They do not contain middle / right button info (on non clickpads)
* this can be worked around by preserving the old button state
* 2) They do not contain an accurate fingercount, and they are
* typically sent when the number of fingers changes. We cannot use
* the old finger count as that may mismatch with the amount of
* touch coordinates we've available in the NEW packet
* 3) Their x data for the second touch is inaccurate leading to
* a possible jump of the x coordinate by 16 units when the first
* non NEW packet comes in
* Since problems 2 & 3 cannot be worked around, just ignore them.
*/
if (pkt_id == V7_PACKET_ID_NEW)
return 1;

alps_get_finger_coordinate_v7(f->mt, p, pkt_id);

if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) {
f->left = (p[0] & 0x80) >> 7;
if (pkt_id == V7_PACKET_ID_TWO)
f->fingers = alps_get_mt_count(f->mt);
else /* pkt_id == V7_PACKET_ID_MULTI */
f->fingers = 3 + (p[5] & 0x03);

f->left = (p[0] & 0x80) >> 7;
if (priv->flags & ALPS_BUTTONPAD) {
if (p[0] & 0x20)
f->fingers++;
if (p[0] & 0x10)
f->fingers++;
} else {
f->right = (p[0] & 0x20) >> 5;
f->middle = (p[0] & 0x10) >> 4;
}

if (pkt_id == V7_PACKET_ID_TWO)
f->fingers = alps_get_mt_count(f->mt);
else if (pkt_id == V7_PACKET_ID_MULTI)
f->fingers = 3 + (p[5] & 0x03);
/* Sometimes a single touch is reported in mt[1] rather than mt[0] */
if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) {
f->mt[0].x = f->mt[1].x;
f->mt[0].y = f->mt[1].y;
f->mt[1].x = 0;
f->mt[1].y = 0;
}

return 0;
}

@@ -227,6 +227,7 @@ TRACKPOINT_INT_ATTR(thresh, TP_THRESH, TP_DEF_THRESH);
TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH, TP_DEF_UP_THRESH);
TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);

TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
TP_DEF_PTSON);

@@ -246,6 +247,7 @@ static struct attribute *trackpoint_attrs[] = {
&psmouse_attr_upthresh.dattr.attr,
&psmouse_attr_ztime.dattr.attr,
&psmouse_attr_jenks.dattr.attr,
&psmouse_attr_drift_time.dattr.attr,
&psmouse_attr_press_to_select.dattr.attr,
&psmouse_attr_skipback.dattr.attr,
&psmouse_attr_ext_dev.dattr.attr,

@@ -312,6 +314,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, upthresh);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, ztime);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, jenks);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, drift_time);

/* toggles */
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, press_to_select);

@@ -332,6 +335,7 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, upthresh);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, ztime);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, jenks);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, drift_time);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, inertia);

/* toggles */

@@ -70,6 +70,9 @@
#define TP_UP_THRESH 0x5A /* Used to generate a 'click' on Z-axis */
#define TP_Z_TIME 0x5E /* How sharp of a press */
#define TP_JENKS_CURV 0x5D /* Minimum curvature for double click */
#define TP_DRIFT_TIME 0x5F /* How long a 'hands off' condition */
/* must last (x*107ms) for drift */
/* correction to occur */

/*
* Toggling Flag bits

@@ -120,6 +123,7 @@
#define TP_DEF_UP_THRESH 0xFF
#define TP_DEF_Z_TIME 0x26
#define TP_DEF_JENKS_CURV 0x87
#define TP_DEF_DRIFT_TIME 0x05

/* Toggles */
#define TP_DEF_MB 0x00

@@ -137,6 +141,7 @@ struct trackpoint_data
unsigned char draghys, mindrag;
unsigned char thresh, upthresh;
unsigned char ztime, jenks;
unsigned char drift_time;

/* toggles */
unsigned char press_to_select;

@@ -99,13 +99,9 @@
#define MXT_T6_STATUS_COMSERR (1 << 2)

/* MXT_GEN_POWER_T7 field */
struct t7_config {
u8 idle;
u8 active;
} __packed;

#define MXT_POWER_CFG_RUN 0
#define MXT_POWER_CFG_DEEPSLEEP 1
#define MXT_POWER_IDLEACQINT 0
#define MXT_POWER_ACTVACQINT 1
#define MXT_POWER_ACTV2IDLETO 2

/* MXT_GEN_ACQUIRE_T8 field */
#define MXT_ACQUIRE_CHRGTIME 0

@@ -117,6 +113,7 @@ struct t7_config {
#define MXT_ACQUIRE_ATCHCALSTHR 7

/* MXT_TOUCH_MULTI_T9 field */
#define MXT_TOUCH_CTRL 0
#define MXT_T9_ORIENT 9
#define MXT_T9_RANGE 18

@@ -256,7 +253,6 @@ struct mxt_data {
bool update_input;
u8 last_message_count;
u8 num_touchids;
struct t7_config t7_cfg;

/* Cached parameters from object table */
u16 T5_address;

@@ -672,6 +668,20 @@ static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
data->t6_status = status;
}

static int mxt_write_object(struct mxt_data *data,
u8 type, u8 offset, u8 val)
{
struct mxt_object *object;
u16 reg;

object = mxt_get_object(data, type);
if (!object || offset >= mxt_obj_size(object))
return -EINVAL;

reg = object->start_address;
return mxt_write_reg(data->client, reg + offset, val);
}

static void mxt_input_button(struct mxt_data *data, u8 *message)
{
struct input_dev *input = data->input_dev;

@@ -1742,60 +1752,6 @@ err_free_object_table:
return error;
}

static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
{
struct device *dev = &data->client->dev;
int error;
struct t7_config *new_config;
struct t7_config deepsleep = { .active = 0, .idle = 0 };

if (sleep == MXT_POWER_CFG_DEEPSLEEP)
new_config = &deepsleep;
else
new_config = &data->t7_cfg;

error = __mxt_write_reg(data->client, data->T7_address,
sizeof(data->t7_cfg), new_config);
if (error)
return error;

dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
new_config->active, new_config->idle);

return 0;
}

static int mxt_init_t7_power_cfg(struct mxt_data *data)
{
struct device *dev = &data->client->dev;
int error;
bool retry = false;

recheck:
error = __mxt_read_reg(data->client, data->T7_address,
sizeof(data->t7_cfg), &data->t7_cfg);
if (error)
return error;

if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
if (!retry) {
dev_dbg(dev, "T7 cfg zero, resetting\n");
mxt_soft_reset(data);
retry = true;
goto recheck;
} else {
dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
data->t7_cfg.active = 20;
data->t7_cfg.idle = 100;
return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
}
}

dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
data->t7_cfg.active, data->t7_cfg.idle);
return 0;
}

static int mxt_configure_objects(struct mxt_data *data,
const struct firmware *cfg)
{

@@ -1809,12 +1765,6 @@ static int mxt_configure_objects(struct mxt_data *data,
dev_warn(dev, "Error %d updating config\n", error);
}

error = mxt_init_t7_power_cfg(data);
if (error) {
dev_err(dev, "Failed to initialize power cfg\n");
return error;
}

error = mxt_initialize_t9_input_device(data);
if (error)
return error;

@@ -2093,15 +2043,16 @@ static const struct attribute_group mxt_attr_group = {

static void mxt_start(struct mxt_data *data)
{
mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);

/* Recalibrate since chip has been in deep sleep */
mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
/* Touch enable */
mxt_write_object(data,
MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
}

static void mxt_stop(struct mxt_data *data)
{
mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
/* Touch disable */
mxt_write_object(data,
MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
}

static int mxt_input_open(struct input_dev *dev)

@@ -2266,6 +2217,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
struct mxt_data *data = i2c_get_clientdata(client);
struct input_dev *input_dev = data->input_dev;

mxt_soft_reset(data);

mutex_lock(&input_dev->mutex);

if (input_dev->users)

@@ -850,9 +850,11 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
}

#define EDT_ATTR_CHECKSET(name, reg) \
do { \
if (pdata->name >= edt_ft5x06_attr_##name.limit_low && \
pdata->name <= edt_ft5x06_attr_##name.limit_high) \
edt_ft5x06_register_write(tsdata, reg, pdata->name)
edt_ft5x06_register_write(tsdata, reg, pdata->name); \
} while (0)

#define EDT_GET_PROP(name, reg) { \
u32 val; \

@@ -519,6 +519,7 @@ static const u8 stmpe1601_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB,
[STMPE_IDX_GPPUR_LSB] = STMPE1601_REG_GPIO_PU_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,

@@ -667,6 +668,7 @@ static const u8 stmpe1801_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE1801_REG_GPIO_SET_DIR_LOW,
[STMPE_IDX_GPRER_LSB] = STMPE1801_REG_GPIO_RE_LOW,
[STMPE_IDX_GPFER_LSB] = STMPE1801_REG_GPIO_FE_LOW,
[STMPE_IDX_GPPUR_LSB] = STMPE1801_REG_GPIO_PULL_UP_LOW,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
[STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
};

@@ -750,6 +752,8 @@ static const u8 stmpe24xx_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB,
[STMPE_IDX_GPPUR_LSB] = STMPE24XX_REG_GPPUR_LSB,
[STMPE_IDX_GPPDR_LSB] = STMPE24XX_REG_GPPDR_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,

@@ -188,6 +188,7 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE1601_REG_GPIO_ED_MSB 0x8A
#define STMPE1601_REG_GPIO_RE_LSB 0x8D
#define STMPE1601_REG_GPIO_FE_LSB 0x8F
#define STMPE1601_REG_GPIO_PU_LSB 0x91
#define STMPE1601_REG_GPIO_AF_U_MSB 0x92

#define STMPE1601_SYS_CTRL_ENABLE_GPIO (1 << 3)

@@ -276,6 +277,8 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE24XX_REG_GPEDR_MSB 0x8C
#define STMPE24XX_REG_GPRER_LSB 0x91
#define STMPE24XX_REG_GPFER_LSB 0x94
#define STMPE24XX_REG_GPPUR_LSB 0x97
#define STMPE24XX_REG_GPPDR_LSB 0x9a
#define STMPE24XX_REG_GPAFR_U_MSB 0x9B

#define STMPE24XX_SYS_CTRL_ENABLE_GPIO (1 << 3)

@@ -886,7 +886,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
unsigned idx, bus_width = 0;
int err = 0;

if (!mmc_can_ext_csd(card) &&
if (!mmc_can_ext_csd(card) ||
!(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;

@@ -1648,7 +1648,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
netdev_err(bond_dev, "cannot release %s\n",
netdev_dbg(bond_dev, "cannot release %s\n",
slave_dev->name);
return -EINVAL;
}

@@ -257,7 +257,6 @@ static int cfv_rx_poll(struct napi_struct *napi, int quota)
struct vringh_kiov *riov = &cfv->ctx.riov;
unsigned int skb_len;

again:
do {
skb = NULL;

@@ -322,7 +321,6 @@ exit:
napi_schedule_prep(napi)) {
vringh_notify_disable_kern(cfv->vr_rx);
__napi_schedule(napi);
goto again;
}
break;

@@ -246,13 +246,13 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,

if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
return -ENODEV;
goto err_out;
}

if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
NE_IO_EXTENT, ioaddr);
return -EBUSY;
goto err_out;
}

reg0 = inb(ioaddr);

@@ -392,6 +392,8 @@ err_out_free_netdev:
free_netdev (dev);
err_out_free_res:
release_region (ioaddr, NE_IO_EXTENT);
err_out:
pci_disable_device(pdev);
return -ENODEV;
}

@@ -156,18 +156,6 @@ source "drivers/net/ethernet/realtek/Kconfig"
source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"

config S6GMAC
tristate "S6105 GMAC ethernet support"
depends on XTENSA_VARIANT_S6000
select PHYLIB
---help---
This driver supports the on chip ethernet device on the
S6105 xtensa processor.

To compile this driver as a module, choose M here. The module
will be called s6gmac.

source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"

@@ -66,7 +66,6 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/

|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev)
|
||||
static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
|
||||
struct net_device *dev,
|
||||
netdev_features_t features)
|
||||
{
|
||||
return vxlan_gso_check(skb);
|
||||
return vxlan_features_check(skb, features);
|
||||
}
|
||||
|
||||
static const struct net_device_ops bnx2x_netdev_ops = {
|
||||
|
@ -12589,7 +12591,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
|
|||
#endif
|
||||
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
|
||||
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
|
||||
.ndo_gso_check = bnx2x_gso_check,
|
||||
.ndo_features_check = bnx2x_features_check,
|
||||
};
|
||||
|
||||
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
|
||||
|
|
|
@ -17800,23 +17800,6 @@ static int tg3_init_one(struct pci_dev *pdev,
|
|||
goto err_out_apeunmap;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reset chip in case UNDI or EFI driver did not shutdown
|
||||
* DMA self test will enable WDMAC and we'll see (spurious)
|
||||
* pending DMA on the PCI bus at that point.
|
||||
*/
|
||||
if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
|
||||
(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
|
||||
tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
|
||||
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
|
||||
}
|
||||
|
||||
err = tg3_test_dma(tp);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
|
||||
goto err_out_apeunmap;
|
||||
}
|
||||
|
||||
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
|
||||
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
|
||||
sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
|
||||
|
@ -17861,6 +17844,23 @@ static int tg3_init_one(struct pci_dev *pdev,
|
|||
sndmbx += 0xc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reset chip in case UNDI or EFI driver did not shutdown
|
||||
* DMA self test will enable WDMAC and we'll see (spurious)
|
||||
* pending DMA on the PCI bus at that point.
|
||||
*/
|
||||
if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
|
||||
(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
|
||||
tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
|
||||
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
|
||||
}
|
||||
|
||||
err = tg3_test_dma(tp);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
|
||||
goto err_out_apeunmap;
|
||||
}
|
||||
|
||||
tg3_init_coal(tp);
|
||||
|
||||
pci_set_drvdata(pdev, dev);
|
||||
|
|
|
@ -172,7 +172,7 @@ bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
|
|||
|
||||
/* Retrieve flash partition info */
|
||||
fcomp.comp_status = 0;
|
||||
init_completion(&fcomp.comp);
|
||||
reinit_completion(&fcomp.comp);
|
||||
spin_lock_irqsave(&bnad->bna_lock, flags);
|
||||
ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
|
||||
bnad_cb_completion, &fcomp);
|
||||
|
|
|
@ -96,6 +96,9 @@ struct port_info {
|
|||
s16 xact_addr_filt; /* index of our MAC address filter */
|
||||
u16 rss_size; /* size of VI's RSS table slice */
|
||||
u8 pidx; /* index into adapter port[] */
|
||||
s8 mdio_addr;
|
||||
u8 port_type; /* firmware port type */
|
||||
u8 mod_type; /* firmware module type */
|
||||
u8 port_id; /* physical port ID */
|
||||
u8 nqsets; /* # of "Queue Sets" */
|
||||
u8 first_qset; /* index of first "Queue Set" */
|
||||
|
@ -522,6 +525,7 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
|
|||
* is "contracted" to provide for the common code.
|
||||
*/
|
||||
void t4vf_os_link_changed(struct adapter *, int, int);
|
||||
void t4vf_os_portmod_changed(struct adapter *, int);
|
||||
|
||||
/*
|
||||
* SGE function prototype declarations.
|
||||
|
|
|
@ -44,6 +44,7 @@
|
|||
#include <linux/etherdevice.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/mdio.h>
|
||||
|
||||
#include "t4vf_common.h"
|
||||
#include "t4vf_defs.h"
|
||||
|
@ -209,6 +210,38 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* THe port module type has changed on the indicated "port" (Virtual
|
||||
* Interface).
|
||||
*/
|
||||
void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
|
||||
{
|
||||
static const char * const mod_str[] = {
|
||||
NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
|
||||
};
|
||||
const struct net_device *dev = adapter->port[pidx];
|
||||
const struct port_info *pi = netdev_priv(dev);
|
||||
|
||||
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
|
||||
dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
|
||||
dev->name);
|
||||
else if (pi->mod_type < ARRAY_SIZE(mod_str))
|
||||
dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
|
||||
dev->name, mod_str[pi->mod_type]);
|
||||
else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
|
||||
dev_info(adapter->pdev_dev, "%s: unsupported optical port "
|
||||
"module inserted\n", dev->name);
|
||||
else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
|
||||
dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
|
||||
"forcing TWINAX\n", dev->name);
|
||||
else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
|
||||
dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
|
||||
dev->name);
|
||||
else
|
||||
dev_info(adapter->pdev_dev, "%s: unknown module type %d "
|
||||
"inserted\n", dev->name, pi->mod_type);
|
||||
}
|
||||
|
||||
/*
|
||||
* Net device operations.
|
||||
* ======================
|
||||
|
@ -1193,24 +1226,103 @@ static void cxgb4vf_poll_controller(struct net_device *dev)
|
|||
* state of the port to which we're linked.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Return current port link settings.
|
||||
*/
|
||||
static int cxgb4vf_get_settings(struct net_device *dev,
|
||||
struct ethtool_cmd *cmd)
|
||||
static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
|
||||
unsigned int caps)
|
||||
{
|
||||
const struct port_info *pi = netdev_priv(dev);
|
||||
unsigned int v = 0;
|
||||
|
||||
cmd->supported = pi->link_cfg.supported;
|
||||
cmd->advertising = pi->link_cfg.advertising;
|
||||
if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
|
||||
type == FW_PORT_TYPE_BT_XAUI) {
|
||||
v |= SUPPORTED_TP;
|
||||
if (caps & FW_PORT_CAP_SPEED_100M)
|
||||
v |= SUPPORTED_100baseT_Full;
|
||||
if (caps & FW_PORT_CAP_SPEED_1G)
|
||||
v |= SUPPORTED_1000baseT_Full;
|
||||
if (caps & FW_PORT_CAP_SPEED_10G)
|
||||
v |= SUPPORTED_10000baseT_Full;
|
||||
} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
|
||||
v |= SUPPORTED_Backplane;
|
||||
if (caps & FW_PORT_CAP_SPEED_1G)
|
||||
v |= SUPPORTED_1000baseKX_Full;
|
||||
if (caps & FW_PORT_CAP_SPEED_10G)
|
||||
v |= SUPPORTED_10000baseKX4_Full;
|
||||
} else if (type == FW_PORT_TYPE_KR)
|
||||
v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
|
||||
else if (type == FW_PORT_TYPE_BP_AP)
|
||||
v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
|
||||
SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
|
||||
else if (type == FW_PORT_TYPE_BP4_AP)
|
||||
v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
|
||||
SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
|
||||
SUPPORTED_10000baseKX4_Full;
|
||||
else if (type == FW_PORT_TYPE_FIBER_XFI ||
|
||||
type == FW_PORT_TYPE_FIBER_XAUI ||
|
||||
type == FW_PORT_TYPE_SFP ||
|
||||
type == FW_PORT_TYPE_QSFP_10G ||
|
||||
type == FW_PORT_TYPE_QSA) {
|
||||
v |= SUPPORTED_FIBRE;
|
||||
if (caps & FW_PORT_CAP_SPEED_1G)
|
||||
v |= SUPPORTED_1000baseT_Full;
|
||||
if (caps & FW_PORT_CAP_SPEED_10G)
|
||||
v |= SUPPORTED_10000baseT_Full;
|
||||
} else if (type == FW_PORT_TYPE_BP40_BA ||
|
||||
type == FW_PORT_TYPE_QSFP) {
|
||||
v |= SUPPORTED_40000baseSR4_Full;
|
||||
v |= SUPPORTED_FIBRE;
|
||||
}
|
||||
|
||||
if (caps & FW_PORT_CAP_ANEG)
|
||||
v |= SUPPORTED_Autoneg;
|
||||
return v;
|
||||
}
|
||||
|
||||
static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
||||
{
|
||||
const struct port_info *p = netdev_priv(dev);
|
||||
|
||||
if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
|
||||
p->port_type == FW_PORT_TYPE_BT_XFI ||
|
||||
p->port_type == FW_PORT_TYPE_BT_XAUI)
|
||||
cmd->port = PORT_TP;
|
||||
else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
|
||||
p->port_type == FW_PORT_TYPE_FIBER_XAUI)
|
||||
cmd->port = PORT_FIBRE;
|
||||
else if (p->port_type == FW_PORT_TYPE_SFP ||
|
||||
p->port_type == FW_PORT_TYPE_QSFP_10G ||
|
||||
p->port_type == FW_PORT_TYPE_QSA ||
|
||||
p->port_type == FW_PORT_TYPE_QSFP) {
|
||||
if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
|
||||
p->mod_type == FW_PORT_MOD_TYPE_SR ||
|
||||
p->mod_type == FW_PORT_MOD_TYPE_ER ||
|
||||
p->mod_type == FW_PORT_MOD_TYPE_LRM)
|
||||
cmd->port = PORT_FIBRE;
|
||||
else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
|
||||
p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
|
||||
cmd->port = PORT_DA;
|
||||
else
|
||||
cmd->port = PORT_OTHER;
|
||||
} else
|
||||
cmd->port = PORT_OTHER;
|
||||
|
||||
if (p->mdio_addr >= 0) {
|
||||
cmd->phy_address = p->mdio_addr;
|
||||
cmd->transceiver = XCVR_EXTERNAL;
|
||||
cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
|
||||
MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
|
||||
} else {
|
||||
cmd->phy_address = 0; /* not really, but no better option */
|
||||
cmd->transceiver = XCVR_INTERNAL;
|
||||
cmd->mdio_support = 0;
|
||||
}
|
||||
|
||||
cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
|
||||
p->link_cfg.supported);
|
||||
cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
|
||||
p->link_cfg.advertising);
|
||||
ethtool_cmd_speed_set(cmd,
|
||||
netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
|
||||
netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
|
||||
cmd->duplex = DUPLEX_FULL;
|
||||
|
||||
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
|
||||
cmd->phy_address = pi->port_id;
|
||||
cmd->transceiver = XCVR_EXTERNAL;
|
||||
cmd->autoneg = pi->link_cfg.autoneg;
|
||||
cmd->autoneg = p->link_cfg.autoneg;
|
||||
cmd->maxtxpkt = 0;
|
||||
cmd->maxrxpkt = 0;
|
||||
return 0;
|
||||
|
|
|
@ -230,7 +230,7 @@ struct adapter_params {
|
|||
|
||||
static inline bool is_10g_port(const struct link_config *lc)
|
||||
{
|
||||
return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
|
||||
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
|
||||
}
|
||||
|
||||
static inline bool is_x_10g_port(const struct link_config *lc)
|
||||
|
|
|
@ -245,6 +245,10 @@ static int hash_mac_addr(const u8 *addr)
|
|||
return a & 0x3f;
|
||||
}
|
||||
|
||||
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
|
||||
FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
|
||||
FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
|
||||
|
||||
/**
|
||||
* init_link_config - initialize a link's SW state
|
||||
* @lc: structure holding the link state
|
||||
|
@ -259,8 +263,8 @@ static void init_link_config(struct link_config *lc, unsigned int caps)
|
|||
lc->requested_speed = 0;
|
||||
lc->speed = 0;
|
||||
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
|
||||
if (lc->supported & SUPPORTED_Autoneg) {
|
||||
lc->advertising = lc->supported;
|
||||
if (lc->supported & FW_PORT_CAP_ANEG) {
|
||||
lc->advertising = lc->supported & ADVERT_MASK;
|
||||
lc->autoneg = AUTONEG_ENABLE;
|
||||
lc->requested_fc |= PAUSE_AUTONEG;
|
||||
} else {
|
||||
|
@ -280,7 +284,6 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
|
|||
struct fw_vi_cmd vi_cmd, vi_rpl;
|
||||
struct fw_port_cmd port_cmd, port_rpl;
|
||||
int v;
|
||||
u32 word;
|
||||
|
||||
/*
|
||||
* Execute a VI Read command to get our Virtual Interface information
|
||||
|
@ -319,19 +322,11 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
|
|||
if (v)
|
||||
return v;
|
||||
|
||||
v = 0;
|
||||
word = be16_to_cpu(port_rpl.u.info.pcap);
|
||||
if (word & FW_PORT_CAP_SPEED_100M)
|
||||
v |= SUPPORTED_100baseT_Full;
|
||||
if (word & FW_PORT_CAP_SPEED_1G)
|
||||
v |= SUPPORTED_1000baseT_Full;
|
||||
if (word & FW_PORT_CAP_SPEED_10G)
|
||||
v |= SUPPORTED_10000baseT_Full;
|
||||
if (word & FW_PORT_CAP_SPEED_40G)
|
||||
v |= SUPPORTED_40000baseSR4_Full;
|
||||
if (word & FW_PORT_CAP_ANEG)
|
||||
v |= SUPPORTED_Autoneg;
|
||||
init_link_config(&pi->link_cfg, v);
|
||||
v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
|
||||
pi->port_type = FW_PORT_CMD_PTYPE_G(v);
|
||||
pi->mod_type = FW_PORT_MOD_TYPE_NA;
|
||||
|
||||
init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1491,7 +1486,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
|
|||
*/
|
||||
const struct fw_port_cmd *port_cmd =
|
||||
(const struct fw_port_cmd *)rpl;
|
||||
u32 word;
|
||||
u32 stat, mod;
|
||||
int action, port_id, link_ok, speed, fc, pidx;
|
||||
|
||||
/*
|
||||
|
@ -1509,21 +1504,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
|
|||
port_id = FW_PORT_CMD_PORTID_G(
|
||||
be32_to_cpu(port_cmd->op_to_portid));
|
||||
|
||||
word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
|
||||
link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0;
|
||||
stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
|
||||
link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
|
||||
speed = 0;
|
||||
fc = 0;
|
||||
if (word & FW_PORT_CMD_RXPAUSE_F)
|
||||
if (stat & FW_PORT_CMD_RXPAUSE_F)
|
||||
fc |= PAUSE_RX;
|
||||
if (word & FW_PORT_CMD_TXPAUSE_F)
|
||||
if (stat & FW_PORT_CMD_TXPAUSE_F)
|
||||
fc |= PAUSE_TX;
|
||||
if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
|
||||
if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
|
||||
speed = 100;
|
||||
else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
|
||||
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
|
||||
speed = 1000;
|
||||
else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
|
||||
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
|
||||
speed = 10000;
|
||||
else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
|
||||
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
|
||||
speed = 40000;
|
||||
|
||||
/*
|
||||
|
@ -1540,12 +1535,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
|
|||
continue;
|
||||
|
||||
lc = &pi->link_cfg;
|
||||
|
||||
mod = FW_PORT_CMD_MODTYPE_G(stat);
|
||||
if (mod != pi->mod_type) {
|
||||
pi->mod_type = mod;
|
||||
t4vf_os_portmod_changed(adapter, pidx);
|
||||
}
|
||||
|
||||
if (link_ok != lc->link_ok || speed != lc->speed ||
|
||||
fc != lc->fc) {
|
||||
/* something changed */
|
||||
lc->link_ok = link_ok;
|
||||
lc->speed = speed;
|
||||
lc->fc = fc;
|
||||
lc->supported =
|
||||
be16_to_cpu(port_cmd->u.info.pcap);
|
||||
t4vf_os_link_changed(adapter, pidx, link_ok);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1060,10 +1060,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
|
|||
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
|
||||
}
|
||||
|
||||
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
|
||||
skb->csum = htons(checksum);
|
||||
skb->ip_summed = CHECKSUM_COMPLETE;
|
||||
}
|
||||
/* Hardware does not provide whole packet checksum. It only
|
||||
* provides pseudo checksum. Since hw validates the packet
|
||||
* checksum but not provide us the checksum value. use
|
||||
* CHECSUM_UNNECESSARY.
|
||||
*/
|
||||
if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
|
||||
ipv4_csum_ok)
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
|
||||
if (vlan_stripped)
|
||||
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
|
||||
|
|
|
@ -4459,9 +4459,11 @@ done:
|
|||
adapter->vxlan_port_count--;
|
||||
}
|
||||
|
||||
static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
|
||||
static netdev_features_t be_features_check(struct sk_buff *skb,
|
||||
struct net_device *dev,
|
||||
netdev_features_t features)
|
||||
{
|
||||
return vxlan_gso_check(skb);
|
||||
return vxlan_features_check(skb, features);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -4492,7 +4494,7 @@ static const struct net_device_ops be_netdev_ops = {
|
|||
#ifdef CONFIG_BE2NET_VXLAN
|
||||
.ndo_add_vxlan_port = be_add_vxlan_port,
|
||||
.ndo_del_vxlan_port = be_del_vxlan_port,
|
||||
.ndo_gso_check = be_gso_check,
|
||||
.ndo_features_check = be_features_check,
|
||||
#endif
|
||||
};
|
||||
|
||||
|
|
|
@@ -2365,9 +2365,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}

static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
return vxlan_gso_check(skb);
return vxlan_features_check(skb, features);
}
#endif

@@ -2400,7 +2402,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_MLX4_EN_VXLAN
.ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
.ndo_gso_check = mlx4_en_gso_check,
.ndo_features_check = mlx4_en_features_check,
#endif
};

@@ -2434,7 +2436,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
#ifdef CONFIG_MLX4_EN_VXLAN
.ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
.ndo_gso_check = mlx4_en_gso_check,
.ndo_features_check = mlx4_en_features_check,
#endif
};

@@ -962,7 +962,17 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_desc->ctrl.owner_opcode = op_own;
if (send_doorbell) {
wmb();
iowrite32(ring->doorbell_qpn,
/* Since there is no iowrite*_native() that writes the
* value as is, without byteswapping - using the one
* that doesn't do byteswapping in the relevant arch
* endianness.
*/
#if defined(__LITTLE_ENDIAN)
iowrite32(
#else
iowrite32be(
#endif
ring->doorbell_qpn,
ring->bf.uar->map + MLX4_SEND_DOORBELL);
} else {
ring->xmit_more++;

@@ -2303,12 +2303,6 @@ static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)

/* Spanning Tree */

static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
}

static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,

@@ -505,9 +505,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
}

static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
return vxlan_gso_check(skb);
return vxlan_features_check(skb, features);
}
#endif

@@ -532,7 +534,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
#ifdef CONFIG_QLCNIC_VXLAN
.ndo_add_vxlan_port = qlcnic_add_vxlan_port,
.ndo_del_vxlan_port = qlcnic_del_vxlan_port,
.ndo_gso_check = qlcnic_gso_check,
.ndo_features_check = qlcnic_features_check,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,

@@ -787,10 +787,10 @@ static struct net_device *rtl8139_init_board(struct pci_dev *pdev)
if (rc)
goto err_out;

disable_dev_on_err = 1;
rc = pci_request_regions (pdev, DRV_NAME);
if (rc)
goto err_out;
disable_dev_on_err = 1;

pci_set_master (pdev);

@@ -1110,6 +1110,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
return 0;

err_out:
netif_napi_del(&tp->napi);
__rtl8139_cleanup_dev (dev);
pci_disable_device (pdev);
return i;

@@ -1124,6 +1125,7 @@ static void rtl8139_remove_one(struct pci_dev *pdev)
assert (dev != NULL);

cancel_delayed_work_sync(&tp->thread);
netif_napi_del(&tp->napi);

unregister_netdev (dev);

File diff suppressed because it is too large
@@ -1671,7 +1671,7 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
static int stmmac_hw_setup(struct net_device *dev)
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;

@@ -1708,9 +1708,11 @@ static int stmmac_hw_setup(struct net_device *dev)

stmmac_mmc_setup(priv);

ret = stmmac_init_ptp(priv);
if (ret && ret != -EOPNOTSUPP)
pr_warn("%s: failed PTP initialisation\n", __func__);
if (init_ptp) {
ret = stmmac_init_ptp(priv);
if (ret && ret != -EOPNOTSUPP)
pr_warn("%s: failed PTP initialisation\n", __func__);
}

#ifdef CONFIG_DEBUG_FS
ret = stmmac_init_fs(dev);

@@ -1787,7 +1789,7 @@ static int stmmac_open(struct net_device *dev)
goto init_error;
}

ret = stmmac_hw_setup(dev);
ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
pr_err("%s: Hw setup failed\n", __func__);
goto init_error;

@@ -3036,7 +3038,7 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);

init_dma_desc_rings(ndev, GFP_ATOMIC);
stmmac_hw_setup(ndev);
stmmac_hw_setup(ndev, false);
stmmac_init_tx_coalesce(priv);

napi_enable(&priv->napi);

@@ -430,7 +430,6 @@ static struct platform_driver stmmac_pltfr_driver = {
.remove = stmmac_pltfr_remove,
.driver = {
.name = STMMAC_RESOURCE_NAME,
.owner = THIS_MODULE,
.pm = &stmmac_pltfr_pm_ops,
.of_match_table = of_match_ptr(stmmac_dt_ids),
},

|
|||
segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
|
||||
if (IS_ERR(segs)) {
|
||||
dev->stats.tx_dropped++;
|
||||
dev_kfree_skb_any(skb);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
|
|
|
@@ -388,7 +388,6 @@ struct axidma_bd {
* @dma_err_tasklet: Tasklet structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
* @temac_type: axienet type to identify between soft and hard temac
* @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
* @options: AxiEthernet option word
* @last_link: Phy link state in which the PHY was negotiated earlier

@@ -431,7 +430,6 @@ struct axienet_local {

int tx_irq;
int rx_irq;
u32 temac_type;
u32 phy_type;

u32 options; /* Current options word */

@@ -1555,10 +1555,6 @@ static int axienet_of_probe(struct platform_device *op)
if ((be32_to_cpup(p)) >= 0x4000)
lp->jumbo_support = 1;
}
p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
NULL);
if (p)
lp->temac_type = be32_to_cpup(p);
p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
if (p)
lp->phy_type = be32_to_cpup(p);

@@ -590,6 +590,7 @@ struct nvsp_message {


#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
#define NETVSC_SEND_BUFFER_ID 0

#define NETVSC_PACKET_SIZE 4096

|
|||
|
||||
/* Deal with the send buffer we may have setup.
|
||||
* If we got a send section size, it means we received a
|
||||
* SendsendBufferComplete msg (ie sent
|
||||
* NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
|
||||
* NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
|
||||
* NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
|
||||
* to send a revoke msg here
|
||||
*/
|
||||
if (net_device->send_section_size) {
|
||||
|
@ -172,7 +172,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
|
|||
|
||||
revoke_packet->hdr.msg_type =
|
||||
NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
|
||||
revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0;
|
||||
revoke_packet->msg.v1_msg.revoke_send_buf.id =
|
||||
NETVSC_SEND_BUFFER_ID;
|
||||
|
||||
ret = vmbus_sendpacket(net_device->dev->channel,
|
||||
revoke_packet,
|
||||
|
@ -204,7 +205,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
|
|||
net_device->send_buf_gpadl_handle = 0;
|
||||
}
|
||||
if (net_device->send_buf) {
|
||||
/* Free up the receive buffer */
|
||||
/* Free up the send buffer */
|
||||
vfree(net_device->send_buf);
|
||||
net_device->send_buf = NULL;
|
||||
}
|
||||
|
@ -339,9 +340,9 @@ static int netvsc_init_buf(struct hv_device *device)
|
|||
init_packet = &net_device->channel_init_pkt;
|
||||
memset(init_packet, 0, sizeof(struct nvsp_message));
|
||||
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
|
||||
init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
|
||||
init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
|
||||
net_device->send_buf_gpadl_handle;
|
||||
init_packet->msg.v1_msg.send_recv_buf.id = 0;
|
||||
init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
|
||||
|
||||
/* Send the gpadl notification request */
|
||||
ret = vmbus_sendpacket(device->channel, init_packet,
|
||||
|
@ -364,7 +365,7 @@ static int netvsc_init_buf(struct hv_device *device)
|
|||
netdev_err(ndev, "Unable to complete send buffer "
|
||||
"initialization with NetVsp - status %d\n",
|
||||
init_packet->msg.v1_msg.
|
||||
send_recv_buf_complete.status);
|
||||
send_send_buf_complete.status);
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
|
|
@ -88,6 +88,7 @@ struct kszphy_priv {
|
|||
|
||||
static const struct kszphy_type ksz8021_type = {
|
||||
.led_mode_reg = MII_KSZPHY_CTRL_2,
|
||||
.has_broadcast_disable = true,
|
||||
.has_rmii_ref_clk_sel = true,
|
||||
};
|
||||
|
||||
|
@ -258,19 +259,6 @@ static int kszphy_config_init(struct phy_device *phydev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int ksz8021_config_init(struct phy_device *phydev)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = kszphy_config_init(phydev);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = kszphy_broadcast_disable(phydev);
|
||||
|
||||
return rc < 0 ? rc : 0;
|
||||
}
|
||||
|
||||
static int ksz9021_load_values_from_of(struct phy_device *phydev,
|
||||
struct device_node *of_node, u16 reg,
|
||||
char *field1, char *field2,
|
||||
|
@ -584,7 +572,7 @@ static struct phy_driver ksphy_driver[] = {
|
|||
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
|
||||
.driver_data = &ksz8021_type,
|
||||
.probe = kszphy_probe,
|
||||
.config_init = ksz8021_config_init,
|
||||
.config_init = kszphy_config_init,
|
||||
.config_aneg = genphy_config_aneg,
|
||||
.read_status = genphy_read_status,
|
||||
.ack_interrupt = kszphy_ack_interrupt,
|
||||
|
@ -601,7 +589,7 @@ static struct phy_driver ksphy_driver[] = {
|
|||
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
|
||||
.driver_data = &ksz8021_type,
|
||||
.probe = kszphy_probe,
|
||||
.config_init = ksz8021_config_init,
|
||||
.config_init = kszphy_config_init,
|
||||
.config_aneg = genphy_config_aneg,
|
||||
.read_status = genphy_read_status,
|
||||
.ack_interrupt = kszphy_ack_interrupt,
|
||||
|
|
|
@@ -760,7 +760,6 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
container_of(napi, struct receive_queue, napi);
unsigned int r, received = 0;

again:
received += virtnet_receive(rq, budget - received);

/* Out of packets? */

@@ -771,7 +770,6 @@ again:
napi_schedule_prep(napi)) {
virtqueue_disable_cb(rq->vq);
__napi_schedule(napi);
goto again;
}
}

|
|||
bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
|
||||
|
||||
skb = udp_tunnel_handle_offloads(skb, udp_sum);
|
||||
if (IS_ERR(skb))
|
||||
return -EINVAL;
|
||||
if (IS_ERR(skb)) {
|
||||
err = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
skb_scrub_packet(skb, xnet);
|
||||
|
||||
|
@ -1590,12 +1592,16 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
|
|||
|
||||
/* Need space for new headers (invalidates iph ptr) */
|
||||
err = skb_cow_head(skb, min_headroom);
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
if (unlikely(err)) {
|
||||
kfree_skb(skb);
|
||||
goto err;
|
||||
}
|
||||
|
||||
skb = vlan_hwaccel_push_inside(skb);
|
||||
if (WARN_ON(!skb))
|
||||
return -ENOMEM;
|
||||
if (WARN_ON(!skb)) {
|
||||
err = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
|
||||
vxh->vx_flags = htonl(VXLAN_FLAGS);
|
||||
|
@ -1606,6 +1612,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
|
|||
udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
|
||||
ttl, src_port, dst_port);
|
||||
return 0;
|
||||
err:
|
||||
dst_release(dst);
|
||||
return err;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -1621,7 +1630,7 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
|
|||
|
||||
skb = udp_tunnel_handle_offloads(skb, udp_sum);
|
||||
if (IS_ERR(skb))
|
||||
return -EINVAL;
|
||||
return PTR_ERR(skb);
|
||||
|
||||
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
|
||||
+ VXLAN_HLEN + sizeof(struct iphdr)
|
||||
|
@ -1629,8 +1638,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
|
|||
|
||||
/* Need space for new headers (invalidates iph ptr) */
|
||||
err = skb_cow_head(skb, min_headroom);
|
||||
if (unlikely(err))
|
||||
if (unlikely(err)) {
|
||||
kfree_skb(skb);
|
||||
return err;
|
||||
}
|
||||
|
||||
skb = vlan_hwaccel_push_inside(skb);
|
||||
if (WARN_ON(!skb))
|
||||
|
@ -1776,9 +1787,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
|
|||
tos, ttl, df, src_port, dst_port,
|
||||
htonl(vni << 8),
|
||||
!net_eq(vxlan->net, dev_net(vxlan->dev)));
|
||||
|
||||
if (err < 0)
|
||||
if (err < 0) {
|
||||
/* skb is already freed. */
|
||||
skb = NULL;
|
||||
goto rt_tx_error;
|
||||
}
|
||||
|
||||
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
} else {
|
||||
|
|
|
@ -1070,7 +1070,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
|
|||
*/
|
||||
if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
|
||||
((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
|
||||
(sdiodev->pdata->oob_irq_supported)))
|
||||
(sdiodev->pdata && sdiodev->pdata->oob_irq_supported)))
|
||||
bus_if->wowl_supported = true;
|
||||
#endif
|
||||
|
||||
|
@ -1167,7 +1167,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
|
|||
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
|
||||
|
||||
brcmf_dbg(SDIO, "Enter\n");
|
||||
if (sdiodev->pdata->oob_irq_supported)
|
||||
if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)
|
||||
disable_irq_wake(sdiodev->pdata->oob_irq_nr);
|
||||
brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
|
||||
atomic_set(&sdiodev->suspend, false);
|
||||
|
|
|
@ -65,7 +65,8 @@ config IPW2100_DEBUG
|
|||
|
||||
config IPW2200
|
||||
tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
|
||||
depends on PCI && CFG80211 && CFG80211_WEXT
|
||||
depends on PCI && CFG80211
|
||||
select CFG80211_WEXT
|
||||
select WIRELESS_EXT
|
||||
select WEXT_SPY
|
||||
select WEXT_PRIV
|
||||
|
|
|
@ -1323,10 +1323,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
|
|||
|
||||
try_again:
|
||||
/* try next, if any */
|
||||
kfree(pieces);
|
||||
release_firmware(ucode_raw);
|
||||
if (iwl_request_firmware(drv, false))
|
||||
goto out_unbind;
|
||||
kfree(pieces);
|
||||
return;
|
||||
|
||||
out_free_fw:
|
||||
|
|
|
@ -310,6 +310,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
|
|||
#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
|
||||
|
||||
#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
|
||||
#define FH_MEM_TB_MAX_LENGTH (0x00020000)
|
||||
|
||||
/* TFDB Area - TFDs buffer table */
|
||||
#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
|
||||
|
|
|
@ -1004,8 +1004,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
|
|||
{
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
/* disallow low power states when the FW is down */
|
||||
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
|
||||
/*
|
||||
* Disallow low power states when the FW is down by taking
|
||||
* the UCODE_DOWN ref. in case of ongoing hw restart the
|
||||
* ref is already taken, so don't take it again.
|
||||
*/
|
||||
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
|
||||
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
|
||||
|
||||
/* async_handlers_wk is now blocked */
|
||||
|
||||
|
@ -1023,6 +1028,12 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
|
|||
/* the fw is stopped, the aux sta is dead: clean up driver state */
|
||||
iwl_mvm_del_aux_sta(mvm);
|
||||
|
||||
/*
|
||||
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
|
||||
* won't be called in this case).
|
||||
*/
|
||||
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
|
||||
|
||||
mvm->ucode_loaded = false;
|
||||
}
|
||||
|
||||
|
|
|
@ -367,7 +367,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
|||
|
||||
/* 3165 Series */
|
||||
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
|
||||
|
||||
/* 7265 Series */
|
||||
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
|
||||
|
|
|
@ -614,7 +614,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
|
|||
{
|
||||
u8 *v_addr;
|
||||
dma_addr_t p_addr;
|
||||
u32 offset, chunk_sz = section->len;
|
||||
u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
|
||||
int ret = 0;
|
||||
|
||||
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
|
||||
|
@ -1012,16 +1012,21 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
|
|||
/* Stop the device, and put it in low power state */
|
||||
iwl_pcie_apm_stop(trans);
|
||||
|
||||
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
|
||||
* Clean again the interrupt here
|
||||
/* stop and reset the on-board processor */
|
||||
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
|
||||
udelay(20);
|
||||
|
||||
/*
|
||||
* Upon stop, the APM issues an interrupt if HW RF kill is set.
|
||||
* This is a bug in certain verions of the hardware.
|
||||
* Certain devices also keep sending HW RF kill interrupt all
|
||||
* the time, unless the interrupt is ACKed even if the interrupt
|
||||
* should be masked. Re-ACK all the interrupts here.
|
||||
*/
|
||||
spin_lock(&trans_pcie->irq_lock);
|
||||
iwl_disable_interrupts(trans);
|
||||
spin_unlock(&trans_pcie->irq_lock);
|
||||
|
||||
/* stop and reset the on-board processor */
|
||||
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
|
||||
udelay(20);
|
||||
|
||||
/* clear all status bits */
|
||||
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
|
||||
|
|
|
@ -1041,6 +1041,7 @@ static const struct x86_cpu_id rapl_ids[] = {
|
|||
RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
|
||||
RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
|
||||
RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
|
||||
RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
|
||||
RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
|
||||
{}
|
||||
};
|
||||
|
|
|
@ -570,7 +570,7 @@ static struct regulator_ops s2mps14_reg_ops = {
|
|||
.enable_mask = S2MPS14_ENABLE_MASK \
|
||||
}
|
||||
|
||||
#define regulator_desc_s2mps14_buck(num, min, step) { \
|
||||
#define regulator_desc_s2mps14_buck(num, min, step, min_sel) { \
|
||||
.name = "BUCK"#num, \
|
||||
.id = S2MPS14_BUCK##num, \
|
||||
.ops = &s2mps14_reg_ops, \
|
||||
|
@ -579,7 +579,7 @@ static struct regulator_ops s2mps14_reg_ops = {
|
|||
.min_uV = min, \
|
||||
.uV_step = step, \
|
||||
.n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
|
||||
.linear_min_sel = S2MPS14_BUCK1235_START_SEL, \
|
||||
.linear_min_sel = min_sel, \
|
||||
.ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
|
||||
.vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \
|
||||
.vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
|
||||
|
@ -613,11 +613,16 @@ static const struct regulator_desc s2mps14_regulators[] = {
|
|||
regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV),
|
||||
regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV),
|
||||
regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV),
|
||||
regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV),
|
||||
regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV),
|
||||
regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV),
|
||||
regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV),
|
||||
regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV),
|
||||
regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV,
|
||||
S2MPS14_BUCK1235_START_SEL),
|
||||
regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV,
|
||||
S2MPS14_BUCK1235_START_SEL),
|
||||
regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV,
|
||||
S2MPS14_BUCK1235_START_SEL),
|
||||
regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV,
|
||||
S2MPS14_BUCK4_START_SEL),
|
||||
regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV,
|
||||
S2MPS14_BUCK1235_START_SEL),
|
||||
};
|
||||
|
||||
static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
|
||||
|
|
|
@ -39,7 +39,7 @@
|
|||
|
||||
#define DRV_NAME "fnic"
|
||||
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
|
||||
#define DRV_VERSION "1.6.0.16"
|
||||
#define DRV_VERSION "1.6.0.17"
|
||||
#define PFX DRV_NAME ": "
|
||||
#define DFX DRV_NAME "%d: "
|
||||
|
||||
|
|
|
@ -1892,6 +1892,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
|
|||
goto fnic_abort_cmd_end;
|
||||
}
|
||||
|
||||
/* IO out of order */
|
||||
|
||||
if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
|
||||
spin_unlock_irqrestore(io_lock, flags);
|
||||
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
|
||||
"Issuing Host reset due to out of order IO\n");
|
||||
|
||||
if (fnic_host_reset(sc) == FAILED) {
|
||||
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
|
||||
"fnic_host_reset failed.\n");
|
||||
}
|
||||
ret = FAILED;
|
||||
goto fnic_abort_cmd_end;
|
||||
}
|
||||
|
||||
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
|
||||
|
||||
/*
|
||||
|
|
|
@ -1041,7 +1041,7 @@ retry:
|
|||
}
|
||||
/* signal not to enter either branch of the if () below */
|
||||
timeleft = 0;
|
||||
rtn = NEEDS_RETRY;
|
||||
rtn = FAILED;
|
||||
} else {
|
||||
timeleft = wait_for_completion_timeout(&done, timeout);
|
||||
rtn = SUCCESS;
|
||||
|
@ -1081,7 +1081,7 @@ retry:
|
|||
rtn = FAILED;
|
||||
break;
|
||||
}
|
||||
} else if (!rtn) {
|
||||
} else if (rtn != FAILED) {
|
||||
scsi_abort_eh_cmnd(scmd);
|
||||
rtn = FAILED;
|
||||
}
|
||||
|
|
|
@ -2623,8 +2623,9 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
|
|||
sd_config_discard(sdkp, SD_LBP_WS16);
|
||||
|
||||
} else { /* LBP VPD page tells us what to use */
|
||||
|
||||
if (sdkp->lbpws)
|
||||
if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz)
|
||||
sd_config_discard(sdkp, SD_LBP_UNMAP);
|
||||
else if (sdkp->lbpws)
|
||||
sd_config_discard(sdkp, SD_LBP_WS16);
|
||||
else if (sdkp->lbpws10)
|
||||
sd_config_discard(sdkp, SD_LBP_WS10);
|
||||
|
|
|
@ -341,7 +341,7 @@ static int img_spfi_start_dma(struct spi_master *master,
|
|||
default:
|
||||
rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
|
||||
rxconf.src_addr_width = 1;
|
||||
rxconf.src_maxburst = 1;
|
||||
rxconf.src_maxburst = 4;
|
||||
}
|
||||
dmaengine_slave_config(spfi->rx_ch, &rxconf);
|
||||
|
||||
|
@ -368,7 +368,7 @@ static int img_spfi_start_dma(struct spi_master *master,
|
|||
default:
|
||||
txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
|
||||
txconf.dst_addr_width = 1;
|
||||
txconf.dst_maxburst = 1;
|
||||
txconf.dst_maxburst = 4;
|
||||
break;
|
||||
}
|
||||
dmaengine_slave_config(spfi->tx_ch, &txconf);
|
||||
|
@ -390,14 +390,14 @@ static int img_spfi_start_dma(struct spi_master *master,
|
|||
dma_async_issue_pending(spfi->rx_ch);
|
||||
}
|
||||
|
||||
spfi_start(spfi);
|
||||
|
||||
if (xfer->tx_buf) {
|
||||
spfi->tx_dma_busy = true;
|
||||
dmaengine_submit(txdesc);
|
||||
dma_async_issue_pending(spfi->tx_ch);
|
||||
}
|
||||
|
||||
spfi_start(spfi);
|
||||
|
||||
return 1;
|
||||
|
||||
stop_dma:
|
||||
|
|
|
@ -480,6 +480,8 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
|
|||
struct device_node *np = spi->master->dev.of_node;
|
||||
struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
|
||||
|
||||
pm_runtime_get_sync(&p->pdev->dev);
|
||||
|
||||
if (!np) {
|
||||
/*
|
||||
* Use spi->controller_data for CS (same strategy as spi_gpio),
|
||||
|
@ -498,6 +500,9 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
|
|||
if (spi->cs_gpio >= 0)
|
||||
gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
|
||||
|
||||
|
||||
pm_runtime_put_sync(&p->pdev->dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -4,6 +4,8 @@
  *  Copyright (C) 2012	Samsung Electronics Co., Ltd(http://www.samsung.com)
  *  Copyright (C) 2012  Amit Daniel <amit.kachhap@linaro.org>
  *
+ *  Copyright (C) 2014  Viresh Kumar <viresh.kumar@linaro.org>
+ *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -28,6 +30,20 @@
 #include <linux/cpu.h>
 #include <linux/cpu_cooling.h>
 
+/*
+ * Cooling state <-> CPUFreq frequency
+ *
+ * Cooling states are translated to frequencies throughout this driver and this
+ * is the relation between them.
+ *
+ * Highest cooling state corresponds to lowest possible frequency.
+ *
+ * i.e.
+ *	level 0 --> 1st Max Freq
+ *	level 1 --> 2nd Max Freq
+ *	...
+ */
+
 /**
  * struct cpufreq_cooling_device - data for cooling device with cpufreq
  * @id: unique integer value corresponding to each cpufreq_cooling_device
@@ -38,25 +54,27 @@
  *	cooling devices.
  * @cpufreq_val: integer value representing the absolute value of the clipped
  *	frequency.
+ * @max_level: maximum cooling level. One less than total number of valid
+ *	cpufreq frequencies.
  * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
+ * @node: list_head to link all cpufreq_cooling_device together.
  *
- * This structure is required for keeping information of each
- * cpufreq_cooling_device registered. In order to prevent corruption of this a
- * mutex lock cooling_cpufreq_lock is used.
+ * This structure is required for keeping information of each registered
+ * cpufreq_cooling_device.
  */
 struct cpufreq_cooling_device {
 	int id;
 	struct thermal_cooling_device *cool_dev;
 	unsigned int cpufreq_state;
 	unsigned int cpufreq_val;
+	unsigned int max_level;
+	unsigned int *freq_table;	/* In descending order */
 	struct cpumask allowed_cpus;
+	struct list_head node;
 };
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
-static unsigned int cpufreq_dev_count;
-
+static LIST_HEAD(cpufreq_dev_list);
 
 /**
@@ -98,120 +116,30 @@ static void release_idr(struct idr *idr, int id)
 /* Below code defines functions to be used for cpufreq as cooling device */
 
 /**
- * is_cpufreq_valid - function to check frequency transitioning capability.
- * @cpu: cpu for which check is needed.
+ * get_level: Find the level for a particular frequency
+ * @cpufreq_dev: cpufreq_dev for which the property is required
+ * @freq: Frequency
  *
- * This function will check the current state of the system if
- * it is capable of changing the frequency for a given @cpu.
- *
- * Return: 0 if the system is not currently capable of changing
- * the frequency of given cpu. !0 in case the frequency is changeable.
+ * Return: level on success, THERMAL_CSTATE_INVALID on error.
  */
-static int is_cpufreq_valid(int cpu)
+static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
+			       unsigned int freq)
 {
-	struct cpufreq_policy policy;
+	unsigned long level;
 
-	return !cpufreq_get_policy(&policy, cpu);
-}
+	for (level = 0; level <= cpufreq_dev->max_level; level++) {
+		if (freq == cpufreq_dev->freq_table[level])
+			return level;
 
-enum cpufreq_cooling_property {
-	GET_LEVEL,
-	GET_FREQ,
-	GET_MAXL,
-};
-
-/**
- * get_property - fetch a property of interest for a give cpu.
- * @cpu: cpu for which the property is required
- * @input: query parameter
- * @output: query return
- * @property: type of query (frequency, level, max level)
- *
- * This is the common function to
- * 1. get maximum cpu cooling states
- * 2. translate frequency to cooling state
- * 3. translate cooling state to frequency
- * Note that the code may be not in good shape
- * but it is written in this way in order to:
- *    a) reduce duplicate code as most of the code can be shared.
- *    b) make sure the logic is consistent when translating between
- *       cooling states and frequencies.
- *
- * Return: 0 on success, -EINVAL when invalid parameters are passed.
- */
-static int get_property(unsigned int cpu, unsigned long input,
-			unsigned int *output,
-			enum cpufreq_cooling_property property)
-{
-	int i;
-	unsigned long max_level = 0, level = 0;
-	unsigned int freq = CPUFREQ_ENTRY_INVALID;
-	int descend = -1;
-	struct cpufreq_frequency_table *pos, *table =
-					cpufreq_frequency_get_table(cpu);
-
-	if (!output)
-		return -EINVAL;
-
-	if (!table)
-		return -EINVAL;
-
-	cpufreq_for_each_valid_entry(pos, table) {
-		/* ignore duplicate entry */
-		if (freq == pos->frequency)
-			continue;
-
-		/* get the frequency order */
-		if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
-			descend = freq > pos->frequency;
-
-		freq = pos->frequency;
-		max_level++;
+		if (freq > cpufreq_dev->freq_table[level])
+			break;
 	}
 
-	/* No valid cpu frequency entry */
-	if (max_level == 0)
-		return -EINVAL;
-
-	/* max_level is an index, not a counter */
-	max_level--;
-
-	/* get max level */
-	if (property == GET_MAXL) {
-		*output = (unsigned int)max_level;
-		return 0;
-	}
-
-	if (property == GET_FREQ)
-		level = descend ? input : (max_level - input);
-
-	i = 0;
-	cpufreq_for_each_valid_entry(pos, table) {
-		/* ignore duplicate entry */
-		if (freq == pos->frequency)
-			continue;
-
-		/* now we have a valid frequency entry */
-		freq = pos->frequency;
-
-		if (property == GET_LEVEL && (unsigned int)input == freq) {
-			/* get level by frequency */
-			*output = descend ? i : (max_level - i);
-			return 0;
-		}
-		if (property == GET_FREQ && level == i) {
-			/* get frequency by level */
-			*output = freq;
-			return 0;
-		}
-		i++;
-	}
-
-	return -EINVAL;
+	return THERMAL_CSTATE_INVALID;
 }
 
 /**
- * cpufreq_cooling_get_level - for a give cpu, return the cooling level.
+ * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
  * @cpu: cpu for which the level is required
  * @freq: the frequency of interest
  *
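After this rewrite a cooling level is simply an index into a per-device frequency table kept in descending order, so get_level() is a linear scan with an early exit once the requested frequency exceeds the current entry. A small userspace sketch of that lookup, with an illustrative table and plain types rather than the kernel structures:

#include <stdio.h>

#define CSTATE_INVALID (~0UL)	/* stand-in for THERMAL_CSTATE_INVALID */

/* Frequencies in kHz, stored in descending order as the driver now does. */
static const unsigned int freq_table[] = { 1800000, 1400000, 1000000, 600000 };
static const unsigned long max_level = 3;	/* one less than the entry count */

static unsigned long get_level(unsigned int freq)
{
	unsigned long level;

	for (level = 0; level <= max_level; level++) {
		if (freq == freq_table[level])
			return level;
		if (freq > freq_table[level])	/* table is sorted: no match possible */
			break;
	}
	return CSTATE_INVALID;
}

int main(void)
{
	printf("1400000 kHz -> level %lu\n", get_level(1400000));	/* 1 */
	printf("1500000 kHz -> %lu (invalid)\n", get_level(1500000));
	return 0;
}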
@@ -223,78 +151,22 @@ static int get_property(unsigned int cpu, unsigned long input,
  */
 unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
 {
-	unsigned int val;
+	struct cpufreq_cooling_device *cpufreq_dev;
 
-	if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
-		return THERMAL_CSTATE_INVALID;
+	mutex_lock(&cooling_cpufreq_lock);
+	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+		if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
+			mutex_unlock(&cooling_cpufreq_lock);
+			return get_level(cpufreq_dev, freq);
+		}
+	}
+	mutex_unlock(&cooling_cpufreq_lock);
 
-	return (unsigned long)val;
+	pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
+	return THERMAL_CSTATE_INVALID;
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
 
-/**
- * get_cpu_frequency - get the absolute value of frequency from level.
- * @cpu: cpu for which frequency is fetched.
- * @level: cooling level
- *
- * This function matches cooling level with frequency. Based on a cooling level
- * of frequency, equals cooling state of cpu cooling device, it will return
- * the corresponding frequency.
- * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
- *
- * Return: 0 on error, the corresponding frequency otherwise.
- */
-static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
-{
-	int ret = 0;
-	unsigned int freq;
-
-	ret = get_property(cpu, level, &freq, GET_FREQ);
-	if (ret)
-		return 0;
-
-	return freq;
-}
-
-/**
- * cpufreq_apply_cooling - function to apply frequency clipping.
- * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
- *	clipping data.
- * @cooling_state: value of the cooling state.
- *
- * Function used to make sure the cpufreq layer is aware of current thermal
- * limits. The limits are applied by updating the cpufreq policy.
- *
- * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
- * cooling state).
- */
-static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
-				 unsigned long cooling_state)
-{
-	unsigned int cpuid, clip_freq;
-	struct cpumask *mask = &cpufreq_device->allowed_cpus;
-	unsigned int cpu = cpumask_any(mask);
-
-
-	/* Check if the old cooling action is same as new cooling action */
-	if (cpufreq_device->cpufreq_state == cooling_state)
-		return 0;
-
-	clip_freq = get_cpu_frequency(cpu, cooling_state);
-	if (!clip_freq)
-		return -EINVAL;
-
-	cpufreq_device->cpufreq_state = cooling_state;
-	cpufreq_device->cpufreq_val = clip_freq;
-
-	for_each_cpu(cpuid, mask) {
-		if (is_cpufreq_valid(cpuid))
-			cpufreq_update_policy(cpuid);
-	}
-
-	return 0;
-}
-
 /**
  * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
  * @nb:	struct notifier_block * with callback info.
@@ -323,11 +195,6 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 					&cpufreq_dev->allowed_cpus))
 			continue;
 
-		if (!cpufreq_dev->cpufreq_val)
-			cpufreq_dev->cpufreq_val = get_cpu_frequency(
-					cpumask_any(&cpufreq_dev->allowed_cpus),
-					cpufreq_dev->cpufreq_state);
-
 		max_freq = cpufreq_dev->cpufreq_val;
 
 		if (policy->max != max_freq)
@@ -354,19 +221,9 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
 				 unsigned long *state)
 {
 	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
-	struct cpumask *mask = &cpufreq_device->allowed_cpus;
-	unsigned int cpu;
-	unsigned int count = 0;
-	int ret;
-
-	cpu = cpumask_any(mask);
-
-	ret = get_property(cpu, 0, &count, GET_MAXL);
-
-	if (count > 0)
-		*state = count;
 
-	return ret;
+	*state = cpufreq_device->max_level;
+	return 0;
 }
 
 /**
@@ -403,8 +260,24 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 				 unsigned long state)
 {
 	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+	unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
+	unsigned int clip_freq;
 
-	return cpufreq_apply_cooling(cpufreq_device, state);
+	/* Request state should be less than max_level */
+	if (WARN_ON(state > cpufreq_device->max_level))
+		return -EINVAL;
+
+	/* Check if the old cooling action is same as new cooling action */
+	if (cpufreq_device->cpufreq_state == state)
+		return 0;
+
+	clip_freq = cpufreq_device->freq_table[state];
+	cpufreq_device->cpufreq_state = state;
+	cpufreq_device->cpufreq_val = clip_freq;
+
+	cpufreq_update_policy(cpu);
+
+	return 0;
 }
 
 /* Bind cpufreq callbacks to thermal cooling device ops */
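cpufreq_set_cur_state() now just records the clipped frequency for the requested level and kicks a policy update; the actual clamping still happens in the policy notifier, which caps the policy maximum at the stored cpufreq_val. A condensed userspace sketch of that two-step interaction, with illustrative state and no real cpufreq involvement:

#include <stdio.h>

static const unsigned int freq_table[] = { 1800000, 1400000, 1000000, 600000 };
static const unsigned long max_level = 3;

static unsigned long cur_state;
static unsigned int cpufreq_val = 1800000;	/* current clip frequency */

/* Step 1: the thermal core selects a cooling state. */
static int set_cur_state(unsigned long state)
{
	if (state > max_level)
		return -1;		/* the driver returns -EINVAL here */
	if (state == cur_state)
		return 0;		/* nothing to do */

	cur_state = state;
	cpufreq_val = freq_table[state];
	/* the driver would now call cpufreq_update_policy() */
	return 0;
}

/* Step 2: the policy notifier clamps the requested maximum to the clip value. */
static unsigned int apply_policy(unsigned int requested_max)
{
	return requested_max > cpufreq_val ? cpufreq_val : requested_max;
}

int main(void)
{
	set_cur_state(2);
	printf("policy max after cooling to level 2: %u kHz\n",
	       apply_policy(1800000));	/* 1000000 */
	return 0;
}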
@@ -419,10 +292,25 @@ static struct notifier_block thermal_cpufreq_notifier_block = {
 	.notifier_call = cpufreq_thermal_notifier,
 };
 
+static unsigned int find_next_max(struct cpufreq_frequency_table *table,
+				  unsigned int prev_max)
+{
+	struct cpufreq_frequency_table *pos;
+	unsigned int max = 0;
+
+	cpufreq_for_each_valid_entry(pos, table) {
+		if (pos->frequency > max && pos->frequency < prev_max)
+			max = pos->frequency;
+	}
+
+	return max;
+}
+
 /**
  * __cpufreq_cooling_register - helper function to create cpufreq cooling device
  * @np: a valid struct device_node to the cooling device device tree node
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 *	Normally this should be same as cpufreq policy->related_cpus.
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
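find_next_max() returns the largest table frequency strictly below prev_max, so calling it repeatedly while feeding the previous result back in walks an arbitrarily ordered cpufreq table from highest to lowest frequency; that is how the registration path below fills freq_table in descending order. A standalone sketch of the same idea using plain arrays instead of struct cpufreq_frequency_table:

#include <stdio.h>

static const unsigned int table[] = { 600000, 1800000, 1000000, 1400000 };
static const int entries = 4;

/* Largest frequency strictly below prev_max, or 0 when none is left. */
static unsigned int find_next_max(unsigned int prev_max)
{
	unsigned int max = 0;
	int i;

	for (i = 0; i < entries; i++) {
		if (table[i] > max && table[i] < prev_max)
			max = table[i];
	}
	return max;
}

int main(void)
{
	unsigned int freq = ~0U;	/* start above every valid frequency, like freq = -1 */
	int level;

	for (level = 0; level < entries; level++) {
		freq = find_next_max(freq);
		printf("level %d -> %u kHz\n", level, freq);
	}
	return 0;	/* prints 1800000, 1400000, 1000000, 600000 */
}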
@@ -437,37 +325,42 @@ __cpufreq_cooling_register(struct device_node *np,
 			   const struct cpumask *clip_cpus)
 {
 	struct thermal_cooling_device *cool_dev;
-	struct cpufreq_cooling_device *cpufreq_dev = NULL;
-	unsigned int min = 0, max = 0;
+	struct cpufreq_cooling_device *cpufreq_dev;
 	char dev_name[THERMAL_NAME_LENGTH];
-	int ret = 0, i;
-	struct cpufreq_policy policy;
+	struct cpufreq_frequency_table *pos, *table;
+	unsigned int freq, i;
+	int ret;
 
-	/* Verify that all the clip cpus have same freq_min, freq_max limit */
-	for_each_cpu(i, clip_cpus) {
-		/* continue if cpufreq policy not found and not return error */
-		if (!cpufreq_get_policy(&policy, i))
-			continue;
-		if (min == 0 && max == 0) {
-			min = policy.cpuinfo.min_freq;
-			max = policy.cpuinfo.max_freq;
-		} else {
-			if (min != policy.cpuinfo.min_freq ||
-			    max != policy.cpuinfo.max_freq)
-				return ERR_PTR(-EINVAL);
-		}
+	table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
+	if (!table) {
+		pr_debug("%s: CPUFreq table not found\n", __func__);
+		return ERR_PTR(-EPROBE_DEFER);
 	}
-	cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
-			      GFP_KERNEL);
+
+	cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
 	if (!cpufreq_dev)
 		return ERR_PTR(-ENOMEM);
 
+	/* Find max levels */
+	cpufreq_for_each_valid_entry(pos, table)
+		cpufreq_dev->max_level++;
+
+	cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
+					  cpufreq_dev->max_level, GFP_KERNEL);
+	if (!cpufreq_dev->freq_table) {
+		cool_dev = ERR_PTR(-ENOMEM);
+		goto free_cdev;
+	}
+
+	/* max_level is an index, not a counter */
+	cpufreq_dev->max_level--;
+
 	cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
 
 	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
 	if (ret) {
-		kfree(cpufreq_dev);
-		return ERR_PTR(-EINVAL);
+		cool_dev = ERR_PTR(ret);
+		goto free_table;
 	}
 
 	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -475,24 +368,43 @@ __cpufreq_cooling_register(struct device_node *np,
 
 	cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
 						      &cpufreq_cooling_ops);
-	if (IS_ERR(cool_dev)) {
-		release_idr(&cpufreq_idr, cpufreq_dev->id);
-		kfree(cpufreq_dev);
-		return cool_dev;
+	if (IS_ERR(cool_dev))
+		goto remove_idr;
+
+	/* Fill freq-table in descending order of frequencies */
+	for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
+		freq = find_next_max(table, freq);
+		cpufreq_dev->freq_table[i] = freq;
+
+		/* Warn for duplicate entries */
+		if (!freq)
+			pr_warn("%s: table has duplicate entries\n", __func__);
+		else
+			pr_debug("%s: freq:%u KHz\n", __func__, freq);
 	}
 
+	cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
 	cpufreq_dev->cool_dev = cool_dev;
 	cpufreq_dev->cpufreq_state = 0;
 
 	mutex_lock(&cooling_cpufreq_lock);
 
 	/* Register the notifier for first cpufreq cooling device */
-	if (cpufreq_dev_count == 0)
+	if (list_empty(&cpufreq_dev_list))
 		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
 					  CPUFREQ_POLICY_NOTIFIER);
-	cpufreq_dev_count++;
+	list_add(&cpufreq_dev->node, &cpufreq_dev_list);
 
 	mutex_unlock(&cooling_cpufreq_lock);
 
 	return cool_dev;
+
+remove_idr:
+	release_idr(&cpufreq_idr, cpufreq_dev->id);
+free_table:
+	kfree(cpufreq_dev->freq_table);
+free_cdev:
+	kfree(cpufreq_dev);
+
+	return cool_dev;
 }
 
@@ -554,16 +466,16 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 	cpufreq_dev = cdev->devdata;
 	mutex_lock(&cooling_cpufreq_lock);
-	cpufreq_dev_count--;
+	list_del(&cpufreq_dev->node);
 
 	/* Unregister the notifier for the last cpufreq cooling device */
-	if (cpufreq_dev_count == 0)
+	if (list_empty(&cpufreq_dev_list))
 		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
 					    CPUFREQ_POLICY_NOTIFIER);
 	mutex_unlock(&cooling_cpufreq_lock);
 
 	thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
 	release_idr(&cpufreq_idr, cpufreq_dev->id);
+	kfree(cpufreq_dev->freq_table);
 	kfree(cpufreq_dev);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
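The register and unregister paths above now key the global cpufreq policy notifier on list_empty() instead of a separate counter: the notifier is installed when the first cooling device joins the list and dropped when the last one leaves. A compact sketch of that first-in/last-out pattern with a plain singly linked list and a mutex (userspace, compile with -pthread; illustrative only, not the kernel's list_head API):

#include <pthread.h>
#include <stdio.h>

struct cooling_dev {
	int id;
	struct cooling_dev *next;
};

static struct cooling_dev *dev_list;	/* NULL plays the role of list_empty() */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void register_notifier(void)   { puts("notifier registered"); }
static void unregister_notifier(void) { puts("notifier unregistered"); }

static void cooling_register(struct cooling_dev *dev)
{
	pthread_mutex_lock(&lock);
	if (!dev_list)			/* first device: install the notifier */
		register_notifier();
	dev->next = dev_list;
	dev_list = dev;
	pthread_mutex_unlock(&lock);
}

static void cooling_unregister(struct cooling_dev *dev)
{
	struct cooling_dev **p;

	pthread_mutex_lock(&lock);
	for (p = &dev_list; *p; p = &(*p)->next) {
		if (*p == dev) {
			*p = dev->next;
			break;
		}
	}
	if (!dev_list)			/* last device gone: drop the notifier */
		unregister_notifier();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct cooling_dev a = { .id = 0 }, b = { .id = 1 };

	cooling_register(&a);	/* prints "notifier registered" */
	cooling_register(&b);	/* silent */
	cooling_unregister(&a);	/* silent */
	cooling_unregister(&b);	/* prints "notifier unregistered" */
	return 0;
}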
@@ -18,7 +18,6 @@
  */
 
 #include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -28,18 +27,17 @@
 static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
 {
 	struct thermal_cooling_device *cdev;
-	struct cpumask mask_val;
 
-	/* make sure cpufreq driver has been initialized */
-	if (!cpufreq_frequency_get_table(0))
-		return -EPROBE_DEFER;
-
-	cpumask_set_cpu(0, &mask_val);
-	cdev = cpufreq_cooling_register(&mask_val);
-
+	cdev = cpufreq_cooling_register(cpu_present_mask);
 	if (IS_ERR(cdev)) {
-		dev_err(&pdev->dev, "Failed to register cooling device\n");
-		return PTR_ERR(cdev);
+		int ret = PTR_ERR(cdev);
+
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Failed to register cooling device %d\n",
+				ret);
+
+		return ret;
 	}
 
 	platform_set_drvdata(pdev, cdev);
@@ -9,7 +9,6 @@
 
 #include <linux/clk.h>
 #include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/init.h>
@@ -454,15 +453,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
 	const struct of_device_id *of_id =
 		of_match_device(of_imx_thermal_match, &pdev->dev);
 	struct imx_thermal_data *data;
-	struct cpumask clip_cpus;
 	struct regmap *map;
 	int measure_freq;
 	int ret;
 
-	if (!cpufreq_get_current_driver()) {
-		dev_dbg(&pdev->dev, "no cpufreq driver!");
-		return -EPROBE_DEFER;
-	}
 	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -516,12 +510,13 @@ static int imx_thermal_probe(struct platform_device *pdev)
 	regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
 	regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
 
-	cpumask_set_cpu(0, &clip_cpus);
-	data->cdev = cpufreq_cooling_register(&clip_cpus);
+	data->cdev = cpufreq_cooling_register(cpu_present_mask);
 	if (IS_ERR(data->cdev)) {
 		ret = PTR_ERR(data->cdev);
-		dev_err(&pdev->dev,
-			"failed to register cpufreq cooling device: %d\n", ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"failed to register cpufreq cooling device: %d\n",
+				ret);
 		return ret;
 	}
 
@@ -1,4 +1,5 @@
 obj-$(CONFIG_INT340X_THERMAL)	+= int3400_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)	+= int3402_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)	+= int3403_thermal.o
+obj-$(CONFIG_INT340X_THERMAL)	+= processor_thermal_device.o
 obj-$(CONFIG_ACPI_THERMAL_REL)	+= acpi_thermal_rel.o
@@ -82,7 +82,7 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
 	struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
 
 	if (!acpi_has_method(handle, "_TRT"))
-		return 0;
+		return -ENODEV;
 
 	status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
 	if (ACPI_FAILURE(status))
@@ -167,7 +167,7 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
 			sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
 
 	if (!acpi_has_method(handle, "_ART"))
-		return 0;
+		return -ENODEV;
 
 	status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
 	if (ACPI_FAILURE(status))
@@ -321,8 +321,8 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
 	unsigned long length = 0;
 	int count = 0;
 	char __user *arg = (void __user *)__arg;
-	struct trt *trts;
-	struct art *arts;
+	struct trt *trts = NULL;
+	struct art *arts = NULL;
 
 	switch (cmd) {
 	case ACPI_THERMAL_GET_TRT_COUNT:
Some files were not shown because too many files have changed in this diff.