Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

commit c749fa181b
221 changed files with 2738 additions and 1484 deletions
MAINTAINERS | 32

@@ -1373,7 +1373,8 @@ F: arch/arm/mach-ebsa110/
 F:	drivers/net/ethernet/amd/am79c961a.*
 
 ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT
-M:	Uwe Kleine-König <kernel@pengutronix.de>
+M:	Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 N:	efm32
@@ -1401,7 +1402,8 @@ F: arch/arm/mach-footbridge/
 
 ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
 M:	Shawn Guo <shawnguo@kernel.org>
-M:	Sascha Hauer <kernel@pengutronix.de>
+M:	Sascha Hauer <s.hauer@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 R:	Fabio Estevam <fabio.estevam@nxp.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -1416,7 +1418,8 @@ F: include/soc/imx/
 
 ARM/FREESCALE VYBRID ARM ARCHITECTURE
 M:	Shawn Guo <shawnguo@kernel.org>
-M:	Sascha Hauer <kernel@pengutronix.de>
+M:	Sascha Hauer <s.hauer@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 R:	Stefan Agner <stefan@agner.ch>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -4245,6 +4248,9 @@ F: include/trace/events/fs_dax.h
 
 DEVICE DIRECT ACCESS (DAX)
 M:	Dan Williams <dan.j.williams@intel.com>
+M:	Dave Jiang <dave.jiang@intel.com>
+M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Vishal Verma <vishal.l.verma@intel.com>
 L:	linux-nvdimm@lists.01.org
 S:	Supported
 F:	drivers/dax/
@@ -5651,7 +5657,8 @@ F: drivers/net/ethernet/freescale/fec.h
 F:	Documentation/devicetree/bindings/net/fsl-fec.txt
 
 FREESCALE IMX / MXC FRAMEBUFFER DRIVER
-M:	Sascha Hauer <kernel@pengutronix.de>
+M:	Sascha Hauer <s.hauer@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 L:	linux-fbdev@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -6263,7 +6270,7 @@ S: Odd Fixes
 F:	drivers/media/usb/hdpvr/
 
 HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
-M:	Jimmy Vance <jimmy.vance@hpe.com>
+M:	Jerry Hoemann <jerry.hoemann@hpe.com>
 S:	Supported
 F:	Documentation/watchdog/hpwdt.txt
 F:	drivers/watchdog/hpwdt.c
@@ -8055,6 +8062,9 @@ F: tools/lib/lockdep/
 
 LIBNVDIMM BLK: MMIO-APERTURE DRIVER
 M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Dan Williams <dan.j.williams@intel.com>
+M:	Vishal Verma <vishal.l.verma@intel.com>
+M:	Dave Jiang <dave.jiang@intel.com>
 L:	linux-nvdimm@lists.01.org
 Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
 S:	Supported
@@ -8063,6 +8073,9 @@ F: drivers/nvdimm/region_devs.c
 
 LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
 M:	Vishal Verma <vishal.l.verma@intel.com>
+M:	Dan Williams <dan.j.williams@intel.com>
+M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Dave Jiang <dave.jiang@intel.com>
 L:	linux-nvdimm@lists.01.org
 Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
 S:	Supported
@@ -8070,6 +8083,9 @@ F: drivers/nvdimm/btt*
 
 LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
 M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Dan Williams <dan.j.williams@intel.com>
+M:	Vishal Verma <vishal.l.verma@intel.com>
+M:	Dave Jiang <dave.jiang@intel.com>
 L:	linux-nvdimm@lists.01.org
 Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
 S:	Supported
@@ -8085,6 +8101,9 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt
 
 LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
 M:	Dan Williams <dan.j.williams@intel.com>
+M:	Ross Zwisler <ross.zwisler@linux.intel.com>
+M:	Vishal Verma <vishal.l.verma@intel.com>
+M:	Dave Jiang <dave.jiang@intel.com>
 L:	linux-nvdimm@lists.01.org
 Q:	https://patchwork.kernel.org/project/linux-nvdimm/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
@@ -12824,7 +12843,8 @@ F: include/linux/siphash.h
 
 SIOX
 M:	Gavin Schenk <g.schenk@eckelmann.de>
-M:	Uwe Kleine-König <kernel@pengutronix.de>
+M:	Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
 S:	Supported
 F:	drivers/siox/*
 F:	include/trace/events/siox.h

Makefile | 2

@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*

@@ -366,7 +366,7 @@ void force_signal_inject(int signal, int code, unsigned long address)
 	}
 
 	/* Force signals we don't understand to SIGKILL */
-	if (WARN_ON(signal != SIGKILL ||
+	if (WARN_ON(signal != SIGKILL &&
 		    siginfo_layout(signal, code) != SIL_FAULT)) {
 		signal = SIGKILL;
 	}

@@ -204,7 +204,7 @@ void __init kasan_init(void)
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
 	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
-			   pfn_to_nid(virt_to_pfn(lm_alias(_text))));
+			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
 
 	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
 				   (void *)mod_shadow_start);
@@ -224,7 +224,7 @@ void __init kasan_init(void)
 
 		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
 				   (unsigned long)kasan_mem_to_shadow(end),
-				   pfn_to_nid(virt_to_pfn(start)));
+				   early_pfn_to_nid(virt_to_pfn(start)));
 	}
 
 	/*

@@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
 		LOAD_INT(c), LOAD_FRAC(c),
 		count_active_contexts(),
 		atomic_read(&nr_spu_contexts),
-		idr_get_cursor(&task_active_pid_ns(current)->idr));
+		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
 	return 0;
 }
 

@@ -3028,10 +3028,27 @@ static struct intel_uncore_type bdx_uncore_cbox = {
 	.format_group		= &hswep_uncore_cbox_format_group,
 };
 
+static struct intel_uncore_type bdx_uncore_sbox = {
+	.name			= "sbox",
+	.num_counters		= 4,
+	.num_boxes		= 4,
+	.perf_ctr_bits		= 48,
+	.event_ctl		= HSWEP_S0_MSR_PMON_CTL0,
+	.perf_ctr		= HSWEP_S0_MSR_PMON_CTR0,
+	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
+	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
+	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
+	.ops			= &hswep_uncore_sbox_msr_ops,
+	.format_group		= &hswep_uncore_sbox_format_group,
+};
+
+#define BDX_MSR_UNCORE_SBOX	3
+
 static struct intel_uncore_type *bdx_msr_uncores[] = {
 	&bdx_uncore_ubox,
 	&bdx_uncore_cbox,
 	&hswep_uncore_pcu,
+	&bdx_uncore_sbox,
 	NULL,
 };
 
@@ -3043,10 +3060,25 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
 
 void bdx_uncore_cpu_init(void)
 {
+	int pkg = topology_phys_to_logical_pkg(0);
+
 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
 	uncore_msr_uncores = bdx_msr_uncores;
 
+	/* BDX-DE doesn't have SBOX */
+	if (boot_cpu_data.x86_model == 86) {
+		uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+	/* Detect systems with no SBOXes */
+	} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
+		struct pci_dev *pdev;
+		u32 capid4;
+
+		pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
+		pci_read_config_dword(pdev, 0x94, &capid4);
+		if (((capid4 >> 6) & 0x3) == 0)
+			bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+	}
 	hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
 }
 
@@ -3264,6 +3296,11 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
 		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
 	},
+	{ /* PCU.3 (for Capability registers) */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
+		.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+						   HSWEP_PCI_PCU_3),
+	},
 	{ /* end: all zeroes */ }
 };
 

@@ -136,7 +136,6 @@
 #endif
 
 #ifndef __ASSEMBLY__
-#ifndef __BPF__
 /*
  * This output constraint should be used for any inline asm which has a "call"
  * instruction. Otherwise the asm may be inserted before the frame pointer
@@ -146,6 +145,5 @@
 register unsigned long current_stack_pointer asm(_ASM_SP);
 #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
 #endif
-#endif
 
 #endif /* _ASM_X86_ASM_H */

@@ -749,13 +749,11 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
-extern void early_trap_init(void);
 void early_trap_pf_init(void);
 
 /* Defined in head.S */
 extern struct desc_ptr early_gdt_descr;
 
-extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(int);
 extern void load_direct_gdt(int);
 extern void load_fixmap_gdt(int);

@@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
 	apic_id = processor->local_apic_id;
 	enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
 
+	/* Ignore invalid ID */
+	if (apic_id == 0xffffffff)
+		return 0;
+
 	/*
 	 * We need to register disabled CPU as well to permit
 	 * counting disabled CPUs. This allows us to size

@@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 	 * little bit simple
 	 */
 	efi_map_sz = efi_get_runtime_map_size();
-	efi_map_sz = ALIGN(efi_map_sz, 16);
 	params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
 				MAX_ELFCOREHDR_STR_LEN;
 	params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
-	kbuf.bufsz = params_cmdline_sz + efi_map_sz +
+	kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) +
 				sizeof(struct setup_data) +
 				sizeof(struct efi_setup_data);
 
@@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 	if (!params)
 		return ERR_PTR(-ENOMEM);
 	efi_map_offset = params_cmdline_sz;
-	efi_setup_data_offset = efi_map_offset + efi_map_sz;
+	efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16);
 
 	/* Copy setup header onto bootparams. Documentation/x86/boot.txt */
 	setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset;

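Note on the ALIGN() change above: the fix stops overwriting efi_map_sz with its rounded-up value and instead rounds at each point of use, so the raw size stays available for the later offset math. A minimal userspace sketch of the rounding idiom (ALIGN_UP is a stand-in for the kernel's ALIGN macro):

    #include <stdio.h>

    /* Stand-in for the kernel's ALIGN(): round x up to a multiple of a,
     * where a is a power of two. */
    #define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
    	unsigned long efi_map_sz = 100;	/* hypothetical raw size */

    	/* 100 rounds up to 112; the raw value is left untouched */
    	printf("%lu -> %lu\n", efi_map_sz, ALIGN_UP(efi_map_sz, 16UL));
    	return 0;
    }
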
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Fallback functions when the main IOMMU code is not compiled in. This
-   code is roughly equivalent to i386. */
-#include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
-#include <linux/string.h>
-#include <linux/gfp.h>
-#include <linux/pci.h>
-#include <linux/mm.h>
-
-#include <asm/processor.h>
-#include <asm/iommu.h>
-#include <asm/dma.h>
-
-#define NOMMU_MAPPING_ERROR		0
-
-static int
-check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
-{
-	if (hwdev && !dma_capable(hwdev, bus, size)) {
-		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
-			printk(KERN_ERR
-			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
-				name, (long long)bus, size,
-				(long long)*hwdev->dma_mask);
-		return 0;
-	}
-	return 1;
-}
-
-static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction dir,
-				 unsigned long attrs)
-{
-	dma_addr_t bus = phys_to_dma(dev, page_to_phys(page)) + offset;
-	WARN_ON(size == 0);
-	if (!check_addr("map_single", dev, bus, size))
-		return NOMMU_MAPPING_ERROR;
-	return bus;
-}
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scatter-gather version of the
- * above pci_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
-			int nents, enum dma_data_direction dir,
-			unsigned long attrs)
-{
-	struct scatterlist *s;
-	int i;
-
-	WARN_ON(nents == 0 || sg[0].length == 0);
-
-	for_each_sg(sg, s, nents, i) {
-		BUG_ON(!sg_page(s));
-		s->dma_address = sg_phys(s);
-		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
-			return 0;
-		s->dma_length = s->length;
-	}
-	return nents;
-}
-
-static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == NOMMU_MAPPING_ERROR;
-}
-
-const struct dma_map_ops nommu_dma_ops = {
-	.alloc			= dma_generic_alloc_coherent,
-	.free			= dma_generic_free_coherent,
-	.map_sg			= nommu_map_sg,
-	.map_page		= nommu_map_page,
-	.is_phys		= 1,
-	.mapping_error		= nommu_mapping_error,
-	.dma_supported		= x86_dma_supported,
-};

@@ -77,6 +77,8 @@
 #include <asm/i8259.h>
 #include <asm/misc.h>
 #include <asm/qspinlock.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -390,15 +392,47 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	return false;
 }
 
+/*
+ * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
+ *
+ * These are Intel CPUs that enumerate an LLC that is shared by
+ * multiple NUMA nodes. The LLC on these systems is shared for
+ * off-package data access but private to the NUMA node (half
+ * of the package) for on-package access.
+ *
+ * CPUID (the source of the information about the LLC) can only
+ * enumerate the cache as being shared *or* unshared, but not
+ * this particular configuration. The CPU in this case enumerates
+ * the cache to be shared across the entire package (spanning both
+ * NUMA nodes).
+ */
+
+static const struct x86_cpu_id snc_cpu[] = {
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X },
+	{}
+};
+
 static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
 	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
-	if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
-	    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
-		return topology_sane(c, o, "llc");
+	/* Do not match if we do not have a valid APICID for cpu: */
+	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
+		return false;
 
-	return false;
+	/* Do not match if LLC id does not match: */
+	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
+		return false;
+
+	/*
+	 * Allow the SNC topology without warning. Return of false
+	 * means 'c' does not share the LLC of 'o'. This will be
+	 * reflected to userspace.
+	 */
+	if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
+		return false;
+
+	return topology_sane(c, o, "llc");
 }
 
 /*
@@ -456,7 +490,8 @@ static struct sched_domain_topology_level x86_topology[] = {
 
 /*
  * Set if a package/die has multiple NUMA nodes inside.
- * AMD Magny-Cours and Intel Cluster-on-Die have this.
+ * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
+ * Sub-NUMA Clustering have this.
  */
 static bool x86_has_numa_in_package;
 

@@ -317,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
 	hpet2 -= hpet1;
 	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
 	do_div(tmp, 1000000);
-	do_div(deltatsc, tmp);
+	deltatsc = div64_u64(deltatsc, tmp);
 
 	return (unsigned long) deltatsc;
 }

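Note on the do_div() change above: do_div() divides a 64-bit value by a *32-bit* divisor, so a divisor wider than 32 bits is silently truncated; div64_u64() takes a full 64-bit divisor. A minimal userspace sketch of the failure mode (plain C stand-ins, not the kernel macros):

    #include <stdint.h>
    #include <stdio.h>

    /* do_div()-like behaviour: the divisor is truncated to 32 bits. */
    static uint64_t div_trunc32(uint64_t n, uint64_t d)
    {
    	return n / (uint32_t)d;
    }

    int main(void)
    {
    	uint64_t n = 10000000000ULL;
    	uint64_t d = 0x100000001ULL;	/* does not fit in 32 bits */

    	printf("truncated divisor: %llu\n",
    	       (unsigned long long)div_trunc32(n, d));	/* wrong */
    	printf("64-bit divisor:    %llu\n",
    	       (unsigned long long)(n / d));		/* div64_u64-like */
    	return 0;
    }
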
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
+#include <linux/highmem.h>
 
 #include <asm/pgtable.h>
 
@@ -334,16 +335,16 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
 			   pgprotval_t eff_in, unsigned long P)
 {
 	int i;
-	pte_t *start;
+	pte_t *pte;
 	pgprotval_t prot, eff;
 
-	start = (pte_t *)pmd_page_vaddr(addr);
 	for (i = 0; i < PTRS_PER_PTE; i++) {
-		prot = pte_flags(*start);
-		eff = effective_prot(eff_in, prot);
 		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
+		pte = pte_offset_map(&addr, st->current_address);
+		prot = pte_flags(*pte);
+		eff = effective_prot(eff_in, prot);
 		note_page(m, st, __pgprot(prot), eff, 5);
-		start++;
+		pte_unmap(pte);
 	}
 }
 #ifdef CONFIG_KASAN

@@ -98,7 +98,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
 		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
 	} else {
 		/* No p4d for 4-level paging: point the pgd to the pud page table */
-		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));
+		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
 		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
 	}
 

@@ -427,8 +427,9 @@ struct crng_state primary_crng = {
 * its value (from 0->1->2).
 */
 static int crng_init = 0;
-#define crng_ready() (likely(crng_init > 0))
+#define crng_ready() (likely(crng_init > 1))
 static int crng_init_cnt = 0;
+static unsigned long crng_global_init_time = 0;
 #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
 static void _extract_crng(struct crng_state *crng,
 			  __u32 out[CHACHA20_BLOCK_WORDS]);
@@ -787,6 +788,36 @@ static void crng_initialize(struct crng_state *crng)
 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
 
+#ifdef CONFIG_NUMA
+static void numa_crng_init(void)
+{
+	int i;
+	struct crng_state *crng;
+	struct crng_state **pool;
+
+	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+	for_each_online_node(i) {
+		crng = kmalloc_node(sizeof(struct crng_state),
+				    GFP_KERNEL | __GFP_NOFAIL, i);
+		spin_lock_init(&crng->lock);
+		crng_initialize(crng);
+		pool[i] = crng;
+	}
+	mb();
+	if (cmpxchg(&crng_node_pool, NULL, pool)) {
+		for_each_node(i)
+			kfree(pool[i]);
+		kfree(pool);
+	}
+}
+#else
+static void numa_crng_init(void) {}
+#endif
+
 /*
  * crng_fast_load() can be called by code in the interrupt service
  * path.  So we can't afford to dilly-dally.
 */
 static int crng_fast_load(const char *cp, size_t len)
 {
 	unsigned long flags;
@@ -794,7 +825,7 @@ static int crng_fast_load(const char *cp, size_t len)
 
 	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
 		return 0;
-	if (crng_ready()) {
+	if (crng_init != 0) {
 		spin_unlock_irqrestore(&primary_crng.lock, flags);
 		return 0;
 	}
@@ -813,6 +844,51 @@ static int crng_fast_load(const char *cp, size_t len)
 	return 1;
 }
 
+/*
+ * crng_slow_load() is called by add_device_randomness, which has two
+ * attributes.  (1) We can't trust the buffer passed to it is
+ * guaranteed to be unpredictable (so it might not have any entropy at
+ * all), and (2) it doesn't have the performance constraints of
+ * crng_fast_load().
+ *
+ * So we do something more comprehensive which is guaranteed to touch
+ * all of the primary_crng's state, and which uses a LFSR with a
+ * period of 255 as part of the mixing algorithm.  Finally, we do
+ * *not* advance crng_init_cnt since buffer we may get may be something
+ * like a fixed DMI table (for example), which might very well be
+ * unique to the machine, but is otherwise unvarying.
+ */
+static int crng_slow_load(const char *cp, size_t len)
+{
+	unsigned long		flags;
+	static unsigned char	lfsr = 1;
+	unsigned char		tmp;
+	unsigned		i, max = CHACHA20_KEY_SIZE;
+	const char *		src_buf = cp;
+	char *			dest_buf = (char *) &primary_crng.state[4];
+
+	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
+		return 0;
+	if (crng_init != 0) {
+		spin_unlock_irqrestore(&primary_crng.lock, flags);
+		return 0;
+	}
+	if (len > max)
+		max = len;
+
+	for (i = 0; i < max ; i++) {
+		tmp = lfsr;
+		lfsr >>= 1;
+		if (tmp & 1)
+			lfsr ^= 0xE1;
+		tmp = dest_buf[i % CHACHA20_KEY_SIZE];
+		dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
+		lfsr += (tmp << 3) | (tmp >> 5);
+	}
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
+	return 1;
+}
+
 static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 {
 	unsigned long flags;
@@ -831,7 +907,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 		_crng_backtrack_protect(&primary_crng, buf.block,
 					CHACHA20_KEY_SIZE);
 	}
-	spin_lock_irqsave(&primary_crng.lock, flags);
+	spin_lock_irqsave(&crng->lock, flags);
 	for (i = 0; i < 8; i++) {
 		unsigned long rv;
 		if (!arch_get_random_seed_long(&rv) &&
@@ -841,9 +917,10 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	}
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
+	spin_unlock_irqrestore(&crng->lock, flags);
 	if (crng == &primary_crng && crng_init < 2) {
 		invalidate_batched_entropy();
+		numa_crng_init();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
@@ -856,8 +933,9 @@ static void _extract_crng(struct crng_state *crng,
 {
 	unsigned long v, flags;
 
-	if (crng_init > 1 &&
-	    time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
+	if (crng_ready() &&
+	    (time_after(crng_global_init_time, crng->init_time) ||
+	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
 		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
 	spin_lock_irqsave(&crng->lock, flags);
 	if (arch_get_random_long(&v))
@@ -981,10 +1059,8 @@ void add_device_randomness(const void *buf, unsigned int size)
 	unsigned long time = random_get_entropy() ^ jiffies;
 	unsigned long flags;
 
-	if (!crng_ready()) {
-		crng_fast_load(buf, size);
-		return;
-	}
+	if (!crng_ready() && size)
+		crng_slow_load(buf, size);
 
 	trace_add_device_randomness(size, _RET_IP_);
 	spin_lock_irqsave(&input_pool.lock, flags);
@@ -1139,7 +1215,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	fast_mix(fast_pool);
 	add_interrupt_bench(cycles);
 
-	if (!crng_ready()) {
+	if (unlikely(crng_init == 0)) {
 		if ((fast_pool->count >= 64) &&
 		    crng_fast_load((char *) fast_pool->pool,
 				   sizeof(fast_pool->pool))) {
@@ -1680,28 +1756,10 @@ static void init_std_data(struct entropy_store *r)
  */
 static int rand_initialize(void)
 {
-#ifdef CONFIG_NUMA
-	int i;
-	struct crng_state *crng;
-	struct crng_state **pool;
-#endif
-
 	init_std_data(&input_pool);
 	init_std_data(&blocking_pool);
 	crng_initialize(&primary_crng);
-
-#ifdef CONFIG_NUMA
-	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
-	for_each_online_node(i) {
-		crng = kmalloc_node(sizeof(struct crng_state),
-				    GFP_KERNEL | __GFP_NOFAIL, i);
-		spin_lock_init(&crng->lock);
-		crng_initialize(crng);
-		pool[i] = crng;
-	}
-	mb();
-	crng_node_pool = pool;
-#endif
+	crng_global_init_time = jiffies;
 	return 0;
 }
 early_initcall(rand_initialize);
@@ -1875,6 +1933,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 		input_pool.entropy_count = 0;
 		blocking_pool.entropy_count = 0;
 		return 0;
+	case RNDRESEEDCRNG:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (crng_init < 2)
+			return -ENODATA;
+		crng_reseed(&primary_crng, NULL);
+		crng_global_init_time = jiffies - 1;
+		return 0;
 	default:
 		return -EINVAL;
 	}
@@ -2212,7 +2278,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
 {
 	struct entropy_store *poolp = &input_pool;
 
-	if (!crng_ready()) {
+	if (unlikely(crng_init == 0)) {
 		crng_fast_load(buffer, count);
 		return;
 	}

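Note on crng_slow_load() above: its mixing step is an 8-bit LFSR whose in-source comment promises a period of 255. A standalone sketch that replicates the same update rule and counts the cycle length:

    #include <stdio.h>

    int main(void)
    {
    	unsigned char lfsr = 1, tmp;
    	int steps = 0;

    	/* Same update rule as crng_slow_load(): shift right, and XOR in
    	 * the 0xE1 taps when the bit shifted out was set. */
    	do {
    		tmp = lfsr;
    		lfsr >>= 1;
    		if (tmp & 1)
    			lfsr ^= 0xE1;
    		steps++;
    	} while (lfsr != 1);

    	printf("period = %d\n", steps);	/* expect 255 per the comment */
    	return 0;
    }
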
@@ -114,7 +114,7 @@ static int tpm_set_next_event(unsigned long delta,
 	 * of writing CNT registers which may cause the min_delta event got
 	 * missed, so we need add a ETIME check here in case it happened.
 	 */
-	return (int)((next - now) <= 0) ? -ETIME : 0;
+	return (int)(next - now) <= 0 ? -ETIME : 0;
 }
 
 static int tpm_set_state_oneshot(struct clock_event_device *evt)

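Note on the one-character fix above: with unsigned counters, next - now wraps to a huge positive value once the deadline has passed, so comparing the unsigned difference against zero never fires. Casting the *difference* (not the comparison result) to a signed type restores the intended test; a minimal sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t now = 100, next = 90;	/* deadline already missed */

    	/* old form: the unsigned difference is never <= 0 here */
    	printf("%d\n", (int)((next - now) <= 0));	/* prints 0 */

    	/* fixed form: sign the difference first, then compare */
    	printf("%d\n", (int)(next - now) <= 0);		/* prints 1 */
    	return 0;
    }
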
@@ -19,6 +19,7 @@
 #include <linux/dax.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/mman.h>
 #include "dax-private.h"
 #include "dax.h"
 
@@ -540,6 +541,7 @@ static const struct file_operations dax_fops = {
 	.release = dax_release,
 	.get_unmapped_area = dax_get_unmapped_area,
 	.mmap = dax_mmap,
+	.mmap_supported_flags = MAP_SYNC,
 };
 
 static void dev_dax_release(struct device *dev)

@@ -138,13 +138,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
 	lut = (struct drm_color_lut *)blob->data;
 	lut_size = blob->length / sizeof(struct drm_color_lut);
 
-	if (__is_lut_linear(lut, lut_size)) {
-		/* Set to bypass if lut is set to linear */
-		stream->out_transfer_func->type = TF_TYPE_BYPASS;
-		stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
-		return 0;
-	}
-
 	gamma = dc_create_gamma();
 	if (!gamma)
 		return -ENOMEM;

@@ -4743,23 +4743,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
 
 	for (i=0; i < dep_table->count; i++) {
 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
-			break;
+			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+			return;
 		}
 	}
-	if (i == dep_table->count)
+	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
 		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+	}
 
 	dep_table = table_info->vdd_dep_on_sclk;
 	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
 	for (i=0; i < dep_table->count; i++) {
 		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
-			break;
+			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+			return;
 		}
 	}
-	if (i == dep_table->count)
+	if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
 		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+	}
 }
 
 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,

@@ -412,8 +412,10 @@ typedef struct {
 	QuadraticInt_t	ReservedEquation2;
 	QuadraticInt_t	ReservedEquation3;
 
-	uint32_t	Reserved[15];
+	uint16_t	MinVoltageUlvGfx;
+	uint16_t	MinVoltageUlvSoc;
+
+	uint32_t	Reserved[14];
 
 

@@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
 {
 	uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
 	ssize_t ret;
+	int retry;
 
 	if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
 		return 0;
 
-	ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
-				     &tmds_oen, sizeof(tmds_oen));
-	if (ret) {
-		DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
-			      enable ? "enable" : "disable");
-		return ret;
+	/*
+	 * LSPCON adapters in low-power state may ignore the first write, so
+	 * read back and verify the written value a few times.
+	 */
+	for (retry = 0; retry < 3; retry++) {
+		uint8_t tmp;
+
+		ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+					     &tmds_oen, sizeof(tmds_oen));
+		if (ret) {
+			DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
+				      enable ? "enable" : "disable",
+				      retry + 1);
+			return ret;
+		}
+
+		ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
+					    &tmp, sizeof(tmp));
+		if (ret) {
+			DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
+				      enable ? "enabling" : "disabling",
+				      retry + 1);
+			return ret;
+		}
+
+		if (tmp == tmds_oen)
+			return 0;
 	}
 
-	return 0;
+	DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
+		      enable ? "enabling" : "disabling");
+
+	return -EIO;
 }
 EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
 

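Note on the retry loop added above: the pattern is a bounded write/read-back-verify cycle for devices that may silently drop a write while waking from low power. A generic sketch with hypothetical reg_write/reg_read callbacks standing in for the adapter I2C helpers:

    #include <errno.h>

    int write_verified(int (*reg_write)(unsigned char val),
    		   int (*reg_read)(unsigned char *val),
    		   unsigned char val)
    {
    	int retry, ret;
    	unsigned char tmp;

    	for (retry = 0; retry < 3; retry++) {
    		ret = reg_write(val);	/* may be ignored by a sleeping device */
    		if (ret)
    			return ret;

    		ret = reg_read(&tmp);
    		if (ret)
    			return ret;

    		if (tmp == val)		/* write took effect */
    			return 0;
    	}

    	return -EIO;			/* value never stuck */
    }
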
@@ -18,6 +18,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <uapi/drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
@@ -26,20 +27,6 @@
 #include "exynos_drm_iommu.h"
 #include "exynos_drm_crtc.h"
 
-#define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
-
-/*
- * exynos specific framebuffer structure.
- *
- * @fb: drm framebuffer obejct.
- * @exynos_gem: array of exynos specific gem object containing a gem object.
- */
-struct exynos_drm_fb {
-	struct drm_framebuffer	fb;
-	struct exynos_drm_gem	*exynos_gem[MAX_FB_BUFFER];
-	dma_addr_t		dma_addr[MAX_FB_BUFFER];
-};
-
 static int check_fb_gem_memory_type(struct drm_device *drm_dev,
 				    struct exynos_drm_gem *exynos_gem)
 {
@@ -66,40 +53,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
 	return 0;
 }
 
-static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
-{
-	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-	unsigned int i;
-
-	drm_framebuffer_cleanup(fb);
-
-	for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) {
-		struct drm_gem_object *obj;
-
-		if (exynos_fb->exynos_gem[i] == NULL)
-			continue;
-
-		obj = &exynos_fb->exynos_gem[i]->base;
-		drm_gem_object_unreference_unlocked(obj);
-	}
-
-	kfree(exynos_fb);
-	exynos_fb = NULL;
-}
-
-static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
-					struct drm_file *file_priv,
-					unsigned int *handle)
-{
-	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
-
-	return drm_gem_handle_create(file_priv,
-				     &exynos_fb->exynos_gem[0]->base, handle);
-}
-
 static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
-	.destroy	= exynos_drm_fb_destroy,
-	.create_handle	= exynos_drm_fb_create_handle,
+	.destroy	= drm_gem_fb_destroy,
+	.create_handle	= drm_gem_fb_create_handle,
 };
 
 struct drm_framebuffer *
@@ -108,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 			    struct exynos_drm_gem **exynos_gem,
 			    int count)
 {
-	struct exynos_drm_fb *exynos_fb;
+	struct drm_framebuffer *fb;
 	int i;
 	int ret;
 
-	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
-	if (!exynos_fb)
+	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+	if (!fb)
 		return ERR_PTR(-ENOMEM);
 
 	for (i = 0; i < count; i++) {
@@ -121,23 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
 		if (ret < 0)
 			goto err;
 
-		exynos_fb->exynos_gem[i] = exynos_gem[i];
-		exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr
-						+ mode_cmd->offsets[i];
+		fb->obj[i] = &exynos_gem[i]->base;
 	}
 
-	drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd);
+	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
 
-	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+	ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
 	if (ret < 0) {
 		DRM_ERROR("failed to initialize framebuffer\n");
 		goto err;
 	}
 
-	return &exynos_fb->fb;
+	return fb;
 
 err:
-	kfree(exynos_fb);
+	kfree(fb);
 	return ERR_PTR(ret);
 }
 
@@ -191,12 +145,13 @@ err:
 
 dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
 {
-	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+	struct exynos_drm_gem *exynos_gem;
 
 	if (WARN_ON_ONCE(index >= MAX_FB_BUFFER))
 		return 0;
 
-	return exynos_fb->dma_addr[index];
+	exynos_gem = to_exynos_gem(fb->obj[index]);
+	return exynos_gem->dma_addr + fb->offsets[index];
 }
 
 static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {

@@ -1080,6 +1080,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
 {
 	set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
 		s->workload->pending_events);
+	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
 	return 0;
 }
 

@@ -169,6 +169,8 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
 static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	int pipe;
+
 	vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
 			SDE_PORTC_HOTPLUG_CPT |
 			SDE_PORTD_HOTPLUG_CPT);
@@ -267,6 +269,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 	if (IS_BROADWELL(dev_priv))
 		vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
 
+	/* Disable Primary/Sprite/Cursor plane */
+	for_each_pipe(dev_priv, pipe) {
+		vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
+		vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
+		vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
+		vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+	}
+
 	vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 }
 

@@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
 		      struct intel_vgpu_fb_info *fb_info)
 {
 	gvt_dmabuf->drm_format = fb_info->drm_format;
+	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
 	gvt_dmabuf->width = fb_info->width;
 	gvt_dmabuf->height = fb_info->height;
 	gvt_dmabuf->stride = fb_info->stride;

@@ -245,16 +245,13 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	plane->hw_format = fmt;
 
 	plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
-	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
 		return -EINVAL;
-	}
 
 	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
 	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+		gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n",
+			     plane->base);
 		return -EINVAL;
 	}
 
@@ -371,16 +368,13 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 		      alpha_plane, alpha_force);
 
 	plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
-	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
 		return -EINVAL;
-	}
 
 	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
 	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+		gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n",
+			     plane->base);
 		return -EINVAL;
 	}
 
@@ -476,16 +470,13 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 	plane->drm_format = drm_format;
 
 	plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
-	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
 		return -EINVAL;
-	}
 
 	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
 	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
-		gvt_vgpu_err("invalid gma address: %lx\n",
-			     (unsigned long)plane->base);
+		gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n",
+			     plane->base);
 		return -EINVAL;
 	}
 

@@ -530,6 +530,16 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
 			   false, 0, mm->vgpu);
 }
 
+static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
+		struct intel_gvt_gtt_entry *entry, unsigned long index)
+{
+	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+
+	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
+}
+
 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
 		struct intel_gvt_gtt_entry *entry, unsigned long index)
 {
@@ -1818,6 +1828,18 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
 	return ret;
 }
 
+static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
+		struct intel_gvt_gtt_entry *entry)
+{
+	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
+	unsigned long pfn;
+
+	pfn = pte_ops->get_pfn(entry);
+	if (pfn != vgpu->gvt->gtt.scratch_mfn)
+		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
+						pfn << PAGE_SHIFT);
+}
+
 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	void *p_data, unsigned int bytes)
 {
@@ -1844,10 +1866,10 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 
 	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
 			bytes);
-	m = e;
 
 	if (ops->test_present(&e)) {
 		gfn = ops->get_pfn(&e);
+		m = e;
 
 		/* one PTE update may be issued in multiple writes and the
 		 * first write may not construct a valid gfn
@@ -1868,8 +1890,12 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 		} else
 			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
-	} else
+	} else {
+		ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
+		ggtt_invalidate_pte(vgpu, &m);
 		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+		ops->clear_present(&m);
+	}
 
 out:
 	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
@@ -2030,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 		return PTR_ERR(gtt->ggtt_mm);
 	}
 
-	intel_vgpu_reset_ggtt(vgpu);
+	intel_vgpu_reset_ggtt(vgpu, false);
 
 	return create_scratch_page_tree(vgpu);
 }
@@ -2315,17 +2341,19 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
 /**
  * intel_vgpu_reset_ggtt - reset the GGTT entry
  * @vgpu: a vGPU
+ * @invalidate_old: invalidate old entries
  *
  * This function is called at the vGPU create stage
  * to reset all the GGTT entries.
  *
  */
-void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
 	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
+	struct intel_gvt_gtt_entry old_entry;
 	u32 index;
 	u32 num_entries;
 
@@ -2334,13 +2362,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 
 	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
 	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
-	while (num_entries--)
+	while (num_entries--) {
+		if (invalidate_old) {
+			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
+			ggtt_invalidate_pte(vgpu, &old_entry);
+		}
 		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
+	}
 
 	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
 	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
-	while (num_entries--)
+	while (num_entries--) {
+		if (invalidate_old) {
+			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
+			ggtt_invalidate_pte(vgpu, &old_entry);
+		}
 		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
+	}
 
 	ggtt_invalidate(dev_priv);
 }
@@ -2360,5 +2398,5 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
 	 * removing the shadow pages.
 	 */
 	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
-	intel_vgpu_reset_ggtt(vgpu);
+	intel_vgpu_reset_ggtt(vgpu, true);
 }

@@ -193,7 +193,7 @@ struct intel_vgpu_gtt {
 
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
-void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);

@@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 	switch (notification) {
 	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
 		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+		/* fall through */
 	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
 		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
 		return PTR_ERR_OR_ZERO(mm);

@@ -1301,7 +1301,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 
 	}
 
-	return 0;
+	return -ENOTTY;
 }
 
 static ssize_t

@@ -1105,30 +1105,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	ret = i915_ggtt_probe_hw(dev_priv);
 	if (ret)
-		return ret;
+		goto err_perf;
 
-	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
-	 * otherwise the vga fbdev driver falls over. */
+	/*
+	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
+	 * otherwise the vga fbdev driver falls over.
+	 */
 	ret = i915_kick_out_firmware_fb(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
-		goto out_ggtt;
+		goto err_ggtt;
 	}
 
 	ret = i915_kick_out_vgacon(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to remove conflicting VGA console\n");
-		goto out_ggtt;
+		goto err_ggtt;
 	}
 
 	ret = i915_ggtt_init_hw(dev_priv);
 	if (ret)
-		return ret;
+		goto err_ggtt;
 
 	ret = i915_ggtt_enable_hw(dev_priv);
 	if (ret) {
 		DRM_ERROR("failed to enable GGTT\n");
-		goto out_ggtt;
+		goto err_ggtt;
 	}
 
 	pci_set_master(pdev);
@@ -1139,7 +1141,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	if (ret) {
 		DRM_ERROR("failed to set DMA mask\n");
 
-		goto out_ggtt;
+		goto err_ggtt;
 	}
 	}
 
@@ -1157,7 +1159,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	if (ret) {
 		DRM_ERROR("failed to set DMA mask\n");
 
-		goto out_ggtt;
+		goto err_ggtt;
 	}
 	}
 
@@ -1190,13 +1192,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	ret = intel_gvt_init(dev_priv);
 	if (ret)
-		goto out_ggtt;
+		goto err_ggtt;
 
 	return 0;
 
-out_ggtt:
+err_ggtt:
 	i915_ggtt_cleanup_hw(dev_priv);
+err_perf:
+	i915_perf_fini(dev_priv);
 	return ret;
 }
 

@@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 
 		err = radix_tree_insert(handles_vma, handle, vma);
 		if (unlikely(err)) {
-			kfree(lut);
+			kmem_cache_free(eb->i915->luts, lut);
 			goto err_obj;
 		}
 

@@ -473,20 +473,37 @@ static u64 get_rc6(struct drm_i915_private *i915)
 		spin_lock_irqsave(&i915->pmu.lock, flags);
 		spin_lock(&kdev->power.lock);
 
-		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
-			i915->pmu.suspended_jiffies_last =
-						kdev->power.suspended_jiffies;
+		/*
+		 * After the above branch intel_runtime_pm_get_if_in_use failed
+		 * to get the runtime PM reference we cannot assume we are in
+		 * runtime suspend since we can either: a) race with coming out
+		 * of it before we took the power.lock, or b) there are other
+		 * states than suspended which can bring us here.
+		 *
+		 * We need to double-check that we are indeed currently runtime
+		 * suspended and if not we cannot do better than report the last
+		 * known RC6 value.
+		 */
+		if (kdev->power.runtime_status == RPM_SUSPENDED) {
+			if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+				i915->pmu.suspended_jiffies_last =
+						  kdev->power.suspended_jiffies;
 
-		val = kdev->power.suspended_jiffies -
-		      i915->pmu.suspended_jiffies_last;
-		val += jiffies - kdev->power.accounting_timestamp;
+			val = kdev->power.suspended_jiffies -
+			      i915->pmu.suspended_jiffies_last;
+			val += jiffies - kdev->power.accounting_timestamp;
 
+			val = jiffies_to_nsecs(val);
+			val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+
+			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+		} else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+		} else {
+			val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+		}
+
 		spin_unlock(&kdev->power.lock);
 
-		val = jiffies_to_nsecs(val);
-		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
-		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
-
 		spin_unlock_irqrestore(&i915->pmu.lock, flags);
 	}
 

@@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
 	u32 tmp;
 
-	if (!IS_GEN9_BC(dev_priv))
+	if (!IS_GEN9(dev_priv))
 		return;
 
 	i915_audio_component_get_power(kdev);

@@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		return;
 
 	aux_channel = child->aux_channel;
-	ddc_pin = child->ddc_pin;
 
 	is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
 	is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1303,9 +1302,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
 
 	if (is_dvi) {
-		info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
-
-		sanitize_ddc_pin(dev_priv, port);
+		ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
+		if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
+			info->alternate_ddc_pin = ddc_pin;
+			sanitize_ddc_pin(dev_priv, port);
+		} else {
+			DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
+				      "sticking to defaults\n",
+				      port_name(port), ddc_pin);
+		}
 	}
 
 	if (is_dp) {

@@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * know the next preemption status we see corresponds
 		 * to this ELSP update.
 		 */
+		GEM_BUG_ON(!execlists_is_active(execlists,
+						EXECLISTS_ACTIVE_USER));
 		GEM_BUG_ON(!port_count(&port[0]));
 		if (port_count(&port[0]) > 1)
 			goto unlock;
@@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 		memset(port, 0, sizeof(*port));
 		port++;
 	}
+
+	execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
 }
 
 static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -1001,6 +1005,11 @@ static void execlists_submission_tasklet(unsigned long data)
 
 	if (fw)
 		intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
+
+	/* If the engine is now idle, so should be the flag; and vice versa. */
+	GEM_BUG_ON(execlists_is_active(&engine->execlists,
+				       EXECLISTS_ACTIVE_USER) ==
+		   !port_isset(engine->execlists.port));
 }
 
 static void queue_request(struct intel_engine_cs *engine,

@@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
 	vc4_bo_set_label(obj, -1);
 
 	if (bo->validated_shader) {
+		kfree(bo->validated_shader->uniform_addr_offsets);
 		kfree(bo->validated_shader->texture_samples);
 		kfree(bo->validated_shader);
 		bo->validated_shader = NULL;
@@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
 	}
 
 	if (bo->validated_shader) {
+		kfree(bo->validated_shader->uniform_addr_offsets);
 		kfree(bo->validated_shader->texture_samples);
 		kfree(bo->validated_shader);
 		bo->validated_shader = NULL;

@@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
 fail:
 	kfree(validation_state.branch_targets);
 	if (validated_shader) {
+		kfree(validated_shader->uniform_addr_offsets);
 		kfree(validated_shader->texture_samples);
 		kfree(validated_shader);
 	}

@@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 	} /* switch(bond_mode) */
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-	slave_dev->npinfo = bond->dev->npinfo;
-	if (slave_dev->npinfo) {
+	if (bond->dev->npinfo) {
 		if (slave_enable_netpoll(new_slave)) {
 			netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
 			res = -EBUSY;

@@ -1321,6 +1321,10 @@
 #define MDIO_VEND2_AN_STAT		0x8002
 #endif
 
+#ifndef MDIO_VEND2_PMA_CDR_CONTROL
+#define MDIO_VEND2_PMA_CDR_CONTROL	0x8056
+#endif
+
 #ifndef MDIO_CTRL1_SPEED1G
 #define MDIO_CTRL1_SPEED1G		(MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
 #endif
@@ -1369,6 +1373,10 @@
 #define XGBE_AN_CL37_TX_CONFIG_MASK	0x08
 #define XGBE_AN_CL37_MII_CTRL_8BIT	0x0100
 
+#define XGBE_PMA_CDR_TRACK_EN_MASK	0x01
+#define XGBE_PMA_CDR_TRACK_EN_OFF	0x00
+#define XGBE_PMA_CDR_TRACK_EN_ON	0x01
+
 /* Bit setting and getting macros
  *  The get macro will extract the current bit field value from within
  *  the variable

@@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
 			   "debugfs_create_file failed\n");
 	}
 
+	if (pdata->vdata->an_cdr_workaround) {
+		pfile = debugfs_create_bool("an_cdr_workaround", 0600,
+					    pdata->xgbe_debugfs,
+					    &pdata->debugfs_an_cdr_workaround);
+		if (!pfile)
+			netdev_err(pdata->netdev,
+				   "debugfs_create_bool failed\n");
+
+		pfile = debugfs_create_bool("an_cdr_track_early", 0600,
+					    pdata->xgbe_debugfs,
+					    &pdata->debugfs_an_cdr_track_early);
+		if (!pfile)
+			netdev_err(pdata->netdev,
+				   "debugfs_create_bool failed\n");
+	}
+
 	kfree(buf);
 }
 

@@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
 	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
 	/* Call MDIO/PHY initialization routine */
+	pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
 	ret = pdata->phy_if.phy_init(pdata);
 	if (ret)
 		return ret;

@@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
 	xgbe_an73_set(pdata, false, false);
 	xgbe_an73_disable_interrupts(pdata);
 
+	pdata->an_start = 0;
+
 	netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
 }
 
 static void xgbe_an_restart(struct xgbe_prv_data *pdata)
 {
+	if (pdata->phy_if.phy_impl.an_pre)
+		pdata->phy_if.phy_impl.an_pre(pdata);
+
 	switch (pdata->an_mode) {
 	case XGBE_AN_MODE_CL73:
 	case XGBE_AN_MODE_CL73_REDRV:
@@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata)
 
 static void xgbe_an_disable(struct xgbe_prv_data *pdata)
 {
+	if (pdata->phy_if.phy_impl.an_post)
+		pdata->phy_if.phy_impl.an_post(pdata);
+
 	switch (pdata->an_mode) {
 	case XGBE_AN_MODE_CL73:
 	case XGBE_AN_MODE_CL73_REDRV:
@@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
 		XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
 			    reg);
 
-		if (pdata->phy_if.phy_impl.kr_training_post)
-			pdata->phy_if.phy_impl.kr_training_post(pdata);
-
 		netif_dbg(pdata, link, pdata->netdev,
 			  "KR training initiated\n");
+
+		if (pdata->phy_if.phy_impl.kr_training_post)
+			pdata->phy_if.phy_impl.kr_training_post(pdata);
 	}
 
 	return XGBE_AN_PAGE_RECEIVED;
@@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
 		return XGBE_AN_NO_LINK;
 	}
 
-	xgbe_an73_disable(pdata);
+	xgbe_an_disable(pdata);
 
 	xgbe_switch_mode(pdata);
 
-	xgbe_an73_restart(pdata);
+	xgbe_an_restart(pdata);
 
 	return XGBE_AN_INCOMPAT_LINK;
 }
@@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
 		pdata->an_result = pdata->an_state;
 		pdata->an_state = XGBE_AN_READY;
 
+		if (pdata->phy_if.phy_impl.an_post)
+			pdata->phy_if.phy_impl.an_post(pdata);
+
 		netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
 			  xgbe_state_as_string(pdata->an_result));
 	}
@@ -903,6 +914,9 @@ again:
 		pdata->kx_state = XGBE_RX_BPA;
 		pdata->an_start = 0;
 
+		if (pdata->phy_if.phy_impl.an_post)
+			pdata->phy_if.phy_impl.an_post(pdata);
+
 		netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
 			  xgbe_state_as_string(pdata->an_result));
 	}

|
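Every call site above reduces to the same optional-hook pattern: the hook pointers in struct xgbe_phy_impl_if (declared later in this merge) may be NULL, so each caller guards the dispatch. A minimal sketch with illustrative helper names, not functions from the driver:

/* Sketch of the optional-hook dispatch used by the AN paths above. */
static void xgbe_call_an_pre(struct xgbe_prv_data *pdata)
{
	if (pdata->phy_if.phy_impl.an_pre)
		pdata->phy_if.phy_impl.an_pre(pdata);
}

static void xgbe_call_an_post(struct xgbe_prv_data *pdata)
{
	if (pdata->phy_if.phy_impl.an_post)
		pdata->phy_if.phy_impl.an_post(pdata);
}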
@@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = {
.irq_reissue_support = 1,
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
.an_cdr_workaround = 1,
};

static const struct xgbe_version_data xgbe_v2b = {

@@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = {
.irq_reissue_support = 1,
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
.an_cdr_workaround = 1,
};

static const struct pci_device_id xgbe_pci_table[] = {
@@ -147,6 +147,14 @@
/* Rate-change complete wait/retry count */
#define XGBE_RATECHANGE_COUNT 500

/* CDR delay values for KR support (in usec) */
#define XGBE_CDR_DELAY_INIT 10000
#define XGBE_CDR_DELAY_INC 10000
#define XGBE_CDR_DELAY_MAX 100000

/* RRC frequency during link status check */
#define XGBE_RRC_FREQUENCY 10

enum xgbe_port_mode {
XGBE_PORT_MODE_RSVD = 0,
XGBE_PORT_MODE_BACKPLANE,

@@ -245,6 +253,10 @@ enum xgbe_sfp_speed {
#define XGBE_SFP_BASE_VENDOR_SN 4
#define XGBE_SFP_BASE_VENDOR_SN_LEN 16

#define XGBE_SFP_EXTD_OPT1 1
#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1)
#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3)

#define XGBE_SFP_EXTD_DIAG 28
#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)

@@ -324,6 +336,7 @@ struct xgbe_phy_data {

unsigned int sfp_gpio_address;
unsigned int sfp_gpio_mask;
unsigned int sfp_gpio_inputs;
unsigned int sfp_gpio_rx_los;
unsigned int sfp_gpio_tx_fault;
unsigned int sfp_gpio_mod_absent;

@@ -355,6 +368,10 @@ struct xgbe_phy_data {
unsigned int redrv_addr;
unsigned int redrv_lane;
unsigned int redrv_model;

/* KR AN support */
unsigned int phy_cdr_notrack;
unsigned int phy_cdr_delay;
};

/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */

@@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
phy_data->sfp_phy_avail = 1;
}

static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
{
u8 *sfp_extd = phy_data->sfp_eeprom.extd;

if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
return false;

if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
return false;

if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
return true;

return false;
}

static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
{
u8 *sfp_extd = phy_data->sfp_eeprom.extd;

if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
return false;

if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
return false;

if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
return true;

return false;
}

static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
{
if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
return false;

if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
return true;

return false;
}

static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;

@@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
return;

/* Update transceiver signals (eeprom extd/options) */
phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);

if (xgbe_phy_sfp_parse_quirks(pdata))
return;

@@ -1184,7 +1248,6 @@ put:
static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int gpio_input;
u8 gpio_reg, gpio_ports[2];
int ret;

@@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
return;
}

gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];

if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
/* No GPIO, just assume the module is present for now */
phy_data->sfp_mod_absent = 0;
} else {
if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
phy_data->sfp_mod_absent = 0;
}

if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
(gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
phy_data->sfp_rx_los = 1;

if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) &&
(gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
phy_data->sfp_tx_fault = 1;
phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);
}

static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)

@@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 1;

/* No link, attempt a receiver reset cycle */
if (phy_data->rrc_count++) {
if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
phy_data->rrc_count = 0;
xgbe_phy_rrc(pdata);
}
@@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
return true;
}

static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;

if (!pdata->debugfs_an_cdr_workaround)
return;

if (!phy_data->phy_cdr_notrack)
return;

usleep_range(phy_data->phy_cdr_delay,
phy_data->phy_cdr_delay + 500);

XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
XGBE_PMA_CDR_TRACK_EN_MASK,
XGBE_PMA_CDR_TRACK_EN_ON);

phy_data->phy_cdr_notrack = 0;
}

static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;

if (!pdata->debugfs_an_cdr_workaround)
return;

if (phy_data->phy_cdr_notrack)
return;

XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
XGBE_PMA_CDR_TRACK_EN_MASK,
XGBE_PMA_CDR_TRACK_EN_OFF);

xgbe_phy_rrc(pdata);

phy_data->phy_cdr_notrack = 1;
}

static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
{
if (!pdata->debugfs_an_cdr_track_early)
xgbe_phy_cdr_track(pdata);
}

static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
{
if (pdata->debugfs_an_cdr_track_early)
xgbe_phy_cdr_track(pdata);
}

static void xgbe_phy_an_post(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;

switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
case XGBE_AN_MODE_CL73_REDRV:
if (phy_data->cur_mode != XGBE_MODE_KR)
break;

xgbe_phy_cdr_track(pdata);

switch (pdata->an_result) {
case XGBE_AN_READY:
case XGBE_AN_COMPLETE:
break;
default:
if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
else
phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
break;
}
break;
default:
break;
}
}

static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;

switch (pdata->an_mode) {
case XGBE_AN_MODE_CL73:
case XGBE_AN_MODE_CL73_REDRV:
if (phy_data->cur_mode != XGBE_MODE_KR)
break;

xgbe_phy_cdr_notrack(pdata);
break;
default:
break;
}
}

static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;

@@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
xgbe_phy_sfp_reset(phy_data);
xgbe_phy_sfp_mod_absent(pdata);

/* Reset CDR support */
xgbe_phy_cdr_track(pdata);

/* Power off the PHY */
xgbe_phy_power_off(pdata);

@@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* Start in highest supported mode */
xgbe_phy_set_mode(pdata, phy_data->start_mode);

/* Reset CDR support */
xgbe_phy_cdr_track(pdata);

/* After starting the I2C controller, we can check for an SFP */
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_SFP:

@@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
}
}

phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;

/* Register for driving external PHYs */
mii = devm_mdiobus_alloc(pdata->dev);
if (!mii) {

@@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
phy_impl->an_advertising = xgbe_phy_an_advertising;

phy_impl->an_outcome = xgbe_phy_an_outcome;

phy_impl->an_pre = xgbe_phy_an_pre;
phy_impl->an_post = xgbe_phy_an_post;

phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
phy_impl->kr_training_post = xgbe_phy_kr_training_post;
}
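Reading the hunks above together, the ordering the workaround enforces in KR mode (only when the debugfs flag is set) is: an_pre() calls xgbe_phy_cdr_notrack(), which switches CDR tracking off and issues a receiver reset; auto-negotiation and KR training then run; an_post() calls xgbe_phy_cdr_track(), which waits phy_cdr_delay microseconds before re-enabling tracking. On an unsuccessful AN result the delay backs off. A sketch of just that backoff step, with an illustrative helper name:

/* Sketch of the delay adjustment xgbe_phy_an_post() applies after a
 * failed AN result: grow the CDR re-enable delay by XGBE_CDR_DELAY_INC
 * until XGBE_CDR_DELAY_MAX, then wrap back to XGBE_CDR_DELAY_INIT.
 */
static void cdr_delay_backoff(struct xgbe_phy_data *phy_data)
{
	if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
		phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
	else
		phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
}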
@@ -833,6 +833,7 @@ struct xgbe_hw_if {
/* This structure represents implementation specific routines for an
* implementation of a PHY. All routines are required unless noted below.
* Optional routines:
* an_pre, an_post
* kr_training_pre, kr_training_post
*/
struct xgbe_phy_impl_if {

@@ -875,6 +876,10 @@ struct xgbe_phy_impl_if {
/* Process results of auto-negotiation */
enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);

/* Pre/Post auto-negotiation support */
void (*an_pre)(struct xgbe_prv_data *);
void (*an_post)(struct xgbe_prv_data *);

/* Pre/Post KR training enablement support */
void (*kr_training_pre)(struct xgbe_prv_data *);
void (*kr_training_post)(struct xgbe_prv_data *);

@@ -989,6 +994,7 @@ struct xgbe_version_data {
unsigned int irq_reissue_support;
unsigned int tx_desc_prefetch;
unsigned int rx_desc_prefetch;
unsigned int an_cdr_workaround;
};

struct xgbe_vxlan_data {

@@ -1257,6 +1263,9 @@ struct xgbe_prv_data {
unsigned int debugfs_xprop_reg;

unsigned int debugfs_xi2c_reg;

bool debugfs_an_cdr_workaround;
bool debugfs_an_cdr_track_early;
};

/* Function prototypes*/
@@ -1128,7 +1128,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->rx_pool)
return;

rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
rx_scrqs = adapter->num_active_rx_pools;
rx_entries = adapter->req_rx_add_entries_per_subcrq;

/* Free any remaining skbs in the rx buffer pools */

@@ -1177,7 +1177,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->tx_pool || !adapter->tso_pool)
return;

tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
tx_scrqs = adapter->num_active_tx_pools;

/* Free any remaining skbs in the tx buffer pools */
for (i = 0; i < tx_scrqs; i++) {
@@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_MIRROR_VSI_ID_S 3
#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S)

/* Action type = 5 - Large Action */
/* Action type = 5 - Generic Value */
#define ICE_LG_ACT_GENERIC 0x5
#define ICE_LG_ACT_GENERIC_VALUE_S 3
#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S)
@@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_aq_desc desc;
enum ice_status status;
u16 flags;
u8 i;

cmd = &desc.params.mac_read;

@@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
return ICE_ERR_CFG;
}

ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr);
ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr);
/* A single port can report up to two (LAN and WoL) addresses */
for (i = 0; i < cmd->num_addr; i++)
if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
ether_addr_copy(hw->port_info->mac.lan_addr,
resp[i].mac_addr);
ether_addr_copy(hw->port_info->mac.perm_addr,
resp[i].mac_addr);
break;
}

return 0;
}

@@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;

/* Get port MAC information */
mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
sizeof(struct ice_aqc_manage_mac_read_resp),
GFP_KERNEL);
mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

if (!mac_buf) {
status = ICE_ERR_NO_MEMORY;
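The point of the two hunks above is that firmware may return up to two address entries (LAN and WoL), so the response buffer is sized for two and scanned for the LAN-type entry. A self-contained sketch of that scan with a stand-in struct (the real layout is ice_aqc_manage_mac_read_resp, and 0 stands in for ICE_AQC_MAN_MAC_ADDR_TYPE_LAN):

#include <linux/types.h>
#include <linux/string.h>

struct mac_read_resp {		/* stand-in, not the driver's struct */
	u8 addr_type;
	u8 mac_addr[6];
};

/* Copy only the LAN-type entry out of a multi-entry response. */
static void pick_lan_addr(const struct mac_read_resp *resp, int num_addr,
			  u8 *lan_addr)
{
	int i;

	for (i = 0; i < num_addr; i++) {
		if (resp[i].addr_type == 0 /* LAN type, assumed */) {
			memcpy(lan_addr, resp[i].mac_addr, 6);
			break;
		}
	}
}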
@@ -121,8 +121,6 @@
#define PFINT_FW_CTL_CAUSE_ENA_S 30
#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
#define PFINT_OICR 0x0016CA00
#define PFINT_OICR_INTEVENT_S 0
#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S)
#define PFINT_OICR_HLP_RDY_S 14
#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
#define PFINT_OICR_CPM_RDY_S 15

@@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);

if (!(oicr & PFINT_OICR_INTEVENT_M))
goto ena_intr;

if (oicr & PFINT_OICR_GRST_M) {
u32 reset;
/* we have a reset warning */

@@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
ret = IRQ_HANDLED;

ena_intr:
/* re-enable interrupt causes that are not handled during this pass */
wr32(hw, PFINT_OICR_ENA, ena_mask);
if (!test_bit(__ICE_DOWN, pf->state)) {
@@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
u16 num_added = 0;
u32 temp;

*num_nodes_added = 0;

if (!num_nodes)
return status;

if (!parent || layer < hw->sw_entry_point_layer)
return ICE_ERR_PARAM;

*num_nodes_added = 0;

/* max children per node per layer */
max_child_nodes =
le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
@@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);

if (enable) {
if (enable || queue == 0) {
/* i210 does not allow the queue 0 to be in the Strict
* Priority mode while the Qav mode is enabled, so,
* instead of disabling strict priority mode, we give
* queue 0 the maximum of credits possible.
*
* See section 8.12.19 of the i210 datasheet, "Note:
* Queue0 QueueMode must be set to 1b when
* TransmitMode is set to Qav."
*/
if (queue == 0 && !enable) {
/* max "linkspeed" idleslope in kbps */
idleslope = 1000000;
hicredit = ETH_FRAME_LEN;
}

set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
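A condensed sketch of the decision the hunk above implements: on i210, queue 0 cannot be left in strict-priority mode while Qav mode is active, so "disable CBS on queue 0" is translated into "CBS with maximum credits" (idleslope in kbps, hicredit in bytes, as in the hunk):

	/* Sketch, not the full igb_configure_cbs() body. */
	if (enable || queue == 0) {
		if (queue == 0 && !enable) {
			idleslope = 1000000;	/* max "linkspeed" idleslope */
			hicredit  = ETH_FRAME_LEN;
		}
		/* ...then program fetch priority and stream-reservation
		 * mode for the queue as the real function does...
		 */
	}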
@@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
if (!err)
continue;
hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
break;
goto err_setup_tx;
}

return 0;
@@ -3999,29 +3999,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx)
atomic_set(&efx->active_queues, 0);
}

static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right)
{
if ((left->match_flags ^ right->match_flags) |
((left->flags ^ right->flags) &
(EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
return false;

return memcmp(&left->outer_vid, &right->outer_vid,
sizeof(struct efx_filter_spec) -
offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
{
BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
return jhash2((const u32 *)&spec->outer_vid,
(sizeof(struct efx_filter_spec) -
offsetof(struct efx_filter_spec, outer_vid)) / 4,
0);
/* XXX should we randomise the initval? */
}

/* Decide whether a filter should be exclusive or else should allow
* delivery to additional recipients. Currently we decide that
* filters for specific local unicast MAC and IP addresses are

@@ -4346,7 +4323,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
goto out_unlock;
match_pri = rc;

hash = efx_ef10_filter_hash(spec);
hash = efx_filter_spec_hash(spec);
is_mc_recip = efx_filter_is_mc_recipient(spec);
if (is_mc_recip)
bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);

@@ -4378,7 +4355,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
if (!saved_spec) {
if (ins_index < 0)
ins_index = i;
} else if (efx_ef10_filter_equal(spec, saved_spec)) {
} else if (efx_filter_spec_equal(spec, saved_spec)) {
if (spec->priority < saved_spec->priority &&
spec->priority != EFX_FILTER_PRI_AUTO) {
rc = -EPERM;

@@ -4762,27 +4739,62 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
unsigned int filter_idx)
{
struct efx_filter_spec *spec, saved_spec;
struct efx_ef10_filter_table *table;
struct efx_filter_spec *spec;
bool ret;
struct efx_arfs_rule *rule = NULL;
bool ret = true, force = false;
u16 arfs_id;

down_read(&efx->filter_sem);
table = efx->filter_state;
down_write(&table->lock);
spec = efx_ef10_filter_entry_spec(table, filter_idx);

if (!spec || spec->priority != EFX_FILTER_PRI_HINT) {
ret = true;
if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
goto out_unlock;
}

if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, 0)) {
spin_lock_bh(&efx->rps_hash_lock);
if (!efx->rps_hash_table) {
/* In the absence of the table, we always return 0 to ARFS. */
arfs_id = 0;
} else {
rule = efx_rps_hash_find(efx, spec);
if (!rule)
/* ARFS table doesn't know of this filter, so remove it */
goto expire;
arfs_id = rule->arfs_id;
ret = efx_rps_check_rule(rule, filter_idx, &force);
if (force)
goto expire;
if (!ret) {
spin_unlock_bh(&efx->rps_hash_lock);
goto out_unlock;
}
}
if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
ret = false;
goto out_unlock;
}

else if (rule)
rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
expire:
saved_spec = *spec; /* remove operation will kfree spec */
spin_unlock_bh(&efx->rps_hash_lock);
/* At this point (since we dropped the lock), another thread might queue
* up a fresh insertion request (but the actual insertion will be held
* up by our possession of the filter table lock). In that case, it
* will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
* the rule is not removed by efx_rps_hash_del() below.
*/
ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
filter_idx, true) == 0;
/* While we can't safely dereference rule (we dropped the lock), we can
* still test it for NULL.
*/
if (ret && rule) {
/* Expiring, so remove entry from ARFS table */
spin_lock_bh(&efx->rps_hash_lock);
efx_rps_hash_del(efx, &saved_spec);
spin_unlock_bh(&efx->rps_hash_lock);
}
out_unlock:
up_write(&table->lock);
up_read(&efx->filter_sem);
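One subtlety worth pulling out of the expiry path above: the remove operation frees the spec, so the code snapshots it onto the stack while it is still valid and keys the later ARFS-table deletion off the copy. A sketch of just that pattern (fragment, using the names from the hunk):

	/* Snapshot the lookup key before the object it lives in can be
	 * freed, then re-find and delete the bookkeeping entry by the
	 * copy after the heavyweight removal.
	 */
	saved_spec = *spec;	/* remove operation will kfree spec */
	ret = efx_ef10_filter_remove_internal(efx, 1U << saved_spec.priority,
					      filter_idx, true) == 0;
	if (ret && rule)
		efx_rps_hash_del(efx, &saved_spec);	/* under rps_hash_lock in the real code */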
@@ -3059,6 +3059,10 @@ static int efx_init_struct(struct efx_nic *efx,
mutex_init(&efx->mac_lock);
#ifdef CONFIG_RFS_ACCEL
mutex_init(&efx->rps_mutex);
spin_lock_init(&efx->rps_hash_lock);
/* Failure to allocate is not fatal, but may degrade ARFS performance */
efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;

@@ -3102,6 +3106,10 @@ static void efx_fini_struct(struct efx_nic *efx)
{
int i;

#ifdef CONFIG_RFS_ACCEL
kfree(efx->rps_hash_table);
#endif

for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);

@@ -3124,6 +3132,141 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}

bool efx_filter_spec_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right)
{
if ((left->match_flags ^ right->match_flags) |
((left->flags ^ right->flags) &
(EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
return false;

return memcmp(&left->outer_vid, &right->outer_vid,
sizeof(struct efx_filter_spec) -
offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
return jhash2((const u32 *)&spec->outer_vid,
(sizeof(struct efx_filter_spec) -
offsetof(struct efx_filter_spec, outer_vid)) / 4,
0);
}

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
bool *force)
{
if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
/* ARFS is currently updating this entry, leave it */
return false;
}
if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
/* ARFS tried and failed to update this, so it's probably out
* of date. Remove the filter and the ARFS rule entry.
*/
rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
*force = true;
return true;
} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
/* ARFS has moved on, so old filter is not needed. Since we did
* not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
* not be removed by efx_rps_hash_del() subsequently.
*/
*force = true;
return true;
}
/* Remove it iff ARFS wants to. */
return true;
}

struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
const struct efx_filter_spec *spec)
{
u32 hash = efx_filter_spec_hash(spec);

WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
if (!efx->rps_hash_table)
return NULL;
return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
const struct efx_filter_spec *spec)
{
struct efx_arfs_rule *rule;
struct hlist_head *head;
struct hlist_node *node;

head = efx_rps_hash_bucket(efx, spec);
if (!head)
return NULL;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
if (efx_filter_spec_equal(spec, &rule->spec))
return rule;
}
return NULL;
}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
const struct efx_filter_spec *spec,
bool *new)
{
struct efx_arfs_rule *rule;
struct hlist_head *head;
struct hlist_node *node;

head = efx_rps_hash_bucket(efx, spec);
if (!head)
return NULL;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
if (efx_filter_spec_equal(spec, &rule->spec)) {
*new = false;
return rule;
}
}
rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
*new = true;
if (rule) {
memcpy(&rule->spec, spec, sizeof(rule->spec));
hlist_add_head(&rule->node, head);
}
return rule;
}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
struct efx_arfs_rule *rule;
struct hlist_head *head;
struct hlist_node *node;

head = efx_rps_hash_bucket(efx, spec);
if (WARN_ON(!head))
return;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
if (efx_filter_spec_equal(spec, &rule->spec)) {
/* Someone already reused the entry. We know that if
* this check doesn't fire (i.e. filter_id == REMOVING)
* then the REMOVING mark was put there by our caller,
* because caller is holding a lock on filter table and
* only holders of that lock set REMOVING.
*/
if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
return;
hlist_del(node);
kfree(rule);
return;
}
}
/* We didn't find it. */
WARN_ON(1);
}
#endif

/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
* (a) this is an infrequent control-plane operation and (b) n is small (max 64)
*/
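The hash/equality pair promoted to common code above relies on everything from ->outer_vid to the end of the spec being the match key, so exactly that tail region is hashed and memcmp'd. A self-contained sketch of the idea with a stand-in struct (the real one is efx_filter_spec):

#include <linux/kernel.h>
#include <linux/jhash.h>

struct spec {			/* stand-in layout, not the driver's */
	u32 flags;		/* not part of the match key */
	u32 outer_vid;		/* key region starts here */
	u32 host;
	u16 port;
	u16 pad;
};

/* Hash only the key tail; BUILD_BUG_ON guards the u32 alignment that
 * jhash2() requires, just as the real efx_filter_spec_hash() does.
 */
static u32 spec_hash(const struct spec *s)
{
	BUILD_BUG_ON(offsetof(struct spec, outer_vid) & 3);
	return jhash2((const u32 *)&s->outer_vid,
		      (sizeof(*s) - offsetof(struct spec, outer_vid)) / 4, 0);
}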
@@ -186,6 +186,27 @@ static inline void efx_filter_rfs_expire(struct work_struct *data) {}
#endif
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);

bool efx_filter_spec_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right);
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
bool *force);

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
const struct efx_filter_spec *spec);

/* @new is written to indicate if entry was newly added (true) or if an old
* entry was found and returned (false).
*/
struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
const struct efx_filter_spec *spec,
bool *new);

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
#endif

/* RSS contexts */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
@@ -2905,18 +2905,45 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
{
struct efx_farch_filter_state *state = efx->filter_state;
struct efx_farch_filter_table *table;
bool ret = false;
bool ret = false, force = false;
u16 arfs_id;

down_write(&state->lock);
spin_lock_bh(&efx->rps_hash_lock);
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
if (test_bit(index, table->used_bitmap) &&
table->spec[index].priority == EFX_FILTER_PRI_HINT &&
rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
flow_id, 0)) {
efx_farch_filter_table_clear_entry(efx, table, index);
ret = true;
}
table->spec[index].priority == EFX_FILTER_PRI_HINT) {
struct efx_arfs_rule *rule = NULL;
struct efx_filter_spec spec;

efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
if (!efx->rps_hash_table) {
/* In the absence of the table, we always returned 0 to
* ARFS, so use the same to query it.
*/
arfs_id = 0;
} else {
rule = efx_rps_hash_find(efx, &spec);
if (!rule) {
/* ARFS table doesn't know of this filter, remove it */
force = true;
} else {
arfs_id = rule->arfs_id;
if (!efx_rps_check_rule(rule, index, &force))
goto out_unlock;
}
}
if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
flow_id, arfs_id)) {
if (rule)
rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
efx_rps_hash_del(efx, &spec);
efx_farch_filter_table_clear_entry(efx, table, index);
ret = true;
}
}
out_unlock:
spin_unlock_bh(&efx->rps_hash_lock);
up_write(&state->lock);
return ret;
}
@@ -734,6 +734,35 @@ struct efx_rss_context {
};

#ifdef CONFIG_RFS_ACCEL
/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING
* is used to test if filter does or will exist.
*/
#define EFX_ARFS_FILTER_ID_PENDING -1
#define EFX_ARFS_FILTER_ID_ERROR -2
#define EFX_ARFS_FILTER_ID_REMOVING -3
/**
* struct efx_arfs_rule - record of an ARFS filter and its IDs
* @node: linkage into hash table
* @spec: details of the filter (used as key for hash table). Use efx->type to
* determine which member to use.
* @rxq_index: channel to which the filter will steer traffic.
* @arfs_id: filter ID which was returned to ARFS
* @filter_id: index in software filter table. May be
* %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet,
* %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or
* %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter.
*/
struct efx_arfs_rule {
struct hlist_node node;
struct efx_filter_spec spec;
u16 rxq_index;
u16 arfs_id;
s32 filter_id;
};

/* Size chosen so that the table is one page (4kB) */
#define EFX_ARFS_HASH_TABLE_SIZE 512

/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice

@@ -873,6 +902,10 @@ struct efx_async_filter_insertion {
* @rps_expire_channel's @rps_flow_id
* @rps_slot_map: bitmap of in-flight entries in @rps_slot
* @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
* @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
* @rps_next_id).
* @rps_hash_table: Mapping between ARFS filters and their various IDs
* @rps_next_id: next arfs_id for an ARFS filter
* @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.

@@ -1029,6 +1062,9 @@ struct efx_nic {
unsigned int rps_expire_index;
unsigned long rps_slot_map;
struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
spinlock_t rps_hash_lock;
struct hlist_head *rps_hash_table;
u32 rps_next_id;
#endif

atomic_t active_queues;
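The comment above stresses that the ordering of the three sentinels matters: "exists or will exist" is exactly filter_id >= EFX_ARFS_FILTER_ID_PENDING, since PENDING (-1) is the largest of the negative values and real filter indices are >= 0. A one-function sketch of that predicate (illustrative helper name):

/* Sketch: the sentinel ordering lets one comparison classify a rule. */
static bool rule_filter_exists_or_pending(s32 filter_id)
{
	return filter_id >= EFX_ARFS_FILTER_ID_PENDING;	/* -1 pending, >=0 real */
}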
@@ -834,9 +834,29 @@ static void efx_filter_rfs_work(struct work_struct *data)
struct efx_nic *efx = netdev_priv(req->net_dev);
struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
int slot_idx = req - efx->rps_slot;
struct efx_arfs_rule *rule;
u16 arfs_id = 0;
int rc;

rc = efx->type->filter_insert(efx, &req->spec, true);
if (efx->rps_hash_table) {
spin_lock_bh(&efx->rps_hash_lock);
rule = efx_rps_hash_find(efx, &req->spec);
/* The rule might have already gone, if someone else's request
* for the same spec was already worked and then expired before
* we got around to our work. In that case we have nothing
* tying us to an arfs_id, meaning that as soon as the filter
* is considered for expiry it will be removed.
*/
if (rule) {
if (rc < 0)
rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
else
rule->filter_id = rc;
arfs_id = rule->arfs_id;
}
spin_unlock_bh(&efx->rps_hash_lock);
}
if (rc >= 0) {
/* Remember this so we can check whether to expire the filter
* later.

@@ -848,18 +868,18 @@ static void efx_filter_rfs_work(struct work_struct *data)

if (req->spec.ether_type == htons(ETH_P_IP))
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
(req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
req->spec.rem_host, ntohs(req->spec.rem_port),
req->spec.loc_host, ntohs(req->spec.loc_port),
req->rxq_index, req->flow_id, rc);
req->rxq_index, req->flow_id, rc, arfs_id);
else
netif_info(efx, rx_status, efx->net_dev,
"steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
"steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
(req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
req->spec.rem_host, ntohs(req->spec.rem_port),
req->spec.loc_host, ntohs(req->spec.loc_port),
req->rxq_index, req->flow_id, rc);
req->rxq_index, req->flow_id, rc, arfs_id);
}

/* Release references */

@@ -872,8 +892,10 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_async_filter_insertion *req;
struct efx_arfs_rule *rule;
struct flow_keys fk;
int slot_idx;
bool new;
int rc;

/* find a free slot */

@@ -926,12 +948,42 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
req->spec.rem_port = fk.ports.src;
req->spec.loc_port = fk.ports.dst;

if (efx->rps_hash_table) {
/* Add it to ARFS hash table */
spin_lock(&efx->rps_hash_lock);
rule = efx_rps_hash_add(efx, &req->spec, &new);
if (!rule) {
rc = -ENOMEM;
goto out_unlock;
}
if (new)
rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
rc = rule->arfs_id;
/* Skip if existing or pending filter already does the right thing */
if (!new && rule->rxq_index == rxq_index &&
rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
goto out_unlock;
rule->rxq_index = rxq_index;
rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
spin_unlock(&efx->rps_hash_lock);
} else {
/* Without an ARFS hash table, we just use arfs_id 0 for all
* filters. This means if multiple flows hash to the same
* flow_id, all but the most recently touched will be eligible
* for expiry.
*/
rc = 0;
}

/* Queue the request */
dev_hold(req->net_dev = net_dev);
INIT_WORK(&req->work, efx_filter_rfs_work);
req->rxq_index = rxq_index;
req->flow_id = flow_id;
schedule_work(&req->work);
return 0;
return rc;
out_unlock:
spin_unlock(&efx->rps_hash_lock);
out_clear:
clear_bit(slot_idx, &efx->rps_slot_map);
return rc;
@@ -129,7 +129,7 @@ do { \

#define RX_PRIORITY_MAPPING 0x76543210
#define TX_PRIORITY_MAPPING 0x33221100
#define CPDMA_TX_PRIORITY_MAP 0x01234567
#define CPDMA_TX_PRIORITY_MAP 0x76543210

#define CPSW_VLAN_AWARE BIT(1)
#define CPSW_RX_VLAN_ENCAP BIT(2)
@@ -1393,6 +1393,15 @@ static int m88e1318_set_wol(struct phy_device *phydev,
if (err < 0)
goto error;

/* If WOL event happened once, the LED[2] interrupt pin
* will not be cleared unless we reading the interrupt status
* register. If interrupts are in use, the normal interrupt
* handling will clear the WOL event. Clear the WOL event
* before enabling it if !phy_interrupt_is_valid()
*/
if (!phy_interrupt_is_valid(phydev))
phy_read(phydev, MII_M1011_IEVENT);

/* Enable the WOL interrupt */
err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
MII_88E1318S_PHY_CSIER_WOL_EIE);
@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);

error = -EINVAL;

if (sockaddr_len != sizeof(struct sockaddr_pppox))
goto end;

if (sp->sa_protocol != PX_PROTO_OE)
goto end;
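The added check matters because uservaddr and sockaddr_len come straight from userspace: the length must be validated before any protocol field of the sockaddr is dereferenced. A standalone sketch of the same validation (illustrative helper, types from <linux/if_pppox.h>):

#include <linux/if_pppox.h>
#include <linux/errno.h>

/* Sketch: reject short sockaddrs before touching sa_protocol. */
static int check_pppox_addr(struct sockaddr *uservaddr, int sockaddr_len)
{
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;

	if (sockaddr_len != sizeof(struct sockaddr_pppox))
		return -EINVAL;
	if (sp->sa_protocol != PX_PROTO_OE)
		return -EINVAL;
	return 0;
}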
@@ -1072,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int team_port_enable_netpoll(struct team *team, struct team_port *port)
static int __team_port_enable_netpoll(struct team_port *port)
{
struct netpoll *np;
int err;

if (!team->dev->npinfo)
return 0;

np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;

@@ -1093,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
if (!port->team->dev->npinfo)
return 0;

return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
struct netpoll *np = port->np;

@@ -1107,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port)
kfree(np);
}
#else
static int team_port_enable_netpoll(struct team *team, struct team_port *port)
static int team_port_enable_netpoll(struct team_port *port)
{
return 0;
}

@@ -1221,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
goto err_vids_add;
}

err = team_port_enable_netpoll(team, port);
err = team_port_enable_netpoll(port);
if (err) {
netdev_err(dev, "Failed to enable netpoll on device %s\n",
portname);

@@ -1918,7 +1923,7 @@ static int team_netpoll_setup(struct net_device *dev,

mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
err = team_port_enable_netpoll(team, port);
err = __team_port_enable_netpoll(port);
if (err) {
__team_netpoll_cleanup(team);
break;
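The split is the fix: team_netpoll_setup() runs while the master's dev->npinfo is not yet assigned (the netpoll core sets it only after ndo_netpoll_setup() succeeds), so the early-out on npinfo must be bypassed there. A sketch of the resulting two-level helper, as in the hunks above:

/* Wrapper keeps the "master not running netpoll" early-out for the
 * port-add path; the bare helper does the unconditional setup that
 * team_netpoll_setup() needs.
 */
static int team_port_enable_netpoll(struct team_port *port)
{
	if (!port->team->dev->npinfo)
		return 0;

	return __team_port_enable_netpoll(port);
}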
@@ -103,8 +103,7 @@ config NVDIMM_DAX
Select Y if unsure

config OF_PMEM
# FIXME: make tristate once OF_NUMA dependency removed
bool "Device-tree support for persistent memory regions"
tristate "Device-tree support for persistent memory regions"
depends on OF
default LIBNVDIMM
help
@@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
int rc = validate_dimm(ndd), cmd_rc = 0;
struct nd_cmd_get_config_data_hdr *cmd;
struct nvdimm_bus_descriptor *nd_desc;
int rc = validate_dimm(ndd);
u32 max_cmd_size, config_size;
size_t offset;

@@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
cmd->in_offset = offset;
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
ND_CMD_GET_CONFIG_DATA, cmd,
cmd->in_length + sizeof(*cmd), NULL);
if (rc || cmd->status) {
rc = -ENXIO;
cmd->in_length + sizeof(*cmd), &cmd_rc);
if (rc < 0)
break;
if (cmd_rc < 0) {
rc = cmd_rc;
break;
}
memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);

@@ -140,9 +142,9 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len)
{
int rc = validate_dimm(ndd);
size_t max_cmd_size, buf_offset;
struct nd_cmd_set_config_hdr *cmd;
int rc = validate_dimm(ndd), cmd_rc = 0;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

@@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
for (buf_offset = 0; len; len -= cmd->in_length,
buf_offset += cmd->in_length) {
size_t cmd_size;
u32 *status;

cmd->in_offset = offset + buf_offset;
cmd->in_length = min(max_cmd_size, len);

@@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,

/* status is output in the last 4-bytes of the command buffer */
cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
status = ((void *) cmd) + cmd_size - sizeof(u32);

rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
if (rc || *status) {
rc = rc ? rc : -ENXIO;
ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
if (rc < 0)
break;
if (cmd_rc < 0) {
rc = cmd_rc;
break;
}
}
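The shape of the change above is a transport-vs-firmware error split: the ndctl call's return value reports transport failure, while the new cmd_rc out-parameter carries the translated firmware status, and the firmware errno wins when the transport itself succeeded. A fragment-level sketch of that convention:

	/* Sketch of the two-level error check used above. */
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_GET_CONFIG_DATA, cmd,
			    cmd->in_length + sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;	/* transport error */
	if (cmd_rc < 0)
		return cmd_rc;	/* firmware error, already an errno */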
@@ -67,7 +67,7 @@ static int of_pmem_region_probe(struct platform_device *pdev)
*/
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.attr_groups = region_attr_groups;
ndr_desc.numa_node = of_node_to_nid(np);
ndr_desc.numa_node = dev_to_node(&pdev->dev);
ndr_desc.res = &pdev->resource[i];
ndr_desc.of_node = np;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
@@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req,
tx->callback = dma_xfer_callback;
tx->callback_param = req;

req->dmach = chan;
req->sync = sync;
req->status = DMA_IN_PROGRESS;
init_completion(&req->req_comp);
kref_get(&req->refcount);

cookie = dmaengine_submit(tx);

@@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
if (!req)
return -ENOMEM;

kref_init(&req->refcount);

ret = get_dma_channel(priv);
if (ret) {
kfree(req);
return ret;
}
chan = priv->dmach;

kref_init(&req->refcount);
init_completion(&req->req_comp);
req->dir = dir;
req->filp = filp;
req->priv = priv;
req->dmach = chan;
req->sync = sync;

/*
* If parameter loc_addr != NULL, we are transferring data from/to

@@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
xfer->offset, xfer->length);
}

req->dir = dir;
req->filp = filp;
req->priv = priv;
chan = priv->dmach;

nents = dma_map_sg(chan->device->dev,
req->sgt.sgl, req->sgt.nents, dir);
if (nents == 0) {
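The reshuffle above follows a general rule: initialize the refcount and fill in every request field once, up front, before the first error path or asynchronous user can see the object, instead of scattering the assignments along the submit path. A fragment-level sketch of the resulting setup order:

	/* Sketch: complete the object before anything can drop or use it. */
	kref_init(&req->refcount);
	init_completion(&req->req_comp);
	req->dir   = dir;
	req->filp  = filp;
	req->priv  = priv;
	req->dmach = chan;
	req->sync  = sync;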
@@ -557,7 +557,6 @@ enum qeth_prot_versions {
enum qeth_cmd_buffer_state {
BUF_STATE_FREE,
BUF_STATE_LOCKED,
BUF_STATE_PROCESSED,
};

enum qeth_cq {

@@ -601,7 +600,6 @@ struct qeth_channel {
struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
atomic_t irq_pending;
int io_buf_no;
int buf_no;
};

/**

@@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
qeth_put_reply(reply);
}
spin_unlock_irqrestore(&card->lock, flags);
atomic_set(&card->write.irq_pending, 0);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);

@@ -818,7 +817,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)

for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
qeth_release_buffer(channel, &channel->iob[cnt]);
channel->buf_no = 0;
channel->io_buf_no = 0;
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);

@@ -924,7 +922,6 @@ static int qeth_setup_channel(struct qeth_channel *channel)
kfree(channel->iob[cnt].data);
return -ENOMEM;
}
channel->buf_no = 0;
channel->io_buf_no = 0;
atomic_set(&channel->irq_pending, 0);
spin_lock_init(&channel->iob_lock);

@@ -1100,16 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
{
int rc;
int cstat, dstat;
struct qeth_cmd_buffer *buffer;
struct qeth_cmd_buffer *iob = NULL;
struct qeth_channel *channel;
struct qeth_card *card;
struct qeth_cmd_buffer *iob;
__u8 index;

if (__qeth_check_irb_error(cdev, intparm, irb))
return;
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;

card = CARD_FROM_CDEV(cdev);
if (!card)

@@ -1127,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
channel = &card->data;
QETH_CARD_TEXT(card, 5, "data");
}

if (qeth_intparm_is_iob(intparm))
iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);

if (__qeth_check_irb_error(cdev, intparm, irb)) {
/* IO was terminated, free its resources. */
if (iob)
qeth_release_buffer(iob->channel, iob);
atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
return;
}

atomic_set(&channel->irq_pending, 0);

if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))

@@ -1150,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
/* we don't have to handle this further */
intparm = 0;
}

cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;

if ((dstat & DEV_STAT_UNIT_EXCEP) ||
(dstat & DEV_STAT_UNIT_CHECK) ||
(cstat)) {

@@ -1182,25 +1189,15 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
channel->state = CH_STATE_RCD_DONE;
goto out;
}
if (intparm) {
buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
buffer->state = BUF_STATE_PROCESSED;
}
if (channel == &card->data)
return;
if (channel == &card->read &&
channel->state == CH_STATE_UP)
__qeth_issue_next_read(card);

iob = channel->iob;
index = channel->buf_no;
while (iob[index].state == BUF_STATE_PROCESSED) {
if (iob[index].callback != NULL)
iob[index].callback(channel, iob + index);
if (iob && iob->callback)
iob->callback(iob->channel, iob);

index = (index + 1) % QETH_CMD_BUFFER_NO;
}
channel->buf_no = index;
out:
wake_up(&card->wait_q);
return;

@@ -1870,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_start(channel->ccwdev,
&channel->ccw, (addr_t) iob, 0, 0);
rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
(addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

if (rc) {

@@ -1888,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
if (channel->state != CH_STATE_UP) {
rc = -ETIME;
QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
qeth_clear_cmd_buffers(channel);
} else
rc = 0;
return rc;

@@ -1942,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_start(channel->ccwdev,
&channel->ccw, (addr_t) iob, 0, 0);
rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
(addr_t) iob, 0, 0, QETH_TIMEOUT);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

if (rc) {

@@ -1964,7 +1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
dev_name(&channel->ccwdev->dev));
QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
qeth_clear_cmd_buffers(channel);
return -ETIME;
}
return qeth_idx_activate_get_answer(channel, idx_reply_cb);

@@ -2166,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,

QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
(addr_t) iob, 0, 0);
rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
(addr_t) iob, 0, 0, event_timeout);
spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
if (rc) {
QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "

@@ -2199,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
}

if (reply->rc == -EIO)
goto error;
rc = reply->rc;
qeth_put_reply(reply);
return rc;

@@ -2211,10 +2204,6 @@ time_err:
list_del_init(&reply->list);
spin_unlock_irqrestore(&reply->card->lock, flags);
atomic_inc(&reply->received);
error:
atomic_set(&card->write.irq_pending, 0);
qeth_release_buffer(iob->channel, iob);
card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
rc = reply->rc;
qeth_put_reply(reply);
return rc;

@@ -3033,28 +3022,23 @@ static int qeth_send_startlan(struct qeth_card *card)
return rc;
}

static int qeth_default_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
struct qeth_ipa_cmd *cmd;

QETH_CARD_TEXT(card, 4, "defadpcb");

cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code == 0)
if (!cmd->hdr.return_code)
cmd->hdr.return_code =
cmd->data.setadapterparms.hdr.return_code;
return 0;
return cmd->hdr.return_code;
}

static int qeth_query_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd;
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

QETH_CARD_TEXT(card, 3, "quyadpcb");
if (qeth_setadpparms_inspect_rc(cmd))
return 0;

cmd = (struct qeth_ipa_cmd *) data;
if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
card->info.link_type =
cmd->data.setadapterparms.data.query_cmds_supp.lan_type;

@@ -3062,7 +3046,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
}
card->options.adp.supported_funcs =
cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
return 0;
}

static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
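The cleanup above replaces the old tail-call into qeth_default_setadapterparms_cb() with a small helper that folds the sub-command status into hdr.return_code up front, so every setadapterparms callback can bail out early on error. A sketch of the resulting callback shape (example_setadp_cb is illustrative, not a function in the driver):

/* Sketch of the post-cleanup callback pattern used below. */
static int example_setadp_cb(struct qeth_card *card,
			     struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;

	QETH_CARD_TEXT(card, 4, "exmplcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return 0;

	/* ... success-path handling of cmd->data.setadapterparms ... */
	return 0;
}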
@ -3154,22 +3138,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists);
|
|||
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
|
||||
struct qeth_reply *reply, unsigned long data)
|
||||
{
|
||||
struct qeth_ipa_cmd *cmd;
|
||||
struct qeth_switch_info *sw_info;
|
||||
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
|
||||
struct qeth_query_switch_attributes *attrs;
|
||||
struct qeth_switch_info *sw_info;
|
||||
|
||||
QETH_CARD_TEXT(card, 2, "qswiatcb");
|
||||
cmd = (struct qeth_ipa_cmd *) data;
|
||||
sw_info = (struct qeth_switch_info *)reply->param;
|
||||
if (cmd->data.setadapterparms.hdr.return_code == 0) {
|
||||
attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
|
||||
sw_info->capabilities = attrs->capabilities;
|
||||
sw_info->settings = attrs->settings;
|
||||
QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
|
||||
sw_info->settings);
|
||||
}
|
||||
qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
|
||||
if (qeth_setadpparms_inspect_rc(cmd))
|
||||
return 0;
|
||||
|
||||
sw_info = (struct qeth_switch_info *)reply->param;
|
||||
attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
|
||||
sw_info->capabilities = attrs->capabilities;
|
||||
sw_info->settings = attrs->settings;
|
||||
QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
|
||||
sw_info->settings);
|
||||
return 0;
|
||||
}

@@ -4207,16 +4189,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet);
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
        struct qeth_reply *reply, unsigned long data)
{
    struct qeth_ipa_cmd *cmd;
    struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
    struct qeth_ipacmd_setadpparms *setparms;

    QETH_CARD_TEXT(card, 4, "prmadpcb");

    cmd = (struct qeth_ipa_cmd *) data;
    setparms = &(cmd->data.setadapterparms);

    qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
    if (cmd->hdr.return_code) {
    if (qeth_setadpparms_inspect_rc(cmd)) {
        QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
        setparms->data.mode = SET_PROMISC_MODE_OFF;
    }
@@ -4286,18 +4265,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats);
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
        struct qeth_reply *reply, unsigned long data)
{
    struct qeth_ipa_cmd *cmd;
    struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

    QETH_CARD_TEXT(card, 4, "chgmaccb");
    if (qeth_setadpparms_inspect_rc(cmd))
        return 0;

    cmd = (struct qeth_ipa_cmd *) data;
    if (!card->options.layer2 ||
        !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
        ether_addr_copy(card->dev->dev_addr,
            cmd->data.setadapterparms.data.change_addr.addr);
        card->info.mac_bits |= QETH_LAYER2_MAC_READ;
    }
    qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
    return 0;
}

@@ -4328,13 +4307,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
        struct qeth_reply *reply, unsigned long data)
{
    struct qeth_ipa_cmd *cmd;
    struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
    struct qeth_set_access_ctrl *access_ctrl_req;
    int fallback = *(int *)reply->param;

    QETH_CARD_TEXT(card, 4, "setaccb");
    if (cmd->hdr.return_code)
        return 0;
    qeth_setadpparms_inspect_rc(cmd);

    cmd = (struct qeth_ipa_cmd *) data;
    access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
    QETH_DBF_TEXT_(SETUP, 2, "setaccb");
    QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -4407,7 +4388,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
        card->options.isolation = card->options.prev_isolation;
        break;
    }
    qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
    return 0;
}

@@ -4695,14 +4675,15 @@ out:
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
        struct qeth_reply *reply, unsigned long data)
{
    struct qeth_ipa_cmd *cmd;
    struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
    struct qeth_qoat_priv *priv;
    char *resdata;
    int resdatalen;

    QETH_CARD_TEXT(card, 3, "qoatcb");
    if (qeth_setadpparms_inspect_rc(cmd))
        return 0;

    cmd = (struct qeth_ipa_cmd *)data;
    priv = (struct qeth_qoat_priv *)reply->param;
    resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
    resdata = (char *)data + 28;
@@ -4796,21 +4777,18 @@ out:
static int qeth_query_card_info_cb(struct qeth_card *card,
        struct qeth_reply *reply, unsigned long data)
{
    struct qeth_ipa_cmd *cmd;
    struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
    struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
    struct qeth_query_card_info *card_info;
    struct carrier_info *carrier_info;

    QETH_CARD_TEXT(card, 2, "qcrdincb");
    carrier_info = (struct carrier_info *)reply->param;
    cmd = (struct qeth_ipa_cmd *)data;
    card_info = &cmd->data.setadapterparms.data.card_info;
    if (cmd->data.setadapterparms.hdr.return_code == 0) {
        carrier_info->card_type = card_info->card_type;
        carrier_info->port_mode = card_info->port_mode;
        carrier_info->port_speed = card_info->port_speed;
    }
    if (qeth_setadpparms_inspect_rc(cmd))
        return 0;

    qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
    card_info = &cmd->data.setadapterparms.data.card_info;
    carrier_info->card_type = card_info->card_type;
    carrier_info->port_mode = card_info->port_mode;
    carrier_info->port_speed = card_info->port_speed;
    return 0;
}

@@ -4857,7 +4835,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
        goto out;
    }

    ccw_device_get_id(CARD_DDEV(card), &id);
    ccw_device_get_id(CARD_RDEV(card), &id);
    request->resp_buf_len = sizeof(*response);
    request->resp_version = DIAG26C_VERSION2;
    request->op_code = DIAG26C_GET_MAC;
@@ -6563,10 +6541,14 @@ static int __init qeth_core_init(void)
    mutex_init(&qeth_mod_mutex);

    qeth_wq = create_singlethread_workqueue("qeth_wq");
    if (!qeth_wq) {
        rc = -ENOMEM;
        goto out_err;
    }

    rc = qeth_register_dbf_views();
    if (rc)
        goto out_err;
        goto dbf_err;
    qeth_core_root_dev = root_device_register("qeth");
    rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
    if (rc)
@@ -6603,6 +6585,8 @@ slab_err:
    root_device_unregister(qeth_core_root_dev);
register_err:
    qeth_unregister_dbf_views();
dbf_err:
    destroy_workqueue(qeth_wq);
out_err:
    pr_err("Initializing the qeth device driver failed\n");
    return rc;
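Note the reordering in the qeth_core_init() hunk: the workqueue is created first, and the error labels unwind in reverse order of acquisition, with each failure jumping to the label that undoes only what already succeeded. A compilable toy sketch of this goto-unwind idiom, with generic placeholder names rather than the driver's symbols:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the real setup steps. */
static void *create_workqueue_stub(void) { return malloc(1); }
static int register_views_stub(void) { return 0; }

static int example_init(void)
{
    void *wq;
    int rc;

    wq = create_workqueue_stub();
    if (!wq) {
        rc = -1;
        goto out_err;          /* nothing to undo yet */
    }
    rc = register_views_stub();
    if (rc)
        goto wq_err;           /* only the workqueue exists so far */
    return 0;

wq_err:
    free(wq);
out_err:
    fprintf(stderr, "init failed\n");
    return rc;
}

int main(void) { return example_init() ? 1 : 0; }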

@@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[];
#define QETH_HALT_CHANNEL_PARM -11
#define QETH_RCD_PARM -12

static inline bool qeth_intparm_is_iob(unsigned long intparm)
{
    switch (intparm) {
    case QETH_CLEAR_CHANNEL_PARM:
    case QETH_HALT_CHANNEL_PARM:
    case QETH_RCD_PARM:
    case 0:
        return false;
    }
    return true;
}

/*****************************************************************************/
/* IP Assist related definitions */
/*****************************************************************************/
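qeth_intparm_is_iob() classifies a CCW interruption parameter: the reserved channel-control values and 0 are control codes, anything else is taken to be the address of an I/O buffer. A compilable userspace sketch of the same classification, using stand-in constants rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

#define CLEAR_CHANNEL_PARM ((unsigned long)-10)
#define HALT_CHANNEL_PARM  ((unsigned long)-11)
#define RCD_PARM           ((unsigned long)-12)

static bool intparm_is_iob(unsigned long intparm)
{
    switch (intparm) {
    case CLEAR_CHANNEL_PARM:
    case HALT_CHANNEL_PARM:
    case RCD_PARM:
    case 0:
        return false;   /* control code, not a buffer address */
    }
    return true;        /* anything else: an I/O buffer pointer */
}

int main(void)
{
    unsigned long samples[] = { 0, RCD_PARM, 0x2000UL };
    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%#lx -> %s\n", samples[i],
               intparm_is_iob(samples[i]) ? "iob" : "control");
    return 0;
}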
@@ -121,13 +121,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
    QETH_CARD_TEXT(card, 2, "L2Setmac");
    rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
    if (rc == 0) {
        card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
        ether_addr_copy(card->dev->dev_addr, mac);
        dev_info(&card->gdev->dev,
            "MAC address %pM successfully registered on device %s\n",
            card->dev->dev_addr, card->dev->name);
            "MAC address %pM successfully registered on device %s\n",
            mac, card->dev->name);
    } else {
        card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
        switch (rc) {
        case -EEXIST:
            dev_warn(&card->gdev->dev,
@@ -142,19 +139,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
    return rc;
}

static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
    int rc;

    QETH_CARD_TEXT(card, 2, "L2Delmac");
    if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
        return 0;
    rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
    if (rc == 0)
        card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
    return rc;
}

static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
{
    enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
@@ -519,6 +503,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
{
    struct sockaddr *addr = p;
    struct qeth_card *card = dev->ml_priv;
    u8 old_addr[ETH_ALEN];
    int rc = 0;

    QETH_CARD_TEXT(card, 3, "setmac");
@@ -530,14 +515,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
        return -EOPNOTSUPP;
    }
    QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
        QETH_CARD_TEXT(card, 3, "setmcREC");
        return -ERESTARTSYS;
    }
    rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
    if (!rc || (rc == -ENOENT))
        rc = qeth_l2_send_setmac(card, addr->sa_data);
    return rc ? -EINVAL : 0;

    if (!qeth_card_hw_is_reachable(card)) {
        ether_addr_copy(dev->dev_addr, addr->sa_data);
        return 0;
    }

    /* don't register the same address twice */
    if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
        (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
        return 0;

    /* add the new address, switch over, drop the old */
    rc = qeth_l2_send_setmac(card, addr->sa_data);
    if (rc)
        return rc;
    ether_addr_copy(old_addr, dev->dev_addr);
    ether_addr_copy(dev->dev_addr, addr->sa_data);

    if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
        qeth_l2_remove_mac(card, old_addr);
    card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
    return 0;
}

static void qeth_promisc_to_bridge(struct qeth_card *card)
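The rewritten qeth_l2_set_mac_address() registers the new address before dropping the old one, so a failing SETVMAC leaves the old, still-registered address fully functional and the device is never without a unicast address. A compilable toy sketch of that ordering; hw_add() and hw_remove() are illustrative stubs, not the driver's helpers:

#include <stdio.h>
#include <string.h>

static int hw_add(const unsigned char *mac) { (void)mac; return 0; }
static void hw_remove(const unsigned char *mac) { (void)mac; }

/* Add the new address, switch over, then drop the old one. */
static int set_mac(unsigned char *dev_addr, const unsigned char *new_mac)
{
    unsigned char old_addr[6];
    int rc = hw_add(new_mac);
    if (rc)
        return rc;              /* old address stays registered */
    memcpy(old_addr, dev_addr, 6);
    memcpy(dev_addr, new_mac, 6);
    hw_remove(old_addr);
    return 0;
}

int main(void)
{
    unsigned char dev_addr[6] = { 2, 0, 0, 0, 0, 1 };
    const unsigned char new_mac[6] = { 2, 0, 0, 0, 0, 2 };
    printf("rc=%d, last byte now %02x\n", set_mac(dev_addr, new_mac),
           dev_addr[5]);
    return 0;
}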
@@ -1067,8 +1073,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        goto out_remove;
    }

    if (card->info.type != QETH_CARD_TYPE_OSN)
        qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
    if (card->info.type != QETH_CARD_TYPE_OSN &&
        !qeth_l2_send_setmac(card, card->dev->dev_addr))
        card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;

    if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
        if (card->info.hwtrap &&
@@ -1338,8 +1345,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
    qeth_prepare_control_data(card, len, iob);
    QETH_CARD_TEXT(card, 6, "osnoirqp");
    spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
    rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
            (addr_t) iob, 0, 0);
    rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
            (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
    spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
    if (rc) {
        QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
@@ -55,6 +55,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
#define WDT_CTRL_WDT_INTR BIT(2)
#define WDT_CTRL_RESET_SYSTEM BIT(1)
#define WDT_CTRL_ENABLE BIT(0)
#define WDT_TIMEOUT_STATUS 0x10
#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1)

/*
 * WDT_RESET_WIDTH controls the characteristics of the external pulse (if
@@ -192,6 +194,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
    struct device_node *np;
    const char *reset_type;
    u32 duration;
    u32 status;
    int ret;

    wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
@@ -307,6 +310,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
        writel(duration - 1, wdt->base + WDT_RESET_WIDTH);
    }

    status = readl(wdt->base + WDT_TIMEOUT_STATUS);
    if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY)
        wdt->wdd.bootstatus = WDIOF_CARDRESET;

    ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd);
    if (ret) {
        dev_err(&pdev->dev, "failed to register\n");
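Both this hunk and the Renesas one below export the cause of the last reset through wdd.bootstatus. Userspace can read that through the standard watchdog chardev ioctl; a small sketch follows (the /dev/watchdog0 node name is an assumption, and note that opening the node arms the timer on most drivers):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
    int flags = 0;
    /* Caution: opening the node arms the watchdog on most drivers. */
    int fd = open("/dev/watchdog0", O_WRONLY);
    if (fd < 0) { perror("open"); return 1; }
    if (ioctl(fd, WDIOC_GETBOOTSTATUS, &flags) == 0)
        printf("last reset %s caused by the watchdog\n",
               (flags & WDIOF_CARDRESET) ? "was" : "was not");
    if (write(fd, "V", 1) < 0)   /* magic close: disarm on exit */
        perror("write");
    close(fd);
    return 0;
}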
@@ -121,7 +121,8 @@ static int rwdt_restart(struct watchdog_device *wdev, unsigned long action,
}

static const struct watchdog_info rwdt_ident = {
    .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
    .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
           WDIOF_CARDRESET,
    .identity = "Renesas WDT Watchdog",
};

@@ -197,9 +198,10 @@ static int rwdt_probe(struct platform_device *pdev)
        return PTR_ERR(clk);

    pm_runtime_enable(&pdev->dev);

    pm_runtime_get_sync(&pdev->dev);
    priv->clk_rate = clk_get_rate(clk);
    priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) &
                RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0;
    pm_runtime_put(&pdev->dev);

    if (!priv->clk_rate) {
@@ -299,7 +299,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd,
        if (sch311x_wdt_set_heartbeat(new_timeout))
            return -EINVAL;
        sch311x_wdt_keepalive();
        /* Fall */
        /* Fall through */
    case WDIOC_GETTIMEOUT:
        return put_user(timeout, p);
    default:

@@ -427,7 +427,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
            return -EINVAL;

        wdt_keepalive();
        /* Fall */
        /* Fall through */

    case WDIOC_GETTIMEOUT:
        return put_user(timeout, uarg.i);

@@ -178,7 +178,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd,
        timeout = new_timeout;
        wafwdt_stop();
        wafwdt_start();
        /* Fall */
        /* Fall through */
    case WDIOC_GETTIMEOUT:
        return put_user(timeout, p);
@@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inode *dir,

    autofs4_del_active(dentry);

    inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
    inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
    if (!inode)
        return -ENOMEM;
    d_add(dentry, inode);
@@ -377,10 +377,10 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
    } else
        map_addr = vm_mmap(filep, addr, size, prot, type, off);

    if ((type & MAP_FIXED_NOREPLACE) && BAD_ADDR(map_addr))
        pr_info("%d (%s): Uhuuh, elf segment at %p requested but the memory is mapped already\n",
                task_pid_nr(current), current->comm,
                (void *)addr);
    if ((type & MAP_FIXED_NOREPLACE) &&
        PTR_ERR((void *)map_addr) == -EEXIST)
        pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
                task_pid_nr(current), current->comm, (void *)addr);

    return(map_addr);
}
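The fix narrows the warning to the case where the mapping actually failed with -EEXIST, which is exactly what MAP_FIXED_NOREPLACE returns when the requested range is already occupied. A userspace demonstration of that semantic (requires a kernel and headers with MAP_FIXED_NOREPLACE, i.e. 4.17 or later):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    void *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (a == MAP_FAILED) { perror("mmap"); return 1; }

    /* Same range again: MAP_FIXED would silently clobber it,
     * MAP_FIXED_NOREPLACE fails with EEXIST instead. */
    void *b = mmap(a, len, PROT_READ,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
    if (b == MAP_FAILED && errno == EEXIST)
        printf("range already mapped, as expected\n");
    return 0;
}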
@@ -459,6 +459,25 @@ struct btrfs_block_rsv {
    unsigned short full;
    unsigned short type;
    unsigned short failfast;

    /*
     * Qgroup equivalent for @size @reserved
     *
     * Unlike normal @size/@reserved for inode rsv, qgroup doesn't care
     * about things like csum size nor how many tree blocks it will need to
     * reserve.
     *
     * Qgroup cares more about net change of the extent usage.
     *
     * So for one newly inserted file extent, in worst case it will cause
     * leaf split and level increase, nodesize for each file extent is
     * already too much.
     *
     * In short, qgroup_size/reserved is the upper limit of possible needed
     * qgroup metadata reservation.
     */
    u64 qgroup_rsv_size;
    u64 qgroup_rsv_reserved;
};

/*
@@ -714,6 +733,12 @@ struct btrfs_delayed_root;
 */
#define BTRFS_FS_EXCL_OP 16

/*
 * To info transaction_kthread we need an immediate commit so it doesn't
 * need to wait for commit_interval
 */
#define BTRFS_FS_NEED_ASYNC_COMMIT 17

struct btrfs_fs_info {
    u8 fsid[BTRFS_FSID_SIZE];
    u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
@@ -556,6 +556,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
    dst_rsv = &fs_info->delayed_block_rsv;

    num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

    /*
     * Here we migrate space rsv from transaction rsv, since have already
     * reserved space when starting a transaction. So no need to reserve
     * qgroup space here.
     */
    ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
    if (!ret) {
        trace_btrfs_space_reservation(fs_info, "delayed_item",
@@ -577,7 +583,10 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
        return;

    rsv = &fs_info->delayed_block_rsv;
    btrfs_qgroup_convert_reserved_meta(root, item->bytes_reserved);
    /*
     * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
     * to release/reserve qgroup space.
     */
    trace_btrfs_space_reservation(fs_info, "delayed_item",
            item->key.objectid, item->bytes_reserved,
            0);
@@ -602,9 +611,6 @@ static int btrfs_delayed_inode_reserve_metadata(

    num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

    ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
    if (ret < 0)
        return ret;
    /*
     * btrfs_dirty_inode will update the inode under btrfs_join_transaction
     * which doesn't reserve space for speed. This is a problem since we
@@ -616,6 +622,10 @@ static int btrfs_delayed_inode_reserve_metadata(
     */
    if (!src_rsv || (!trans->bytes_reserved &&
             src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
        ret = btrfs_qgroup_reserve_meta_prealloc(root,
                fs_info->nodesize, true);
        if (ret < 0)
            return ret;
        ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
                BTRFS_RESERVE_NO_FLUSH);
        /*
@@ -634,6 +644,8 @@ static int btrfs_delayed_inode_reserve_metadata(
                "delayed_inode",
                btrfs_ino(inode),
                num_bytes, 1);
    } else {
        btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
    }
    return ret;
}
@@ -540,8 +540,10 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
        struct btrfs_delayed_ref_head *head_ref,
        struct btrfs_qgroup_extent_record *qrecord,
        u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
        int action, int is_data, int *qrecord_inserted_ret,
        int action, int is_data, int is_system,
        int *qrecord_inserted_ret,
        int *old_ref_mod, int *new_ref_mod)
{
    struct btrfs_delayed_ref_head *existing;
    struct btrfs_delayed_ref_root *delayed_refs;
@@ -585,6 +587,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
    head_ref->ref_mod = count_mod;
    head_ref->must_insert_reserved = must_insert_reserved;
    head_ref->is_data = is_data;
    head_ref->is_system = is_system;
    head_ref->ref_tree = RB_ROOT;
    INIT_LIST_HEAD(&head_ref->ref_add_list);
    RB_CLEAR_NODE(&head_ref->href_node);
@@ -772,6 +775,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
    struct btrfs_delayed_ref_root *delayed_refs;
    struct btrfs_qgroup_extent_record *record = NULL;
    int qrecord_inserted;
    int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);

    BUG_ON(extent_op && extent_op->is_data);
    ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
@@ -800,8 +804,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
     */
    head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
            bytenr, num_bytes, 0, 0, action, 0,
            &qrecord_inserted, old_ref_mod,
            new_ref_mod);
            is_system, &qrecord_inserted,
            old_ref_mod, new_ref_mod);

    add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
            num_bytes, parent, ref_root, level, action);
@@ -868,7 +872,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
     */
    head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
            bytenr, num_bytes, ref_root, reserved,
            action, 1, &qrecord_inserted,
            action, 1, 0, &qrecord_inserted,
            old_ref_mod, new_ref_mod);

    add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
@@ -898,9 +902,14 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
    delayed_refs = &trans->transaction->delayed_refs;
    spin_lock(&delayed_refs->lock);

    /*
     * extent_ops just modify the flags of an extent and they don't result
     * in ref count changes, hence it's safe to pass false/0 for is_system
     * argument
     */
    add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
            num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
            extent_op->is_data, NULL, NULL, NULL);
            extent_op->is_data, 0, NULL, NULL, NULL);

    spin_unlock(&delayed_refs->lock);
    return 0;

@@ -127,6 +127,7 @@ struct btrfs_delayed_ref_head {
     */
    unsigned int must_insert_reserved:1;
    unsigned int is_data:1;
    unsigned int is_system:1;
    unsigned int processing:1;
};
@@ -1824,6 +1824,7 @@ static int transaction_kthread(void *arg)

        now = get_seconds();
        if (cur->state < TRANS_STATE_BLOCKED &&
            !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
            (now < cur->start_time ||
             now - cur->start_time < fs_info->commit_interval)) {
            spin_unlock(&fs_info->trans_lock);
@@ -2601,13 +2601,19 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
    trace_run_delayed_ref_head(fs_info, head, 0);

    if (head->total_ref_mod < 0) {
        struct btrfs_block_group_cache *cache;
        struct btrfs_space_info *space_info;
        u64 flags;

        cache = btrfs_lookup_block_group(fs_info, head->bytenr);
        ASSERT(cache);
        percpu_counter_add(&cache->space_info->total_bytes_pinned,
        if (head->is_data)
            flags = BTRFS_BLOCK_GROUP_DATA;
        else if (head->is_system)
            flags = BTRFS_BLOCK_GROUP_SYSTEM;
        else
            flags = BTRFS_BLOCK_GROUP_METADATA;
        space_info = __find_space_info(fs_info, flags);
        ASSERT(space_info);
        percpu_counter_add(&space_info->total_bytes_pinned,
                -head->num_bytes);
        btrfs_put_block_group(cache);

        if (head->is_data) {
            spin_lock(&delayed_refs->lock);
@@ -5559,14 +5565,18 @@ again:

static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
        struct btrfs_block_rsv *block_rsv,
        struct btrfs_block_rsv *dest, u64 num_bytes)
        struct btrfs_block_rsv *dest, u64 num_bytes,
        u64 *qgroup_to_release_ret)
{
    struct btrfs_space_info *space_info = block_rsv->space_info;
    u64 qgroup_to_release = 0;
    u64 ret;

    spin_lock(&block_rsv->lock);
    if (num_bytes == (u64)-1)
    if (num_bytes == (u64)-1) {
        num_bytes = block_rsv->size;
        qgroup_to_release = block_rsv->qgroup_rsv_size;
    }
    block_rsv->size -= num_bytes;
    if (block_rsv->reserved >= block_rsv->size) {
        num_bytes = block_rsv->reserved - block_rsv->size;
@@ -5575,6 +5585,13 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
    } else {
        num_bytes = 0;
    }
    if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
        qgroup_to_release = block_rsv->qgroup_rsv_reserved -
                    block_rsv->qgroup_rsv_size;
        block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
    } else {
        qgroup_to_release = 0;
    }
    spin_unlock(&block_rsv->lock);

    ret = num_bytes;
@@ -5597,6 +5614,8 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
            space_info_add_old_bytes(fs_info, space_info,
                    num_bytes);
    }
    if (qgroup_to_release_ret)
        *qgroup_to_release_ret = qgroup_to_release;
    return ret;
}

@@ -5738,17 +5757,21 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
    struct btrfs_root *root = inode->root;
    struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
    u64 num_bytes = 0;
    u64 qgroup_num_bytes = 0;
    int ret = -ENOSPC;

    spin_lock(&block_rsv->lock);
    if (block_rsv->reserved < block_rsv->size)
        num_bytes = block_rsv->size - block_rsv->reserved;
    if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size)
        qgroup_num_bytes = block_rsv->qgroup_rsv_size -
                   block_rsv->qgroup_rsv_reserved;
    spin_unlock(&block_rsv->lock);

    if (num_bytes == 0)
        return 0;

    ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
    ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes, true);
    if (ret)
        return ret;
    ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
@@ -5756,7 +5779,13 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
        block_rsv_add_bytes(block_rsv, num_bytes, 0);
        trace_btrfs_space_reservation(root->fs_info, "delalloc",
                btrfs_ino(inode), num_bytes, 1);
    }

        /* Don't forget to increase qgroup_rsv_reserved */
        spin_lock(&block_rsv->lock);
        block_rsv->qgroup_rsv_reserved += qgroup_num_bytes;
        spin_unlock(&block_rsv->lock);
    } else
        btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
    return ret;
}

@@ -5777,20 +5806,23 @@ static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
    struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
    struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
    u64 released = 0;
    u64 qgroup_to_release = 0;

    /*
     * Since we statically set the block_rsv->size we just want to say we
     * are releasing 0 bytes, and then we'll just get the reservation over
     * the size free'd.
     */
    released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0);
    released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0,
                    &qgroup_to_release);
    if (released > 0)
        trace_btrfs_space_reservation(fs_info, "delalloc",
                btrfs_ino(inode), released, 0);
    if (qgroup_free)
        btrfs_qgroup_free_meta_prealloc(inode->root, released);
        btrfs_qgroup_free_meta_prealloc(inode->root, qgroup_to_release);
    else
        btrfs_qgroup_convert_reserved_meta(inode->root, released);
        btrfs_qgroup_convert_reserved_meta(inode->root,
                    qgroup_to_release);
}

void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
@@ -5802,7 +5834,7 @@ void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
    if (global_rsv == block_rsv ||
        block_rsv->space_info != global_rsv->space_info)
        global_rsv = NULL;
    block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes);
    block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes, NULL);
}

static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
@@ -5882,7 +5914,7 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
    block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
            (u64)-1);
            (u64)-1, NULL);
    WARN_ON(fs_info->trans_block_rsv.size > 0);
    WARN_ON(fs_info->trans_block_rsv.reserved > 0);
    WARN_ON(fs_info->chunk_block_rsv.size > 0);
@@ -5906,7 +5938,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
    WARN_ON_ONCE(!list_empty(&trans->new_bgs));

    block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
            trans->chunk_bytes_reserved);
            trans->chunk_bytes_reserved, NULL);
    trans->chunk_bytes_reserved = 0;
}

@@ -6011,6 +6043,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
{
    struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
    u64 reserve_size = 0;
    u64 qgroup_rsv_size = 0;
    u64 csum_leaves;
    unsigned outstanding_extents;

@@ -6023,9 +6056,17 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
            inode->csum_bytes);
    reserve_size += btrfs_calc_trans_metadata_size(fs_info,
            csum_leaves);
    /*
     * For qgroup rsv, the calculation is very simple:
     * account one nodesize for each outstanding extent
     *
     * This is overestimating in most cases.
     */
    qgroup_rsv_size = outstanding_extents * fs_info->nodesize;

    spin_lock(&block_rsv->lock);
    block_rsv->size = reserve_size;
    block_rsv->qgroup_rsv_size = qgroup_rsv_size;
    spin_unlock(&block_rsv->lock);
}

@@ -8403,7 +8444,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
        struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
    block_rsv_add_bytes(block_rsv, blocksize, 0);
    block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
    block_rsv_release_bytes(fs_info, block_rsv, NULL, 0, NULL);
}

/*
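The sizing comment in btrfs_calculate_inode_block_rsv_size() above is easy to check by hand: the qgroup side charges one nodesize per outstanding extent, a deliberate overestimate of the real metadata growth. A small worked example (the numbers are illustrative; 16 KiB is merely the common default nodesize):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t nodesize = 16384;              /* assumed default nodesize */
    unsigned outstanding_extents = 3;
    uint64_t qgroup_rsv_size = (uint64_t)outstanding_extents * nodesize;
    printf("qgroup rsv = %llu bytes\n",
           (unsigned long long)qgroup_rsv_size);  /* prints 49152 */
    return 0;
}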
@@ -1748,7 +1748,7 @@ again:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                lockstart, lockend, &cached_state);
        btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
                (ret != 0));
                true);
        if (ret) {
            btrfs_drop_pages(pages, num_pages);
            break;
@@ -31,6 +31,7 @@
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -5905,11 +5906,13 @@ static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
        struct dir_entry *entry = addr;
        char *name = (char *)(entry + 1);

        ctx->pos = entry->offset;
        if (!dir_emit(ctx, name, entry->name_len, entry->ino,
                entry->type))
        ctx->pos = get_unaligned(&entry->offset);
        if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
                get_unaligned(&entry->ino),
                get_unaligned(&entry->type)))
            return 1;
        addr += sizeof(struct dir_entry) + entry->name_len;
        addr += sizeof(struct dir_entry) +
            get_unaligned(&entry->name_len);
        ctx->pos++;
    }
    return 0;
@@ -5999,14 +6002,15 @@ again:
    }

    entry = addr;
    entry->name_len = name_len;
    put_unaligned(name_len, &entry->name_len);
    name_ptr = (char *)(entry + 1);
    read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
            name_len);
    entry->type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
    put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)],
            &entry->type);
    btrfs_dir_item_key_to_cpu(leaf, di, &location);
    entry->ino = location.objectid;
    entry->offset = found_key.offset;
    put_unaligned(location.objectid, &entry->ino);
    put_unaligned(found_key.offset, &entry->offset);
    entries++;
    addr += sizeof(struct dir_entry) + name_len;
    total_len += sizeof(struct dir_entry) + name_len;
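The btrfs_filldir() hunks route every access to the packed dir_entry buffer through get_unaligned()/put_unaligned(), which guarantee safe access on architectures that fault or slow down on misaligned loads. A compilable userspace analogue of those helpers, done the portable way via memcpy:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t get_unaligned_u64(const void *p)
{
    uint64_t v;
    memcpy(&v, p, sizeof(v));   /* compiler emits a safe access */
    return v;
}

static void put_unaligned_u64(uint64_t v, void *p)
{
    memcpy(p, &v, sizeof(v));
}

int main(void)
{
    unsigned char buf[16] = { 0 };
    put_unaligned_u64(0x1122334455667788ULL, buf + 1);  /* odd offset */
    printf("%llx\n", (unsigned long long)get_unaligned_u64(buf + 1));
    return 0;
}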
@@ -189,9 +189,10 @@ void btrfs_print_leaf(struct extent_buffer *l)
    fs_info = l->fs_info;
    nr = btrfs_header_nritems(l);

    btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d",
           btrfs_header_bytenr(l), nr,
           btrfs_leaf_free_space(fs_info, l));
    btrfs_info(fs_info,
           "leaf %llu gen %llu total ptrs %d free space %d owner %llu",
           btrfs_header_bytenr(l), btrfs_header_generation(l), nr,
           btrfs_leaf_free_space(fs_info, l), btrfs_header_owner(l));
    for (i = 0 ; i < nr ; i++) {
        item = btrfs_item_nr(i);
        btrfs_item_key_to_cpu(l, &key, i);
@@ -325,7 +326,7 @@ void btrfs_print_leaf(struct extent_buffer *l)
    }
}

void btrfs_print_tree(struct extent_buffer *c)
void btrfs_print_tree(struct extent_buffer *c, bool follow)
{
    struct btrfs_fs_info *fs_info;
    int i; u32 nr;
@@ -342,15 +343,19 @@ void btrfs_print_tree(struct extent_buffer *c)
        return;
    }
    btrfs_info(fs_info,
           "node %llu level %d total ptrs %d free spc %u",
           btrfs_header_bytenr(c), level, nr,
           (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr);
           "node %llu level %d gen %llu total ptrs %d free spc %u owner %llu",
           btrfs_header_bytenr(c), level, btrfs_header_generation(c),
           nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr,
           btrfs_header_owner(c));
    for (i = 0; i < nr; i++) {
        btrfs_node_key_to_cpu(c, &key, i);
        pr_info("\tkey %d (%llu %u %llu) block %llu\n",
        pr_info("\tkey %d (%llu %u %llu) block %llu gen %llu\n",
            i, key.objectid, key.type, key.offset,
            btrfs_node_blockptr(c, i));
            btrfs_node_blockptr(c, i),
            btrfs_node_ptr_generation(c, i));
    }
    if (!follow)
        return;
    for (i = 0; i < nr; i++) {
        struct btrfs_key first_key;
        struct extent_buffer *next;
@@ -372,7 +377,7 @@ void btrfs_print_tree(struct extent_buffer *c)
        if (btrfs_header_level(next) !=
            level - 1)
            BUG();
        btrfs_print_tree(next);
        btrfs_print_tree(next, follow);
        free_extent_buffer(next);
    }
}
@@ -7,6 +7,6 @@
#define BTRFS_PRINT_TREE_H

void btrfs_print_leaf(struct extent_buffer *l);
void btrfs_print_tree(struct extent_buffer *c);
void btrfs_print_tree(struct extent_buffer *c, bool follow);

#endif
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sizes.h>

#include "ctree.h"
#include "transaction.h"
@@ -2375,8 +2376,21 @@ out:
    return ret;
}

static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
/*
 * Two limits to commit transaction in advance.
 *
 * For RATIO, it will be 1/RATIO of the remaining limit
 * (excluding data and prealloc meta) as threshold.
 * For SIZE, it will be in byte unit as threshold.
 */
#define QGROUP_PERTRANS_RATIO 32
#define QGROUP_PERTRANS_SIZE SZ_32M
static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
        const struct btrfs_qgroup *qg, u64 num_bytes)
{
    u64 limit;
    u64 threshold;

    if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
        qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
        return false;

@@ -2385,6 +2399,31 @@ static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
        qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
        return false;

    /*
     * Even if we passed the check, it's better to check if reservation
     * for meta_pertrans is pushing us near limit.
     * If there is too much pertrans reservation or it's near the limit,
     * let's try commit transaction to free some, using transaction_kthread
     */
    if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
                  BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
        if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
            limit = qg->max_excl;
        else
            limit = qg->max_rfer;
        threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] -
                 qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) /
                QGROUP_PERTRANS_RATIO;
        threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE);

        /*
         * Use transaction_kthread to commit transaction, so we no
         * longer need to bother nested transaction nor lock context.
         */
        if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold)
            btrfs_commit_transaction_locksafe(fs_info);
    }

    return true;
}

@@ -2434,7 +2473,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,

    qg = unode_aux_to_qgroup(unode);

    if (enforce && !qgroup_check_limits(qg, num_bytes)) {
    if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) {
        ret = -EDQUOT;
        goto out;
    }
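The early-commit heuristic above computes its threshold as min((limit - data_rsv - prealloc_rsv) / 32, 32 MiB) and asks for a commit once the per-transaction reservation exceeds it. A worked example with made-up numbers:

#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
    uint64_t limit    = 1024ULL << 20;  /* 1 GiB qgroup limit, illustrative */
    uint64_t data     = 600ULL << 20;   /* data reservation */
    uint64_t prealloc = 100ULL << 20;   /* prealloc meta reservation */

    uint64_t threshold = min_u64((limit - data - prealloc) / 32,
                                 32ULL << 20);
    /* (1024M - 700M) / 32 = 10.125 MiB, below the 32 MiB cap */
    printf("commit early above %llu bytes of pertrans rsv\n",
           (unsigned long long)threshold);
    return 0;
}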
@@ -2267,6 +2267,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
     */
    cur_trans->state = TRANS_STATE_COMPLETED;
    wake_up(&cur_trans->commit_wait);
    clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);

    spin_lock(&fs_info->trans_lock);
    list_del_init(&cur_trans->list);
@@ -199,6 +199,20 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
        int wait_for_unblock);

/*
 * Try to commit transaction asynchronously, so this is safe to call
 * even holding a spinlock.
 *
 * It's done by informing transaction_kthread to commit transaction without
 * waiting for commit interval.
 */
static inline void btrfs_commit_transaction_locksafe(
        struct btrfs_fs_info *fs_info)
{
    set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
    wake_up_process(fs_info->transaction_kthread);
}
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
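btrfs_commit_transaction_locksafe() is safe under a spinlock precisely because it only sets a flag and wakes the kthread; all blocking work happens in the woken thread. A compilable userspace analogue of the flag-and-wake idiom, with pthreads standing in for the kthread machinery:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool need_commit;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Requester: set the flag and wake the worker. (In the kernel,
 * set_bit() plus wake_up_process() need no sleeping lock at all,
 * which is exactly what makes the helper safe under a spinlock.) */
static void request_commit(void)
{
    pthread_mutex_lock(&lock);
    need_commit = true;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!need_commit)
        pthread_cond_wait(&cond, &lock);
    need_commit = false;
    pthread_mutex_unlock(&lock);
    puts("committing transaction");
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    request_commit();
    pthread_join(t, NULL);
    return 0;
}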
@@ -54,7 +54,7 @@ do { \
        pr_debug_ ## ratefunc("%s: " \
                fmt, __FILE__, ##__VA_ARGS__); \
    } else if ((type) & VFS) { \
        pr_err_ ## ratefunc("CuIFS VFS: " \
        pr_err_ ## ratefunc("CIFS VFS: " \
                fmt, ##__VA_ARGS__); \
    } else if ((type) & NOISY && (NOISY != 0)) { \
        pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \
@@ -684,6 +684,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
        goto mknod_out;
    }

    if (!S_ISCHR(mode) && !S_ISBLK(mode))
        goto mknod_out;

    if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
        goto mknod_out;

@@ -692,10 +695,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,

    buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
    if (buf == NULL) {
        kfree(full_path);
        rc = -ENOMEM;
        free_xid(xid);
        return rc;
        goto mknod_out;
    }

    if (backup_cred(cifs_sb))
@@ -742,7 +743,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
        pdev->minor = cpu_to_le64(MINOR(device_number));
        rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
                &bytes_written, iov, 1);
    } /* else if (S_ISFIFO) */
    }
    tcon->ses->server->ops->close(xid, tcon, &fid);
    d_drop(direntry);
@@ -3462,7 +3462,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
static vm_fault_t
cifs_page_mkwrite(struct vm_fault *vmf)
{
    struct page *page = vmf->page;
@@ -1452,7 +1452,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
    struct cifs_open_parms oparms;
    struct cifs_fid fid;
    struct kvec err_iov = {NULL, 0};
    struct smb2_err_rsp *err_buf = NULL;
    struct smb2_err_rsp *err_buf;
    struct smb2_symlink_err_rsp *symlink;
    unsigned int sub_len;
    unsigned int sub_offset;
@@ -1476,7 +1476,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,

    rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov);

    if (!rc || !err_buf) {
    if (!rc || !err_iov.iov_base) {
        kfree(utf16_path);
        return -ENOENT;
    }
@@ -1028,7 +1028,7 @@ static int smbd_post_send(struct smbd_connection *info,
    for (i = 0; i < request->num_sge; i++) {
        log_rdma_send(INFO,
            "rdma_request sge[%d] addr=%llu length=%u\n",
            i, request->sge[0].addr, request->sge[0].length);
            i, request->sge[i].addr, request->sge[i].length);
        ib_dma_sync_single_for_device(
            info->id->device,
            request->sge[i].addr,
@@ -2139,6 +2139,10 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
        goto done;
    }

    cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
    for (i = 0; i < rqst->rq_nvec-1; i++)
        dump_smb(iov[i].iov_base, iov[i].iov_len);

    remaining_data_length = buflen;

    log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
@@ -2194,6 +2198,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
            goto done;
        }
        i++;
        if (i == rqst->rq_nvec)
            break;
    }
    start = i;
    buflen = 0;
@@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits)
     */
    if (inode && inode_to_wb_is_valid(inode)) {
        struct bdi_writeback *wb;
        bool locked, congested;
        struct wb_lock_cookie lock_cookie = {};
        bool congested;

        wb = unlocked_inode_to_wb_begin(inode, &locked);
        wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
        congested = wb_congested(wb, cong_bits);
        unlocked_inode_to_wb_end(inode, locked);
        unlocked_inode_to_wb_end(inode, &lock_cookie);
        return congested;
    }
@@ -1693,6 +1693,12 @@ void task_dump_owner(struct task_struct *task, umode_t mode,
    kuid_t uid;
    kgid_t gid;

    if (unlikely(task->flags & PF_KTHREAD)) {
        *ruid = GLOBAL_ROOT_UID;
        *rgid = GLOBAL_ROOT_GID;
        return;
    }

    /* Default to the tasks effective ownership */
    rcu_read_lock();
    cred = __task_cred(task);