Merge branch 'for-4.11/libnvdimm' into for-4.12/dax
commit bfca9acf1a
733 changed files with 9346 additions and 5709 deletions
@@ -1142,16 +1142,17 @@ used by the kernel.
  pids.max

	A read-write single value file which exists on non-root cgroups. The
	default is "max".
	A read-write single value file which exists on non-root
	cgroups. The default is "max".

	Hard limit of number of processes.
	Hard limit of number of processes.

  pids.current

	A read-only single value file which exists on all cgroups.
	A read-only single value file which exists on all cgroups.

	The number of processes currently in the cgroup and its descendants.
	The number of processes currently in the cgroup and its
	descendants.

	Organisational operations are not blocked by cgroup policies, so it is
	possible to have pids.current > pids.max. This can be done by either
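(Illustrative sketch, not part of the patch: one way the pids.max knob documented in the hunk above can be driven from user space. The cgroup mount point and the group name "mygroup" are assumptions.)

#include <stdio.h>

/* Hypothetical cgroup v2 group; adjust to the group being limited. */
#define PIDS_MAX_PATH "/sys/fs/cgroup/mygroup/pids.max"

int main(void)
{
	FILE *f = fopen(PIDS_MAX_PATH, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Write a hard limit; writing "max" removes the limit. */
	fprintf(f, "%d\n", 128);
	fclose(f);
	return 0;
}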
@@ -45,7 +45,7 @@ The following clocks are available:
   - 1 15	SATA
   - 1 16	SATA USB
   - 1 17	Main
   - 1 18	SD/MMC
   - 1 18	SD/MMC/GOP
   - 1 21	Slow IO (SPI, NOR, BootROM, I2C, UART)
   - 1 22	USB3H0
   - 1 23	USB3H1

@@ -65,7 +65,7 @@ Required properties:
	"cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
	"cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
	"cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
	"cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
	"cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
	"cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";

Example:

@@ -78,6 +78,6 @@ Example:
	gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
		"cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
		"cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
		"cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
		"cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
		"cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
	};
@@ -4,7 +4,6 @@ Required properties:
  - compatible: value should be one of the following
		"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
		"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
		"samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
		"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
		"samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
		"samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */
@@ -11,7 +11,6 @@ Required properties:
		"samsung,s5pv210-fimd"; /* for S5PV210 SoC */
		"samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
		"samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
		"samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
		"samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */
		"samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */
@@ -13,7 +13,7 @@ Required Properties:
  - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
    before RK3288
  - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
  - "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108
  - "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
  - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
  - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
  - "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
@@ -1,39 +0,0 @@
Broadcom USB3 phy binding for northstar plus SoC
The USB3 phy is internal to the SoC and is accessed using mdio interface.

Required mdio bus properties:
- reg: Should be 0x0 for SoC internal USB3 phy
- #address-cells: must be 1
- #size-cells: must be 0

Required USB3 PHY properties:
- compatible: should be "brcm,nsp-usb3-phy"
- reg: USB3 Phy address on SoC internal MDIO bus and it should be 0x10.
- usb3-ctrl-syscon: handler of syscon node defining physical address
  of usb3 control register.
- #phy-cells: must be 0

Required usb3 control properties:
- compatible: should be "brcm,nsp-usb3-ctrl"
- reg: offset and length of the control registers

Example:

mdio@0 {
	reg = <0x0>;
	#address-cells = <1>;
	#size-cells = <0>;

	usb3_phy: usb-phy@10 {
		compatible = "brcm,nsp-usb3-phy";
		reg = <0x10>;
		usb3-ctrl-syscon = <&usb3_ctrl>;
		#phy-cells = <0>;
		status = "disabled";
	};
};

usb3_ctrl: syscon@104408 {
	compatible = "brcm,nsp-usb3-ctrl", "syscon";
	reg = <0x104408 0x3fc>;
};
@@ -71,6 +71,9 @@
	For Axon it can be absent, though my current driver
	doesn't handle phy-address yet so for now, keep
	0x00ffffff in it.
- phy-handle : Used to describe configurations where a external PHY
	is used. Please refer to:
	Documentation/devicetree/bindings/net/ethernet.txt
- rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
	operations (if absent the value is the same as
	rx-fifo-size). For Axon, either absent or 2048.

@@ -81,8 +84,22 @@
	offload, phandle of the TAH device node.
- tah-channel : 1 cell, optional. If appropriate, channel used on the
	TAH engine.
- fixed-link : Fixed-link subnode describing a link to a non-MDIO
	managed entity. See
	Documentation/devicetree/bindings/net/fixed-link.txt
	for details.
- mdio subnode : When the EMAC has a phy connected to its local
	mdio, which us supported by the kernel's network
	PHY library in drivers/net/phy, there must be device
	tree subnode with the following required properties:
	- #address-cells: Must be <1>.
	- #size-cells: Must be <0>.

Example:
	For PHY definitions: Please refer to
	Documentation/devicetree/bindings/net/phy.txt and
	Documentation/devicetree/bindings/net/ethernet.txt

Examples:

	EMAC0: ethernet@40000800 {
		device_type = "network";

@@ -104,6 +121,48 @@
		zmii-channel = <0>;
	};

	EMAC1: ethernet@ef600c00 {
		device_type = "network";
		compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
		interrupt-parent = <&EMAC1>;
		interrupts = <0 1>;
		#interrupt-cells = <1>;
		#address-cells = <0>;
		#size-cells = <0>;
		interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
				 1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
		reg = <0xef600c00 0x000000c4>;
		local-mac-address = [000000000000]; /* Filled in by U-Boot */
		mal-device = <&MAL0>;
		mal-tx-channel = <0>;
		mal-rx-channel = <0>;
		cell-index = <0>;
		max-frame-size = <9000>;
		rx-fifo-size = <16384>;
		tx-fifo-size = <2048>;
		fifo-entry-size = <10>;
		phy-mode = "rgmii";
		phy-handle = <&phy0>;
		phy-map = <0x00000000>;
		rgmii-device = <&RGMII0>;
		rgmii-channel = <0>;
		tah-device = <&TAH0>;
		tah-channel = <0>;
		has-inverted-stacr-oc;
		has-new-stacr-staopc;

		mdio {
			#address-cells = <1>;
			#size-cells = <0>;

			phy0: ethernet-phy@0 {
				compatible = "ethernet-phy-ieee802.3-c22";
				reg = <0>;
			};
		};
	};

ii) McMAL node

Required properties:

@@ -145,4 +204,3 @@
- revision : as provided by the RGMII new version register if
	available.
	For Axon: 0x0000012a
@@ -20,3 +20,8 @@ Index 1: The output gpio for enabling Vbus output from the device to the otg
Index 2: The output gpio for muxing of the data pins between the USB host and
	 the USB peripheral controller, write 1 to mux to the peripheral
	 controller

There is a mapping between indices and GPIO connection IDs as follows
	id	index 0
	vbus	index 1
	mux	index 2
@@ -18,8 +18,8 @@ because gcc versions 4.5 and 4.6 are compiled by a C compiler,
gcc-4.7 can be compiled by a C or a C++ compiler,
and versions 4.8+ can only be compiled by a C++ compiler.

Currently the GCC plugin infrastructure supports only the x86, arm and arm64
architectures.
Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
powerpc architectures.

This infrastructure was ported from grsecurity [6] and PaX [7].
@@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN
	FALSE (router)

forwarding - BOOLEAN
	Enable IP forwarding on this interface.
	Enable IP forwarding on this interface.  This controls whether packets
	received _on_ this interface can be forwarded.

mc_forwarding - BOOLEAN
	Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE
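(Illustrative sketch, not part of the patch: flipping the per-interface forwarding knob that the hunk above documents. The interface name eth0 is an assumption.)

#include <stdio.h>

/* Assumed interface; substitute the device being configured. */
#define FWD_PATH "/proc/sys/net/ipv4/conf/eth0/forwarding"

int main(void)
{
	FILE *f = fopen(FWD_PATH, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* "1" allows packets received on eth0 to be forwarded, "0" disables it. */
	fputs("1\n", f);
	fclose(f);
	return 0;
}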
MAINTAINERS (18 lines changed)
@@ -3216,7 +3216,6 @@ F: drivers/platform/chrome/

CISCO VIC ETHERNET NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
M: Sujith Sankar <ssujith@cisco.com>
M: Govindarajulu Varadarajan <_govind@gmx.com>
M: Neel Patel <neepatel@cisco.com>
S: Supported

@@ -7773,13 +7772,6 @@ F: include/net/mac80211.h
F: net/mac80211/
F: drivers/net/wireless/mac80211_hwsim.[ch]

MACVLAN DRIVER
M: Patrick McHardy <kaber@trash.net>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/macvlan.c
F: include/linux/if_macvlan.h

MAILBOX API
M: Jassi Brar <jassisinghbrar@gmail.com>
L: linux-kernel@vger.kernel.org

@@ -7852,6 +7844,8 @@ F: drivers/net/ethernet/marvell/mvneta.*

MARVELL MWIFIEX WIRELESS DRIVER
M: Amitkumar Karwar <akarwar@marvell.com>
M: Nishant Sarmukadam <nishants@marvell.com>
M: Ganapathi Bhat <gbhat@marvell.com>
M: Xinming Hu <huxm@marvell.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/marvell/mwifiex/

@@ -13382,14 +13376,6 @@ W: https://linuxtv.org
S: Maintained
F: drivers/media/platform/vivid/*

VLAN (802.1Q)
M: Patrick McHardy <kaber@trash.net>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/macvlan.c
F: include/linux/if_*vlan.h
F: net/8021q/

VLYNQ BUS
M: Florian Fainelli <f.fainelli@gmail.com>
L: openwrt-devel@lists.openwrt.org (subscribers-only)
Makefile (2 lines changed)
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc4
NAME = Fearless Coyote

# *DOCUMENTATION*
@@ -63,14 +63,14 @@
			label = "home";
			linux,code = <KEY_HOME>;
			gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
			gpio-key,wakeup;
			wakeup-source;
		};

		button@1 {
			label = "menu";
			linux,code = <KEY_MENU>;
			gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
			gpio-key,wakeup;
			wakeup-source;
		};

	};
@@ -315,6 +315,13 @@
		/* ID & VBUS GPIOs provided in board dts */
		};
	};

	tpic2810: tpic2810@60 {
		compatible = "ti,tpic2810";
		reg = <0x60>;
		gpio-controller;
		#gpio-cells = <2>;
	};
};

&mcspi3 {

@@ -330,13 +337,6 @@
		spi-max-frequency = <1000000>;
		spi-cpol;
	};

	tpic2810: tpic2810@60 {
		compatible = "ti,tpic2810";
		reg = <0x60>;
		gpio-controller;
		#gpio-cells = <2>;
	};
};

&uart3 {
@@ -66,14 +66,14 @@
	timer@20200 {
		compatible = "arm,cortex-a9-global-timer";
		reg = <0x20200 0x100>;
		interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
		interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
		clocks = <&periph_clk>;
	};

	local-timer@20600 {
		compatible = "arm,cortex-a9-twd-timer";
		reg = <0x20600 0x100>;
		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
		interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
		clocks = <&periph_clk>;
	};
@@ -48,15 +48,14 @@
	};

	memory {
		reg = <0x00000000 0x10000000>;
		reg = <0x80000000 0x10000000>;
	};
};

&uart0 {
	clock-frequency = <62499840>;
	status = "okay";
};

&uart1 {
	clock-frequency = <62499840>;
	status = "okay";
};
@@ -55,6 +55,7 @@
	gpio-restart {
		compatible = "gpio-restart";
		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
		open-source;
		priority = <200>;
	};
};
@@ -55,6 +55,7 @@
	gpio-restart {
		compatible = "gpio-restart";
		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
		open-source;
		priority = <200>;
	};
};
@@ -55,6 +55,7 @@
	gpio-restart {
		compatible = "gpio-restart";
		gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
		open-source;
		priority = <200>;
	};
};
@@ -55,6 +55,7 @@
	gpio-restart {
		compatible = "gpio-restart";
		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
		open-source;
		priority = <200>;
	};
};
@@ -55,6 +55,7 @@
	gpio-restart {
		compatible = "gpio-restart";
		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
		open-source;
		priority = <200>;
	};
};
@@ -55,6 +55,7 @@
	gpio-restart {
		compatible = "gpio-restart";
		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
		open-source;
		priority = <200>;
	};
};
@@ -55,6 +55,7 @@
	gpio-restart {
		compatible = "gpio-restart";
		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
		open-source;
		priority = <200>;
	};
};
|
|||
};
|
||||
};
|
||||
|
||||
&cpu0 {
|
||||
arm-supply = <&sw1a_reg>;
|
||||
soc-supply = <&sw1c_reg>;
|
||||
};
|
||||
|
||||
&fec1 {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_enet1>;
|
||||
|
|
|
@ -266,7 +266,7 @@
|
|||
};
|
||||
|
||||
usb1: ohci@00400000 {
|
||||
compatible = "atmel,sama5d2-ohci", "usb-ohci";
|
||||
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
|
||||
reg = <0x00400000 0x100000>;
|
||||
interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
|
||||
clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include <dt-bindings/mfd/dbx500-prcmu.h>
|
||||
#include <dt-bindings/arm/ux500_pm_domains.h>
|
||||
#include <dt-bindings/gpio/gpio.h>
|
||||
#include <dt-bindings/clock/ste-ab8500.h>
|
||||
#include "skeleton.dtsi"
|
||||
|
||||
/ {
|
||||
|
@ -603,6 +604,11 @@
|
|||
interrupt-controller;
|
||||
#interrupt-cells = <2>;
|
||||
|
||||
ab8500_clock: clock-controller {
|
||||
compatible = "stericsson,ab8500-clk";
|
||||
#clock-cells = <1>;
|
||||
};
|
||||
|
||||
ab8500_gpio: ab8500-gpio {
|
||||
compatible = "stericsson,ab8500-gpio";
|
||||
gpio-controller;
|
||||
|
@ -686,6 +692,8 @@
|
|||
|
||||
ab8500-pwm {
|
||||
compatible = "stericsson,ab8500-pwm";
|
||||
clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
|
||||
clock-names = "intclk";
|
||||
};
|
||||
|
||||
ab8500-debugfs {
|
||||
|
@ -700,6 +708,9 @@
|
|||
V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
|
||||
V-DMIC-supply = <&ab8500_ldo_dmic_reg>;
|
||||
|
||||
clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
|
||||
clock-names = "audioclk";
|
||||
|
||||
stericsson,earpeice-cmv = <950>; /* Units in mV. */
|
||||
};
|
||||
|
||||
|
@ -1095,6 +1106,14 @@
|
|||
status = "disabled";
|
||||
};
|
||||
|
||||
sound {
|
||||
compatible = "stericsson,snd-soc-mop500";
|
||||
stericsson,cpu-dai = <&msp1 &msp3>;
|
||||
stericsson,audio-codec = <&codec>;
|
||||
clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
|
||||
clock-names = "sysclk", "ulpclk", "intclk";
|
||||
};
|
||||
|
||||
msp0: msp@80123000 {
|
||||
compatible = "stericsson,ux500-msp-i2s";
|
||||
reg = <0x80123000 0x1000>;
|
||||
|
|
|
@ -186,15 +186,6 @@
|
|||
status = "okay";
|
||||
};
|
||||
|
||||
sound {
|
||||
compatible = "stericsson,snd-soc-mop500";
|
||||
|
||||
stericsson,cpu-dai = <&msp1 &msp3>;
|
||||
stericsson,audio-codec = <&codec>;
|
||||
clocks = <&prcmu_clk PRCMU_SYSCLK>;
|
||||
clock-names = "sysclk";
|
||||
};
|
||||
|
||||
msp0: msp@80123000 {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&msp0_default_mode>;
|
||||
|
|
|
@ -159,15 +159,6 @@
|
|||
"", "", "", "", "", "", "", "";
|
||||
};
|
||||
|
||||
sound {
|
||||
compatible = "stericsson,snd-soc-mop500";
|
||||
|
||||
stericsson,cpu-dai = <&msp1 &msp3>;
|
||||
stericsson,audio-codec = <&codec>;
|
||||
clocks = <&prcmu_clk PRCMU_SYSCLK>;
|
||||
clock-names = "sysclk";
|
||||
};
|
||||
|
||||
msp0: msp@80123000 {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&msp0_default_mode>;
|
||||
|
|
|
@ -167,7 +167,7 @@
|
|||
reg = <8>;
|
||||
label = "cpu";
|
||||
ethernet = <&gmac>;
|
||||
phy-mode = "rgmii";
|
||||
phy-mode = "rgmii-txid";
|
||||
fixed-link {
|
||||
speed = <1000>;
|
||||
full-duplex;
|
||||
|
|
|
@ -495,7 +495,7 @@
|
|||
resets = <&ccu RST_BUS_GPU>;
|
||||
|
||||
assigned-clocks = <&ccu CLK_GPU>;
|
||||
assigned-clock-rates = <408000000>;
|
||||
assigned-clock-rates = <384000000>;
|
||||
};
|
||||
|
||||
gic: interrupt-controller@01c81000 {
|
||||
|
|
|
@ -50,8 +50,6 @@
|
|||
|
||||
backlight: backlight {
|
||||
compatible = "pwm-backlight";
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&bl_en_pin>;
|
||||
pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
|
||||
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
|
||||
default-brightness-level = <8>;
|
||||
|
@ -93,11 +91,6 @@
|
|||
};
|
||||
|
||||
&pio {
|
||||
bl_en_pin: bl_en_pin@0 {
|
||||
pins = "PH6";
|
||||
function = "gpio_in";
|
||||
};
|
||||
|
||||
mmc0_cd_pin: mmc0_cd_pin@0 {
|
||||
pins = "PB4";
|
||||
function = "gpio_in";
|
||||
|
|
|
@ -188,6 +188,7 @@ CONFIG_WL12XX=m
|
|||
CONFIG_WL18XX=m
|
||||
CONFIG_WLCORE_SPI=m
|
||||
CONFIG_WLCORE_SDIO=m
|
||||
CONFIG_INPUT_MOUSEDEV=m
|
||||
CONFIG_INPUT_JOYDEV=m
|
||||
CONFIG_INPUT_EVDEV=m
|
||||
CONFIG_KEYBOARD_ATKBD=m
|
||||
|
|
|
@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
|
|||
at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
|
||||
}
|
||||
|
||||
static void sama5d3_ddr_standby(void)
|
||||
{
|
||||
u32 lpr0;
|
||||
u32 saved_lpr0;
|
||||
|
||||
saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
|
||||
lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
|
||||
lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
|
||||
|
||||
at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
|
||||
|
||||
cpu_do_idle();
|
||||
|
||||
at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
|
||||
}
|
||||
|
||||
/* We manage both DDRAM/SDRAM controllers, we need more than one value to
|
||||
* remember.
|
||||
*/
|
||||
|
@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
|
|||
{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
|
||||
{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
|
||||
{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
|
||||
{ .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
|
||||
{ .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
|
||||
{ /*sentinel*/ }
|
||||
};
|
||||
|
||||
|
|
|
@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
|
|||
|
||||
onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o
|
||||
obj-y += $(onenand-m) $(onenand-y)
|
||||
|
||||
nand-$(CONFIG_MTD_NAND_OMAP2) := gpmc-nand.o
|
||||
obj-y += $(nand-m) $(nand-y)
|
||||
|
|
|
@ -1,154 +0,0 @@
|
|||
/*
|
||||
* gpmc-nand.c
|
||||
*
|
||||
* Copyright (C) 2009 Texas Instruments
|
||||
* Vimal Singh <vimalsingh@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/omap-gpmc.h>
|
||||
#include <linux/mtd/nand.h>
|
||||
#include <linux/platform_data/mtd-nand-omap2.h>
|
||||
|
||||
#include <asm/mach/flash.h>
|
||||
|
||||
#include "soc.h"
|
||||
|
||||
/* minimum size for IO mapping */
|
||||
#define NAND_IO_SIZE 4
|
||||
|
||||
static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
|
||||
{
|
||||
/* platforms which support all ECC schemes */
|
||||
if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
|
||||
soc_is_omap54xx() || soc_is_dra7xx())
|
||||
return 1;
|
||||
|
||||
if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
|
||||
ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
|
||||
if (cpu_is_omap24xx())
|
||||
return 0;
|
||||
else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
|
||||
return 0;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
|
||||
* which require H/W based ECC error detection */
|
||||
if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
|
||||
((ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
|
||||
(ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
|
||||
return 0;
|
||||
|
||||
/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
|
||||
if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
|
||||
ecc_opt == OMAP_ECC_HAM1_CODE_SW)
|
||||
return 1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* This function will go away once the device-tree convertion is complete */
|
||||
static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data,
|
||||
struct gpmc_settings *s)
|
||||
{
|
||||
/* Enable RD PIN Monitoring Reg */
|
||||
if (gpmc_nand_data->dev_ready) {
|
||||
s->wait_on_read = true;
|
||||
s->wait_on_write = true;
|
||||
}
|
||||
|
||||
if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
|
||||
s->device_width = GPMC_DEVWIDTH_16BIT;
|
||||
else
|
||||
s->device_width = GPMC_DEVWIDTH_8BIT;
|
||||
}
|
||||
|
||||
int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
|
||||
struct gpmc_timings *gpmc_t)
|
||||
{
|
||||
int err = 0;
|
||||
struct gpmc_settings s;
|
||||
struct platform_device *pdev;
|
||||
struct resource gpmc_nand_res[] = {
|
||||
{ .flags = IORESOURCE_MEM, },
|
||||
{ .flags = IORESOURCE_IRQ, },
|
||||
{ .flags = IORESOURCE_IRQ, },
|
||||
};
|
||||
|
||||
BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM);
|
||||
|
||||
err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
|
||||
(unsigned long *)&gpmc_nand_res[0].start);
|
||||
if (err < 0) {
|
||||
pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n",
|
||||
gpmc_nand_data->cs, err);
|
||||
return err;
|
||||
}
|
||||
gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1;
|
||||
gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
|
||||
gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
|
||||
|
||||
memset(&s, 0, sizeof(struct gpmc_settings));
|
||||
gpmc_set_legacy(gpmc_nand_data, &s);
|
||||
|
||||
s.device_nand = true;
|
||||
|
||||
if (gpmc_t) {
|
||||
err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
|
||||
if (err < 0) {
|
||||
pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
|
||||
err);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
|
||||
if (err < 0)
|
||||
goto out_free_cs;
|
||||
|
||||
err = gpmc_configure(GPMC_CONFIG_WP, 0);
|
||||
if (err < 0)
|
||||
goto out_free_cs;
|
||||
|
||||
if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
|
||||
pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
|
||||
err = -EINVAL;
|
||||
goto out_free_cs;
|
||||
}
|
||||
|
||||
|
||||
pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs);
|
||||
if (pdev) {
|
||||
err = platform_device_add_resources(pdev, gpmc_nand_res,
|
||||
ARRAY_SIZE(gpmc_nand_res));
|
||||
if (!err)
|
||||
pdev->dev.platform_data = gpmc_nand_data;
|
||||
} else {
|
||||
err = -ENOMEM;
|
||||
}
|
||||
if (err)
|
||||
goto out_free_pdev;
|
||||
|
||||
err = platform_device_add(pdev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Unable to register NAND device\n");
|
||||
goto out_free_pdev;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_free_pdev:
|
||||
platform_device_put(pdev);
|
||||
out_free_cs:
|
||||
gpmc_cs_free(gpmc_nand_data->cs);
|
||||
|
||||
return err;
|
||||
}
|
|
@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
|
|||
return ret;
|
||||
}
|
||||
|
||||
void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
|
||||
int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
|
||||
{
|
||||
int err;
|
||||
struct device *dev = &gpmc_onenand_device.dev;
|
||||
|
@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
|
|||
if (err < 0) {
|
||||
dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
|
||||
gpmc_onenand_data->cs, err);
|
||||
return;
|
||||
return err;
|
||||
}
|
||||
|
||||
gpmc_onenand_resource.end = gpmc_onenand_resource.start +
|
||||
ONENAND_IO_SIZE - 1;
|
||||
|
||||
if (platform_device_register(&gpmc_onenand_device) < 0) {
|
||||
err = platform_device_register(&gpmc_onenand_device);
|
||||
if (err) {
|
||||
dev_err(dev, "Unable to register OneNAND device\n");
|
||||
gpmc_cs_free(gpmc_onenand_data->cs);
|
||||
return;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/assembler.h>
|
||||
|
||||
#include "omap44xx.h"
|
||||
|
||||
|
@ -66,7 +67,7 @@ wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
|
|||
cmp r0, r4
|
||||
bne wait_2
|
||||
ldr r12, =API_HYP_ENTRY
|
||||
adr r0, hyp_boot
|
||||
badr r0, hyp_boot
|
||||
smc #0
|
||||
hyp_boot:
|
||||
b omap_secondary_startup
|
||||
|
|
|
@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
|
|||
};
|
||||
|
||||
/* L4 CORE -> SR1 interface */
|
||||
static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
|
||||
{
|
||||
.pa_start = OMAP34XX_SR1_BASE,
|
||||
.pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1,
|
||||
.flags = ADDR_TYPE_RT,
|
||||
},
|
||||
{ },
|
||||
};
|
||||
|
||||
static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
|
||||
.master = &omap3xxx_l4_core_hwmod,
|
||||
.slave = &omap34xx_sr1_hwmod,
|
||||
.clk = "sr_l4_ick",
|
||||
.addr = omap3_sr1_addr_space,
|
||||
.user = OCP_USER_MPU,
|
||||
};
|
||||
|
||||
|
@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
|
|||
.master = &omap3xxx_l4_core_hwmod,
|
||||
.slave = &omap36xx_sr1_hwmod,
|
||||
.clk = "sr_l4_ick",
|
||||
.addr = omap3_sr1_addr_space,
|
||||
.user = OCP_USER_MPU,
|
||||
};
|
||||
|
||||
/* L4 CORE -> SR1 interface */
|
||||
static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
|
||||
{
|
||||
.pa_start = OMAP34XX_SR2_BASE,
|
||||
.pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1,
|
||||
.flags = ADDR_TYPE_RT,
|
||||
},
|
||||
{ },
|
||||
};
|
||||
|
||||
static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
|
||||
.master = &omap3xxx_l4_core_hwmod,
|
||||
.slave = &omap34xx_sr2_hwmod,
|
||||
.clk = "sr_l4_ick",
|
||||
.addr = omap3_sr2_addr_space,
|
||||
.user = OCP_USER_MPU,
|
||||
};
|
||||
|
||||
|
@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
|
|||
.master = &omap3xxx_l4_core_hwmod,
|
||||
.slave = &omap36xx_sr2_hwmod,
|
||||
.clk = "sr_l4_ick",
|
||||
.addr = omap3_sr2_addr_space,
|
||||
.user = OCP_USER_MPU,
|
||||
};
|
||||
|
||||
|
@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
|
|||
* Return: 0 if device named @dev_name is not likely to be accessible,
|
||||
* or 1 if it is likely to be accessible.
|
||||
*/
|
||||
static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
|
||||
const char *dev_name)
|
||||
static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
|
||||
const char *dev_name)
|
||||
{
|
||||
struct device_node *node;
|
||||
bool available;
|
||||
|
||||
if (!bus)
|
||||
return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
|
||||
return omap_type() == OMAP2_DEVICE_TYPE_GP;
|
||||
|
||||
if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
|
||||
return 1;
|
||||
node = of_get_child_by_name(bus, dev_name);
|
||||
available = of_device_is_available(node);
|
||||
of_node_put(node);
|
||||
|
||||
return 0;
|
||||
return available;
|
||||
}
|
||||
|
||||
int __init omap3xxx_hwmod_init(void)
|
||||
|
@ -3189,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void)
|
|||
|
||||
if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
|
||||
r = omap_hwmod_register_links(h_sham);
|
||||
if (r < 0)
|
||||
if (r < 0) {
|
||||
of_node_put(bus);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
|
||||
r = omap_hwmod_register_links(h_aes);
|
||||
if (r < 0)
|
||||
if (r < 0) {
|
||||
of_node_put(bus);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
of_node_put(bus);
|
||||
|
||||
/*
|
||||
* Register hwmod links specific to certain ES levels of a
|
||||
|
|
|
@ -411,3 +411,4 @@
|
|||
394 common pkey_mprotect sys_pkey_mprotect
|
||||
395 common pkey_alloc sys_pkey_alloc
|
||||
396 common pkey_free sys_pkey_free
|
||||
397 common statx sys_statx
|
||||
|
|
|
@ -1073,6 +1073,10 @@ config SYSVIPC_COMPAT
|
|||
def_bool y
|
||||
depends on COMPAT && SYSVIPC
|
||||
|
||||
config KEYS_COMPAT
|
||||
def_bool y
|
||||
depends on COMPAT && KEYS
|
||||
|
||||
endmenu
|
||||
|
||||
menu "Power management options"
|
||||
|
|
|
@ -114,6 +114,7 @@
|
|||
pcie0: pcie@20020000 {
|
||||
compatible = "brcm,iproc-pcie";
|
||||
reg = <0 0x20020000 0 0x1000>;
|
||||
dma-coherent;
|
||||
|
||||
#interrupt-cells = <1>;
|
||||
interrupt-map-mask = <0 0 0 0>;
|
||||
|
@ -144,6 +145,7 @@
|
|||
pcie4: pcie@50020000 {
|
||||
compatible = "brcm,iproc-pcie";
|
||||
reg = <0 0x50020000 0 0x1000>;
|
||||
dma-coherent;
|
||||
|
||||
#interrupt-cells = <1>;
|
||||
interrupt-map-mask = <0 0 0 0>;
|
||||
|
@ -174,6 +176,7 @@
|
|||
pcie8: pcie@60c00000 {
|
||||
compatible = "brcm,iproc-pcie-paxc";
|
||||
reg = <0 0x60c00000 0 0x1000>;
|
||||
dma-coherent;
|
||||
linux,pci-domain = <8>;
|
||||
|
||||
bus-range = <0x0 0x1>;
|
||||
|
@ -203,6 +206,7 @@
|
|||
<0x61030000 0x100>;
|
||||
reg-names = "amac_base", "idm_base", "nicpm_base";
|
||||
interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
|
||||
dma-coherent;
|
||||
phy-handle = <&gphy0>;
|
||||
phy-mode = "rgmii";
|
||||
status = "disabled";
|
||||
|
@ -213,6 +217,7 @@
|
|||
reg = <0x612c0000 0x445>; /* PDC FS0 regs */
|
||||
interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
|
||||
#mbox-cells = <1>;
|
||||
dma-coherent;
|
||||
brcm,rx-status-len = <32>;
|
||||
brcm,use-bcm-hdr;
|
||||
};
|
||||
|
@ -222,6 +227,7 @@
|
|||
reg = <0x612e0000 0x445>; /* PDC FS1 regs */
|
||||
interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
|
||||
#mbox-cells = <1>;
|
||||
dma-coherent;
|
||||
brcm,rx-status-len = <32>;
|
||||
brcm,use-bcm-hdr;
|
||||
};
|
||||
|
@ -231,6 +237,7 @@
|
|||
reg = <0x61300000 0x445>; /* PDC FS2 regs */
|
||||
interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
|
||||
#mbox-cells = <1>;
|
||||
dma-coherent;
|
||||
brcm,rx-status-len = <32>;
|
||||
brcm,use-bcm-hdr;
|
||||
};
|
||||
|
@ -240,6 +247,7 @@
|
|||
reg = <0x61320000 0x445>; /* PDC FS3 regs */
|
||||
interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
|
||||
#mbox-cells = <1>;
|
||||
dma-coherent;
|
||||
brcm,rx-status-len = <32>;
|
||||
brcm,use-bcm-hdr;
|
||||
};
|
||||
|
@ -644,6 +652,7 @@
|
|||
sata: ahci@663f2000 {
|
||||
compatible = "brcm,iproc-ahci", "generic-ahci";
|
||||
reg = <0x663f2000 0x1000>;
|
||||
dma-coherent;
|
||||
reg-names = "ahci";
|
||||
interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
|
||||
#address-cells = <1>;
|
||||
|
@ -667,6 +676,7 @@
|
|||
compatible = "brcm,sdhci-iproc-cygnus";
|
||||
reg = <0x66420000 0x100>;
|
||||
interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
|
||||
dma-coherent;
|
||||
bus-width = <8>;
|
||||
clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
|
||||
status = "disabled";
|
||||
|
@ -676,6 +686,7 @@
|
|||
compatible = "brcm,sdhci-iproc-cygnus";
|
||||
reg = <0x66430000 0x100>;
|
||||
interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
|
||||
dma-coherent;
|
||||
bus-width = <8>;
|
||||
clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
|
||||
status = "disabled";
|
||||
|
|
|
@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
|
|||
static inline bool system_uses_ttbr0_pan(void)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
|
||||
!cpus_have_cap(ARM64_HAS_PAN);
|
||||
!cpus_have_const_cap(ARM64_HAS_PAN);
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
|
|
@ -44,7 +44,7 @@
|
|||
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
|
||||
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
|
||||
|
||||
#define __NR_compat_syscalls 394
|
||||
#define __NR_compat_syscalls 398
|
||||
#endif
|
||||
|
||||
#define __ARCH_WANT_SYS_CLONE
|
||||
|
|
|
@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
|
|||
__SYSCALL(__NR_preadv2, compat_sys_preadv2)
|
||||
#define __NR_pwritev2 393
|
||||
__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
|
||||
#define __NR_pkey_mprotect 394
|
||||
__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
|
||||
#define __NR_pkey_alloc 395
|
||||
__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
|
||||
#define __NR_pkey_free 396
|
||||
__SYSCALL(__NR_pkey_free, sys_pkey_free)
|
||||
#define __NR_statx 397
|
||||
__SYSCALL(__NR_statx, sys_statx)
|
||||
|
||||
/*
|
||||
* Please add new compat syscalls above this comment and update
|
||||
|
|
|
@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
|
|||
}
|
||||
|
||||
/**
|
||||
* cpu_suspend() - function to enter a low-power idle state
|
||||
* arm_cpuidle_suspend() - function to enter a low-power idle state
|
||||
* @arg: argument to pass to CPU suspend operations
|
||||
*
|
||||
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
|
||||
|
|
|
@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
|
|||
/*
|
||||
* The kernel Image should not extend across a 1GB/32MB/512MB alignment
|
||||
* boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
|
||||
* happens, increase the KASLR offset by the size of the kernel image.
|
||||
* happens, increase the KASLR offset by the size of the kernel image
|
||||
* rounded up by SWAPPER_BLOCK_SIZE.
|
||||
*/
|
||||
if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
|
||||
(((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
|
||||
offset = (offset + (u64)(_end - _text)) & mask;
|
||||
(((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
|
||||
u64 kimg_sz = _end - _text;
|
||||
offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
|
||||
& mask;
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_KASAN))
|
||||
/*
|
||||
|
|
|
@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
|
||||
unsigned long val, void *data)
|
||||
{
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static void __kprobes kprobe_handler(struct pt_regs *regs)
|
||||
{
|
||||
struct kprobe *p, *cur_kprobe;
|
||||
|
|
|
@ -162,7 +162,7 @@ void __init kasan_init(void)
|
|||
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
|
||||
|
||||
vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
|
||||
pfn_to_nid(virt_to_pfn(_text)));
|
||||
pfn_to_nid(virt_to_pfn(lm_alias(_text))));
|
||||
|
||||
/*
|
||||
* vmemmap_populate() has populated the shadow region that covers the
|
||||
|
|
|
@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
|
|||
return val;
|
||||
}
|
||||
|
||||
#define xchg(ptr, with) \
|
||||
((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))
|
||||
#define xchg(ptr, with) \
|
||||
({ \
|
||||
(__typeof__(*(ptr))) __xchg((unsigned long)(with), \
|
||||
(ptr), \
|
||||
sizeof(*(ptr))); \
|
||||
})
|
||||
|
||||
#endif /* __ASM_OPENRISC_CMPXCHG_H */
|
||||
|
|
|
@ -211,7 +211,7 @@ do { \
|
|||
case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
|
||||
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
|
||||
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
|
||||
case 8: __get_user_asm2(x, ptr, retval); \
|
||||
case 8: __get_user_asm2(x, ptr, retval); break; \
|
||||
default: (x) = __get_user_bad(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <asm/hardirq.h>
|
||||
#include <asm/delay.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
|
||||
|
||||
|
@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
|
|||
DECLARE_EXPORT(__ashrdi3);
|
||||
DECLARE_EXPORT(__ashldi3);
|
||||
DECLARE_EXPORT(__lshrdi3);
|
||||
DECLARE_EXPORT(__ucmpdi2);
|
||||
|
||||
EXPORT_SYMBOL(empty_zero_page);
|
||||
EXPORT_SYMBOL(__copy_tofrom_user);
|
||||
EXPORT_SYMBOL(__clear_user);
|
||||
EXPORT_SYMBOL(memset);
|
||||
|
|
|
@ -90,6 +90,7 @@ void arch_cpu_idle(void)
|
|||
}
|
||||
|
||||
void (*pm_power_off) (void) = machine_power_off;
|
||||
EXPORT_SYMBOL(pm_power_off);
|
||||
|
||||
/*
|
||||
* When a process does an "exec", machine state like FPU and debug
|
||||
|
|
|
@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
|
|||
|
||||
#define flush_kernel_dcache_range(start,size) \
|
||||
flush_kernel_dcache_range_asm((start), (start)+(size));
|
||||
/* vmap range flushes and invalidates. Architecturally, we don't need
|
||||
* the invalidate, because the CPU should refuse to speculate once an
|
||||
* area has been flushed, so invalidate is left empty */
|
||||
static inline void flush_kernel_vmap_range(void *vaddr, int size)
|
||||
{
|
||||
unsigned long start = (unsigned long)vaddr;
|
||||
|
||||
flush_kernel_dcache_range_asm(start, start + size);
|
||||
}
|
||||
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
|
||||
{
|
||||
unsigned long start = (unsigned long)vaddr;
|
||||
void *cursor = vaddr;
|
||||
|
||||
for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
|
||||
struct page *page = vmalloc_to_page(cursor);
|
||||
|
||||
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
|
||||
flush_kernel_dcache_page(page);
|
||||
}
|
||||
flush_kernel_dcache_range_asm(start, start + size);
|
||||
}
|
||||
void flush_kernel_vmap_range(void *vaddr, int size);
|
||||
void invalidate_kernel_vmap_range(void *vaddr, int size);
|
||||
|
||||
#define flush_cache_vmap(start, end) flush_cache_all()
|
||||
#define flush_cache_vunmap(start, end) flush_cache_all()
|
||||
|
|
|
@ -32,7 +32,8 @@
|
|||
* that put_user is the same as __put_user, etc.
|
||||
*/
|
||||
|
||||
#define access_ok(type, uaddr, size) (1)
|
||||
#define access_ok(type, uaddr, size) \
|
||||
( (uaddr) == (uaddr) )
|
||||
|
||||
#define put_user __put_user
|
||||
#define get_user __get_user
|
||||
|
|
|
@ -362,8 +362,9 @@
|
|||
#define __NR_copy_file_range (__NR_Linux + 346)
|
||||
#define __NR_preadv2 (__NR_Linux + 347)
|
||||
#define __NR_pwritev2 (__NR_Linux + 348)
|
||||
#define __NR_statx (__NR_Linux + 349)
|
||||
|
||||
#define __NR_Linux_syscalls (__NR_pwritev2 + 1)
|
||||
#define __NR_Linux_syscalls (__NR_statx + 1)
|
||||
|
||||
|
||||
#define __IGNORE_select /* newselect */
|
||||
|
|
|
@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
|
|||
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
|
||||
}
|
||||
}
|
||||
|
||||
void flush_kernel_vmap_range(void *vaddr, int size)
|
||||
{
|
||||
unsigned long start = (unsigned long)vaddr;
|
||||
|
||||
if ((unsigned long)size > parisc_cache_flush_threshold)
|
||||
flush_data_cache();
|
||||
else
|
||||
flush_kernel_dcache_range_asm(start, start + size);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_kernel_vmap_range);
|
||||
|
||||
void invalidate_kernel_vmap_range(void *vaddr, int size)
|
||||
{
|
||||
unsigned long start = (unsigned long)vaddr;
|
||||
|
||||
if ((unsigned long)size > parisc_cache_flush_threshold)
|
||||
flush_data_cache();
|
||||
else
|
||||
flush_kernel_dcache_range_asm(start, start + size);
|
||||
}
|
||||
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
|
||||
|
|
|
@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
|
|||
*/
|
||||
*loc = fsel(val, addend);
|
||||
break;
|
||||
case R_PARISC_SECREL32:
|
||||
/* 32-bit section relative address. */
|
||||
*loc = fsel(val, addend);
|
||||
break;
|
||||
case R_PARISC_DPREL21L:
|
||||
/* left 21 bit of relative address */
|
||||
val = lrsel(val - dp, addend);
|
||||
|
@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
|
|||
*/
|
||||
*loc = fsel(val, addend);
|
||||
break;
|
||||
case R_PARISC_SECREL32:
|
||||
/* 32-bit section relative address. */
|
||||
*loc = fsel(val, addend);
|
||||
break;
|
||||
case R_PARISC_FPTR64:
|
||||
/* 64-bit function address */
|
||||
if(in_local(me, (void *)(val + addend))) {
|
||||
|
|
|
@ -39,7 +39,7 @@
|
|||
* the PDC INTRIGUE calls. This is done to eliminate bugs introduced
|
||||
* in various PDC revisions. The code is much more maintainable
|
||||
* and reliable this way vs having to debug on every version of PDC
|
||||
* on every box.
|
||||
* on every box.
|
||||
*/
|
||||
|
||||
#include <linux/capability.h>
|
||||
|
@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
|
|||
static int perf_release(struct inode *inode, struct file *file);
|
||||
static int perf_open(struct inode *inode, struct file *file);
|
||||
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
|
||||
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
|
||||
loff_t *ppos);
|
||||
static ssize_t perf_write(struct file *file, const char __user *buf,
|
||||
size_t count, loff_t *ppos);
|
||||
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
|
||||
static void perf_start_counters(void);
|
||||
static int perf_stop_counters(uint32_t *raddr);
|
||||
|
@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
|
|||
/*
|
||||
* configure:
|
||||
*
|
||||
* Configure the cpu with a given data image. First turn off the counters,
|
||||
* Configure the cpu with a given data image. First turn off the counters,
|
||||
* then download the image, then turn the counters back on.
|
||||
*/
|
||||
static int perf_config(uint32_t *image_ptr)
|
||||
|
@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
|
|||
error = perf_stop_counters(raddr);
|
||||
if (error != 0) {
|
||||
printk("perf_config: perf_stop_counters = %ld\n", error);
|
||||
return -EINVAL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
printk("Preparing to write image\n");
|
||||
|
@ -242,7 +242,7 @@ printk("Preparing to write image\n");
|
|||
error = perf_write_image((uint64_t *)image_ptr);
|
||||
if (error != 0) {
|
||||
printk("perf_config: DOWNLOAD = %ld\n", error);
|
||||
return -EINVAL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
printk("Preparing to start counters\n");
|
||||
|
@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
|
|||
}
|
||||
|
||||
/*
|
||||
* Open the device and initialize all of its memory. The device is only
|
||||
* Open the device and initialize all of its memory. The device is only
|
||||
* opened once, but can be "queried" by multiple processes that know its
|
||||
* file descriptor.
|
||||
*/
|
||||
|
@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
|
|||
* called on the processor that the download should happen
|
||||
* on.
|
||||
*/
|
||||
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
|
||||
loff_t *ppos)
|
||||
static ssize_t perf_write(struct file *file, const char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
size_t image_size;
|
||||
uint32_t image_type;
|
||||
uint32_t interface_type;
|
||||
uint32_t test;
|
||||
|
||||
if (perf_processor_interface == ONYX_INTF)
|
||||
if (perf_processor_interface == ONYX_INTF)
|
||||
image_size = PCXU_IMAGE_SIZE;
|
||||
else if (perf_processor_interface == CUDA_INTF)
|
||||
else if (perf_processor_interface == CUDA_INTF)
|
||||
image_size = PCXW_IMAGE_SIZE;
|
||||
else
|
||||
else
|
||||
return -EFAULT;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
|
@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
|
|||
|
||||
/* First check the machine type is correct for
|
||||
the requested image */
|
||||
if (((perf_processor_interface == CUDA_INTF) &&
|
||||
(interface_type != CUDA_INTF)) ||
|
||||
((perf_processor_interface == ONYX_INTF) &&
|
||||
(interface_type != ONYX_INTF)))
|
||||
if (((perf_processor_interface == CUDA_INTF) &&
|
||||
(interface_type != CUDA_INTF)) ||
|
||||
((perf_processor_interface == ONYX_INTF) &&
|
||||
(interface_type != ONYX_INTF)))
|
||||
return -EINVAL;
|
||||
|
||||
/* Next check to make sure the requested image
|
||||
is valid */
|
||||
if (((interface_type == CUDA_INTF) &&
|
||||
if (((interface_type == CUDA_INTF) &&
|
||||
(test >= MAX_CUDA_IMAGES)) ||
|
||||
((interface_type == ONYX_INTF) &&
|
||||
(test >= MAX_ONYX_IMAGES)))
|
||||
((interface_type == ONYX_INTF) &&
|
||||
(test >= MAX_ONYX_IMAGES)))
|
||||
return -EINVAL;
|
||||
|
||||
/* Copy the image into the processor */
|
||||
if (interface_type == CUDA_INTF)
|
||||
if (interface_type == CUDA_INTF)
|
||||
return perf_config(cuda_images[test]);
|
||||
else
|
||||
return perf_config(onyx_images[test]);
|
||||
|
@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
|
|||
static void perf_patch_images(void)
|
||||
{
|
||||
#if 0 /* FIXME!! */
|
||||
/*
|
||||
/*
|
||||
* NOTE: this routine is VERY specific to the current TLB image.
|
||||
* If the image is changed, this routine might also need to be changed.
|
||||
*/
|
||||
|
@ -367,9 +367,9 @@ static void perf_patch_images(void)
|
|||
extern void $i_dtlb_miss_2_0();
|
||||
extern void PA2_0_iva();
|
||||
|
||||
/*
|
||||
/*
|
||||
* We can only use the lower 32-bits, the upper 32-bits should be 0
|
||||
* anyway given this is in the kernel
|
||||
* anyway given this is in the kernel
|
||||
*/
|
||||
uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
|
||||
uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
|
||||
|
@ -377,21 +377,21 @@ static void perf_patch_images(void)
|
|||
|
||||
if (perf_processor_interface == ONYX_INTF) {
|
||||
/* clear last 2 bytes */
|
||||
onyx_images[TLBMISS][15] &= 0xffffff00;
|
||||
onyx_images[TLBMISS][15] &= 0xffffff00;
|
||||
/* set 2 bytes */
|
||||
onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
|
||||
onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
|
||||
onyx_images[TLBMISS][17] = itlb_addr;
|
||||
|
||||
/* clear last 2 bytes */
|
||||
onyx_images[TLBHANDMISS][15] &= 0xffffff00;
|
||||
onyx_images[TLBHANDMISS][15] &= 0xffffff00;
|
||||
/* set 2 bytes */
|
||||
onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
|
||||
onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
|
||||
onyx_images[TLBHANDMISS][17] = itlb_addr;
|
||||
|
||||
/* clear last 2 bytes */
|
||||
onyx_images[BIG_CPI][15] &= 0xffffff00;
|
||||
onyx_images[BIG_CPI][15] &= 0xffffff00;
|
||||
/* set 2 bytes */
|
||||
onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
|
||||
onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
|
||||
|
@ -404,24 +404,24 @@ static void perf_patch_images(void)
|
|||
|
||||
} else if (perf_processor_interface == CUDA_INTF) {
|
||||
/* Cuda interface */
|
||||
cuda_images[TLBMISS][16] =
|
||||
cuda_images[TLBMISS][16] =
|
||||
(cuda_images[TLBMISS][16]&0xffff0000) |
|
||||
((dtlb_addr >> 8)&0x0000ffff);
|
||||
cuda_images[TLBMISS][17] =
|
||||
cuda_images[TLBMISS][17] =
|
||||
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
|
||||
cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
|
||||
|
||||
cuda_images[TLBHANDMISS][16] =
|
||||
cuda_images[TLBHANDMISS][16] =
|
||||
(cuda_images[TLBHANDMISS][16]&0xffff0000) |
|
||||
((dtlb_addr >> 8)&0x0000ffff);
|
||||
cuda_images[TLBHANDMISS][17] =
|
||||
cuda_images[TLBHANDMISS][17] =
|
||||
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
|
||||
cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
|
||||
|
||||
cuda_images[BIG_CPI][16] =
|
||||
cuda_images[BIG_CPI][16] =
|
||||
(cuda_images[BIG_CPI][16]&0xffff0000) |
|
||||
((dtlb_addr >> 8)&0x0000ffff);
|
||||
cuda_images[BIG_CPI][17] =
|
||||
cuda_images[BIG_CPI][17] =
|
||||
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
|
||||
cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
|
||||
} else {
|
||||
|
@ -433,7 +433,7 @@ static void perf_patch_images(void)
|
|||
|
||||
/*
|
||||
* ioctl routine
|
||||
* All routines effect the processor that they are executed on. Thus you
|
||||
* All routines effect the processor that they are executed on. Thus you
|
||||
* must be running on the processor that you wish to change.
|
||||
*/
|
||||
|
||||
|
@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|||
}
|
||||
|
||||
/* copy out the Counters */
|
||||
if (copy_to_user((void __user *)arg, raddr,
|
||||
if (copy_to_user((void __user *)arg, raddr,
|
||||
sizeof (raddr)) != 0) {
|
||||
error = -EFAULT;
|
||||
break;
|
||||
|
@ -487,7 +487,7 @@ static const struct file_operations perf_fops = {
|
|||
.open = perf_open,
|
||||
.release = perf_release
|
||||
};
|
||||
|
||||
|
||||
static struct miscdevice perf_dev = {
|
||||
MISC_DYNAMIC_MINOR,
|
||||
PA_PERF_DEV,
|
||||
|
@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
|
|||
/* OR sticky2 (bit 1496) to counter2 bit 32 */
|
||||
tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
|
||||
raddr[2] = (uint32_t)tmp64;
|
||||
|
||||
|
||||
/* Counter3 is bits 1497 to 1528 */
|
||||
tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
|
||||
/* OR sticky3 (bit 1529) to counter3 bit 32 */
|
||||
|
@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
|
|||
userbuf[22] = 0;
|
||||
userbuf[23] = 0;
|
||||
|
||||
/*
|
||||
/*
|
||||
* Write back the zeroed bytes + the image given
|
||||
* the read was destructive.
|
||||
*/
|
||||
|
@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
|
|||
} else {
|
||||
|
||||
/*
|
||||
* Read RDR-15 which contains the counters and sticky bits
|
||||
* Read RDR-15 which contains the counters and sticky bits
|
||||
*/
|
||||
if (!perf_rdr_read_ubuf(15, userbuf)) {
|
||||
return -13;
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
* Clear out the counters
|
||||
*/
|
||||
perf_rdr_clear(15);
|
||||
|
@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
|
|||
raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
|
||||
raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
|
||||
}
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
|
|||
i = tentry->num_words;
|
||||
while (i--) {
|
||||
buffer[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check for bits an even number of 64 */
|
||||
if ((xbits = width & 0x03f) != 0) {
|
||||
|
@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
|
|||
}
|
||||
|
||||
runway = ioremap_nocache(cpu_device->hpa.start, 4096);
|
||||
if (!runway) {
|
||||
pr_err("perf_write_image: ioremap failed!\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Merge intrigue bits into Runway STATUS 0 */
|
||||
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
|
||||
__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
|
||||
__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
|
||||
runway + RUNWAY_STATUS);
|
||||
|
||||
|
||||
/* Write RUNWAY DEBUG registers */
|
||||
for (i = 0; i < 8; i++) {
|
||||
__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
|
||||
}
|
||||
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -843,7 +847,7 @@ printk("perf_rdr_write\n");
|
|||
perf_rdr_shift_out_U(rdr_num, buffer[i]);
|
||||
} else {
|
||||
perf_rdr_shift_out_W(rdr_num, buffer[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
printk("perf_rdr_write done\n");
|
||||
}
|
||||
|
|
|
@ -142,6 +142,8 @@ void machine_power_off(void)
|
|||
|
||||
printk(KERN_EMERG "System shut down completed.\n"
|
||||
"Please power this system off now.");
|
||||
|
||||
for (;;);
|
||||
}
|
||||
|
||||
void (*pm_power_off)(void) = machine_power_off;
|
||||
|
|
|
@ -444,6 +444,7 @@
|
|||
ENTRY_SAME(copy_file_range)
|
||||
ENTRY_COMP(preadv2)
|
||||
ENTRY_COMP(pwritev2)
|
||||
ENTRY_SAME(statx)
|
||||
|
||||
|
||||
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
|
||||
|
|
|
@ -68,6 +68,7 @@ SECTIONS
|
|||
}
|
||||
|
||||
#ifdef CONFIG_PPC64_BOOT_WRAPPER
|
||||
. = ALIGN(256);
|
||||
.got :
|
||||
{
|
||||
__toc_start = .;
|
||||
|
|
|
@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
|
|||
{
|
||||
u32 *key = crypto_tfm_ctx(tfm);
|
||||
|
||||
*key = 0;
|
||||
*key = ~0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -51,6 +51,10 @@
|
|||
#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
|
||||
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
|
||||
|
||||
/* Put a PPC bit into a "normal" bit position */
|
||||
#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
|
||||
((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
|
||||
|
||||
#include <asm/barrier.h>
|
||||
|
||||
/* Macro for generating the ***_bits() functions */
|
||||
|
|
|
@ -66,6 +66,55 @@
|
|||
|
||||
#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
|
||||
P8_DSISR_MC_ERAT_MULTIHIT_SEC)
|
||||
|
||||
/*
|
||||
* Machine Check bits on power9
|
||||
*/
|
||||
#define P9_SRR1_MC_LOADSTORE(srr1) (((srr1) >> PPC_BITLSHIFT(42)) & 1)
|
||||
|
||||
#define P9_SRR1_MC_IFETCH(srr1) ( \
|
||||
PPC_BITEXTRACT(srr1, 45, 0) | \
|
||||
PPC_BITEXTRACT(srr1, 44, 1) | \
|
||||
PPC_BITEXTRACT(srr1, 43, 2) | \
|
||||
PPC_BITEXTRACT(srr1, 36, 3) )
|
||||
|
||||
/* 0 is reserved */
|
||||
#define P9_SRR1_MC_IFETCH_UE 1
|
||||
#define P9_SRR1_MC_IFETCH_SLB_PARITY 2
|
||||
#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT 3
|
||||
#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT 4
|
||||
#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT 5
|
||||
#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD 6
|
||||
/* 7 is reserved */
|
||||
#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT 8
|
||||
#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT 9
|
||||
/* 10 ? */
|
||||
#define P9_SRR1_MC_IFETCH_RA 11
|
||||
#define P9_SRR1_MC_IFETCH_RA_TABLEWALK 12
|
||||
#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE 13
|
||||
#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT 14
|
||||
#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN 15
|
||||
|
||||
/* DSISR bits for machine check (On Power9) */
|
||||
#define P9_DSISR_MC_UE (PPC_BIT(48))
|
||||
#define P9_DSISR_MC_UE_TABLEWALK (PPC_BIT(49))
|
||||
#define P9_DSISR_MC_LINK_LOAD_TIMEOUT (PPC_BIT(50))
|
||||
#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT (PPC_BIT(51))
|
||||
#define P9_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52))
|
||||
#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53))
|
||||
#define P9_DSISR_MC_USER_TLBIE (PPC_BIT(54))
|
||||
#define P9_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55))
|
||||
#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB (PPC_BIT(56))
|
||||
#define P9_DSISR_MC_RA_LOAD (PPC_BIT(57))
|
||||
#define P9_DSISR_MC_RA_TABLEWALK (PPC_BIT(58))
|
||||
#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN (PPC_BIT(59))
|
||||
#define P9_DSISR_MC_RA_FOREIGN (PPC_BIT(60))
|
||||
|
||||
/* SLB error bits */
|
||||
#define P9_DSISR_MC_SLB_ERRORS (P9_DSISR_MC_ERAT_MULTIHIT | \
|
||||
P9_DSISR_MC_SLB_PARITY_MFSLB | \
|
||||
P9_DSISR_MC_SLB_MULTIHIT_MFSLB)
|
||||
|
||||
enum MCE_Version {
|
||||
MCE_V1 = 1,
|
||||
};
|
||||
|
@ -93,6 +142,9 @@ enum MCE_ErrorType {
|
|||
MCE_ERROR_TYPE_SLB = 2,
|
||||
MCE_ERROR_TYPE_ERAT = 3,
|
||||
MCE_ERROR_TYPE_TLB = 4,
|
||||
MCE_ERROR_TYPE_USER = 5,
|
||||
MCE_ERROR_TYPE_RA = 6,
|
||||
MCE_ERROR_TYPE_LINK = 7,
|
||||
};
|
||||
|
||||
enum MCE_UeErrorType {
|
||||
|
@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
|
|||
MCE_TLB_ERROR_MULTIHIT = 2,
|
||||
};
|
||||
|
||||
enum MCE_UserErrorType {
|
||||
MCE_USER_ERROR_INDETERMINATE = 0,
|
||||
MCE_USER_ERROR_TLBIE = 1,
|
||||
};
|
||||
|
||||
enum MCE_RaErrorType {
|
||||
MCE_RA_ERROR_INDETERMINATE = 0,
|
||||
MCE_RA_ERROR_IFETCH = 1,
|
||||
MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
|
||||
MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
|
||||
MCE_RA_ERROR_LOAD = 4,
|
||||
MCE_RA_ERROR_STORE = 5,
|
||||
MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
|
||||
MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
|
||||
MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
|
||||
};
|
||||
|
||||
enum MCE_LinkErrorType {
|
||||
MCE_LINK_ERROR_INDETERMINATE = 0,
|
||||
MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
|
||||
MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
|
||||
MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
|
||||
MCE_LINK_ERROR_STORE_TIMEOUT = 4,
|
||||
MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
|
||||
};
|
||||
|
||||
struct machine_check_event {
|
||||
enum MCE_Version version:8; /* 0x00 */
|
||||
uint8_t in_use; /* 0x01 */
|
||||
|
@ -166,6 +244,30 @@ struct machine_check_event {
|
|||
uint64_t effective_address;
|
||||
uint8_t reserved_2[16];
|
||||
} tlb_error;
|
||||
|
||||
struct {
|
||||
enum MCE_UserErrorType user_error_type:8;
|
||||
uint8_t effective_address_provided;
|
||||
uint8_t reserved_1[6];
|
||||
uint64_t effective_address;
|
||||
uint8_t reserved_2[16];
|
||||
} user_error;
|
||||
|
||||
struct {
|
||||
enum MCE_RaErrorType ra_error_type:8;
|
||||
uint8_t effective_address_provided;
|
||||
uint8_t reserved_1[6];
|
||||
uint64_t effective_address;
|
||||
uint8_t reserved_2[16];
|
||||
} ra_error;
|
||||
|
||||
struct {
|
||||
enum MCE_LinkErrorType link_error_type:8;
|
||||
uint8_t effective_address_provided;
|
||||
uint8_t reserved_1[6];
|
||||
uint64_t effective_address;
|
||||
uint8_t reserved_2[16];
|
||||
} link_error;
|
||||
} u;
|
||||
};
|
||||
|
||||
|
@ -176,8 +278,12 @@ struct mce_error_info {
|
|||
enum MCE_SlbErrorType slb_error_type:8;
|
||||
enum MCE_EratErrorType erat_error_type:8;
|
||||
enum MCE_TlbErrorType tlb_error_type:8;
|
||||
enum MCE_UserErrorType user_error_type:8;
|
||||
enum MCE_RaErrorType ra_error_type:8;
|
||||
enum MCE_LinkErrorType link_error_type:8;
|
||||
} u;
|
||||
uint8_t reserved[2];
|
||||
enum MCE_Severity severity:8;
|
||||
enum MCE_Initiator initiator:8;
|
||||
};
|
||||
|
||||
#define MAX_MC_EVT 100
|
||||
|
|
|
@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
|
|||
COMPAT_SYS_SPU(preadv2)
|
||||
COMPAT_SYS_SPU(pwritev2)
|
||||
SYSCALL(kexec_file_load)
|
||||
SYSCALL(statx)
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <uapi/asm/unistd.h>
|
||||
|
||||
|
||||
#define NR_syscalls 383
|
||||
#define NR_syscalls 384
|
||||
|
||||
#define __NR__exit __NR_exit
|
||||
|
||||
|
|
|
@ -393,5 +393,6 @@
|
|||
#define __NR_preadv2 380
|
||||
#define __NR_pwritev2 381
|
||||
#define __NR_kexec_file_load 382
|
||||
#define __NR_statx 383
|
||||
|
||||
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
|
||||
|
|
|
@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
|
|||
extern void __flush_tlb_power9(unsigned int action);
|
||||
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
|
||||
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
|
||||
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
|
||||
#endif /* CONFIG_PPC64 */
|
||||
#if defined(CONFIG_E500)
|
||||
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
|
||||
|
@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
|
|||
.cpu_setup = __setup_cpu_power9,
|
||||
.cpu_restore = __restore_cpu_power9,
|
||||
.flush_tlb = __flush_tlb_power9,
|
||||
.machine_check_early = __machine_check_early_realmode_p9,
|
||||
.platform = "power9",
|
||||
},
|
||||
{ /* Power9 */
|
||||
|
@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
|
|||
.cpu_setup = __setup_cpu_power9,
|
||||
.cpu_restore = __restore_cpu_power9,
|
||||
.flush_tlb = __flush_tlb_power9,
|
||||
.machine_check_early = __machine_check_early_realmode_p9,
|
||||
.platform = "power9",
|
||||
},
|
||||
{ /* Cell Broadband Engine */
|
||||
|
|
|
@ -449,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
_GLOBAL(pnv_wakeup_tb_loss)
	ld	r1,PACAR1(r13)
	/*
	 * Before entering any idle state, the NVGPRs are saved in the stack
	 * and they are restored before switching to the process context. Hence
	 * until they are restored, they are free to be used.
	 * Before entering any idle state, the NVGPRs are saved in the stack.
	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
	 * NVGPRs are restored. If we are here, it is likely that state is lost,
	 * but not guaranteed -- neither ISA207 nor ISA300 tests to reach
	 * here are the same as the test to restore NVGPRS:
	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
	 * and SRR1 test for restoring NVGPRs.
	 *
	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
	 * guarantee they will always be restored. This might be tightened
	 * with careful reading of specs (particularly for ISA300) but this
	 * is already a slow wakeup path and it's simpler to be safe.
	 */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/*
	 *
	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required

@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
|
|||
case MCE_ERROR_TYPE_TLB:
|
||||
mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_USER:
|
||||
mce->u.user_error.user_error_type = mce_err->u.user_error_type;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_RA:
|
||||
mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_LINK:
|
||||
mce->u.link_error.link_error_type = mce_err->u.link_error_type;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_UNKNOWN:
|
||||
default:
|
||||
break;
|
||||
|
@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
|
|||
mce->gpr3 = regs->gpr[3];
|
||||
mce->in_use = 1;
|
||||
|
||||
mce->initiator = MCE_INITIATOR_CPU;
|
||||
/* Mark it recovered if we have handled it and MSR(RI=1). */
|
||||
if (handled && (regs->msr & MSR_RI))
|
||||
mce->disposition = MCE_DISPOSITION_RECOVERED;
|
||||
else
|
||||
mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
|
||||
mce->severity = MCE_SEV_ERROR_SYNC;
|
||||
|
||||
mce->initiator = mce_err->initiator;
|
||||
mce->severity = mce_err->severity;
|
||||
|
||||
/*
|
||||
* Populate the mce error_type and type-specific error_type.
|
||||
|
@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
|
|||
} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
|
||||
mce->u.erat_error.effective_address_provided = true;
|
||||
mce->u.erat_error.effective_address = addr;
|
||||
} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
|
||||
mce->u.user_error.effective_address_provided = true;
|
||||
mce->u.user_error.effective_address = addr;
|
||||
} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
|
||||
mce->u.ra_error.effective_address_provided = true;
|
||||
mce->u.ra_error.effective_address = addr;
|
||||
} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
|
||||
mce->u.link_error.effective_address_provided = true;
|
||||
mce->u.link_error.effective_address = addr;
|
||||
} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
|
||||
mce->u.ue_error.effective_address_provided = true;
|
||||
mce->u.ue_error.effective_address = addr;
|
||||
|
@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
|
|||
"Parity",
|
||||
"Multihit",
|
||||
};
|
||||
static const char *mc_user_types[] = {
|
||||
"Indeterminate",
|
||||
"tlbie(l) invalid",
|
||||
};
|
||||
static const char *mc_ra_types[] = {
|
||||
"Indeterminate",
|
||||
"Instruction fetch (bad)",
|
||||
"Page table walk ifetch (bad)",
|
||||
"Page table walk ifetch (foreign)",
|
||||
"Load (bad)",
|
||||
"Store (bad)",
|
||||
"Page table walk Load/Store (bad)",
|
||||
"Page table walk Load/Store (foreign)",
|
||||
"Load/Store (foreign)",
|
||||
};
|
||||
static const char *mc_link_types[] = {
|
||||
"Indeterminate",
|
||||
"Instruction fetch (timeout)",
|
||||
"Page table walk ifetch (timeout)",
|
||||
"Load (timeout)",
|
||||
"Store (timeout)",
|
||||
"Page table walk Load/Store (timeout)",
|
||||
};
|
||||
|
||||
/* Print things out */
|
||||
if (evt->version != MCE_V1) {
|
||||
|
@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
|
|||
printk("%s Effective address: %016llx\n",
|
||||
level, evt->u.tlb_error.effective_address);
|
||||
break;
|
||||
case MCE_ERROR_TYPE_USER:
|
||||
subtype = evt->u.user_error.user_error_type <
|
||||
ARRAY_SIZE(mc_user_types) ?
|
||||
mc_user_types[evt->u.user_error.user_error_type]
|
||||
: "Unknown";
|
||||
printk("%s Error type: User [%s]\n", level, subtype);
|
||||
if (evt->u.user_error.effective_address_provided)
|
||||
printk("%s Effective address: %016llx\n",
|
||||
level, evt->u.user_error.effective_address);
|
||||
break;
|
||||
case MCE_ERROR_TYPE_RA:
|
||||
subtype = evt->u.ra_error.ra_error_type <
|
||||
ARRAY_SIZE(mc_ra_types) ?
|
||||
mc_ra_types[evt->u.ra_error.ra_error_type]
|
||||
: "Unknown";
|
||||
printk("%s Error type: Real address [%s]\n", level, subtype);
|
||||
if (evt->u.ra_error.effective_address_provided)
|
||||
printk("%s Effective address: %016llx\n",
|
||||
level, evt->u.ra_error.effective_address);
|
||||
break;
|
||||
case MCE_ERROR_TYPE_LINK:
|
||||
subtype = evt->u.link_error.link_error_type <
|
||||
ARRAY_SIZE(mc_link_types) ?
|
||||
mc_link_types[evt->u.link_error.link_error_type]
|
||||
: "Unknown";
|
||||
printk("%s Error type: Link [%s]\n", level, subtype);
|
||||
if (evt->u.link_error.effective_address_provided)
|
||||
printk("%s Effective address: %016llx\n",
|
||||
level, evt->u.link_error.effective_address);
|
||||
break;
|
||||
default:
|
||||
case MCE_ERROR_TYPE_UNKNOWN:
|
||||
printk("%s Error type: Unknown\n", level);
|
||||
|
@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
|
|||
if (evt->u.tlb_error.effective_address_provided)
|
||||
return evt->u.tlb_error.effective_address;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_USER:
|
||||
if (evt->u.user_error.effective_address_provided)
|
||||
return evt->u.user_error.effective_address;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_RA:
|
||||
if (evt->u.ra_error.effective_address_provided)
|
||||
return evt->u.ra_error.effective_address;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_LINK:
|
||||
if (evt->u.link_error.effective_address_provided)
|
||||
return evt->u.link_error.effective_address;
|
||||
break;
|
||||
default:
|
||||
case MCE_ERROR_TYPE_UNKNOWN:
|
||||
break;
|
||||
|
|
|
@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
static void flush_erat(void)
|
||||
{
|
||||
asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
|
||||
}
|
||||
|
||||
#define MCE_FLUSH_SLB 1
|
||||
#define MCE_FLUSH_TLB 2
|
||||
#define MCE_FLUSH_ERAT 3
|
||||
|
||||
static int mce_flush(int what)
|
||||
{
|
||||
#ifdef CONFIG_PPC_STD_MMU_64
|
||||
if (what == MCE_FLUSH_SLB) {
|
||||
flush_and_reload_slb();
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
if (what == MCE_FLUSH_ERAT) {
|
||||
flush_erat();
|
||||
return 1;
|
||||
}
|
||||
if (what == MCE_FLUSH_TLB) {
|
||||
if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
|
||||
cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
|
||||
{
|
||||
if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
|
||||
dsisr &= ~slb;
|
||||
if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
|
||||
dsisr &= ~erat;
|
||||
if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
|
||||
dsisr &= ~tlb;
|
||||
/* Any other errors we don't understand? */
|
||||
if (dsisr)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
|
||||
{
|
||||
long handled = 1;
|
||||
|
@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
|
|||
long handled = 1;
|
||||
struct mce_error_info mce_error_info = { 0 };
|
||||
|
||||
mce_error_info.severity = MCE_SEV_ERROR_SYNC;
|
||||
mce_error_info.initiator = MCE_INITIATOR_CPU;
|
||||
|
||||
srr1 = regs->msr;
|
||||
nip = regs->nip;
|
||||
|
||||
|
@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
|
|||
long handled = 1;
|
||||
struct mce_error_info mce_error_info = { 0 };
|
||||
|
||||
mce_error_info.severity = MCE_SEV_ERROR_SYNC;
|
||||
mce_error_info.initiator = MCE_INITIATOR_CPU;
|
||||
|
||||
srr1 = regs->msr;
|
||||
nip = regs->nip;
|
||||
|
||||
|
@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
|
|||
save_mce_event(regs, handled, &mce_error_info, nip, addr);
|
||||
return handled;
|
||||
}
|
||||
|
||||
static int mce_handle_derror_p9(struct pt_regs *regs)
|
||||
{
|
||||
uint64_t dsisr = regs->dsisr;
|
||||
|
||||
return mce_handle_flush_derrors(dsisr,
|
||||
P9_DSISR_MC_SLB_PARITY_MFSLB |
|
||||
P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
|
||||
|
||||
P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
|
||||
|
||||
P9_DSISR_MC_ERAT_MULTIHIT);
|
||||
}
|
||||
|
||||
static int mce_handle_ierror_p9(struct pt_regs *regs)
|
||||
{
|
||||
uint64_t srr1 = regs->msr;
|
||||
|
||||
switch (P9_SRR1_MC_IFETCH(srr1)) {
|
||||
case P9_SRR1_MC_IFETCH_SLB_PARITY:
|
||||
case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
|
||||
return mce_flush(MCE_FLUSH_SLB);
|
||||
case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
|
||||
return mce_flush(MCE_FLUSH_TLB);
|
||||
case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
|
||||
return mce_flush(MCE_FLUSH_ERAT);
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static void mce_get_derror_p9(struct pt_regs *regs,
|
||||
struct mce_error_info *mce_err, uint64_t *addr)
|
||||
{
|
||||
uint64_t dsisr = regs->dsisr;
|
||||
|
||||
mce_err->severity = MCE_SEV_ERROR_SYNC;
|
||||
mce_err->initiator = MCE_INITIATOR_CPU;
|
||||
|
||||
if (dsisr & P9_DSISR_MC_USER_TLBIE)
|
||||
*addr = regs->nip;
|
||||
else
|
||||
*addr = regs->dar;
|
||||
|
||||
if (dsisr & P9_DSISR_MC_UE) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_UE;
|
||||
mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
|
||||
} else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_UE;
|
||||
mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
|
||||
} else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_LINK;
|
||||
mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
|
||||
} else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_LINK;
|
||||
mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
|
||||
} else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_ERAT;
|
||||
mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
|
||||
} else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_TLB;
|
||||
mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
|
||||
} else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_USER;
|
||||
mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
|
||||
} else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_SLB;
|
||||
mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
|
||||
} else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_SLB;
|
||||
mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
|
||||
} else if (dsisr & P9_DSISR_MC_RA_LOAD) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
|
||||
} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
|
||||
} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
|
||||
} else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
|
||||
}
|
||||
}
|
||||
|
||||
static void mce_get_ierror_p9(struct pt_regs *regs,
|
||||
struct mce_error_info *mce_err, uint64_t *addr)
|
||||
{
|
||||
uint64_t srr1 = regs->msr;
|
||||
|
||||
switch (P9_SRR1_MC_IFETCH(srr1)) {
|
||||
case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
|
||||
case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
|
||||
mce_err->severity = MCE_SEV_FATAL;
|
||||
break;
|
||||
default:
|
||||
mce_err->severity = MCE_SEV_ERROR_SYNC;
|
||||
break;
|
||||
}
|
||||
|
||||
mce_err->initiator = MCE_INITIATOR_CPU;
|
||||
|
||||
*addr = regs->nip;
|
||||
|
||||
switch (P9_SRR1_MC_IFETCH(srr1)) {
|
||||
case P9_SRR1_MC_IFETCH_UE:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_UE;
|
||||
mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_SLB_PARITY:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_SLB;
|
||||
mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_SLB;
|
||||
mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_ERAT;
|
||||
mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_TLB;
|
||||
mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_UE;
|
||||
mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_LINK;
|
||||
mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_LINK;
|
||||
mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_RA:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_LINK;
|
||||
mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
long __machine_check_early_realmode_p9(struct pt_regs *regs)
|
||||
{
|
||||
uint64_t nip, addr;
|
||||
long handled;
|
||||
struct mce_error_info mce_error_info = { 0 };
|
||||
|
||||
nip = regs->nip;
|
||||
|
||||
if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
|
||||
handled = mce_handle_derror_p9(regs);
|
||||
mce_get_derror_p9(regs, &mce_error_info, &addr);
|
||||
} else {
|
||||
handled = mce_handle_ierror_p9(regs);
|
||||
mce_get_ierror_p9(regs, &mce_error_info, &addr);
|
||||
}
|
||||
|
||||
/* Handle UE error. */
|
||||
if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
|
||||
handled = mce_handle_ue_error(regs);
|
||||
|
||||
save_mce_event(regs, handled, &mce_error_info, nip, addr);
|
||||
return handled;
|
||||
}
|
||||
|
|
|
@ -397,8 +397,7 @@ static void early_check_vec5(void)
void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	/* We don't yet have the machinery to do radix as a guest. */
	if (disable_radix || !(mfmsr() & MSR_HV))
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*

@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
|
|||
sdsync = POWER7P_MMCRA_SDAR_VALID;
|
||||
else if (ppmu->flags & PPMU_ALT_SIPR)
|
||||
sdsync = POWER6_MMCRA_SDSYNC;
|
||||
else if (ppmu->flags & PPMU_NO_SIAR)
|
||||
sdsync = MMCRA_SAMPLE_ENABLE;
|
||||
else
|
||||
sdsync = MMCRA_SDSYNC;
|
||||
|
||||
|
|
|
@ -65,12 +65,41 @@ static bool is_event_valid(u64 event)
	return !(event & ~valid_mask);
}

static u64 mmcra_sdar_mode(u64 event)
static inline bool is_event_marked(u64 event)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
		return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
	if (event & EVENT_IS_MARKED)
		return true;

	return MMCRA_SDAR_MODE_TLB;
	return false;
}

static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
{
	/*
	 * MMCRA[SDAR_MODE] specifices how the SDAR should be updated in
	 * continous sampling mode.
	 *
	 * Incase of Power8:
	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling
	 * mode and will be un-changed when setting MMCRA[63] (Marked events).
	 *
	 * Incase of Power9:
	 * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
	 *               or if group already have any marked events.
	 * Non-Marked events (for DD1):
	 *               MMCRA[SDAR_MODE] will be set to 0b01
	 * For rest
	 *               MMCRA[SDAR_MODE] will be set from event code.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
			*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
		else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
			*mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
		else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			*mmcra |= MMCRA_SDAR_MODE_TLB;
	} else
		*mmcra |= MMCRA_SDAR_MODE_TLB;
}

static u64 thresh_cmp_val(u64 value)

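One detail worth calling out: MMCRA_SDAR_MODE_NO_UPDATES (defined in the isa207-common.h hunk quoted later in this diff) is a ~mask rather than a field value, so it is applied with &=, while the TLB mode and the per-event p9_SDAR_MODE() bits are OR-ed in. A throwaway user-space check of that arithmetic, using only the constants quoted in this diff, might look like this:

/*
 * Toy demonstration, not kernel code: how the 2-bit SDAR_MODE field is
 * set and then cleared.  Constants copied from the isa207-common.h hunk.
 */
#include <stdio.h>
#include <stdint.h>

#define MMCRA_SDAR_MODE_SHIFT		42
#define MMCRA_SDAR_MODE_TLB		(1ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_SDAR_MODE_NO_UPDATES	~(0x3ull << MMCRA_SDAR_MODE_SHIFT)

int main(void)
{
	uint64_t mmcra = 0;

	mmcra |= MMCRA_SDAR_MODE_TLB;		/* continuous sampling: 0b01 */
	printf("SDAR_MODE = %llu\n",
	       (unsigned long long)((mmcra >> MMCRA_SDAR_MODE_SHIFT) & 0x3));

	mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;	/* marked event: back to 0b00 */
	printf("SDAR_MODE = %llu\n",
	       (unsigned long long)((mmcra >> MMCRA_SDAR_MODE_SHIFT) & 0x3));
	return 0;
}
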
@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
|
|||
value |= CNST_L1_QUAL_VAL(cache);
|
||||
}
|
||||
|
||||
if (event & EVENT_IS_MARKED) {
|
||||
if (is_event_marked(event)) {
|
||||
mask |= CNST_SAMPLE_MASK;
|
||||
value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
|
||||
}
|
||||
|
@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
|
|||
}
|
||||
|
||||
/* In continuous sampling mode, update SDAR on TLB miss */
|
||||
mmcra |= mmcra_sdar_mode(event[i]);
|
||||
mmcra_sdar_mode(event[i], &mmcra);
|
||||
|
||||
if (event[i] & EVENT_IS_L1) {
|
||||
cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
|
||||
|
@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
|
|||
mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
|
||||
}
|
||||
|
||||
if (event[i] & EVENT_IS_MARKED) {
|
||||
if (is_event_marked(event[i])) {
|
||||
mmcra |= MMCRA_SAMPLE_ENABLE;
|
||||
|
||||
val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
|
||||
|
|
|
@ -246,6 +246,7 @@
|
|||
#define MMCRA_THR_CMP_SHIFT 32
|
||||
#define MMCRA_SDAR_MODE_SHIFT 42
|
||||
#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT)
|
||||
#define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
|
||||
#define MMCRA_IFM_SHIFT 30
|
||||
|
||||
/* MMCR1 Threshold Compare bit constant for power9 */
|
||||
|
|
|
@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs,
|
|||
struct machine_check_event *evt)
|
||||
{
|
||||
int recovered = 0;
|
||||
uint64_t ea = get_mce_fault_addr(evt);
|
||||
|
||||
if (!(regs->msr & MSR_RI)) {
|
||||
/* If MSR_RI isn't set, we cannot recover */
|
||||
|
@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs,
|
|||
} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
|
||||
/* Platform corrected itself */
|
||||
recovered = 1;
|
||||
} else if (ea && !is_kernel_addr(ea)) {
|
||||
} else if (evt->severity == MCE_SEV_FATAL) {
|
||||
/* Fatal machine check */
|
||||
pr_err("Machine check interrupt is fatal\n");
|
||||
recovered = 0;
|
||||
} else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
|
||||
(user_mode(regs) && !is_global_init(current))) {
|
||||
/*
|
||||
* Faulting address is not in kernel text. We should be fine.
|
||||
* We need to find which process uses this address.
|
||||
* For now, kill the task if we have received exception when
|
||||
* in userspace.
|
||||
*
|
||||
* TODO: Queue up this address for hwpoisioning later.
|
||||
*/
|
||||
if (user_mode(regs) && !is_global_init(current)) {
|
||||
_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
|
||||
recovered = 1;
|
||||
} else
|
||||
recovered = 0;
|
||||
} else if (user_mode(regs) && !is_global_init(current) &&
|
||||
evt->severity == MCE_SEV_ERROR_SYNC) {
|
||||
/*
|
||||
* If we have received a synchronous error when in userspace
|
||||
* kill the task.
|
||||
*/
|
||||
_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
|
||||
recovered = 1;
|
||||
}
|
||||
|
|
|
@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
|
|||
}
|
||||
|
||||
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
|
||||
struct pci_bus *bus)
|
||||
struct pci_bus *bus,
|
||||
bool add_to_group)
|
||||
{
|
||||
struct pci_dev *dev;
|
||||
|
||||
list_for_each_entry(dev, &bus->devices, bus_list) {
|
||||
set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
|
||||
set_dma_offset(&dev->dev, pe->tce_bypass_base);
|
||||
iommu_add_device(&dev->dev);
|
||||
if (add_to_group)
|
||||
iommu_add_device(&dev->dev);
|
||||
|
||||
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
|
||||
pnv_ioda_setup_bus_dma(pe, dev->subordinate);
|
||||
pnv_ioda_setup_bus_dma(pe, dev->subordinate,
|
||||
add_to_group);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2191,7 +2194,7 @@ found:
|
|||
set_iommu_table_base(&pe->pdev->dev, tbl);
|
||||
iommu_add_device(&pe->pdev->dev);
|
||||
} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus);
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
|
||||
|
||||
return;
|
||||
fail:
|
||||
|
@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
|
|||
|
||||
pnv_pci_ioda2_set_bypass(pe, false);
|
||||
pnv_pci_ioda2_unset_window(&pe->table_group, 0);
|
||||
if (pe->pbus)
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
|
||||
pnv_ioda2_table_free(tbl);
|
||||
}
|
||||
|
||||
|
@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
|
|||
table_group);
|
||||
|
||||
pnv_pci_ioda2_setup_default_config(pe);
|
||||
if (pe->pbus)
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
|
||||
}
|
||||
|
||||
static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
|
||||
|
@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
|
|||
level_shift = entries_shift + 3;
|
||||
level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
|
||||
|
||||
if ((level_shift - 3) * levels + page_shift >= 60)
|
||||
return -EINVAL;
|
||||
|
||||
/* Allocate TCE table */
|
||||
addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
|
||||
levels, tce_table_size, &offset, &total_allocated);
|
||||
|
@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
|
|||
if (pe->flags & PNV_IODA_PE_DEV)
|
||||
iommu_add_device(&pe->pdev->dev);
|
||||
else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus);
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
|
|
|
@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
|
|||
mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
|
||||
mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
|
||||
mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
|
||||
mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
|
||||
|
||||
if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
|
||||
mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
|
||||
}
|
||||
|
||||
void radix_init_pseries(void)
|
||||
|
|
|
@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)

static void refresh_pce(void *ignored)
{
	if (current->mm)
		load_mm_cr4(current->mm);
	if (current->active_mm)
		load_mm_cr4(current->active_mm);
}

static void x86_pmu_event_mapped(struct perf_event *event)

@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
|
|||
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
|
||||
return;
|
||||
|
||||
/*
|
||||
* This function relies on not being called concurrently in two
|
||||
* tasks in the same mm. Otherwise one task could observe
|
||||
* perf_rdpmc_allowed > 1 and return all the way back to
|
||||
* userspace with CR4.PCE clear while another task is still
|
||||
* doing on_each_cpu_mask() to propagate CR4.PCE.
|
||||
*
|
||||
* For now, this can't happen because all callers hold mmap_sem
|
||||
* for write. If this changes, we'll need a different solution.
|
||||
*/
|
||||
lockdep_assert_held_exclusive(¤t->mm->mmap_sem);
|
||||
|
||||
if (atomic_inc_return(¤t->mm->context.perf_rdpmc_allowed) == 1)
|
||||
on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
|
||||
}
|
||||
|
|
|
@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
|
|||
*(tmp + 1) = 0;
|
||||
}
|
||||
|
||||
#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
|
||||
defined(CONFIG_PARAVIRT))
|
||||
static inline void native_pud_clear(pud_t *pudp)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void pud_clear(pud_t *pudp)
|
||||
{
|
||||
|
|
|
@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
|
|||
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
|
||||
#endif
|
||||
|
||||
#ifndef __PAGETABLE_PMD_FOLDED
|
||||
#ifndef __PAGETABLE_PUD_FOLDED
|
||||
#define pud_clear(pud) native_pud_clear(pud)
|
||||
#endif
|
||||
|
||||
|
|
|
@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction.
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
		clwb(p);
}

/*
 * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
 * iterators, so for other types (bvec & kvec) we must do a cache write-back.
 */
static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
{
	return iter_is_iovec(i) == false;
}

/**
 * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
 * @addr:	PMEM destination address

@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
	/* TODO: skip the write-back by always using non-temporal stores */
	len = copy_from_iter_nocache(addr, bytes, i);

	if (__iter_needs_pmem_wb(i))
	/*
	 * In the iovec case on x86_64 copy_from_iter_nocache() uses
	 * non-temporal stores for the bulk of the transfer, but we need
	 * to manually flush if the transfer is unaligned. A cached
	 * memory copy is used when destination or size is not naturally
	 * aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 *
	 * In the non-iovec case the entire destination needs to be
	 * flushed.
	 */
	if (iter_is_iovec(i)) {
		unsigned long flushed, dest = (unsigned long) addr;

		if (bytes < 8) {
			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
				arch_wb_cache_pmem(addr, 1);
		} else {
			if (!IS_ALIGNED(dest, 8)) {
				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
				arch_wb_cache_pmem(addr, 1);
			}

			flushed = dest - (unsigned long) addr;
			if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
				arch_wb_cache_pmem(addr + bytes - 1, 1);
		}
	} else
		arch_wb_cache_pmem(addr, bytes);

	return len;

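For a concrete feel of the rules in the comment above, the toy below re-implements only the head/tail flush decision for a given destination address and length. IS_ALIGNED() and ALIGN() are assumed to be the usual power-of-two helpers and the cache line size is assumed to be 64 bytes; neither is spelled out in this hunk, and nothing here touches real pmem.

/*
 * Toy re-implementation of the head/tail flush decision above.
 * Prints which extra flushes the kernel path would issue.
 */
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define CLFLUSH_SIZE		64UL

static void show_flushes(unsigned long dest, unsigned long bytes)
{
	unsigned long flushed = 0;

	printf("copy of %lu bytes to %#lx:\n", bytes, dest);
	if (bytes < 8) {
		if (!IS_ALIGNED(dest, 4) || bytes != 4)
			printf("  flush line at %#lx\n", dest);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			printf("  flush line at %#lx (unaligned head)\n", dest);
			flushed = ALIGN(dest, CLFLUSH_SIZE) - dest;
		}
		if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
			printf("  flush line at %#lx (ragged tail)\n",
			       dest + bytes - 1);
	}
}

int main(void)
{
	show_flushes(0x1000, 256);	/* naturally aligned: no manual flush */
	show_flushes(0x1003, 200);	/* unaligned head, ragged tail */
	return 0;
}
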
@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!enabled) {
|
||||
++disabled_cpus;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (boot_cpu_physical_apicid != -1U)
|
||||
ver = boot_cpu_apic_version;
|
||||
|
||||
cpu = __generic_processor_info(id, ver, enabled);
|
||||
cpu = generic_processor_info(id, ver);
|
||||
if (cpu >= 0)
|
||||
early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
|
||||
|
||||
|
@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void)
|
|||
#ifdef CONFIG_ACPI_HOTPLUG_CPU
|
||||
#include <acpi/processor.h>
|
||||
|
||||
int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
|
||||
static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
|
||||
{
|
||||
#ifdef CONFIG_ACPI_NUMA
|
||||
int nid;
|
||||
|
|
|
@ -2063,7 +2063,7 @@ static int allocate_logical_cpuid(int apicid)
|
|||
return nr_logical_cpuids++;
|
||||
}
|
||||
|
||||
int __generic_processor_info(int apicid, int version, bool enabled)
|
||||
int generic_processor_info(int apicid, int version)
|
||||
{
|
||||
int cpu, max = nr_cpu_ids;
|
||||
bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
|
||||
|
@ -2121,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
|
|||
if (num_processors >= nr_cpu_ids) {
|
||||
int thiscpu = max + disabled_cpus;
|
||||
|
||||
if (enabled) {
|
||||
pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
|
||||
"reached. Processor %d/0x%x ignored.\n",
|
||||
max, thiscpu, apicid);
|
||||
}
|
||||
pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
|
||||
"reached. Processor %d/0x%x ignored.\n",
|
||||
max, thiscpu, apicid);
|
||||
|
||||
disabled_cpus++;
|
||||
return -EINVAL;
|
||||
|
@ -2177,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
|
|||
apic->x86_32_early_logical_apicid(cpu);
|
||||
#endif
|
||||
set_cpu_possible(cpu, true);
|
||||
|
||||
if (enabled) {
|
||||
num_processors++;
|
||||
physid_set(apicid, phys_cpu_present_map);
|
||||
set_cpu_present(cpu, true);
|
||||
} else {
|
||||
disabled_cpus++;
|
||||
}
|
||||
physid_set(apicid, phys_cpu_present_map);
|
||||
set_cpu_present(cpu, true);
|
||||
num_processors++;
|
||||
|
||||
return cpu;
|
||||
}
|
||||
|
||||
int generic_processor_info(int apicid, int version)
|
||||
{
|
||||
return __generic_processor_info(apicid, version, true);
|
||||
}
|
||||
|
||||
int hard_smp_processor_id(void)
|
||||
{
|
||||
return read_apic_id();
|
||||
|
|
|
@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		kernfs_unbreak_active_protection(kn);
		kernfs_put(kn);
		kernfs_put(rdtgrp->kn);
		kfree(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);

@ -4,6 +4,7 @@
|
|||
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
|
||||
*/
|
||||
|
||||
#define DISABLE_BRANCH_PROFILING
|
||||
#include <linux/init.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/types.h>
|
||||
|
|
|
@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * most handlers of type NMI_UNKNOWN never return because
	 * they just assume the NMI is theirs.  Just a sanity check
	 * to manage expectations
	 * Indicate if there are multiple registrations on the
	 * internal NMI handler call chains (SERR and IO_CHECK).
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
	WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));

@ -1333,6 +1333,8 @@ static int __init init_tsc_clocksource(void)
|
|||
* the refined calibration and directly register it as a clocksource.
|
||||
*/
|
||||
if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
|
||||
if (boot_cpu_has(X86_FEATURE_ART))
|
||||
art_related_clocksource = &clocksource_tsc;
|
||||
clocksource_register_khz(&clocksource_tsc, tsc_khz);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs)
|
|||
return sizeof(*regs);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
#define GCC_REALIGN_WORDS 3
|
||||
#else
|
||||
#define GCC_REALIGN_WORDS 1
|
||||
#endif
|
||||
|
||||
static bool is_last_task_frame(struct unwind_state *state)
|
||||
{
|
||||
unsigned long bp = (unsigned long)state->bp;
|
||||
unsigned long regs = (unsigned long)task_pt_regs(state->task);
|
||||
unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
|
||||
unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;
|
||||
|
||||
/*
|
||||
* We have to check for the last task frame at two different locations
|
||||
* because gcc can occasionally decide to realign the stack pointer and
|
||||
* change the offset of the stack frame by a word in the prologue of a
|
||||
* function called by head/entry code.
|
||||
* change the offset of the stack frame in the prologue of a function
|
||||
* called by head/entry code. Examples:
|
||||
*
|
||||
* <start_secondary>:
|
||||
* push %edi
|
||||
* lea 0x8(%esp),%edi
|
||||
* and $0xfffffff8,%esp
|
||||
* pushl -0x4(%edi)
|
||||
* push %ebp
|
||||
* mov %esp,%ebp
|
||||
*
|
||||
* <x86_64_start_kernel>:
|
||||
* lea 0x8(%rsp),%r10
|
||||
* and $0xfffffffffffffff0,%rsp
|
||||
* pushq -0x8(%r10)
|
||||
* push %rbp
|
||||
* mov %rsp,%rbp
|
||||
*
|
||||
* Note that after aligning the stack, it pushes a duplicate copy of
|
||||
* the return address before pushing the frame pointer.
|
||||
*/
|
||||
return bp == regs - FRAME_HEADER_SIZE ||
|
||||
bp == regs - FRAME_HEADER_SIZE - sizeof(long);
|
||||
return (state->bp == last_bp ||
|
||||
(state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
#define DISABLE_BRANCH_PROFILING
|
||||
#define pr_fmt(fmt) "kasan: " fmt
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/kasan.h>
|
||||
|
|
|
@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
|
|||
* we might run off the end of the bounds table if we are on
|
||||
* a 64-bit kernel and try to get 8 bytes.
|
||||
*/
|
||||
int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
|
||||
static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
|
||||
long __user *bd_entry_ptr)
|
||||
{
|
||||
u32 bd_entry_32;
|
||||
|
|
|
@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
|
|||
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
|
||||
# MISC Devices
|
||||
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
|
||||
obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
|
||||
obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
|
||||
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
/*
|
||||
* Intel Merrifield power button support
|
||||
*
|
||||
* (C) Copyright 2017 Intel Corporation
|
||||
*
|
||||
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; version 2
|
||||
* of the License.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/sfi.h>
|
||||
|
||||
#include <asm/intel-mid.h>
|
||||
#include <asm/intel_scu_ipc.h>
|
||||
|
||||
static struct resource mrfld_power_btn_resources[] = {
|
||||
{
|
||||
.flags = IORESOURCE_IRQ,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device mrfld_power_btn_dev = {
|
||||
.name = "msic_power_btn",
|
||||
.id = PLATFORM_DEVID_NONE,
|
||||
.num_resources = ARRAY_SIZE(mrfld_power_btn_resources),
|
||||
.resource = mrfld_power_btn_resources,
|
||||
};
|
||||
|
||||
static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
|
||||
unsigned long code, void *data)
|
||||
{
|
||||
if (code == SCU_DOWN) {
|
||||
platform_device_unregister(&mrfld_power_btn_dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return platform_device_register(&mrfld_power_btn_dev);
|
||||
}
|
||||
|
||||
static struct notifier_block mrfld_power_btn_scu_notifier = {
|
||||
.notifier_call = mrfld_power_btn_scu_status_change,
|
||||
};
|
||||
|
||||
static int __init register_mrfld_power_btn(void)
|
||||
{
|
||||
if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
|
||||
return -ENODEV;
|
||||
|
||||
/*
|
||||
* We need to be sure that the SCU IPC is ready before
|
||||
* PMIC power button device can be registered:
|
||||
*/
|
||||
intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);
|
||||
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(register_mrfld_power_btn);
|
||||
|
||||
static void __init *mrfld_power_btn_platform_data(void *info)
|
||||
{
|
||||
struct resource *res = mrfld_power_btn_resources;
|
||||
struct sfi_device_table_entry *pentry = info;
|
||||
|
||||
res->start = res->end = pentry->irq;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static const struct devs_id mrfld_power_btn_dev_id __initconst = {
|
||||
.name = "bcove_power_btn",
|
||||
.type = SFI_DEV_TYPE_IPC,
|
||||
.delay = 1,
|
||||
.msic = 1,
|
||||
.get_platform_data = &mrfld_power_btn_platform_data,
|
||||
};
|
||||
|
||||
sfi_device(mrfld_power_btn_dev_id);
|
|
@ -19,7 +19,7 @@
#include <asm/intel_scu_ipc.h>
#include <asm/io_apic.h>

#define TANGIER_EXT_TIMER0_MSI		15
#define TANGIER_EXT_TIMER0_MSI		12

static struct platform_device wdt_dev = {
	.name = "intel_mid_wdt",

@ -17,16 +17,6 @@
|
|||
|
||||
#include "intel_mid_weak_decls.h"
|
||||
|
||||
static void penwell_arch_setup(void);
|
||||
/* penwell arch ops */
|
||||
static struct intel_mid_ops penwell_ops = {
|
||||
.arch_setup = penwell_arch_setup,
|
||||
};
|
||||
|
||||
static void mfld_power_off(void)
|
||||
{
|
||||
}
|
||||
|
||||
static unsigned long __init mfld_calibrate_tsc(void)
|
||||
{
|
||||
unsigned long fast_calibrate;
|
||||
|
@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void)
|
|||
static void __init penwell_arch_setup(void)
|
||||
{
|
||||
x86_platform.calibrate_tsc = mfld_calibrate_tsc;
|
||||
pm_power_off = mfld_power_off;
|
||||
}
|
||||
|
||||
static struct intel_mid_ops penwell_ops = {
|
||||
.arch_setup = penwell_arch_setup,
|
||||
};
|
||||
|
||||
void *get_penwell_ops(void)
|
||||
{
|
||||
return &penwell_ops;
|
||||
|
|
block/bio.c
@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(current->bio_list)))
	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	*current->bio_list = nopunt;
	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);

@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list && !bio_list_empty(current->bio_list))
		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])))
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(bs->bio_pool, gfp_mask);

@ -1973,7 +1973,14 @@ end_io:
 */
blk_qc_t generic_make_request(struct bio *bio)
{
	struct bio_list bio_list_on_stack;
	/*
	 * bio_list_on_stack[0] contains bios submitted by the current
	 * make_request_fn.
	 * bio_list_on_stack[1] contains bios that were submitted before
	 * the current make_request_fn, but that haven't been processed
	 * yet.
	 */
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	if (!generic_make_request_checks(bio))

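The drain loop later in this file splits bio_list_on_stack[0] into lower/same queues and appends the parked [1] list last. The toy below uses integer "bios" and made-up names, nothing from the block layer, and only illustrates that coarse ordering: bios generated by the current ->make_request_fn are handled before anything that was already queued behind it.

/* Toy model of the two on-stack lists; purely illustrative. */
#include <stdio.h>

#define MAXB 16

struct ilist { int v[MAXB]; int n; };

static void push(struct ilist *l, int x) { l->v[l->n++] = x; }

static void merge(struct ilist *dst, const struct ilist *src)
{
	for (int i = 0; i < src->n; i++)
		push(dst, src->v[i]);
}

int main(void)
{
	struct ilist list[2] = { { {0}, 0 }, { {0}, 0 } };

	/* Bios already queued behind the current one. */
	push(&list[0], 100);
	push(&list[0], 101);

	/* Around the make_request_fn call: park the backlog in [1] and
	 * start [0] fresh so it collects only newly submitted bios.
	 */
	list[1] = list[0];
	list[0].n = 0;
	push(&list[0], 200);	/* bio the driver just resubmitted */

	/* Backlog is appended last, so the new bio runs first. */
	merge(&list[0], &list[1]);
	for (int i = 0; i < list[0].n; i++)
		printf("%d\n", list[0].v[i]);
	return 0;
}
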
@ -1990,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio)
|
|||
* should be added at the tail
|
||||
*/
|
||||
if (current->bio_list) {
|
||||
bio_list_add(current->bio_list, bio);
|
||||
bio_list_add(¤t->bio_list[0], bio);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -2009,18 +2016,17 @@ blk_qc_t generic_make_request(struct bio *bio)
|
|||
* bio_list, and call into ->make_request() again.
|
||||
*/
|
||||
BUG_ON(bio->bi_next);
|
||||
bio_list_init(&bio_list_on_stack);
|
||||
current->bio_list = &bio_list_on_stack;
|
||||
bio_list_init(&bio_list_on_stack[0]);
|
||||
current->bio_list = bio_list_on_stack;
|
||||
do {
|
||||
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
|
||||
|
||||
if (likely(blk_queue_enter(q, false) == 0)) {
|
||||
struct bio_list hold;
|
||||
struct bio_list lower, same;
|
||||
|
||||
/* Create a fresh bio_list for all subordinate requests */
|
||||
hold = bio_list_on_stack;
|
||||
bio_list_init(&bio_list_on_stack);
|
||||
bio_list_on_stack[1] = bio_list_on_stack[0];
|
||||
bio_list_init(&bio_list_on_stack[0]);
|
||||
ret = q->make_request_fn(q, bio);
|
||||
|
||||
blk_queue_exit(q);
|
||||
|
@ -2030,19 +2036,19 @@ blk_qc_t generic_make_request(struct bio *bio)
|
|||
*/
|
||||
bio_list_init(&lower);
|
||||
bio_list_init(&same);
|
||||
while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
|
||||
while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
|
||||
if (q == bdev_get_queue(bio->bi_bdev))
|
||||
bio_list_add(&same, bio);
|
||||
else
|
||||
bio_list_add(&lower, bio);
|
||||
/* now assemble so we handle the lowest level first */
|
||||
bio_list_merge(&bio_list_on_stack, &lower);
|
||||
bio_list_merge(&bio_list_on_stack, &same);
|
||||
bio_list_merge(&bio_list_on_stack, &hold);
|
||||
bio_list_merge(&bio_list_on_stack[0], &lower);
|
||||
bio_list_merge(&bio_list_on_stack[0], &same);
|
||||
bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
|
||||
} else {
|
||||
bio_io_error(bio);
|
||||
}
|
||||
bio = bio_list_pop(current->bio_list);
|
||||
bio = bio_list_pop(&bio_list_on_stack[0]);
|
||||
} while (bio);
|
||||
current->bio_list = NULL; /* deactivate */
|
||||
|
||||
|
|
|
@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
|
|||
for (i = 0; i < set->nr_hw_queues; i++) {
|
||||
struct blk_mq_tags *tags = set->tags[i];
|
||||
|
||||
if (!tags)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < tags->nr_tags; j++) {
|
||||
if (!tags->static_rqs[j])
|
||||
continue;
|
||||
|
|
|
@ -697,17 +697,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
|
|||
{
|
||||
struct blk_mq_timeout_data *data = priv;
|
||||
|
||||
if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
|
||||
/*
|
||||
* If a request wasn't started before the queue was
|
||||
* marked dying, kill it here or it'll go unnoticed.
|
||||
*/
|
||||
if (unlikely(blk_queue_dying(rq->q))) {
|
||||
rq->errors = -EIO;
|
||||
blk_mq_end_request(rq, rq->errors);
|
||||
}
|
||||
if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
|
||||
return;
|
||||
}
|
||||
|
||||
if (time_after_eq(jiffies, rq->deadline)) {
|
||||
if (!blk_mark_rq_complete(rq))
|
||||
|
@ -1434,7 +1425,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
|
|||
return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
|
||||
}
|
||||
|
||||
static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
|
||||
static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
|
||||
bool may_sleep)
|
||||
{
|
||||
struct request_queue *q = rq->q;
|
||||
struct blk_mq_queue_data bd = {
|
||||
|
@ -1475,7 +1467,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
|
|||
}
|
||||
|
||||
insert:
|
||||
blk_mq_sched_insert_request(rq, false, true, true, false);
|
||||
blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1569,11 +1561,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
|
|||
|
||||
if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
|
||||
rcu_read_lock();
|
||||
blk_mq_try_issue_directly(old_rq, &cookie);
|
||||
blk_mq_try_issue_directly(old_rq, &cookie, false);
|
||||
rcu_read_unlock();
|
||||
} else {
|
||||
srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
|
||||
blk_mq_try_issue_directly(old_rq, &cookie);
|
||||
blk_mq_try_issue_directly(old_rq, &cookie, true);
|
||||
srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
|
||||
}
|
||||
goto done;
|
||||
|
|
|
@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat)

static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	blk_stat_flush_batch(src);

	if (!src->nr_samples)
		return;

	blk_stat_flush_batch(src);

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

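The point of moving the flush is that src may hold all of its samples in the not-yet-folded batch, in which case nr_samples is still zero and the early return discards real data. The toy accumulator below makes this visible; its batch/nr_batch fields are invented stand-ins, since the real blk_rq_stat layout is not shown in this hunk.

/* Toy accumulator, not the kernel struct. */
#include <stdio.h>
#include <stdint.h>

struct rqstat { uint64_t sum, nr_samples, batch, nr_batch; };

static void flush_batch(struct rqstat *s)
{
	s->sum += s->batch;
	s->nr_samples += s->nr_batch;
	s->batch = s->nr_batch = 0;
}

static void sum_old(struct rqstat *dst, struct rqstat *src)
{
	if (!src->nr_samples)		/* batch not folded in yet... */
		return;			/* ...so batched samples are lost */
	flush_batch(src);
	dst->sum += src->sum;
	dst->nr_samples += src->nr_samples;
}

static void sum_new(struct rqstat *dst, struct rqstat *src)
{
	flush_batch(src);		/* fold first, then test */
	if (!src->nr_samples)
		return;
	dst->sum += src->sum;
	dst->nr_samples += src->nr_samples;
}

int main(void)
{
	struct rqstat a = { 0 }, b = { 0, 0, 50, 5 }, c = { 0, 0, 50, 5 };

	sum_old(&a, &b);
	printf("old: %llu samples\n", (unsigned long long)a.nr_samples);
	a = (struct rqstat){ 0 };
	sum_new(&a, &c);
	printf("new: %llu samples\n", (unsigned long long)a.nr_samples);
	return 0;
}
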