Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 platform changes from Ingo Molnar:
 "The main changes in this cycle were:

   - SGI UV updates (Andrew Banman)

   - Intel MID updates (Andy Shevchenko)

   - Initial Mellanox systems platform (Vadim Pasternak)"

* 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/platform/mellanox: Fix return value check in mlxplat_init()
  x86/platform/mellanox: Introduce support for Mellanox systems platform
  x86/platform/uv/BAU: Add UV4-specific functions
  x86/platform/uv/BAU: Fix payload queue setup on UV4 hardware
  x86/platform/uv/BAU: Disable software timeout on UV4 hardware
  x86/platform/uv/BAU: Populate ->uvhub_version with UV4 version information
  x86/platform/uv/BAU: Use generic function pointers
  x86/platform/uv/BAU: Add generic function pointers
  x86/platform/uv/BAU: Convert uv_physnodeaddr() use to uv_gpa_to_offset()
  x86/platform/uv/BAU: Clean up pq_init()
  x86/platform/uv/BAU: Clean up and update printks
  x86/platform/uv/BAU: Clean up vertical alignment
  x86/platform/intel-mid: Keep SRAM powered on at boot
  x86/platform/intel-mid: Add Intel Penwell to ID table
  x86/cpu: Rename Merrifield2 to Moorefield
  x86/platform/intel-mid: Implement power off sequence
  x86/platform/intel-mid: Enable SD card detection on Merrifield
  x86/platform/intel-mid: Enable WiFi on Intel Edison
  x86/platform/intel-mid: Run PWRMU command immediately
commit a6c4e4cd44
Merged by Linus Torvalds, 2016-10-03 17:22:25 -07:00
18 changed files with 669 additions and 88 deletions

@ -7683,6 +7683,12 @@ W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlxsw/
MELLANOX PLATFORM DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
F: arch/x86/platform/mellanox/mlx-platform.c
SOFT-ROCE DRIVER (rxe)
M: Moni Shoua <monis@mellanox.com>
L: linux-rdma@vger.kernel.org

@ -550,6 +550,18 @@ config X86_INTEL_QUARK
Say Y here if you have a Quark based system such as the Arduino
compatible Intel Galileo.
config MLX_PLATFORM
tristate "Mellanox Technologies platform support"
depends on X86_64
depends on X86_EXTENDED_PLATFORM
---help---
This option enables system support for the Mellanox Technologies
platform.
Say Y here if you are building a kernel for Mellanox system.
Otherwise, say N.
config X86_INTEL_LPSS
bool "Intel Low Power Subsystem Support"
depends on X86 && ACPI

@ -56,8 +56,8 @@
#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */
#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */
#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Annidale */
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
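
These family/model defines are normally consumed through x86_cpu_id match tables, which is why the rename also touches the drivers further down in this series. A minimal sketch of matching the renamed IDs at runtime (hypothetical driver snippet using the standard x86_match_cpu() helper, not code from this commit):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

/* Hypothetical match table for Tangier (Merrifield) and Moorefield SoCs */
static const struct x86_cpu_id mid_soc_ids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD, X86_FEATURE_ANY },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD, X86_FEATURE_ANY },
	{}
};

static int __init mid_soc_check(void)
{
	if (!x86_match_cpu(mid_soc_ids))
		return -ENODEV;	/* not one of these SoCs */
	return 0;
}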

@ -18,6 +18,8 @@
extern int intel_mid_pci_init(void);
extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
extern void intel_mid_pwr_power_off(void);
#define INTEL_MID_PWR_LSS_OFFSET 4
#define INTEL_MID_PWR_LSS_TYPE (1 << 7)
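
The two new macros describe where the logical subsystem (LSS) id is expected to live in a device's PCI vendor-specific capability: a byte at offset 4, with bit 7 flagging the entry type. A hedged sketch of a decoder built on those assumptions, using generic PCI capability helpers (the real lookup is intel_mid_pwr_get_lss_id() in pwr.c, not this snippet):

#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/intel-mid.h>

/* Hypothetical decoder: read the LSS id byte out of the vendor capability. */
static int read_lss_id(struct pci_dev *pdev)
{
	int vndr;
	u8 id;

	vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
	if (!vndr)
		return -EINVAL;

	pci_read_config_byte(pdev, vndr + INTEL_MID_PWR_LSS_OFFSET, &id);
	if (!(id & INTEL_MID_PWR_LSS_TYPE))	/* assumed: bit 7 marks an LSS entry */
		return -ENODEV;

	return id & ~INTEL_MID_PWR_LSS_TYPE;
}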

@ -3,6 +3,8 @@
#include <linux/notifier.h>
#define IPCMSG_COLD_OFF 0x80 /* Only for Tangier */
#define IPCMSG_WARM_RESET 0xF0
#define IPCMSG_COLD_RESET 0xF1
#define IPCMSG_SOFT_RESET 0xF2
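
These message numbers are sent to the SCU through the simple-command IPC helper; a minimal sketch of issuing one of them, assuming the intel_scu_ipc_simple_command(cmd, sub) prototype this header provides (hypothetical helper, not code from this commit):

#include <linux/errno.h>
#include <linux/printk.h>
#include <asm/intel_scu_ipc.h>

/* Hypothetical helper: ask the SCU firmware for a cold reset. */
static int scu_request_cold_reset(void)
{
	int ret;

	ret = intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
	if (ret)
		pr_err("SCU cold reset request failed: %d\n", ret);
	return ret;
}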

@ -49,14 +49,12 @@
#define UV_NET_ENDPOINT_INTD (is_uv1_hub() ? \
UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
#define UV_DESC_PSHIFT 49
#define UV_PAYLOADQ_PNODE_SHIFT 49
#define UV_PAYLOADQ_GNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
#define UV_BAU_TUNABLES_DIR "sgi_uv"
#define UV_BAU_TUNABLES_FILE "bau_tunables"
#define WHITESPACE " \t\n"
#define uv_mmask ((1UL << uv_hub_info->m_val) - 1)
#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
#define cpubit_isset(cpu, bau_local_cpumask) \
test_bit((cpu), (bau_local_cpumask).bits)
@ -387,6 +385,17 @@ struct uv2_3_bau_msg_header {
/* bits 127:120 */
};
/* Abstracted BAU functions */
struct bau_operations {
unsigned long (*read_l_sw_ack)(void);
unsigned long (*read_g_sw_ack)(int pnode);
unsigned long (*bau_gpa_to_offset)(unsigned long vaddr);
void (*write_l_sw_ack)(unsigned long mmr);
void (*write_g_sw_ack)(int pnode, unsigned long mmr);
void (*write_payload_first)(int pnode, unsigned long mmr);
void (*write_payload_last)(int pnode, unsigned long mmr);
};
/*
* The activation descriptor:
* The format of the message to send, plus all accompanying control
@ -655,6 +664,16 @@ static inline void write_gmmr_activation(int pnode, unsigned long mmr_image)
write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
}
static inline void write_mmr_proc_payload_first(int pnode, unsigned long mmr_image)
{
write_gmmr(pnode, UV4H_LB_PROC_INTD_QUEUE_FIRST, mmr_image);
}
static inline void write_mmr_proc_payload_last(int pnode, unsigned long mmr_image)
{
write_gmmr(pnode, UV4H_LB_PROC_INTD_QUEUE_LAST, mmr_image);
}
static inline void write_mmr_payload_first(int pnode, unsigned long mmr_image)
{
write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
@ -700,6 +719,26 @@ static inline unsigned long read_gmmr_sw_ack(int pnode)
return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
}
static inline void write_mmr_proc_sw_ack(unsigned long mr)
{
uv_write_local_mmr(UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR, mr);
}
static inline void write_gmmr_proc_sw_ack(int pnode, unsigned long mr)
{
write_gmmr(pnode, UV4H_LB_PROC_INTD_SOFT_ACK_CLEAR, mr);
}
static inline unsigned long read_mmr_proc_sw_ack(void)
{
return read_lmmr(UV4H_LB_PROC_INTD_SOFT_ACK_PENDING);
}
static inline unsigned long read_gmmr_proc_sw_ack(int pnode)
{
return read_gmmr(pnode, UV4H_LB_PROC_INTD_SOFT_ACK_PENDING);
}
static inline void write_mmr_data_config(int pnode, unsigned long mr)
{
uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
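
struct bau_operations is the usual kernel ops-table idiom: pick the table once based on the hub generation, then have every caller go through the pointers so the UV1-3 and UV4 register layouts share one code path. A self-contained illustration of that pattern (plain C with hypothetical names, compilable outside the kernel):

#include <stdio.h>

struct hub_ops {
	unsigned long (*read_sw_ack)(void);
	void (*write_sw_ack)(unsigned long val);
};

static unsigned long legacy_read(void)      { return 0x1UL; }
static void legacy_write(unsigned long val) { printf("legacy ack 0x%lx\n", val); }
static unsigned long uv4_read(void)         { return 0x2UL; }
static void uv4_write(unsigned long val)    { printf("uv4 ack 0x%lx\n", val); }

static const struct hub_ops legacy_ops = { legacy_read, legacy_write };
static const struct hub_ops uv4_ops    = { uv4_read, uv4_write };

int main(void)
{
	int is_uv4 = 1;	/* pretend we probed the hub generation */
	const struct hub_ops *ops = is_uv4 ? &uv4_ops : &legacy_ops;

	/* Callers never test the hub type again; they just call through ops. */
	ops->write_sw_ack(ops->read_sw_ack());
	return 0;
}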

@ -8,6 +8,7 @@ obj-y += iris/
obj-y += intel/
obj-y += intel-mid/
obj-y += intel-quark/
obj-y += mellanox/
obj-y += olpc/
obj-y += scx200/
obj-y += sfi/

@ -155,7 +155,7 @@ static void punit_dbgfs_unregister(void)
static const struct x86_cpu_id intel_punit_cpu_ids[] = {
ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt),
ICPU(INTEL_FAM6_ATOM_MERRIFIELD1, punit_device_tng),
ICPU(INTEL_FAM6_ATOM_MERRIFIELD, punit_device_tng),
ICPU(INTEL_FAM6_ATOM_AIRMONT, punit_device_cht),
{}
};

@ -1,5 +1,9 @@
# Family-Level Interface Shim (FLIS)
obj-$(subst m,y,$(CONFIG_PINCTRL_MERRIFIELD)) += platform_mrfld_pinctrl.o
# SDHCI Devices
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += platform_mrfld_sd.o
# WiFi
obj-$(subst m,y,$(CONFIG_BRCMFMAC_SDIO)) += platform_bcm43xx.o
# IPC Devices
obj-y += platform_ipc.o
obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic.o

@ -0,0 +1,95 @@
/*
* platform_bcm43xx.c: bcm43xx platform data initialization file
*
* (C) Copyright 2016 Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*/
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
#include <linux/sfi.h>
#include <asm/intel-mid.h>
#define WLAN_SFI_GPIO_IRQ_NAME "WLAN-interrupt"
#define WLAN_SFI_GPIO_ENABLE_NAME "WLAN-enable"
#define WLAN_DEV_NAME "0000:00:01.3"
static struct regulator_consumer_supply bcm43xx_vmmc_supply = {
.dev_name = WLAN_DEV_NAME,
.supply = "vmmc",
};
static struct regulator_init_data bcm43xx_vmmc_data = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = 1,
.consumer_supplies = &bcm43xx_vmmc_supply,
};
static struct fixed_voltage_config bcm43xx_vmmc = {
.supply_name = "bcm43xx-vmmc-regulator",
/*
* Announce 2.0V here to be compatible with SDIO specification. The
* real voltage and signaling are still 1.8V.
*/
.microvolts = 2000000, /* 1.8V */
.gpio = -EINVAL,
.startup_delay = 250 * 1000, /* 250ms */
.enable_high = 1, /* active high */
.enabled_at_boot = 0, /* disabled at boot */
.init_data = &bcm43xx_vmmc_data,
};
static struct platform_device bcm43xx_vmmc_regulator = {
.name = "reg-fixed-voltage",
.id = PLATFORM_DEVID_AUTO,
.dev = {
.platform_data = &bcm43xx_vmmc,
},
};
static int __init bcm43xx_regulator_register(void)
{
int ret;
bcm43xx_vmmc.gpio = get_gpio_by_name(WLAN_SFI_GPIO_ENABLE_NAME);
ret = platform_device_register(&bcm43xx_vmmc_regulator);
if (ret) {
pr_err("%s: vmmc regulator register failed\n", __func__);
return ret;
}
return 0;
}
static void __init *bcm43xx_platform_data(void *info)
{
int ret;
ret = bcm43xx_regulator_register();
if (ret)
return NULL;
pr_info("Using generic wifi platform data\n");
/* For now it's empty */
return NULL;
}
static const struct devs_id bcm43xx_clk_vmmc_dev_id __initconst = {
.name = "bcm43xx_clk_vmmc",
.type = SFI_DEV_TYPE_SD,
.get_platform_data = &bcm43xx_platform_data,
};
sfi_device(bcm43xx_clk_vmmc_dev_id);
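
The consumer-supply entry above ties this fixed regulator to the SDIO function at PCI address 0000:00:01.3 under the name "vmmc", so the host driver can pick it up through the ordinary regulator consumer API; a hedged sketch of that consumer side (generic API usage, not this file's or the MMC core's actual code):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer: look up and enable the vmmc supply for the WLAN slot. */
static int wlan_vmmc_enable(struct device *dev)
{
	struct regulator *vmmc;
	int ret;

	vmmc = devm_regulator_get(dev, "vmmc");
	if (IS_ERR(vmmc))
		return PTR_ERR(vmmc);

	ret = regulator_enable(vmmc);
	if (ret)
		dev_err(dev, "failed to enable vmmc: %d\n", ret);
	return ret;
}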

@ -0,0 +1,47 @@
/*
* SDHCI platform data initialisation file
*
* (C) Copyright 2016 Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mmc/sdhci-pci-data.h>
#include <asm/intel-mid.h>
#define INTEL_MRFLD_SD 2
#define INTEL_MRFLD_SD_CD_GPIO 77
static struct sdhci_pci_data mrfld_sdhci_pci_data = {
.rst_n_gpio = -EINVAL,
.cd_gpio = INTEL_MRFLD_SD_CD_GPIO,
};
static struct sdhci_pci_data *
mrfld_sdhci_pci_get_data(struct pci_dev *pdev, int slotno)
{
unsigned int func = PCI_FUNC(pdev->devfn);
if (func == INTEL_MRFLD_SD)
return &mrfld_sdhci_pci_data;
return NULL;
}
static int __init mrfld_sd_init(void)
{
if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
return -ENODEV;
sdhci_pci_get_data = mrfld_sdhci_pci_get_data;
return 0;
}
arch_initcall(mrfld_sd_init);
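
sdhci_pci_get_data is a global hook published by include/linux/mmc/sdhci-pci-data.h; the snippet above installs a provider, and the sdhci-pci driver calls through it per slot when it is set. A hedged sketch of the consuming side of that contract (hypothetical, mirroring the hook's signature rather than the driver's exact code):

#include <linux/pci.h>
#include <linux/mmc/sdhci-pci-data.h>

/* Hypothetical consumer: ask the platform for per-slot data, if any. */
static struct sdhci_pci_data *slot_platform_data(struct pci_dev *pdev, int slotno)
{
	if (sdhci_pci_get_data)
		return sdhci_pci_get_data(pdev, slotno);
	return NULL;
}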

@ -70,6 +70,11 @@ EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
static void intel_mid_power_off(void)
{
/* Shut down South Complex via PWRMU */
intel_mid_pwr_power_off();
/* Only for Tangier, the rest will ignore this command */
intel_scu_ipc_simple_command(IPCMSG_COLD_OFF, 1);
};
static void intel_mid_reboot(void)

@ -44,7 +44,19 @@
/* Bits in PM_CMD */
#define PM_CMD_CMD(x) ((x) << 0)
#define PM_CMD_IOC (1 << 8)
#define PM_CMD_D3cold (1 << 21)
#define PM_CMD_CM_NOP (0 << 9)
#define PM_CMD_CM_IMMEDIATE (1 << 9)
#define PM_CMD_CM_DELAY (2 << 9)
#define PM_CMD_CM_TRIGGER (3 << 9)
/* System states */
#define PM_CMD_SYS_STATE_S5 (5 << 16)
/* Trigger variants */
#define PM_CMD_CFG_TRIGGER_NC (3 << 19)
/* Message to wait for TRIGGER_NC case */
#define TRIGGER_NC_MSG_2 (2 << 22)
/* List of commands */
#define CMD_SET_CFG 0x01
@ -137,7 +149,7 @@ static int mid_pwr_wait(struct mid_pwr *pwr)
static int mid_pwr_wait_for_cmd(struct mid_pwr *pwr, u8 cmd)
{
writel(PM_CMD_CMD(cmd), pwr->regs + PM_CMD);
writel(PM_CMD_CMD(cmd) | PM_CMD_CM_IMMEDIATE, pwr->regs + PM_CMD);
return mid_pwr_wait(pwr);
}
@ -260,6 +272,20 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
}
EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
void intel_mid_pwr_power_off(void)
{
struct mid_pwr *pwr = midpwr;
u32 cmd = PM_CMD_SYS_STATE_S5 |
PM_CMD_CMD(CMD_SET_CFG) |
PM_CMD_CM_TRIGGER |
PM_CMD_CFG_TRIGGER_NC |
TRIGGER_NC_MSG_2;
/* Send command to SCU */
writel(cmd, pwr->regs + PM_CMD);
mid_pwr_wait(pwr);
}
int intel_mid_pwr_get_lss_id(struct pci_dev *pdev)
{
int vndr;
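
The S5 power-off word built in intel_mid_pwr_power_off() is just the OR of the new PM_CMD fields; a quick stand-alone check of the arithmetic (defines copied from above, compilable as plain C):

#include <stdio.h>

#define PM_CMD_CMD(x)          ((x) << 0)
#define PM_CMD_CM_TRIGGER      (3 << 9)
#define PM_CMD_SYS_STATE_S5    (5 << 16)
#define PM_CMD_CFG_TRIGGER_NC  (3 << 19)
#define TRIGGER_NC_MSG_2       (2 << 22)
#define CMD_SET_CFG            0x01

int main(void)
{
	unsigned int cmd = PM_CMD_SYS_STATE_S5 | PM_CMD_CMD(CMD_SET_CFG) |
			   PM_CMD_CM_TRIGGER | PM_CMD_CFG_TRIGGER_NC |
			   TRIGGER_NC_MSG_2;

	/* Expect 0x009d0601: S5 state, SET_CFG, trigger mode, NC trigger, message 2 */
	printf("PM_CMD word: 0x%08x\n", cmd);
	return 0;
}
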
@ -354,7 +380,7 @@ static int mid_pwr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
}
static int mid_set_initial_state(struct mid_pwr *pwr)
static int mid_set_initial_state(struct mid_pwr *pwr, const u32 *states)
{
unsigned int i, j;
int ret;
@ -379,10 +405,10 @@ static int mid_set_initial_state(struct mid_pwr *pwr)
* NOTE: The actual device mapping is provided by a platform at run
* time using vendor capability of PCI configuration space.
*/
mid_pwr_set_state(pwr, 0, 0xffffffff);
mid_pwr_set_state(pwr, 1, 0xffffffff);
mid_pwr_set_state(pwr, 2, 0xffffffff);
mid_pwr_set_state(pwr, 3, 0xffffffff);
mid_pwr_set_state(pwr, 0, states[0]);
mid_pwr_set_state(pwr, 1, states[1]);
mid_pwr_set_state(pwr, 2, states[2]);
mid_pwr_set_state(pwr, 3, states[3]);
/* Send command to SCU */
ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
@ -397,13 +423,41 @@ static int mid_set_initial_state(struct mid_pwr *pwr)
return 0;
}
static const struct mid_pwr_device_info mid_info = {
.set_initial_state = mid_set_initial_state,
static int pnw_set_initial_state(struct mid_pwr *pwr)
{
/* On Penwell SRAM must stay powered on */
const u32 states[] = {
0xf00fffff, /* PM_SSC(0) */
0xffffffff, /* PM_SSC(1) */
0xffffffff, /* PM_SSC(2) */
0xffffffff, /* PM_SSC(3) */
};
return mid_set_initial_state(pwr, states);
}
static int tng_set_initial_state(struct mid_pwr *pwr)
{
const u32 states[] = {
0xffffffff, /* PM_SSC(0) */
0xffffffff, /* PM_SSC(1) */
0xffffffff, /* PM_SSC(2) */
0xffffffff, /* PM_SSC(3) */
};
return mid_set_initial_state(pwr, states);
}
static const struct mid_pwr_device_info pnw_info = {
.set_initial_state = pnw_set_initial_state,
};
static const struct mid_pwr_device_info tng_info = {
.set_initial_state = tng_set_initial_state,
};
/* This table should be in sync with the one in drivers/pci/pci-mid.c */
static const struct pci_device_id mid_pwr_pci_ids[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL), (kernel_ulong_t)&mid_info },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER), (kernel_ulong_t)&mid_info },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL), (kernel_ulong_t)&pnw_info },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER), (kernel_ulong_t)&tng_info },
{}
};
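
The split into pnw_set_initial_state()/tng_set_initial_state() exists so Penwell can keep its SRAM island out of D3 at boot. Assuming the PWRMU packs two D-state bits per logical subsystem into each 32-bit PM_SSC register (so register 0 covers LSS 0-15, with 00 meaning D0), the Penwell mask 0xf00fffff leaves LSS 10-13 powered while everything else goes to D3. A small stand-alone check of that reading, with the two-bits-per-LSS layout stated here as an assumption:

#include <stdio.h>

int main(void)
{
	unsigned int ssc0 = 0xf00fffff;	/* Penwell PM_SSC(0) from the table above */
	int lss;

	for (lss = 0; lss < 16; lss++) {
		unsigned int state = (ssc0 >> (lss * 2)) & 0x3;

		if (state == 0)	/* assumed encoding: 00 = D0, stays powered */
			printf("LSS %d stays in D0\n", lss);
	}
	return 0;
}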

@ -0,0 +1 @@
obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o

@ -0,0 +1,266 @@
/*
* arch/x86/platform/mellanox/mlx-platform.c
* Copyright (c) 2016 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-mux-reg.h>
#define MLX_PLAT_DEVICE_NAME "mlxplat"
/* LPC bus IO offsets */
#define MLXPLAT_CPLD_LPC_I2C_BASE_ADRR 0x2000
#define MLXPLAT_CPLD_LPC_REG_BASE_ADRR 0x2500
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_I2C_CH1_OFF) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
#define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
/* Start channel numbers */
#define MLXPLAT_CPLD_CH1 2
#define MLXPLAT_CPLD_CH2 10
/* Number of LPC attached MUX platform devices */
#define MLXPLAT_CPLD_LPC_MUX_DEVS 2
/* mlxplat_priv - platform private data
* @pdev_i2c - i2c controller platform device
* @pdev_mux - array of mux platform devices
*/
struct mlxplat_priv {
struct platform_device *pdev_i2c;
struct platform_device *pdev_mux[MLXPLAT_CPLD_LPC_MUX_DEVS];
};
/* Regions for LPC I2C controller and LPC base register space */
static const struct resource mlxplat_lpc_resources[] = {
[0] = DEFINE_RES_NAMED(MLXPLAT_CPLD_LPC_I2C_BASE_ADRR,
MLXPLAT_CPLD_LPC_IO_RANGE,
"mlxplat_cpld_lpc_i2c_ctrl", IORESOURCE_IO),
[1] = DEFINE_RES_NAMED(MLXPLAT_CPLD_LPC_REG_BASE_ADRR,
MLXPLAT_CPLD_LPC_IO_RANGE,
"mlxplat_cpld_lpc_regs",
IORESOURCE_IO),
};
/* Platform default channels */
static const int mlxplat_default_channels[][8] = {
{
MLXPLAT_CPLD_CH1, MLXPLAT_CPLD_CH1 + 1, MLXPLAT_CPLD_CH1 + 2,
MLXPLAT_CPLD_CH1 + 3, MLXPLAT_CPLD_CH1 + 4, MLXPLAT_CPLD_CH1 +
5, MLXPLAT_CPLD_CH1 + 6, MLXPLAT_CPLD_CH1 + 7
},
{
MLXPLAT_CPLD_CH2, MLXPLAT_CPLD_CH2 + 1, MLXPLAT_CPLD_CH2 + 2,
MLXPLAT_CPLD_CH2 + 3, MLXPLAT_CPLD_CH2 + 4, MLXPLAT_CPLD_CH2 +
5, MLXPLAT_CPLD_CH2 + 6, MLXPLAT_CPLD_CH2 + 7
},
};
/* Platform channels for MSN21xx system family */
static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
/* Platform mux data */
static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH1,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
.reg_size = 1,
.idle_in_use = 1,
},
{
.parent = 1,
.base_nr = MLXPLAT_CPLD_CH2,
.write_only = 1,
.reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
.reg_size = 1,
.idle_in_use = 1,
},
};
static struct platform_device *mlxplat_dev;
static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
int i;
for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
mlxplat_mux_data[i].values = mlxplat_default_channels[i];
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_default_channels[i]);
}
return 1;
};
static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
{
int i;
for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
}
return 1;
};
static struct dmi_system_id mlxplat_dmi_table[] __initdata = {
{
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSN24"),
},
},
{
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSN27"),
},
},
{
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSB"),
},
},
{
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSX"),
},
},
{
.callback = mlxplat_dmi_msn21xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
DMI_MATCH(DMI_PRODUCT_NAME, "MSN21"),
},
},
{ }
};
static int __init mlxplat_init(void)
{
struct mlxplat_priv *priv;
int i, err;
if (!dmi_check_system(mlxplat_dmi_table))
return -ENODEV;
mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, -1,
mlxplat_lpc_resources,
ARRAY_SIZE(mlxplat_lpc_resources));
if (IS_ERR(mlxplat_dev))
return PTR_ERR(mlxplat_dev);
priv = devm_kzalloc(&mlxplat_dev->dev, sizeof(struct mlxplat_priv),
GFP_KERNEL);
if (!priv) {
err = -ENOMEM;
goto fail_alloc;
}
platform_set_drvdata(mlxplat_dev, priv);
priv->pdev_i2c = platform_device_register_simple("i2c_mlxcpld", -1,
NULL, 0);
if (IS_ERR(priv->pdev_i2c)) {
err = PTR_ERR(priv->pdev_i2c);
goto fail_alloc;
};
for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
priv->pdev_mux[i] = platform_device_register_resndata(
&mlxplat_dev->dev,
"i2c-mux-reg", i, NULL,
0, &mlxplat_mux_data[i],
sizeof(mlxplat_mux_data[i]));
if (IS_ERR(priv->pdev_mux[i])) {
err = PTR_ERR(priv->pdev_mux[i]);
goto fail_platform_mux_register;
}
}
return 0;
fail_platform_mux_register:
for (i--; i >= 0; i--)
platform_device_unregister(priv->pdev_mux[i]);
platform_device_unregister(priv->pdev_i2c);
fail_alloc:
platform_device_unregister(mlxplat_dev);
return err;
}
module_init(mlxplat_init);
static void __exit mlxplat_exit(void)
{
struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
int i;
for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--)
platform_device_unregister(priv->pdev_mux[i]);
platform_device_unregister(priv->pdev_i2c);
platform_device_unregister(mlxplat_dev);
}
module_exit(mlxplat_exit);
MODULE_AUTHOR("Vadim Pasternak (vadimp@mellanox.com)");
MODULE_DESCRIPTION("Mellanox platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("dmi:*:*Mellanox*:MSN24*:");
MODULE_ALIAS("dmi:*:*Mellanox*:MSN27*:");
MODULE_ALIAS("dmi:*:*Mellanox*:MSB*:");
MODULE_ALIAS("dmi:*:*Mellanox*:MSX*:");
MODULE_ALIAS("dmi:*:*Mellanox*:MSN21*:");

@ -24,6 +24,29 @@
#include <asm/irq_vectors.h>
#include <asm/timer.h>
static struct bau_operations ops;
static struct bau_operations uv123_bau_ops = {
.bau_gpa_to_offset = uv_gpa_to_offset,
.read_l_sw_ack = read_mmr_sw_ack,
.read_g_sw_ack = read_gmmr_sw_ack,
.write_l_sw_ack = write_mmr_sw_ack,
.write_g_sw_ack = write_gmmr_sw_ack,
.write_payload_first = write_mmr_payload_first,
.write_payload_last = write_mmr_payload_last,
};
static struct bau_operations uv4_bau_ops = {
.bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
.read_l_sw_ack = read_mmr_proc_sw_ack,
.read_g_sw_ack = read_gmmr_proc_sw_ack,
.write_l_sw_ack = write_mmr_proc_sw_ack,
.write_g_sw_ack = write_gmmr_proc_sw_ack,
.write_payload_first = write_mmr_proc_payload_first,
.write_payload_last = write_mmr_proc_payload_last,
};
/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
20,
@ -55,16 +78,16 @@ static int congested_reps = CONGESTED_REPS;
static int disabled_period = DISABLED_PERIOD;
static struct tunables tunables[] = {
{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
{&plugged_delay, PLUGGED_DELAY},
{&plugsb4reset, PLUGSB4RESET},
{&timeoutsb4reset, TIMEOUTSB4RESET},
{&ipi_reset_limit, IPI_RESET_LIMIT},
{&complete_threshold, COMPLETE_THRESHOLD},
{&congested_respns_us, CONGESTED_RESPONSE_US},
{&congested_reps, CONGESTED_REPS},
{&disabled_period, DISABLED_PERIOD},
{&giveup_limit, GIVEUP_LIMIT}
{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
{&plugged_delay, PLUGGED_DELAY},
{&plugsb4reset, PLUGSB4RESET},
{&timeoutsb4reset, TIMEOUTSB4RESET},
{&ipi_reset_limit, IPI_RESET_LIMIT},
{&complete_threshold, COMPLETE_THRESHOLD},
{&congested_respns_us, CONGESTED_RESPONSE_US},
{&congested_reps, CONGESTED_REPS},
{&disabled_period, DISABLED_PERIOD},
{&giveup_limit, GIVEUP_LIMIT}
};
static struct dentry *tunables_dir;
@ -216,7 +239,7 @@ static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
msg = mdp->msg;
if (!msg->canceled && do_acknowledge) {
dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
write_mmr_sw_ack(dw);
ops.write_l_sw_ack(dw);
}
msg->replied_to = 1;
msg->swack_vec = 0;
@ -252,7 +275,7 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
msg->swack_vec) == 0) &&
(msg2->sending_cpu == msg->sending_cpu) &&
(msg2->msg_type != MSG_NOOP)) {
mmr = read_mmr_sw_ack();
mmr = ops.read_l_sw_ack();
msg_res = msg2->swack_vec;
/*
* This is a message retry; clear the resources held
@ -270,7 +293,7 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
stat->d_canceled++;
cancel_count++;
mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
write_mmr_sw_ack(mr);
ops.write_l_sw_ack(mr);
}
}
}
@ -403,12 +426,12 @@ static void do_reset(void *ptr)
/*
* only reset the resource if it is still pending
*/
mmr = read_mmr_sw_ack();
mmr = ops.read_l_sw_ack();
msg_res = msg->swack_vec;
mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
if (mmr & msg_res) {
stat->d_rcanceled++;
write_mmr_sw_ack(mr);
ops.write_l_sw_ack(mr);
}
}
}
@ -1198,7 +1221,7 @@ void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
struct bau_pq_entry *msg = mdp->msg;
struct bau_pq_entry *other_msg;
mmr_image = read_mmr_sw_ack();
mmr_image = ops.read_l_sw_ack();
swack_vec = msg->swack_vec;
if ((swack_vec & mmr_image) == 0) {
@ -1427,7 +1450,7 @@ static int ptc_seq_show(struct seq_file *file, void *data)
/* destination side statistics */
seq_printf(file,
"%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
stat->d_requestee, cycles_2_us(stat->d_time),
stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
stat->d_nomsg, stat->d_retries, stat->d_canceled,
@ -1493,16 +1516,16 @@ static ssize_t ptc_proc_write(struct file *file, const char __user *user,
}
if (kstrtol(optstr, 10, &input_arg) < 0) {
printk(KERN_DEBUG "%s is invalid\n", optstr);
pr_debug("%s is invalid\n", optstr);
return -EINVAL;
}
if (input_arg == 0) {
elements = ARRAY_SIZE(stat_description);
printk(KERN_DEBUG "# cpu: cpu number\n");
printk(KERN_DEBUG "Sender statistics:\n");
pr_debug("# cpu: cpu number\n");
pr_debug("Sender statistics:\n");
for (i = 0; i < elements; i++)
printk(KERN_DEBUG "%s\n", stat_description[i]);
pr_debug("%s\n", stat_description[i]);
} else if (input_arg == -1) {
for_each_present_cpu(cpu) {
stat = &per_cpu(ptcstats, cpu);
@ -1550,7 +1573,7 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
break;
}
if (cnt != e) {
printk(KERN_INFO "bau tunable error: should be %d values\n", e);
pr_info("bau tunable error: should be %d values\n", e);
return -EINVAL;
}
@ -1567,7 +1590,7 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
continue;
}
if (val < 1 || val > bcp->cpus_in_uvhub) {
printk(KERN_DEBUG
pr_debug(
"Error: BAU max concurrent %d is invalid\n",
val);
return -EINVAL;
@ -1615,17 +1638,17 @@ static ssize_t tunables_write(struct file *file, const char __user *user,
for_each_present_cpu(cpu) {
bcp = &per_cpu(bau_control, cpu);
bcp->max_concurr = max_concurr;
bcp->max_concurr_const = max_concurr;
bcp->plugged_delay = plugged_delay;
bcp->plugsb4reset = plugsb4reset;
bcp->timeoutsb4reset = timeoutsb4reset;
bcp->ipi_reset_limit = ipi_reset_limit;
bcp->complete_threshold = complete_threshold;
bcp->cong_response_us = congested_respns_us;
bcp->cong_reps = congested_reps;
bcp->disabled_period = sec_2_cycles(disabled_period);
bcp->giveup_limit = giveup_limit;
bcp->max_concurr = max_concurr;
bcp->max_concurr_const = max_concurr;
bcp->plugged_delay = plugged_delay;
bcp->plugsb4reset = plugsb4reset;
bcp->timeoutsb4reset = timeoutsb4reset;
bcp->ipi_reset_limit = ipi_reset_limit;
bcp->complete_threshold = complete_threshold;
bcp->cong_response_us = congested_respns_us;
bcp->cong_reps = congested_reps;
bcp->disabled_period = sec_2_cycles(disabled_period);
bcp->giveup_limit = giveup_limit;
}
return count;
}
@ -1672,21 +1695,21 @@ static int __init uv_ptc_init(void)
proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
&proc_uv_ptc_operations);
if (!proc_uv_ptc) {
printk(KERN_ERR "unable to create %s proc entry\n",
pr_err("unable to create %s proc entry\n",
UV_PTC_BASENAME);
return -EINVAL;
}
tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
if (!tunables_dir) {
printk(KERN_ERR "unable to create debugfs directory %s\n",
pr_err("unable to create debugfs directory %s\n",
UV_BAU_TUNABLES_DIR);
return -EINVAL;
}
tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
tunables_dir, NULL, &tunables_fops);
if (!tunables_file) {
printk(KERN_ERR "unable to create debugfs file %s\n",
pr_err("unable to create debugfs file %s\n",
UV_BAU_TUNABLES_FILE);
return -EINVAL;
}
@ -1721,7 +1744,7 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
gpa = uv_gpa(bau_desc);
n = uv_gpa_to_gnode(gpa);
m = uv_gpa_to_offset(gpa);
m = ops.bau_gpa_to_offset(gpa);
if (is_uv1_hub())
uv1 = 1;
@ -1736,7 +1759,7 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
memset(bd2, 0, sizeof(struct bau_desc));
if (uv1) {
uv1_hdr = &bd2->header.uv1_hdr;
uv1_hdr->swack_flag = 1;
uv1_hdr->swack_flag = 1;
/*
* The base_dest_nasid set in the message header
* is the nasid of the first uvhub in the partition.
@ -1745,10 +1768,10 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
* if nasid striding is being used.
*/
uv1_hdr->base_dest_nasid =
UV_PNODE_TO_NASID(base_pnode);
uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
uv1_hdr->command = UV_NET_ENDPOINT_INTD;
uv1_hdr->int_both = 1;
UV_PNODE_TO_NASID(base_pnode);
uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
uv1_hdr->command = UV_NET_ENDPOINT_INTD;
uv1_hdr->int_both = 1;
/*
* all others need to be set to zero:
* fairness chaining multilevel count replied_to
@ -1759,11 +1782,11 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
* uses native mode for selective broadcasts.
*/
uv2_3_hdr = &bd2->header.uv2_3_hdr;
uv2_3_hdr->swack_flag = 1;
uv2_3_hdr->swack_flag = 1;
uv2_3_hdr->base_dest_nasid =
UV_PNODE_TO_NASID(base_pnode);
uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
UV_PNODE_TO_NASID(base_pnode);
uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
}
}
for_each_present_cpu(cpu) {
@ -1786,10 +1809,7 @@ static void pq_init(int node, int pnode)
size_t plsize;
char *cp;
void *vp;
unsigned long pn;
unsigned long first;
unsigned long pn_first;
unsigned long last;
unsigned long gnode, first, last, tail;
struct bau_pq_entry *pqp;
struct bau_control *bcp;
@ -1810,17 +1830,25 @@ static void pq_init(int node, int pnode)
bcp->bau_msg_head = pqp;
bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
}
first = ops.bau_gpa_to_offset(uv_gpa(pqp));
last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1)));
/*
* need the gnode of where the memory was really allocated
* Pre UV4, the gnode is required to locate the payload queue
* and the payload queue tail must be maintained by the kernel.
*/
pn = uv_gpa_to_gnode(uv_gpa(pqp));
first = uv_physnodeaddr(pqp);
pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
write_mmr_payload_first(pnode, pn_first);
write_mmr_payload_tail(pnode, first);
write_mmr_payload_last(pnode, last);
write_gmmr_sw_ack(pnode, 0xffffUL);
bcp = &per_cpu(bau_control, smp_processor_id());
if (bcp->uvhub_version <= 3) {
tail = first;
gnode = uv_gpa_to_gnode(uv_gpa(pqp));
first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
write_mmr_payload_tail(pnode, tail);
}
ops.write_payload_first(pnode, first);
ops.write_payload_last(pnode, last);
ops.write_g_sw_ack(pnode, 0xffffUL);
/* in effect, all msg_type's are set to MSG_NOOP */
memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
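
On the pre-UV4 path above, the "payload queue first" MMR packs the gnode of the queue's memory above bit 49 together with the queue offset in the low bits, which is what the UV_PAYLOADQ_GNODE_SHIFT define in uv_bau.h is for. A stand-alone sketch of that packing with hypothetical example values:

#include <stdio.h>

#define UV_PAYLOADQ_GNODE_SHIFT 49

int main(void)
{
	unsigned long long gnode  = 0x3;      /* hypothetical gnode of the queue's memory */
	unsigned long long offset = 0x12340;  /* hypothetical payload queue offset (tail) */
	unsigned long long first  = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | offset;

	/* Expect 0x6000000012340: gnode in the high bits, offset below bit 49 */
	printf("payload queue first = 0x%llx\n", first);
	return 0;
}
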
@ -1910,8 +1938,8 @@ static void __init init_per_cpu_tunables(void)
bcp->complete_threshold = complete_threshold;
bcp->cong_response_us = congested_respns_us;
bcp->cong_reps = congested_reps;
bcp->disabled_period = sec_2_cycles(disabled_period);
bcp->giveup_limit = giveup_limit;
bcp->disabled_period = sec_2_cycles(disabled_period);
bcp->giveup_limit = giveup_limit;
spin_lock_init(&bcp->queue_lock);
spin_lock_init(&bcp->uvhub_lock);
spin_lock_init(&bcp->disable_lock);
@ -1940,7 +1968,7 @@ static int __init get_cpu_topology(int base_pnode,
pnode = uv_cpu_hub_info(cpu)->pnode;
if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
printk(KERN_EMERG
pr_emerg(
"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
return 1;
@ -1965,7 +1993,7 @@ static int __init get_cpu_topology(int base_pnode,
sdp->cpu_number[sdp->num_cpus] = cpu;
sdp->num_cpus++;
if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
printk(KERN_EMERG "%d cpus per socket invalid\n",
pr_emerg("%d cpus per socket invalid\n",
sdp->num_cpus);
return 1;
}
@ -2031,15 +2059,17 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
bcp->uvhub_version = 2;
else if (is_uv3_hub())
bcp->uvhub_version = 3;
else if (is_uv4_hub())
bcp->uvhub_version = 4;
else {
printk(KERN_EMERG "uvhub version not 1, 2 or 3\n");
pr_emerg("uvhub version not 1, 2, 3, or 4\n");
return 1;
}
bcp->uvhub_master = *hmasterp;
bcp->uvhub_cpu = uv_cpu_blade_processor_id(cpu);
if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
printk(KERN_EMERG "%d cpus per uvhub invalid\n",
pr_emerg("%d cpus per uvhub invalid\n",
bcp->uvhub_cpu);
return 1;
}
@ -2094,7 +2124,8 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
void *vp;
struct uvhub_desc *uvhub_descs;
timeout_us = calculate_destination_timeout();
if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
timeout_us = calculate_destination_timeout();
vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
uvhub_descs = (struct uvhub_desc *)vp;
@ -2134,6 +2165,15 @@ static int __init uv_bau_init(void)
if (!is_uv_system())
return 0;
if (is_uv4_hub())
ops = uv4_bau_ops;
else if (is_uv3_hub())
ops = uv123_bau_ops;
else if (is_uv2_hub())
ops = uv123_bau_ops;
else if (is_uv1_hub())
ops = uv123_bau_ops;
for_each_possible_cpu(cur_cpu) {
mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
@ -2149,7 +2189,9 @@ static int __init uv_bau_init(void)
uv_base_pnode = uv_blade_to_pnode(uvhub);
}
enable_timeouts();
/* software timeouts are not supported on UV4 */
if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
enable_timeouts();
if (init_per_cpu(nuvhubs, uv_base_pnode)) {
set_bau_off();

@ -60,8 +60,13 @@ static struct pci_platform_pm_ops mid_pci_platform_pm = {
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
/*
* This table should be in sync with the one in
* arch/x86/platform/intel-mid/pwr.c.
*/
static const struct x86_cpu_id lpss_cpu_ids[] = {
ICPU(INTEL_FAM6_ATOM_MERRIFIELD1),
ICPU(INTEL_FAM6_ATOM_PENWELL),
ICPU(INTEL_FAM6_ATOM_MERRIFIELD),
{}
};

@ -1154,8 +1154,8 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt),
RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht),
RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD1, rapl_defaults_tng),
RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD2, rapl_defaults_ann),
RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng),
RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann),
RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),