Merge branches 'cxgb4', 'ipoib' and 'qib' into for-next
commit 9e770044a0
9 changed files with 106 additions and 41 deletions
@@ -686,6 +686,7 @@ struct qib_devdata {
        void __iomem *piobase;
        /* mem-mapped pointer to base of user chip regs (if using WC PAT) */
        u64 __iomem *userbase;
        void __iomem *piovl15base; /* base of VL15 buffers, if not WC */
        /*
         * points to area where PIOavail registers will be DMA'ed.
         * Has to be on a page of it's own, because the page will be
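The new piovl15base field gives the driver a separate, non-write-combined mapping for the VL15 send buffers; the later hunks (qib_init_7322_variables, qib_remap_ioaddr32, qib_pcie_ddcleanup, the buffer-selection path) wire it up. Below is a minimal userspace sketch of the send-buffer layout arithmetic those hunks rely on, with plain integers standing in for the driver's __iomem pointers. The example values, the struct name, and the assumption that NUM_VL15_BUFS is 2 (suggested by the "2 * dd->align4k" window in qib_remap_ioaddr32) are illustrative, not taken from the commit.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the qib_devdata fields used below; values are made up. */
struct pio_layout {
        uint64_t physaddr;    /* PCI BAR start */
        uint64_t piobufbase;  /* low 32 bits: 2k buf offset, high 32 bits: 4k buf offset */
        uint32_t piobcnt2k, palign;   /* count and stride of 2 KB buffers */
        uint32_t piobcnt4k, align4k;  /* count and stride of 4 KB buffers */
};

#define NUM_VL15_BUFS 2  /* assumed value; the driver defines this per chip */

int main(void)
{
        struct pio_layout d = {
                .physaddr   = 0xf0000000ULL,
                .piobufbase = (0x00200000ULL << 32) | 0x00100000ULL,
                .piobcnt2k = 128, .palign  = 2048,
                .piobcnt4k = 32,  .align4k = 4096,
        };
        uint64_t offs2k = d.piobufbase & 0xffffffffULL;
        uint64_t offs4k = d.piobufbase >> 32;
        uint64_t tot4k  = (uint64_t)d.piobcnt4k * d.align4k;
        /* VL15 buffers start just after the 4k buffers (see qib_init_7322_variables). */
        uint64_t vl15off = d.physaddr + offs4k + tot4k;

        printf("2k bufs : chip offset 0x%llx, len 0x%llx\n",
               (unsigned long long)offs2k,
               (unsigned long long)d.piobcnt2k * d.palign);
        printf("4k bufs : chip offset 0x%llx, len 0x%llx\n",
               (unsigned long long)offs4k, (unsigned long long)tot4k);
        printf("VL15    : bus address 0x%llx, len 0x%x (mapped uncached)\n",
               (unsigned long long)vl15off, NUM_VL15_BUFS * d.align4k);
        return 0;
}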
@@ -742,15 +742,15 @@
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
#define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE
#define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE
#define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1
#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_LSB 0xE
#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_MSB 0xE
#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_RMASK 0x1
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
#define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC
#define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC
#define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1
#define QIB_7322_HwErrMask_statusValidNoEopMask_LSB 0xC
#define QIB_7322_HwErrMask_statusValidNoEopMask_MSB 0xC
#define QIB_7322_HwErrMask_statusValidNoEopMask_RMASK 0x1
#define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
#define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
#define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
@@ -796,15 +796,15 @@
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
#define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE
#define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE
#define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1
#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_LSB 0xE
#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_MSB 0xE
#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_RMASK 0x1
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
#define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC
#define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC
#define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1
#define QIB_7322_HwErrStatus_statusValidNoEop_LSB 0xC
#define QIB_7322_HwErrStatus_statusValidNoEop_MSB 0xC
#define QIB_7322_HwErrStatus_statusValidNoEop_RMASK 0x1
#define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
#define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
#define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
@@ -850,15 +850,15 @@
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1
#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_LSB 0xE
#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_MSB 0xE
#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_RMASK 0x1
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC
#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1
#define QIB_7322_HwErrClear_statusValidNoEopClear_LSB 0xC
#define QIB_7322_HwErrClear_statusValidNoEopClear_MSB 0xC
#define QIB_7322_HwErrClear_statusValidNoEopClear_RMASK 0x1
#define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
#define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
#define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
@@ -880,15 +880,15 @@
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1
#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_LSB 0xE
#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_MSB 0xE
#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_RMASK 0x1
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC
#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1
#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_LSB 0xC
#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_MSB 0xC
#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_RMASK 0x1

#define QIB_7322_EXTStatus_OFFS 0xC0
#define QIB_7322_EXTStatus_DEF 0x000000000000X000
@@ -233,6 +233,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
        u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
        u32 __iomem *map = NULL;
        u32 cnt = 0;
        u32 tot4k, offs4k;

        /* First, simplest case, offset is within the first map. */
        kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
@@ -250,7 +251,8 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
        if (dd->userbase) {
                /* If user regs mapped, they are after send, so set limit. */
                u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
                snd_lim = dd->uregbase;
                if (!dd->piovl15base)
                        snd_lim = dd->uregbase;
                krb32 = (u32 __iomem *)dd->userbase;
                if (offset >= dd->uregbase && offset < ulim) {
                        map = krb32 + (offset - dd->uregbase) / sizeof(u32);
@@ -277,14 +279,14 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
        /* If 4k buffers exist, account for them by bumping
         * appropriate limit.
         */
        tot4k = dd->piobcnt4k * dd->align4k;
        offs4k = dd->piobufbase >> 32;
        if (dd->piobcnt4k) {
                u32 tot4k = dd->piobcnt4k * dd->align4k;
                u32 offs4k = dd->piobufbase >> 32;
                if (snd_bottom > offs4k)
                        snd_bottom = offs4k;
                else {
                        /* 4k above 2k. Bump snd_lim, if needed*/
                        if (!dd->userbase)
                        if (!dd->userbase || dd->piovl15base)
                                snd_lim = offs4k + tot4k;
                }
        }
@@ -298,6 +300,15 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
                cnt = snd_lim - offset;
        }

        if (!map && offs4k && dd->piovl15base) {
                snd_lim = offs4k + tot4k + 2 * dd->align4k;
                if (offset >= (offs4k + tot4k) && offset < snd_lim) {
                        map = (u32 __iomem *)dd->piovl15base +
                                ((offset - (offs4k + tot4k)) / sizeof(u32));
                        cnt = snd_lim - offset;
                }
        }

mapped:
        if (cntp)
                *cntp = cnt;
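The block added above extends the diag remap path to the uncached VL15 window: offsets just past the 4k buffers are translated against piovl15base rather than the write-combined piobase. A small restatement of that branch as plain C, using ordinary pointers instead of __iomem; the function and parameter names are invented for this sketch.

#include <stddef.h>
#include <stdint.h>

/*
 * Sketch of the VL15 branch added to qib_remap_ioaddr32(): given a chip
 * offset, return a pointer into the separately mapped VL15 region, or
 * NULL if the offset falls outside it.  Mirrors the driver arithmetic:
 * the window spans [offs4k + tot4k, offs4k + tot4k + 2 * align4k).
 */
uint32_t *remap_vl15(uint32_t *piovl15base, uint32_t offset,
                     uint32_t offs4k, uint32_t tot4k,
                     uint32_t align4k, uint32_t *cntp)
{
        uint32_t snd_lim = offs4k + tot4k + 2 * align4k;

        if (!piovl15base || offset < offs4k + tot4k || offset >= snd_lim)
                return NULL;
        if (cntp)
                *cntp = snd_lim - offset;   /* bytes left in the window */
        return piovl15base + (offset - (offs4k + tot4k)) / sizeof(uint32_t);
}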
@@ -1355,8 +1355,7 @@ static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
        hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
        if (hwstat) {
                /* should just have PLL, clear all set, in an case */
                if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED)
                        qib_write_kreg(dd, kr_hwerrclear, hwstat);
                qib_write_kreg(dd, kr_hwerrclear, hwstat);
                qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
        }
@@ -543,7 +543,7 @@ struct vendor_txdds_ent {
static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
#define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */
#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

#define H1_FORCE_VAL 8
@@ -1100,9 +1100,9 @@ static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
        HWE_AUTO_P(SDmaMemReadErr, 1),
        HWE_AUTO_P(SDmaMemReadErr, 0),
        HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
        HWE_AUTO_P(IBCBusToSPCParityErr, 1),
        HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
        HWE_AUTO_P(statusValidNoEop, 1),
        HWE_AUTO_P(statusValidNoEop, 0),
        HWE_AUTO(statusValidNoEop),
        HWE_AUTO(LATriggered),
        { .mask = 0 }
};
@@ -4763,6 +4763,8 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
                SYM_MASK(IBPCSConfig_0, tx_rx_reset);

        val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
        qib_write_kreg(dd, kr_hwerrmask,
                       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
        qib_write_kreg_port(ppd, krp_ibcctrl_a,
                            ppd->cpspec->ibcctrl_a &
                            ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
@@ -4772,6 +4774,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
        qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
        qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
        qib_write_kreg(dd, kr_scratch, 0ULL);
        qib_write_kreg(dd, kr_hwerrclear,
                       SYM_MASK(HwErrClear, statusValidNoEopClear));
        qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
}

/*
@@ -5624,6 +5629,8 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
                        if (ppd->port != port || !ppd->link_speed_supported)
                                continue;
                        ppd->cpspec->no_eep = val;
                        if (seth1)
                                ppd->cpspec->h1_val = h1;
                        /* now change the IBC and serdes, overriding generic */
                        init_txdds_table(ppd, 1);
                        any++;
@@ -6064,9 +6071,9 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
                 * the "cable info" setup here. Can be overridden
                 * in adapter-specific routines.
                 */
                if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
                        if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
                                qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
                if (!(dd->flags & QIB_HAS_QSFP)) {
                        if (!IS_QMH(dd) && !IS_QME(dd))
                                qib_devinfo(dd->pcidev, "IB%u:%u: "
                                            "Unknown mezzanine card type\n",
                                            dd->unit, ppd->port);
                        cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
@@ -6119,9 +6126,25 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
        qib_set_ctxtcnt(dd);

        if (qib_wc_pat) {
                ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k);
                resource_size_t vl15off;
                /*
                 * We do not set WC on the VL15 buffers to avoid
                 * a rare problem with unaligned writes from
                 * interrupt-flushed store buffers, so we need
                 * to map those separately here. We can't solve
                 * this for the rarely used mtrr case.
                 */
                ret = init_chip_wc_pat(dd, 0);
                if (ret)
                        goto bail;

                /* vl15 buffers start just after the 4k buffers */
                vl15off = dd->physaddr + (dd->piobufbase >> 32) +
                          dd->piobcnt4k * dd->align4k;
                dd->piovl15base = ioremap_nocache(vl15off,
                                                  NUM_VL15_BUFS * dd->align4k);
                if (!dd->piovl15base)
                        goto bail;
        }
        qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
@@ -6932,6 +6955,8 @@ static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
        { 0, 0, 0, 11 }, /* QME7342 backplane settings */
        { 0, 0, 0, 11 }, /* QME7342 backplane settings */
        { 0, 0, 0, 11 }, /* QME7342 backplane settings */
        { 0, 0, 0, 3 }, /* QMH7342 backplane settings */
        { 0, 0, 0, 4 }, /* QMH7342 backplane settings */
};

static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
@@ -6947,6 +6972,8 @@ static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
        { 0, 0, 0, 13 }, /* QME7342 backplane settings */
        { 0, 0, 0, 13 }, /* QME7342 backplane settings */
        { 0, 0, 0, 13 }, /* QME7342 backplane settings */
        { 0, 0, 0, 9 }, /* QMH7342 backplane settings */
        { 0, 0, 0, 10 }, /* QMH7342 backplane settings */
};

static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
@@ -6962,6 +6989,8 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
        { 0, 1, 12, 6 }, /* QME7342 backplane setting */
        { 0, 1, 12, 7 }, /* QME7342 backplane setting */
        { 0, 1, 12, 8 }, /* QME7342 backplane setting */
        { 0, 1, 0, 10 }, /* QMH7342 backplane settings */
        { 0, 1, 0, 12 }, /* QMH7342 backplane settings */
};

static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
@@ -1059,7 +1059,7 @@ static int __init qlogic_ib_init(void)
                goto bail_dev;
        }

        qib_cq_wq = create_workqueue("qib_cq");
        qib_cq_wq = create_singlethread_workqueue("qib_cq");
        if (!qib_cq_wq) {
                ret = -ENOMEM;
                goto bail_wq;
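The qib_cq workqueue switch from create_workqueue() to create_singlethread_workqueue() trades the per-CPU worker threads that create_workqueue() historically spawned for a single worker, which is enough for completion-queue callbacks. A hedged sketch of the general pattern follows; the identifiers are illustrative, not the driver's own, and this is only an outline of the API usage of that era.

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_wq;     /* illustrative name */

static void example_fn(struct work_struct *work)
{
        /* deferred processing; runs in the workqueue's single thread */
}

static DECLARE_WORK(example_work, example_fn);

static int example_start(void)
{
        example_wq = create_singlethread_workqueue("example");
        if (!example_wq)
                return -ENOMEM;
        queue_work(example_wq, &example_work);  /* hand work to the queue */
        return 0;
}

static void example_stop(void)
{
        flush_workqueue(example_wq);    /* wait for queued work to finish */
        destroy_workqueue(example_wq);
}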
@@ -1289,8 +1289,18 @@ static int __devinit qib_init_one(struct pci_dev *pdev,

        if (qib_mini_init || initfail || ret) {
                qib_stop_timers(dd);
                flush_scheduled_work();
                for (pidx = 0; pidx < dd->num_pports; ++pidx)
                        dd->f_quiet_serdes(dd->pport + pidx);
                if (qib_mini_init)
                        goto bail;
                if (!j) {
                        (void) qibfs_remove(dd);
                        qib_device_remove(dd);
                }
                if (!ret)
                        qib_unregister_ib_device(dd);
                qib_postinit_cleanup(dd);
                if (initfail)
                        ret = initfail;
                goto bail;
@@ -1472,6 +1482,9 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
                dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
                unsigned i;

                /* clear for security and sanity on each use */
                memset(rcd->rcvegrbuf[chunk], 0, size);

                for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
                        dd->f_put_tid(dd, e + egroff +
                                      (u64 __iomem *)
@@ -1499,6 +1512,12 @@ bail:
        return -ENOMEM;
}

/*
 * Note: Changes to this routine should be mirrored
 * for the diagnostics routine qib_remap_ioaddr32().
 * There is also related code for VL15 buffers in qib_init_7322_variables().
 * The teardown code that unmaps is in qib_pcie_ddcleanup()
 */
int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
{
        u64 __iomem *qib_kregbase = NULL;
@@ -179,6 +179,8 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
        iounmap(dd->piobase);
        if (dd->userbase)
                iounmap(dd->userbase);
        if (dd->piovl15base)
                iounmap(dd->piovl15base);

        pci_disable_device(dd->pcidev);
        pci_release_regions(dd->pcidev);
@@ -340,9 +340,13 @@ rescan:
        if (i < dd->piobcnt2k)
                buf = (u32 __iomem *)(dd->pio2kbase +
                        i * dd->palign);
        else
        else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
                buf = (u32 __iomem *)(dd->pio4kbase +
                        (i - dd->piobcnt2k) * dd->align4k);
        else
                buf = (u32 __iomem *)(dd->piovl15base +
                        (i - (dd->piobcnt2k + dd->piobcnt4k)) *
                        dd->align4k);
        if (pbufnum)
                *pbufnum = i;
        dd->upd_pio_shadow = 0;
@@ -1163,7 +1163,7 @@ static ssize_t create_child(struct device *dev,

        return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

static ssize_t delete_child(struct device *dev,
                            struct device_attribute *attr,
@@ -1183,7 +1183,7 @@ static ssize_t delete_child(struct device *dev,
        return ret ? ret : count;

}
static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{