IB/ehca: Use proper GFP_ flags for get_zeroed_page()
This patch makes ehca pass the proper flag, i.e. GFP_ATOMIC or GFP_KERNEL as appropriate, when calling get_zeroed_page(), to prevent "BUG: scheduling while atomic...". The error does not cause a kernel panic, but it leaves ipoib unusable afterwards. It is reproducible on 2.6.20-rc4 by running ifconfig down during a flood ping test; I have not observed it in earlier releases, including 2.6.20-rc1. The error occurs when a QP event/IRQ is received and the ehca event handler allocates a control block/page to obtain the HCA error data block. Using GFP_ATOMIC when in interrupt context prevents the issue.

Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 98714cb161
commit f2d9136133

6 changed files with 16 additions and 16 deletions
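The fix threads a gfp_t argument through ehca_alloc_fw_ctrlblock() so that each call site states whether it is allowed to sleep: GFP_KERNEL in process context, GFP_ATOMIC in the interrupt-driven error path. The fragment below is a minimal standalone sketch of that rule, not the driver's actual code; the caller names query_something() and handle_event() are made up for illustration.

#include <linux/gfp.h>
#include <linux/errno.h>

/* Helper in the spirit of ehca_alloc_fw_ctrlblock(): the gfp flags are
 * supplied by the caller instead of being hard-coded to GFP_KERNEL. */
static void *alloc_ctrlblock(gfp_t flags)
{
        return (void *)get_zeroed_page(flags);
}

/* Hypothetical process-context caller: sleeping is allowed. */
static int query_something(void)
{
        void *block = alloc_ctrlblock(GFP_KERNEL);

        if (!block)
                return -ENOMEM;
        /* ... read attributes into the block ... */
        free_page((unsigned long)block);
        return 0;
}

/* Hypothetical caller running in interrupt/atomic context: it must pass
 * GFP_ATOMIC, otherwise get_zeroed_page() may sleep and the kernel logs
 * "BUG: scheduling while atomic". */
static int handle_event(void)
{
        void *block = alloc_ctrlblock(GFP_ATOMIC);

        if (!block)
                return -ENOMEM;
        /* ... fetch the HCA error data into the block ... */
        free_page((unsigned long)block);
        return 0;
}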
@@ -50,7 +50,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
                                               ib_device);
         struct hipz_query_hca *rblock;
 
-        rblock = ehca_alloc_fw_ctrlblock();
+        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
         if (!rblock) {
                 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                 return -ENOMEM;
@@ -110,7 +110,7 @@ int ehca_query_port(struct ib_device *ibdev,
                                               ib_device);
         struct hipz_query_port *rblock;
 
-        rblock = ehca_alloc_fw_ctrlblock();
+        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
         if (!rblock) {
                 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                 return -ENOMEM;
@@ -179,7 +179,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
                 return -EINVAL;
         }
 
-        rblock = ehca_alloc_fw_ctrlblock();
+        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
         if (!rblock) {
                 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                 return -ENOMEM;
@@ -212,7 +212,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
                 return -EINVAL;
         }
 
-        rblock = ehca_alloc_fw_ctrlblock();
+        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
         if (!rblock) {
                 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                 return -ENOMEM;
@@ -138,7 +138,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
         u64 *rblock;
         unsigned long block_count;
 
-        rblock = ehca_alloc_fw_ctrlblock();
+        rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
         if (!rblock) {
                 ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
                 ret = -ENOMEM;
@@ -180,10 +180,10 @@ int ehca_mmap_register(u64 physical,void **mapped,
 int ehca_munmap(unsigned long addr, size_t len);
 
 #ifdef CONFIG_PPC_64K_PAGES
-void *ehca_alloc_fw_ctrlblock(void);
+void *ehca_alloc_fw_ctrlblock(gfp_t flags);
 void ehca_free_fw_ctrlblock(void *ptr);
 #else
-#define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL))
+#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
 #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
 #endif
 
@@ -106,9 +106,9 @@ static struct timer_list poll_eqs_timer;
 #ifdef CONFIG_PPC_64K_PAGES
 static struct kmem_cache *ctblk_cache = NULL;
 
-void *ehca_alloc_fw_ctrlblock(void)
+void *ehca_alloc_fw_ctrlblock(gfp_t flags)
 {
-        void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL);
+        void *ret = kmem_cache_zalloc(ctblk_cache, flags);
         if (!ret)
                 ehca_gen_err("Out of memory for ctblk");
         return ret;
@@ -206,7 +206,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
         u64 h_ret;
         struct hipz_query_hca *rblock;
 
-        rblock = ehca_alloc_fw_ctrlblock();
+        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
         if (!rblock) {
                 ehca_gen_err("Cannot allocate rblock memory.");
                 return -ENOMEM;
@@ -258,7 +258,7 @@ static int init_node_guid(struct ehca_shca *shca)
         int ret = 0;
         struct hipz_query_hca *rblock;
 
-        rblock = ehca_alloc_fw_ctrlblock();
+        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
         if (!rblock) {
                 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
                 return -ENOMEM;
@@ -469,7 +469,7 @@ static ssize_t ehca_show_##name(struct device *dev, \
                                                                         \
         shca = dev->driver_data;                                        \
                                                                         \
-        rblock = ehca_alloc_fw_ctrlblock();                             \
+        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);                   \
         if (!rblock) {                                                  \
                 dev_err(dev, "Can't allocate rblock memory.");          \
                 return 0;                                               \
@@ -1013,7 +1013,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
         u32 i;
         u64 *kpage;
 
-        kpage = ehca_alloc_fw_ctrlblock();
+        kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
         if (!kpage) {
                 ehca_err(&shca->ib_device, "kpage alloc failed");
                 ret = -ENOMEM;
@@ -1124,7 +1124,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
         ehca_mrmw_map_acl(acl, &hipz_acl);
         ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
 
-        kpage = ehca_alloc_fw_ctrlblock();
+        kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
         if (!kpage) {
                 ehca_err(&shca->ib_device, "kpage alloc failed");
                 ret = -ENOMEM;
@@ -807,7 +807,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
         unsigned long spl_flags = 0;
 
         /* do query_qp to obtain current attr values */
-        mqpcb = ehca_alloc_fw_ctrlblock();
+        mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
         if (!mqpcb) {
                 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
                          "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
@@ -1273,7 +1273,7 @@ int ehca_query_qp(struct ib_qp *qp,
                 return -EINVAL;
         }
 
-        qpcb = ehca_alloc_fw_ctrlblock();
+        qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
         if (!qpcb) {
                 ehca_err(qp->device,"Out of memory for qpcb "
                          "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);