net/mlx5: Pages management commands via mlx5 ifc
Remove the old, manually created layout structs for the pages management commands and use the canonical mlx5_ifc structures and defines instead.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
commit a533ed5e17 (parent 20bb566bda)
1 changed file with 58 additions and 107 deletions
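For readers who have not seen the mlx5_ifc convention before, the sketch below spells out the command style this patch converts to. It mirrors the new mlx5_cmd_query_pages() body from the diff; the function name is invented for illustration, and the snippet assumes the usual mlx5 core driver headers rather than being a standalone, compilable unit.

/* Command mailboxes become plain u32 arrays sized from the mlx5_ifc
 * layouts; fields are written and read by name, so the hand-packed
 * inbox/outbox structs and the explicit cpu_to_be*() conversions go away.
 */
static int query_pages_sketch(struct mlx5_core_dev *dev, u16 *func_id,
                              s32 *npages, int boot)
{
        u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0}; /* layout size in 32-bit words */
        u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
        int err;

        /* MLX5_SET() places each value at the right offset and byte order */
        MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
        MLX5_SET(query_pages_in, in, op_mod, boot ?
                 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
                 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);

        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        err = err ? : mlx5_cmd_status_to_err_v2(out); /* fold FW status into an errno */
        if (err)
                return err;

        /* MLX5_GET() is the read-side counterpart */
        *npages = MLX5_GET(query_pages_out, out, num_pages);
        *func_id = MLX5_GET(query_pages_out, out, function_id);
        return 0;
}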
@@ -44,12 +44,6 @@ enum {
         MLX5_PAGES_TAKE = 2
 };
 
-enum {
-        MLX5_BOOT_PAGES = 1,
-        MLX5_INIT_PAGES = 2,
-        MLX5_POST_INIT_PAGES = 3
-};
-
 struct mlx5_pages_req {
         struct mlx5_core_dev *dev;
         u16 func_id;
@@ -67,33 +61,6 @@ struct fw_page {
         unsigned free_count;
 };
 
-struct mlx5_query_pages_inbox {
-        struct mlx5_inbox_hdr hdr;
-        u8 rsvd[8];
-};
-
-struct mlx5_query_pages_outbox {
-        struct mlx5_outbox_hdr hdr;
-        __be16 rsvd;
-        __be16 func_id;
-        __be32 num_pages;
-};
-
-struct mlx5_manage_pages_inbox {
-        struct mlx5_inbox_hdr hdr;
-        __be16 rsvd;
-        __be16 func_id;
-        __be32 num_entries;
-        __be64 pas[0];
-};
-
-struct mlx5_manage_pages_outbox {
-        struct mlx5_outbox_hdr hdr;
-        __be32 num_entries;
-        u8 rsvd[4];
-        __be64 pas[0];
-};
-
 enum {
         MAX_RECLAIM_TIME_MSECS = 5000,
         MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
@@ -167,24 +134,22 @@ static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                                 s32 *npages, int boot)
 {
-        struct mlx5_query_pages_inbox in;
-        struct mlx5_query_pages_outbox out;
+        u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
+        u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
         int err;
 
-        memset(&in, 0, sizeof(in));
-        memset(&out, 0, sizeof(out));
-        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
-        in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+        MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
+        MLX5_SET(query_pages_in, in, op_mod, boot ?
+                 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
+                 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
 
-        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+        err = err ? : mlx5_cmd_status_to_err_v2(out);
         if (err)
                 return err;
 
-        if (out.hdr.status)
-                return mlx5_cmd_status_to_err(&out.hdr);
-
-        *npages = be32_to_cpu(out.num_pages);
-        *func_id = be16_to_cpu(out.func_id);
+        *npages = MLX5_GET(query_pages_out, out, num_pages);
+        *func_id = MLX5_GET(query_pages_out, out, function_id);
 
         return err;
 }
@@ -280,46 +245,37 @@ out_alloc:
 
 static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
 {
-        struct mlx5_manage_pages_inbox *in;
-        struct mlx5_manage_pages_outbox out;
+        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
         int err;
 
-        in = kzalloc(sizeof(*in), GFP_KERNEL);
-        if (!in)
-                return;
-
-        memset(&out, 0, sizeof(out));
-        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
-        in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
-        in->func_id = cpu_to_be16(func_id);
-        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
-        if (!err)
-                err = mlx5_cmd_status_to_err(&out.hdr);
-
+        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
+        MLX5_SET(manage_pages_in, in, function_id, func_id);
+        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+        err = err ? : mlx5_cmd_status_to_err_v2(out);
         if (err)
-                mlx5_core_warn(dev, "page notify failed\n");
-
-        kfree(in);
+                mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
+                               func_id, err);
 }
 
 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                       int notify_fail)
 {
-        struct mlx5_manage_pages_inbox *in;
-        struct mlx5_manage_pages_outbox out;
-        int inlen;
+        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+        int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
         u64 addr;
         int err;
+        u32 *in;
         int i;
 
-        inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
+        inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
         in = mlx5_vzalloc(inlen);
         if (!in) {
                 err = -ENOMEM;
                 mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
                 goto out_free;
         }
-        memset(&out, 0, sizeof(out));
 
         for (i = 0; i < npages; i++) {
 retry:
@@ -332,27 +288,22 @@ retry:
 
                         goto retry;
                 }
-                in->pas[i] = cpu_to_be64(addr);
+                MLX5_SET64(manage_pages_in, in, pas[i], addr);
         }
 
-        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
-        in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
-        in->func_id = cpu_to_be16(func_id);
-        in->num_entries = cpu_to_be32(npages);
-        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
+        MLX5_SET(manage_pages_in, in, function_id, func_id);
+        MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
+        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+        err = err ? : mlx5_cmd_status_to_err_v2(out);
         if (err) {
                 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
                                func_id, npages, err);
                 goto out_4k;
         }
 
-        err = mlx5_cmd_status_to_err(&out.hdr);
-        if (err) {
-                mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
-                               func_id, npages, out.hdr.status);
-                goto out_4k;
-        }
-
         dev->priv.fw_pages += npages;
         if (func_id)
                 dev->priv.vfs_pages += npages;
@@ -364,7 +315,7 @@ retry:
 
 out_4k:
         for (i--; i >= 0; i--)
-                free_4k(dev, be64_to_cpu(in->pas[i]));
+                free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
 out_free:
         kvfree(in);
         if (notify_fail)
@@ -373,64 +324,65 @@ out_free:
 }
 
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
-                             struct mlx5_manage_pages_inbox *in, int in_size,
-                             struct mlx5_manage_pages_outbox *out, int out_size)
+                             u32 *in, int in_size, u32 *out, int out_size)
 {
         struct fw_page *fwp;
         struct rb_node *p;
         u32 npages;
         u32 i = 0;
 
-        if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
-                return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
-                                                  (u32 *)out, out_size);
+        if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+                int err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
 
-        npages = be32_to_cpu(in->num_entries);
+                return err ? : mlx5_cmd_status_to_err_v2(out);
+        }
 
+        /* No hard feelings, we want our pages back! */
+        npages = MLX5_GET(manage_pages_in, in, input_num_entries);
+
         p = rb_first(&dev->priv.page_root);
         while (p && i < npages) {
                 fwp = rb_entry(p, struct fw_page, rb_node);
-                out->pas[i] = cpu_to_be64(fwp->addr);
+                MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
                 p = rb_next(p);
                 i++;
         }
 
-        out->num_entries = cpu_to_be32(i);
+        MLX5_SET(manage_pages_out, out, output_num_entries, i);
         return 0;
 }
 
 static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                          int *nclaimed)
 {
-        struct mlx5_manage_pages_inbox in;
-        struct mlx5_manage_pages_outbox *out;
+        int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
+        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
         int num_claimed;
-        int outlen;
-        u64 addr;
+        u32 *out;
         int err;
         int i;
 
         if (nclaimed)
                 *nclaimed = 0;
 
-        memset(&in, 0, sizeof(in));
-        outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
+        outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
         out = mlx5_vzalloc(outlen);
         if (!out)
                 return -ENOMEM;
 
-        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
-        in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
-        in.func_id = cpu_to_be16(func_id);
-        in.num_entries = cpu_to_be32(npages);
+        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
+        MLX5_SET(manage_pages_in, in, function_id, func_id);
+        MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
         mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
-        err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
+        err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
         if (err) {
                 mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
                 goto out_free;
         }
 
-        num_claimed = be32_to_cpu(out->num_entries);
+        num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
         if (num_claimed > npages) {
                 mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
                                num_claimed, npages);
@@ -438,10 +390,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                 goto out_free;
         }
 
-        for (i = 0; i < num_claimed; i++) {
-                addr = be64_to_cpu(out->pas[i]);
-                free_4k(dev, addr);
-        }
-
+        for (i = 0; i < num_claimed; i++)
+                free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+
         if (nclaimed)
                 *nclaimed = num_claimed;
@@ -518,8 +469,8 @@ static int optimal_reclaimed_pages(void)
         int ret;
 
         ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
-               sizeof(struct mlx5_manage_pages_outbox)) /
-               FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);
+               MLX5_ST_SZ_BYTES(manage_pages_out)) /
+               MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
 
         return ret;
 }
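The variable-length MANAGE_PAGES mailboxes are sized the same way. As a rough sketch (again assuming the mlx5 core headers, with error handling trimmed and the helper name invented for illustration), a caller that hands npages 4K chunks to firmware sizes the inbox from the fixed layout plus one pas[] slot per page, exactly as give_pages() does above:

static int give_pages_sketch(struct mlx5_core_dev *dev, u16 func_id,
                             int npages, const u64 *addrs)
{
        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
        int inlen = MLX5_ST_SZ_BYTES(manage_pages_in); /* fixed part, in bytes */
        u32 *in;
        int err;
        int i;

        /* one 64-bit pas[] entry per page handed to firmware */
        inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
        MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
        MLX5_SET(manage_pages_in, in, function_id, func_id);
        MLX5_SET(manage_pages_in, in, input_num_entries, npages);
        for (i = 0; i < npages; i++)
                MLX5_SET64(manage_pages_in, in, pas[i], addrs[i]); /* 64-bit fields use the 64-bit accessor */

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        err = err ? : mlx5_cmd_status_to_err_v2(out);
        kvfree(in);
        return err;
}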