e76239a374
Dispatching a report zones command through the request queue is a major pain due to the command reply payload rewriting necessary. Given that blkdev_report_zones() executes everything synchronously, implement report zones as a block device file operation instead, allowing major simplification of the code in many places.

sd, null_blk, dm-linear and dm-flakey are the only block device drivers that expose zoned block devices, so these drivers are modified to provide the device-side implementation of the report_zones() block device file operation. For device mapper, a new report_zones() target type operation is defined so that upper block layer calls to blkdev_report_zones() can be propagated down to the underlying devices of the dm targets. An implementation of this new operation is added to the dm-linear and dm-flakey targets.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[Damien]
* Changed method block_device argument to gendisk
* Various bug fixes and improvements
* Added support for null_blk, dm-linear and dm-flakey.
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
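To illustrate the synchronous interface described above, here is a minimal sketch (not part of the patch) of how an in-kernel caller might query the first few zones of a zoned block device. The helper name report_first_zones() and the fixed-size zone array are purely illustrative; the blkdev_report_zones() signature matches the one used by linear_report_zones() in the file below.

static int report_first_zones(struct block_device *bdev)
{
        /* Illustrative only: a small on-stack buffer keeps the sketch simple. */
        struct blk_zone zones[8];
        unsigned int i, nr_zones = ARRAY_SIZE(zones);
        int ret;

        /* Synchronous report: on success, nr_zones holds the number of zones filled. */
        ret = blkdev_report_zones(bdev, 0, zones, &nr_zones, GFP_KERNEL);
        if (ret)
                return ret;

        for (i = 0; i < nr_zones; i++)
                pr_info("zone %u: start %llu, len %llu, wp %llu\n", i,
                        (unsigned long long)zones[i].start,
                        (unsigned long long)zones[i].len,
                        (unsigned long long)zones[i].wp);
        return 0;
}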
251 lines
5.9 KiB
C
/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "linear"

/*
 * Linear: maps a linear range of a device.
 */
struct linear_c {
        struct dm_dev *dev;
        sector_t start;
};

/*
 * Construct a linear mapping: <dev_path> <offset>
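 *
 * Example table line: "0 409600 linear /dev/sdb 0"
 * (a 409600-sector target mapped onto /dev/sdb starting at sector 0).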
 */
static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct linear_c *lc;
        unsigned long long tmp;
        char dummy;
        int ret;

        if (argc != 2) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        lc = kmalloc(sizeof(*lc), GFP_KERNEL);
        if (lc == NULL) {
                ti->error = "Cannot allocate linear context";
                return -ENOMEM;
        }

        ret = -EINVAL;
        if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
                ti->error = "Invalid device sector";
                goto bad;
        }
        lc->start = tmp;

        ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
        if (ret) {
                ti->error = "Device lookup failed";
                goto bad;
        }

        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_secure_erase_bios = 1;
        ti->num_write_same_bios = 1;
        ti->num_write_zeroes_bios = 1;
        ti->private = lc;
        return 0;

bad:
        kfree(lc);
        return ret;
}

static void linear_dtr(struct dm_target *ti)
{
        struct linear_c *lc = (struct linear_c *) ti->private;

        dm_put_device(ti, lc->dev);
        kfree(lc);
}
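
/*
 * Map a sector in the target's logical address space to a sector on the
 * underlying device: dm_target_offset() subtracts the target's start
 * within the dm table (ti->begin), and lc->start adds the <offset> given
 * on the table line.
 */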
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
        struct linear_c *lc = ti->private;

        return lc->start + dm_target_offset(ti, bi_sector);
}

static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
        struct linear_c *lc = ti->private;

        bio_set_dev(bio, lc->dev->bdev);
        if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
                bio->bi_iter.bi_sector =
                        linear_map_sector(ti, bio->bi_iter.bi_sector);
}

static int linear_map(struct dm_target *ti, struct bio *bio)
{
        linear_map_bio(ti, bio);

        return DM_MAPIO_REMAPPED;
}

static void linear_status(struct dm_target *ti, status_type_t type,
                          unsigned status_flags, char *result, unsigned maxlen)
{
        struct linear_c *lc = (struct linear_c *) ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                snprintf(result, maxlen, "%s %llu", lc->dev->name,
                         (unsigned long long)lc->start);
                break;
        }
}

static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
        struct linear_c *lc = (struct linear_c *) ti->private;
        struct dm_dev *dev = lc->dev;

        *bdev = dev->bdev;

        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
        if (lc->start ||
            ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
                return 1;
        return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
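/*
 * Report the zones of the device backing this target and remap the
 * reported zone descriptors (zone start, write pointer) back into the
 * target's logical sector space.  This is reached when a blkdev_report_zones()
 * call on the mapped device is propagated down through the new
 * report_zones() target type operation.
 */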
static int linear_report_zones(struct dm_target *ti, sector_t sector,
                               struct blk_zone *zones, unsigned int *nr_zones,
                               gfp_t gfp_mask)
{
        struct linear_c *lc = (struct linear_c *) ti->private;
        int ret;

        /* Do report and remap it */
        ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
                                  zones, nr_zones, gfp_mask);
        if (ret != 0)
                return ret;

        if (*nr_zones)
                dm_remap_zone_report(ti, lc->start, zones, nr_zones);
        return 0;
}
#endif

static int linear_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
{
        struct linear_c *lc = ti->private;

        return fn(ti, lc->dev, lc->start, ti->len, data);
}

#if IS_ENABLED(CONFIG_DAX_DRIVER)
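/*
 * DAX support: translate the page offset within the target to a page
 * offset in the underlying device and forward the access or copy to that
 * device's dax_device.
 */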
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, void **kaddr, pfn_t *pfn)
{
        long ret;
        struct linear_c *lc = ti->private;
        struct block_device *bdev = lc->dev->bdev;
        struct dax_device *dax_dev = lc->dev->dax_dev;
        sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

        dev_sector = linear_map_sector(ti, sector);
        ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
        if (ret)
                return ret;
        return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}

static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        struct linear_c *lc = ti->private;
        struct block_device *bdev = lc->dev->bdev;
        struct dax_device *dax_dev = lc->dev->dax_dev;
        sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

        dev_sector = linear_map_sector(ti, sector);
        if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
                return 0;
        return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}

static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        struct linear_c *lc = ti->private;
        struct block_device *bdev = lc->dev->bdev;
        struct dax_device *dax_dev = lc->dev->dax_dev;
        sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

        dev_sector = linear_map_sector(ti, sector);
        if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
                return 0;
        return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}

#else
#define linear_dax_direct_access NULL
#define linear_dax_copy_from_iter NULL
#define linear_dax_copy_to_iter NULL
#endif

static struct target_type linear_target = {
        .name = "linear",
        .version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
        .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
        .report_zones = linear_report_zones,
#else
        .features = DM_TARGET_PASSES_INTEGRITY,
#endif
        .module = THIS_MODULE,
        .ctr = linear_ctr,
        .dtr = linear_dtr,
        .map = linear_map,
        .status = linear_status,
        .prepare_ioctl = linear_prepare_ioctl,
        .iterate_devices = linear_iterate_devices,
        .direct_access = linear_dax_direct_access,
        .dax_copy_from_iter = linear_dax_copy_from_iter,
        .dax_copy_to_iter = linear_dax_copy_to_iter,
};

int __init dm_linear_init(void)
{
        int r = dm_register_target(&linear_target);

        if (r < 0)
                DMERR("register failed %d", r);

        return r;
}

void dm_linear_exit(void)
{
        dm_unregister_target(&linear_target);
}