libnvdimm fixes v5.2-rc2

Merge tag 'libnvdimm-fixes-5.2-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm fixes from Dan Williams:

 - Fix a regression that disabled device-mapper dax support

 - Remove unnecessary hardened-user-copy overhead (>30%) for dax
   read(2)/write(2)

 - Fix some compilation warnings

* tag 'libnvdimm-fixes-5.2-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  libnvdimm/pmem: Bypass CONFIG_HARDENED_USERCOPY overhead
  dax: Arrange for dax_supported check to span multiple devices
  libnvdimm: Fix compilation warnings with W=1
commit b2ad81363f

10 changed files with 129 additions and 43 deletions
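The dax changes below all hang off one idea: "is this device usable for
fsdax?" becomes a per-driver operation, (*dax_supported)() in struct
dax_operations, instead of a single top-level check, so stacked drivers
like device-mapper can validate every device they sit on. A condensed
sketch of the resulting call chain, pieced together from the hunks that
follow (locking and error paths elided):

bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len)
{
	if (!dax_alive(dax_dev))
		return false;

	/*
	 * pmem and dcssblk plug in generic_fsdax_supported();
	 * device-mapper plugs in dm_dax_supported(), which repeats this
	 * check for every device span in the live table.
	 */
	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
			start, len);
}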
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -73,22 +73,12 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
 EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
 #endif
 
-/**
- * __bdev_dax_supported() - Check if the device supports dax for filesystem
- * @bdev: block device to check
- * @blocksize: The block size of the device
- *
- * This is a library function for filesystems to check if the block device
- * can be mounted with dax option.
- *
- * Return: true if supported, false if unsupported
- */
-bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
+bool __generic_fsdax_supported(struct dax_device *dax_dev,
+		struct block_device *bdev, int blocksize, sector_t start,
+		sector_t sectors)
 {
-	struct dax_device *dax_dev;
 	bool dax_enabled = false;
 	pgoff_t pgoff, pgoff_end;
-	struct request_queue *q;
 	char buf[BDEVNAME_SIZE];
 	void *kaddr, *end_kaddr;
 	pfn_t pfn, end_pfn;
@@ -102,21 +92,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 		return false;
 	}
 
-	q = bdev_get_queue(bdev);
-	if (!q || !blk_queue_dax(q)) {
-		pr_debug("%s: error: request queue doesn't support dax\n",
-				bdevname(bdev, buf));
-		return false;
-	}
-
-	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
+	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
 	if (err) {
 		pr_debug("%s: error: unaligned partition for dax\n",
 				bdevname(bdev, buf));
 		return false;
 	}
 
-	last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8;
+	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
 	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
 	if (err) {
 		pr_debug("%s: error: unaligned partition for dax\n",
@@ -124,20 +107,11 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 		return false;
 	}
 
-	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
-	if (!dax_dev) {
-		pr_debug("%s: error: device does not support dax\n",
-				bdevname(bdev, buf));
-		return false;
-	}
-
 	id = dax_read_lock();
 	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
 	dax_read_unlock(id);
 
-	put_dax(dax_dev);
-
 	if (len < 1 || len2 < 1) {
 		pr_debug("%s: error: dax access failed (%ld)\n",
 				bdevname(bdev, buf), len < 1 ? len : len2);
@@ -178,6 +152,49 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 	}
 	return true;
 }
+EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
+
+/**
+ * __bdev_dax_supported() - Check if the device supports dax for filesystem
+ * @bdev: block device to check
+ * @blocksize: The block size of the device
+ *
+ * This is a library function for filesystems to check if the block device
+ * can be mounted with dax option.
+ *
+ * Return: true if supported, false if unsupported
+ */
+bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
+{
+	struct dax_device *dax_dev;
+	struct request_queue *q;
+	char buf[BDEVNAME_SIZE];
+	bool ret;
+	int id;
+
+	q = bdev_get_queue(bdev);
+	if (!q || !blk_queue_dax(q)) {
+		pr_debug("%s: error: request queue doesn't support dax\n",
+				bdevname(bdev, buf));
+		return false;
+	}
+
+	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+	if (!dax_dev) {
+		pr_debug("%s: error: device does not support dax\n",
+				bdevname(bdev, buf));
+		return false;
+	}
+
+	id = dax_read_lock();
+	ret = dax_supported(dax_dev, bdev, blocksize, 0,
+			i_size_read(bdev->bd_inode) / 512);
+	dax_read_unlock(id);
+
+	put_dax(dax_dev);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(__bdev_dax_supported);
 #endif
 
@@ -303,6 +320,15 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 }
 EXPORT_SYMBOL_GPL(dax_direct_access);
 
+bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+		int blocksize, sector_t start, sector_t len)
+{
+	if (!dax_alive(dax_dev))
+		return false;
+
+	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
+}
+
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i)
 {
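Note that filesystems keep calling bdev_dax_supported() exactly as before;
only the plumbing underneath changed. An illustrative mount-time caller,
modeled on how ext4/xfs gate their "-o dax" option (the function name and
error handling here are hypothetical, not part of this patch):

#include <linux/dax.h>
#include <linux/fs.h>

static int example_check_dax_option(struct super_block *sb)
{
	/*
	 * Ask the owning driver whether the whole device span behind
	 * sb->s_bdev can be mapped for fsdax at this block size.
	 */
	if (!bdev_dax_supported(sb->s_bdev, sb->s_blocksize))
		return -EINVAL; /* refuse, or silently drop, the dax option */
	return 0;
}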
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -880,13 +880,17 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 }
 EXPORT_SYMBOL_GPL(dm_table_set_type);
 
+/* validate the dax capability of the target device span */
 static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
 			sector_t start, sector_t len, void *data)
 {
-	return bdev_dax_supported(dev->bdev, PAGE_SIZE);
+	int blocksize = *(int *) data;
+
+	return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
+				       start, len);
 }
 
-static bool dm_table_supports_dax(struct dm_table *t)
+bool dm_table_supports_dax(struct dm_table *t, int blocksize)
 {
 	struct dm_target *ti;
 	unsigned i;
@@ -899,7 +903,8 @@ static bool dm_table_supports_dax(struct dm_table *t)
 			return false;
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_supports_dax, NULL))
+		    !ti->type->iterate_devices(ti, device_supports_dax,
+					       &blocksize))
 			return false;
 	}
 
@@ -979,7 +984,7 @@ static int dm_table_determine_type(struct dm_table *t)
 verify_bio_based:
 	/* We must use this table as bio-based */
 	t->type = DM_TYPE_BIO_BASED;
-	if (dm_table_supports_dax(t) ||
+	if (dm_table_supports_dax(t, PAGE_SIZE) ||
 	    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
 		t->type = DM_TYPE_DAX_BIO_BASED;
 	} else {
@@ -1905,7 +1910,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	}
 	blk_queue_write_cache(q, wc, fua);
 
-	if (dm_table_supports_dax(t))
+	if (dm_table_supports_dax(t, PAGE_SIZE))
 		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	else
 		blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
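The device-mapper fix hinges on dm_table_supports_dax() passing each
target's real (start, len) span and the caller's block size down through
iterate_devices(), rather than the old NULL payload. For orientation, the
full shape of the function as a sketch (the loop scaffolding outside these
hunks is reconstructed from the surrounding v5.2 source and may differ in
detail):

bool dm_table_supports_dax(struct dm_table *t, int blocksize)
{
	struct dm_target *ti;
	unsigned i;

	/*
	 * Every target, and every device span under every target, must
	 * pass the per-driver dax_supported check.
	 */
	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->direct_access)
			return false;

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, device_supports_dax,
					       &blocksize))
			return false;
	}

	return true;
}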
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1107,6 +1107,25 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 	return ret;
 }
 
+static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+		int blocksize, sector_t start, sector_t len)
+{
+	struct mapped_device *md = dax_get_private(dax_dev);
+	struct dm_table *map;
+	int srcu_idx;
+	bool ret;
+
+	map = dm_get_live_table(md, &srcu_idx);
+	if (!map)
+		return false;
+
+	ret = dm_table_supports_dax(map, blocksize);
+
+	dm_put_live_table(md, srcu_idx);
+
+	return ret;
+}
+
 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 		void *addr, size_t bytes, struct iov_iter *i)
 {
@@ -3194,6 +3213,7 @@ static const struct block_device_operations dm_blk_dops = {
 
 static const struct dax_operations dm_dax_ops = {
 	.direct_access = dm_dax_direct_access,
+	.dax_supported = dm_dax_supported,
 	.copy_from_iter = dm_dax_copy_from_iter,
 	.copy_to_iter = dm_dax_copy_to_iter,
 };
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -72,6 +72,7 @@ bool dm_table_bio_based(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
+bool dm_table_supports_dax(struct dm_table *t, int blocksize);
 
 void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -642,7 +642,7 @@ static struct attribute *nd_device_attributes[] = {
 	NULL,
 };
 
-/**
+/*
  * nd_device_attribute_group - generic attributes for all devices on an nd bus
  */
 struct attribute_group nd_device_attribute_group = {
@@ -671,7 +671,7 @@ static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
 	return a->mode;
 }
 
-/**
+/*
  * nd_numa_attribute_group - NUMA attributes for all devices on an nd bus
  */
 struct attribute_group nd_numa_attribute_group = {
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -25,6 +25,8 @@ static guid_t nvdimm_btt2_guid;
 static guid_t nvdimm_pfn_guid;
 static guid_t nvdimm_dax_guid;
 
+static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
+
 static u32 best_seq(u32 a, u32 b)
 {
 	a &= NSINDEX_SEQ_MASK;
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -38,8 +38,6 @@ enum {
 	ND_NSINDEX_INIT = 0x1,
 };
 
-static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
-
 /**
  * struct nd_namespace_index - label set superblock
  * @sig: NAMESPACE_INDEX\0
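These two label hunks are the other half of the W=1 cleanup (the bus.c
hunks above, which downgrade /** to /* on comments that are not valid
kernel-doc, are the first half). A static const array defined in a header
is duplicated into every translation unit that includes it, and gcc's
-Wunused-const-variable, which the kernel's W=1 build enables, warns in
every unit that never references it, so the definition moves into label.c,
its only user. A minimal reproduction with hypothetical file names:

/* sig.h */
static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";

/*
 * user.c includes sig.h but never reads the array:
 *
 *   $ gcc -Wunused-const-variable -c user.c
 *   warning: 'NSINDEX_SIGNATURE' defined but not used
 */
#include "sig.h"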
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -281,20 +281,27 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
 	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
 }
 
+/*
+ * Use the 'no check' versions of copy_from_iter_flushcache() and
+ * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. Bounds
+ * checking, both file offset and device offset, is handled by
+ * dax_iomap_actor()
+ */
 static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 		void *addr, size_t bytes, struct iov_iter *i)
 {
-	return copy_from_iter_flushcache(addr, bytes, i);
+	return _copy_from_iter_flushcache(addr, bytes, i);
 }
 
 static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
 		void *addr, size_t bytes, struct iov_iter *i)
 {
-	return copy_to_iter_mcsafe(addr, bytes, i);
+	return _copy_to_iter_mcsafe(addr, bytes, i);
 }
 
 static const struct dax_operations pmem_dax_ops = {
 	.direct_access = pmem_dax_direct_access,
+	.dax_supported = generic_fsdax_supported,
 	.copy_from_iter = pmem_copy_from_iter,
 	.copy_to_iter = pmem_copy_to_iter,
 };
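This pmem hunk is the >30% overhead fix from the summary above.
copy_from_iter_flushcache() and copy_to_iter_mcsafe() are checked wrappers
that run check_copy_size() on every call, and with
CONFIG_HARDENED_USERCOPY=y that check performs a per-call object-size
walk. Because dax_iomap_actor() has already bounds-checked both the file
offset and the device offset, pmem can safely call the underscored "no
check" variants. The wrapper shape, paraphrased from include/linux/uio.h
of this era (not part of this patch):

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	/* check_copy_size() is where HARDENED_USERCOPY hooks in. */
	if (unlikely(!check_copy_size(addr, bytes, false)))
		return 0;
	return _copy_from_iter_flushcache(addr, bytes, i);
}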
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -59,6 +59,7 @@ static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
 
 static const struct dax_operations dcssblk_dax_ops = {
 	.direct_access = dcssblk_dax_direct_access,
+	.dax_supported = generic_fsdax_supported,
 	.copy_from_iter = dcssblk_dax_copy_from_iter,
 	.copy_to_iter = dcssblk_dax_copy_to_iter,
 };
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -19,6 +19,12 @@ struct dax_operations {
 	 */
 	long (*direct_access)(struct dax_device *, pgoff_t, long,
 			void **, pfn_t *);
+	/*
+	 * Validate whether this device is usable as an fsdax backing
+	 * device.
+	 */
+	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
+			sector_t, sector_t);
 	/* copy_from_iter: required operation for fs-dax direct-i/o */
 	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
 			struct iov_iter *);
@@ -75,6 +81,17 @@ static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
 	return __bdev_dax_supported(bdev, blocksize);
 }
 
+bool __generic_fsdax_supported(struct dax_device *dax_dev,
+		struct block_device *bdev, int blocksize, sector_t start,
+		sector_t sectors);
+static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
+		struct block_device *bdev, int blocksize, sector_t start,
+		sector_t sectors)
+{
+	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
+			sectors);
+}
+
 static inline struct dax_device *fs_dax_get_by_host(const char *host)
 {
 	return dax_get_by_host(host);
@@ -99,6 +116,13 @@ static inline bool bdev_dax_supported(struct block_device *bdev,
 	return false;
 }
 
+static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
+		struct block_device *bdev, int blocksize, sector_t start,
+		sector_t sectors)
+{
+	return false;
+}
+
 static inline struct dax_device *fs_dax_get_by_host(const char *host)
 {
 	return NULL;
@@ -142,6 +166,8 @@ bool dax_alive(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 		void **kaddr, pfn_t *pfn);
+bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+		int blocksize, sector_t start, sector_t len);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i);
 size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,