nvme: remove virtual boundary for sgl capable devices
The nvme virtual boundary is only required for the PRP format. Devices
that can use SGL for DMA don't need it for IO queues. Drop reporting it
for such devices; rdma fabrics controllers will continue to use the
limit as they currently don't report any boundary requirements, but tcp
and fc never needed it in the first place, so they get to report no
virtual boundary.

Applications may continue to align to the same virtual boundaries for
optimization purposes if they want, and the driver will continue to
decide whether to use the PRP format the same as before if the IO
allows it.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
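For readers coming at this from the block layer side: virt_boundary_mask is the knob that makes bio splitting and merging reject any segment pair that leaves a gap inside a boundary-sized chunk, which is exactly the alignment the PRP format demands. A minimal standalone sketch of that gap test (illustration only, assuming 4KiB controller pages; this is not the kernel's implementation):

#include <stdbool.h>
#include <stdint.h>

#define NVME_CTRL_PAGE_SIZE 4096u	/* PRP chunk size assumed here */

/*
 * Two adjacent segments can share a request only if no gap falls inside
 * the boundary mask: the first must end on the boundary and the second
 * must start on it.
 */
static bool gap_between(uint64_t prev_end, uint64_t next_start,
			uint64_t boundary_mask)
{
	if (!boundary_mask)	/* mask 0: SGL-capable IO queues after this patch */
		return false;
	return (prev_end & boundary_mask) || (next_start & boundary_mask);
}

With boundary_mask = NVME_CTRL_PAGE_SIZE - 1 this reproduces the PRP rule; with 0, every pair merges, which is the new behaviour for SGL-capable IO queues.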
drivers/nvme/host/apple.c
@@ -1283,6 +1283,7 @@ static const struct nvme_ctrl_ops nvme_ctrl_ops = {
 	.reg_read64		= apple_nvme_reg_read64,
 	.free_ctrl		= apple_nvme_free_ctrl,
 	.get_address		= apple_nvme_get_address,
+	.get_virt_boundary	= nvme_get_virt_boundary,
 };
 
 static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
drivers/nvme/host/core.c
@@ -2069,13 +2069,13 @@ static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
 }
 
 static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
-		struct queue_limits *lim)
+		struct queue_limits *lim, bool is_admin)
 {
 	lim->max_hw_sectors = ctrl->max_hw_sectors;
 	lim->max_segments = min_t(u32, USHRT_MAX,
 		min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments));
 	lim->max_integrity_segments = ctrl->max_integrity_segments;
-	lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1;
+	lim->virt_boundary_mask = ctrl->ops->get_virt_boundary(ctrl, is_admin);
 	lim->max_segment_size = UINT_MAX;
 	lim->dma_alignment = 3;
 }
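Condensing what the new is_admin flag and per-transport op resolve to (a sketch with assumed names that merely summarizes the implementations added later in this patch; PRP_MASK stands in for NVME_CTRL_PAGE_SIZE - 1):

#include <stdbool.h>

#define PRP_MASK 0xfffUL	/* NVME_CTRL_PAGE_SIZE - 1, assuming 4KiB pages */

enum transport { PCI_SGL, PCI_PRP_ONLY, RDMA_OR_LOOP, TCP_OR_FC };

static unsigned long effective_virt_boundary(enum transport t, bool is_admin)
{
	switch (t) {
	case PCI_SGL:		/* nvme_pci_get_virt_boundary() */
		return is_admin ? PRP_MASK : 0;	/* admin queue still uses PRP */
	case PCI_PRP_ONLY:	/* SGL not supported: boundary everywhere */
	case RDMA_OR_LOOP:	/* nvme_get_virt_boundary() */
		return PRP_MASK;
	case TCP_OR_FC:		/* nvmf_get_virt_boundary() */
		return 0;
	}
	return PRP_MASK;
}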
@@ -2177,7 +2177,7 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns,
 	int ret;
 
 	lim = queue_limits_start_update(ns->disk->queue);
-	nvme_set_ctrl_limits(ns->ctrl, &lim);
+	nvme_set_ctrl_limits(ns->ctrl, &lim, false);
 
 	memflags = blk_mq_freeze_queue(ns->disk->queue);
 	ret = queue_limits_commit_update(ns->disk->queue, &lim);
@@ -2381,7 +2381,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 	ns->head->lba_shift = id->lbaf[lbaf].ds;
 	ns->head->nuse = le64_to_cpu(id->nuse);
 	capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
-	nvme_set_ctrl_limits(ns->ctrl, &lim);
+	nvme_set_ctrl_limits(ns->ctrl, &lim, false);
 	nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
 	nvme_set_chunk_sectors(ns, id, &lim);
 	if (!nvme_update_disk_info(ns, id, &lim))
@@ -3588,7 +3588,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
 		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
 
 	lim = queue_limits_start_update(ctrl->admin_q);
-	nvme_set_ctrl_limits(ctrl, &lim);
+	nvme_set_ctrl_limits(ctrl, &lim, true);
 	ret = queue_limits_commit_update(ctrl->admin_q, &lim);
 	if (ret)
 		goto out_free;
drivers/nvme/host/fabrics.h
@@ -217,6 +217,12 @@ static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts)
 		min(opts->nr_poll_queues, num_online_cpus());
 }
 
+static inline unsigned long nvmf_get_virt_boundary(struct nvme_ctrl *ctrl,
+		bool is_admin)
+{
+	return 0;
+}
+
 int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
drivers/nvme/host/fc.c
@@ -3360,6 +3360,7 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
 	.submit_async_event	= nvme_fc_submit_async_event,
 	.delete_ctrl		= nvme_fc_delete_ctrl,
 	.get_address		= nvmf_get_address,
+	.get_virt_boundary	= nvmf_get_virt_boundary,
 };
 
 static void
drivers/nvme/host/nvme.h
@@ -558,6 +558,12 @@ static inline bool nvme_ns_has_pi(struct nvme_ns_head *head)
 	return head->pi_type && head->ms == head->pi_size;
 }
 
+static inline unsigned long nvme_get_virt_boundary(struct nvme_ctrl *ctrl,
+		bool is_admin)
+{
+	return NVME_CTRL_PAGE_SIZE - 1;
+}
+
 struct nvme_ctrl_ops {
 	const char *name;
 	struct module *module;
@@ -578,6 +584,7 @@ struct nvme_ctrl_ops {
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 	void (*print_device_info)(struct nvme_ctrl *ctrl);
 	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
+	unsigned long (*get_virt_boundary)(struct nvme_ctrl *ctrl, bool is_admin);
 };
 
 /*
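Because nvme_set_ctrl_limits() now calls the op unconditionally, every nvme_ctrl_ops table has to provide it, as the remaining hunks do. A hypothetical out-of-tree transport with no PRP-style constraint would wire it up like this (the demo_* names are illustrative, not from this patch):

static unsigned long demo_get_virt_boundary(struct nvme_ctrl *ctrl,
		bool is_admin)
{
	/* No PRP format on this transport: no boundary on any queue. */
	return 0;
}

static const struct nvme_ctrl_ops demo_ctrl_ops = {
	.name			= "demo",
	.module			= THIS_MODULE,
	/* ... remaining ops elided for brevity ... */
	.get_virt_boundary	= demo_get_virt_boundary,
};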
drivers/nvme/host/pci.c
@@ -613,9 +613,22 @@ static inline enum nvme_use_sgl nvme_pci_use_sgls(struct nvme_dev *dev,
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 
 	if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) {
-		if (nvme_req(req)->flags & NVME_REQ_USERCMD)
-			return SGL_FORCED;
-		if (req->nr_integrity_segments > 1)
+		/*
+		 * When the controller is capable of using SGL, there are
+		 * several conditions that we force to use it:
+		 *
+		 * 1. A request containing page gaps within the controller's
+		 *    mask can not use the PRP format.
+		 *
+		 * 2. User commands use SGL because that lets the device
+		 *    validate the requested transfer lengths.
+		 *
+		 * 3. Multiple integrity segments must use SGL as that's the
+		 *    only way to describe such a command in NVMe.
+		 */
+		if (req_phys_gap_mask(req) & (NVME_CTRL_PAGE_SIZE - 1) ||
+		    nvme_req(req)->flags & NVME_REQ_USERCMD ||
+		    req->nr_integrity_segments > 1)
 			return SGL_FORCED;
 		return SGL_SUPPORTED;
 	}
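req_phys_gap_mask(), used above, reports the offending address bits across a request's interior segment boundaries. A simplified standalone model of the property being tested here (an illustration of the PRP alignment rule, not the kernel's implementation):

#include <stddef.h>
#include <stdint.h>

struct seg {
	uint64_t addr;	/* physical address of the segment */
	uint64_t len;	/* length in bytes */
};

/*
 * OR together every interior boundary: the end of each segment but the
 * last, and the start of each but the first. If any bit below the PRP
 * page size is set, some middle chunk is misaligned, the PRP format
 * cannot describe the transfer, and the driver forces SGL.
 */
static uint64_t phys_gap_mask(const struct seg *segs, size_t nr)
{
	uint64_t mask = 0;

	for (size_t i = 1; i < nr; i++) {
		mask |= segs[i - 1].addr + segs[i - 1].len;
		mask |= segs[i].addr;
	}
	return mask;
}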
@@ -3243,6 +3256,14 @@ static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
 	return dma_pci_p2pdma_supported(dev->dev);
 }
 
+static unsigned long nvme_pci_get_virt_boundary(struct nvme_ctrl *ctrl,
+		bool is_admin)
+{
+	if (!nvme_ctrl_sgl_supported(ctrl) || is_admin)
+		return NVME_CTRL_PAGE_SIZE - 1;
+	return 0;
+}
+
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 	.name			= "pcie",
 	.module			= THIS_MODULE,
@@ -3257,6 +3278,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
 	.get_address		= nvme_pci_get_address,
 	.print_device_info	= nvme_pci_print_device_info,
 	.supports_pci_p2pdma	= nvme_pci_supports_pci_p2pdma,
+	.get_virt_boundary	= nvme_pci_get_virt_boundary,
 };
 
 static int nvme_dev_map(struct nvme_dev *dev)
drivers/nvme/host/rdma.c
@@ -2202,6 +2202,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 	.delete_ctrl		= nvme_rdma_delete_ctrl,
 	.get_address		= nvmf_get_address,
 	.stop_ctrl		= nvme_rdma_stop_ctrl,
+	.get_virt_boundary	= nvme_get_virt_boundary,
 };
 
 /*
drivers/nvme/host/tcp.c
@@ -2865,6 +2865,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
 	.delete_ctrl		= nvme_tcp_delete_ctrl,
 	.get_address		= nvme_tcp_get_address,
 	.stop_ctrl		= nvme_tcp_stop_ctrl,
+	.get_virt_boundary	= nvmf_get_virt_boundary,
 };
 
 static bool
drivers/nvme/target/loop.c
@@ -511,6 +511,7 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
 	.submit_async_event	= nvme_loop_submit_async_event,
 	.delete_ctrl		= nvme_loop_delete_ctrl_host,
 	.get_address		= nvmf_get_address,
+	.get_virt_boundary	= nvme_get_virt_boundary,
 };
 
 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)