From 0b857b44b5e445dc850cd91c45ce6edeb7797480 Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Sun, 31 Jul 2016 00:27:39 -0700
Subject: nvme-rdma: Don't leak uninitialized memory in connect request private data

Zero out the full nvme_rdma_cm_req structure before sending it.
Otherwise we end up leaking kernel memory in the reserved field, which
might break forward compatibility in the future.

Signed-off-by: Roland Dreier
Reviewed-by: Christoph Hellwig
Signed-off-by: Sagi Grimberg

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3e3ce2b..b96b883 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1269,7 +1269,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 {
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
        struct rdma_conn_param param = { };
-       struct nvme_rdma_cm_req priv;
+       struct nvme_rdma_cm_req priv = { };
        int ret;
 
        param.qp_num = queue->qp->qp_num;
--
cgit v0.10.2


From 5f372eb3e76317b4fe4ba53ad1547f39fc883350 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Sun, 31 Jul 2016 18:43:15 +0300
Subject: nvme-rdma: Queue ns scanning after a successful reconnection

On an ordered target shutdown, the target can send an AEN on a
namespace removal, which triggers the host to queue an ns-list query.
The shutdown also triggers error recovery, which will attempt periodic
reconnects. We can hit a race where the ns rescanning fails (error
recovery kicked in and we're not connected), causing all namespaces to
be removed, and when we reconnect we won't see any namespaces for this
controller.

So, queue a namespace rescan after we have successfully reconnected to
the target.

Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index b96b883..7fd1d73 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -748,8 +748,10 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);
 
-       if (ctrl->queue_count > 1)
+       if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
+               nvme_queue_scan(&ctrl->ctrl);
+       }
 
        dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
 
--
cgit v0.10.2


From 57de5a0a40db97bb390d3ac1f4c2e74b9f3515c3 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Thu, 14 Jul 2016 17:39:47 +0300
Subject: nvme-rdma: Fix device removal handling

The device removal sequence may have crashed because the controller
(and admin queue space) was freed before we destroyed the admin queue
resources. Thus we want to destroy the admin queue and only then queue
controller deletion and wait for it to complete.

More specifically we:
1. own the controller deletion (make sure we are not competing with
   another deletion).
2. get rid of inflight reconnects, if any exist (which also destroy and
   create queues).
3. destroy the queue.
4. safely queue controller deletion (and wait for it to complete).

Reported-by: Steve Wise
Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 7fd1d73..3ffec37 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -169,7 +169,6 @@ MODULE_PARM_DESC(register_always,
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                struct rdma_cm_event *event);
 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
-static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
 
 /* XXX: really should move to a generic header sooner or later.. */
 static inline void put_unaligned_le24(u32 val, u8 *p)
@@ -1320,37 +1319,39 @@ out_destroy_queue_ib:
  * that caught the event. Since we hold the callout until the controller
  * deletion is completed, we'll deadlock if the controller deletion will
  * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources
- * after the controller deletion completed with the exception of destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * of destroying this queue before-hand, destroy the queue resources,
+ * then queue the controller deletion which won't destroy this queue and
+ * we destroy the cm_id implicitely by returning a non-zero rc to the callout.
  */
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-       int ret, ctrl_deleted = 0;
+       int ret;
 
-       /* First disable the queue so ctrl delete won't free it */
-       if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
-               goto out;
+       /* Own the controller deletion */
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+               return 0;
 
-       /* delete the controller */
-       ret = __nvme_rdma_del_ctrl(ctrl);
-       if (!ret) {
-               dev_warn(ctrl->ctrl.device,
-                       "Got rdma device removal event, deleting ctrl\n");
-               flush_work(&ctrl->delete_work);
+       dev_warn(ctrl->ctrl.device,
+               "Got rdma device removal event, deleting ctrl\n");
 
-               /* Return non-zero so the cm_id will destroy implicitly */
-               ctrl_deleted = 1;
+       /* Get rid of reconnect work if its running */
+       cancel_delayed_work_sync(&ctrl->reconnect_work);
 
+       /* Disable the queue so ctrl delete won't free it */
+       if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
                /* Free this queue ourselves */
-               rdma_disconnect(queue->cm_id);
-               ib_drain_qp(queue->qp);
+               nvme_rdma_stop_queue(queue);
                nvme_rdma_destroy_queue_ib(queue);
+
+               /* Return non-zero so the cm_id will destroy implicitly */
+               ret = 1;
        }
 
-out:
-       return ctrl_deleted;
+       /* Queue controller deletion */
+       queue_work(nvme_rdma_wq, &ctrl->delete_work);
+       flush_work(&ctrl->delete_work);
+       return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
--
cgit v0.10.2


From 2461a8dd38bea3cb5b1c1f0323794483292fb03f Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Sun, 24 Jul 2016 09:29:51 +0300
Subject: nvme-rdma: Remove duplicate call to nvme_remove_namespaces

nvme_uninit_ctrl already does that for us. Note that we reordered
nvme_rdma_shutdown_ctrl and nvme_uninit_ctrl; this is perfectly fine
because we actually want ctrl uninit (AEN, scan cancellation and
namespace removal) to happen before we shut down the rdma resources.

Also, centralize the duplicated code of the deletion work and the dead
controller removal work into __nvme_rdma_remove_ctrl, which accepts a
shutdown boolean.
Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3ffec37..1279bc2 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1660,15 +1660,20 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
        nvme_rdma_destroy_admin_queue(ctrl);
 }
 
+static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
+{
+       nvme_uninit_ctrl(&ctrl->ctrl);
+       if (shutdown)
+               nvme_rdma_shutdown_ctrl(ctrl);
+       nvme_put_ctrl(&ctrl->ctrl);
+}
+
 static void nvme_rdma_del_ctrl_work(struct work_struct *work)
 {
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                                struct nvme_rdma_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_rdma_shutdown_ctrl(ctrl);
-       nvme_uninit_ctrl(&ctrl->ctrl);
-       nvme_put_ctrl(&ctrl->ctrl);
+       __nvme_rdma_remove_ctrl(ctrl, true);
 }
 
 static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
@@ -1701,9 +1706,7 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                                struct nvme_rdma_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_uninit_ctrl(&ctrl->ctrl);
-       nvme_put_ctrl(&ctrl->ctrl);
+       __nvme_rdma_remove_ctrl(ctrl, false);
 }
 
 static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
--
cgit v0.10.2


From a34ca17a9717fe607cd58285a1704cb6526cf561 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Sun, 24 Jul 2016 09:22:19 +0300
Subject: nvme-rdma: Free the I/O tags when we delete the controller

If we wait until we free the controller (free_ctrl) we might lose our
rdma device without any notification while we still have open resources
(tags, MRs and DMA mappings).

Instead, destroy the tags with their rdma resources once we delete the
device and not when freeing it.

Note that we don't do that in nvme_rdma_shutdown_ctrl because controller
reset uses it as well, and we want to give active I/O a chance to
complete successfully.

Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 1279bc2..6378dc9 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -686,11 +686,6 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
        list_del(&ctrl->list);
        mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-       if (ctrl->ctrl.tagset) {
-               blk_cleanup_queue(ctrl->ctrl.connect_q);
-               blk_mq_free_tag_set(&ctrl->tag_set);
-               nvme_rdma_dev_put(ctrl->device);
-       }
        kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
 free_ctrl:
@@ -1665,6 +1660,13 @@ static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
        nvme_uninit_ctrl(&ctrl->ctrl);
        if (shutdown)
                nvme_rdma_shutdown_ctrl(ctrl);
+
+       if (ctrl->ctrl.tagset) {
+               blk_cleanup_queue(ctrl->ctrl.connect_q);
+               blk_mq_free_tag_set(&ctrl->tag_set);
+               nvme_rdma_dev_put(ctrl->device);
+       }
+
        nvme_put_ctrl(&ctrl->ctrl);
 }
 
--
cgit v0.10.2


From a159c64d936eb0d1da29d8ad384183d8984899c9 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Sun, 24 Jul 2016 09:32:08 +0300
Subject: nvme-loop: Remove duplicate call to nvme_remove_namespaces

nvme_uninit_ctrl already does that for us. Note that we reordered
nvme_loop_shutdown_ctrl with nvme_uninit_ctrl, but it's safe because we
want controller uninit to happen before we shut down the transport
resources.
Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig

diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 94e7829..7affd40 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -414,9 +414,8 @@ static void nvme_loop_del_ctrl_work(struct work_struct *work)
        struct nvme_loop_ctrl *ctrl = container_of(work,
                                struct nvme_loop_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_loop_shutdown_ctrl(ctrl);
        nvme_uninit_ctrl(&ctrl->ctrl);
+       nvme_loop_shutdown_ctrl(ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
 }
 
@@ -501,7 +500,6 @@ out_free_queues:
        nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-       nvme_remove_namespaces(&ctrl->ctrl);
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
 }
--
cgit v0.10.2


From 45862ebcc4883b1b6bc0701cd15cb2b68b140c5d Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Sun, 24 Jul 2016 09:26:16 +0300
Subject: nvme-rdma: Make sure to shutdown the controller if we can

Relying on the ctrl state in nvme_rdma_shutdown_ctrl is wrong because it
will never be NVME_CTRL_LIVE (delete_ctrl or reset_ctrl invoked it).
Instead, check that the admin queue is connected. Note that this is safe
because we can never see a competing thread trying to destroy the admin
queue (reset or delete controller).

Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 6378dc9..f4b83686 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1646,7 +1646,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
                nvme_rdma_free_io_queues(ctrl);
        }
 
-       if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+       if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
                nvme_shutdown_ctrl(&ctrl->ctrl);
 
        blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
--
cgit v0.10.2


From d8f7750a08968b105056328652d2c332bdfa062d Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Thu, 19 May 2016 15:24:55 +0300
Subject: nvmet-rdma: Correctly handle RDMA device hot removal

When configuring a device-attached listener, we may see device removal
events. In this case we return a non-zero return code from the cm event
handler, which implicitly destroys the cm_id. It is possible that in the
future the user will remove this listener and by that trigger a second
call to rdma_destroy_id on an already destroyed cm_id -> BUG.

In addition, when a queue-bound (active session) cm_id generates a
DEVICE_REMOVAL event, we must guarantee all resources are cleaned up by
the time we return from the event handler.

Introduce nvmet_rdma_device_removal, which addresses (or at least
attempts to address) both scenarios.
Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index e06d504..48c8118 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -77,6 +77,7 @@ enum nvmet_rdma_queue_state {
        NVMET_RDMA_Q_CONNECTING,
        NVMET_RDMA_Q_LIVE,
        NVMET_RDMA_Q_DISCONNECTING,
+       NVMET_RDMA_IN_DEVICE_REMOVAL,
 };
 
 struct nvmet_rdma_queue {
@@ -984,7 +985,10 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
        struct nvmet_rdma_device *dev = queue->dev;
 
        nvmet_rdma_free_queue(queue);
-       rdma_destroy_id(cm_id);
+
+       if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+               rdma_destroy_id(cm_id);
+
        kref_put(&dev->ref, nvmet_rdma_free_dev);
 }
 
@@ -1233,8 +1237,9 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
        switch (queue->state) {
        case NVMET_RDMA_Q_CONNECTING:
        case NVMET_RDMA_Q_LIVE:
-               disconnect = true;
                queue->state = NVMET_RDMA_Q_DISCONNECTING;
+       case NVMET_RDMA_IN_DEVICE_REMOVAL:
+               disconnect = true;
                break;
        case NVMET_RDMA_Q_DISCONNECTING:
                break;
@@ -1272,6 +1277,62 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
        schedule_work(&queue->release_work);
 }
 
+/**
+ * nvme_rdma_device_removal() - Handle RDMA device removal
+ * @queue:      nvmet rdma queue (cm id qp_context)
+ * @addr:       nvmet address (cm_id context)
+ *
+ * DEVICE_REMOVAL event notifies us that the RDMA device is about
+ * to unplug so we should take care of destroying our RDMA resources.
+ * This event will be generated for each allocated cm_id.
+ *
+ * Note that this event can be generated on a normal queue cm_id
+ * and/or a device bound listener cm_id (where in this case
+ * queue will be null).
+ *
+ * we claim ownership on destroying the cm_id. For queues we move
+ * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL and for port
+ * we nullify the priv to prevent double cm_id destruction and destroying
+ * the cm_id implicitely by returning a non-zero rc to the callout.
+ */
+static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
+               struct nvmet_rdma_queue *queue)
+{
+       unsigned long flags;
+
+       if (!queue) {
+               struct nvmet_port *port = cm_id->context;
+
+               /*
+                * This is a listener cm_id. Make sure that
+                * future remove_port won't invoke a double
+                * cm_id destroy. use atomic xchg to make sure
+                * we don't compete with remove_port.
+                */
+               if (xchg(&port->priv, NULL) != cm_id)
+                       return 0;
+       } else {
+               /*
+                * This is a queue cm_id. Make sure that
+                * release queue will not destroy the cm_id
+                * and schedule all ctrl queues removal (only
+                * if the queue is not disconnecting already).
+                */
+               spin_lock_irqsave(&queue->state_lock, flags);
+               if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
+                       queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
+               spin_unlock_irqrestore(&queue->state_lock, flags);
+               nvmet_rdma_queue_disconnect(queue);
+               flush_scheduled_work();
+       }
+
+       /*
+        * We need to return 1 so that the core will destroy
+        * it's own ID. What a great API design..
+        */
+       return 1;
+}
+
 static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                struct rdma_cm_event *event)
 {
@@ -1294,20 +1355,11 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                break;
        case RDMA_CM_EVENT_ADDR_CHANGE:
        case RDMA_CM_EVENT_DISCONNECTED:
-       case RDMA_CM_EVENT_DEVICE_REMOVAL:
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
-               /*
-                * We can get the device removal callback even for a
-                * CM ID that we aren't actually using. In that case
-                * the context pointer is NULL, so we shouldn't try
-                * to disconnect a non-existing queue. But we also
-                * need to return 1 so that the core will destroy
-                * it's own ID. What a great API design..
-                */
-               if (queue)
-                       nvmet_rdma_queue_disconnect(queue);
-               else
-                       ret = 1;
+               nvmet_rdma_queue_disconnect(queue);
+               break;
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+               ret = nvmet_rdma_device_removal(cm_id, queue);
                break;
        case RDMA_CM_EVENT_REJECTED:
        case RDMA_CM_EVENT_UNREACHABLE:
@@ -1396,9 +1448,10 @@ out_destroy_id:
 
 static void nvmet_rdma_remove_port(struct nvmet_port *port)
 {
-       struct rdma_cm_id *cm_id = port->priv;
+       struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
 
-       rdma_destroy_id(cm_id);
+       if (cm_id)
+               rdma_destroy_id(cm_id);
 }
 
 static struct nvmet_fabrics_ops nvmet_rdma_ops = {
--
cgit v0.10.2


From 40e64e07213201710a51e270595d6e6c028f9502 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Thu, 28 Jul 2016 18:04:09 +0300
Subject: nvmet-rdma: Don't use the inline buffer in order to avoid allocation for small reads

Under extreme conditions this might cause data corruption. By doing
that, we repost the buffer and then post this buffer for the device to
send. If we happen to use shared receive queues, the device might write
to the buffer before it sends it (there is no ordering between send and
recv queues). Without SRQs we probably won't hit that if the host
doesn't misbehave by sending more than we allowed it, but relying on
that is not really a good idea.

Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 48c8118..b4d6485 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -616,15 +616,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
        if (!len)
                return 0;
 
-       /* use the already allocated data buffer if possible */
-       if (len <= NVMET_RDMA_INLINE_DATA_SIZE && rsp->queue->host_qid) {
-               nvmet_rdma_use_inline_sg(rsp, len, 0);
-       } else {
-               status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
-                               len);
-               if (status)
-                       return status;
-       }
+       status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
+                       len);
+       if (status)
+               return status;
 
        ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
                        rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
--
cgit v0.10.2


From 28b89118539da03f4b188763e1b2fd1aec0f580a Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Thu, 4 Aug 2016 11:18:49 +0300
Subject: nvmet: Fix controller serial number inconsistency

The host is allowed to issue identify as many times as it wants; we
need to stay consistent when reporting the serial number for a given
controller.
Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 2fac17a..47c564b 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -13,7 +13,6 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include
-#include <linux/random.h>
 #include
 #include "nvmet.h"
 
@@ -83,7 +82,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl *id;
-       u64 serial;
        u16 status = 0;
 
        id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -96,10 +94,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
        id->vid = 0;
        id->ssvid = 0;
 
-       /* generate a random serial number as our controllers are ephemeral: */
-       get_random_bytes(&serial, sizeof(serial));
        memset(id->sn, ' ', sizeof(id->sn));
-       snprintf(id->sn, sizeof(id->sn), "%llx", serial);
+       snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
 
        memset(id->mn, ' ', sizeof(id->mn));
        strncpy((char *)id->mn, "Linux", sizeof(id->mn));

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8a891ca..6559d5a 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -13,6 +13,7 @@
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include
+#include <linux/random.h>
 #include "nvmet.h"
 
 static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
@@ -728,6 +729,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
        memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
        memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
 
+       /* generate a random serial number as our controllers are ephemeral: */
+       get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
+
        kref_init(&ctrl->ref);
        ctrl->subsys = subsys;
 

diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 57dd6d8..76b6eed 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -113,6 +113,7 @@ struct nvmet_ctrl {
 
        struct mutex            lock;
        u64                     cap;
+       u64                     serial;
        u32                     cc;
        u32                     csts;
 
--
cgit v0.10.2


From 3ef1b4b298d98ccb3cc895baf1b18f7f9d073bee Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Thu, 4 Aug 2016 13:46:19 +0300
Subject: nvme-rdma: start async event handler after reconnecting to a controller

When we reset or reconnect to a controller, we cancel the async event
handler so we can safely re-establish resources, but we need to
remember to start it again when we successfully reconnect.
Signed-off-by: Sagi Grimberg
Reviewed-by: Christoph Hellwig

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f4b83686..e82434a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -745,6 +745,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
                nvme_queue_scan(&ctrl->ctrl);
+               nvme_queue_async_events(&ctrl->ctrl);
        }
 
        dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
@@ -1747,6 +1748,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
        if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
                nvme_queue_scan(&ctrl->ctrl);
+               nvme_queue_async_events(&ctrl->ctrl);
        }
 
        return;
--
cgit v0.10.2


From e3266378bdbca82c2854fc612fa9a391eba1f173 Mon Sep 17 00:00:00 2001
From: Sagi Grimberg
Date: Thu, 4 Aug 2016 17:37:40 +0300
Subject: nvme-rdma: Remove unused includes

Signed-off-by: Sagi Grimberg
Reviewed-by: Steve Wise

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index e82434a..8d2875b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -12,13 +12,11 @@
  * more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -26,7 +24,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
--
cgit v0.10.2
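Editor's note: the first patch in this series (zero-initializing the CM private
data) illustrates a general pattern worth calling out: any structure that is
copied onto the wire as RDMA CM private data should be zero-initialized, so its
reserved and padding bytes never carry leftover stack contents. The sketch
below is an illustration only and is not part of the series; example_cm_req and
example_connect are hypothetical names that merely mirror the shape of
nvme_rdma_cm_req and nvme_rdma_route_resolved(), while rdma_conn_param and
rdma_connect() are the RDMA CM interfaces the patches themselves use.

/*
 * Minimal sketch, not part of the patch series above.
 * example_cm_req and example_connect are hypothetical.
 */
#include <linux/types.h>
#include <rdma/rdma_cm.h>

struct example_cm_req {
        __le16  recfmt;
        __le16  qid;
        __le16  hrqsize;
        __le16  hsqsize;
        u8      rsvd[24];       /* reserved: must reach the peer as zeroes */
};

static int example_connect(struct rdma_cm_id *cm_id)
{
        struct rdma_conn_param param = { };
        struct example_cm_req priv = { };       /* zeroes rsvd[] as well */

        priv.qid = cpu_to_le16(0);
        param.private_data = &priv;
        param.private_data_len = sizeof(priv);

        /*
         * Had 'priv' been left uninitialized, the reserved bytes would
         * carry whatever happened to be on the stack out to the remote
         * peer, which is exactly what the first patch avoids.
         */
        return rdma_connect(cm_id, &param);
}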