Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVM Express device driver
4 * Copyright (c) 2011-2014, Intel Corporation.
5 */
6
7#include <linux/async.h>
8#include <linux/blkdev.h>
9#include <linux/blk-mq.h>
10#include <linux/blk-integrity.h>
11#include <linux/compat.h>
12#include <linux/delay.h>
13#include <linux/errno.h>
14#include <linux/hdreg.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/backing-dev.h>
18#include <linux/slab.h>
19#include <linux/types.h>
20#include <linux/pr.h>
21#include <linux/ptrace.h>
22#include <linux/nvme_ioctl.h>
23#include <linux/pm_qos.h>
24#include <linux/ratelimit.h>
25#include <linux/unaligned.h>
26
27#include "nvme.h"
28#include "fabrics.h"
29#include <linux/nvme-auth.h>
30
31#define CREATE_TRACE_POINTS
32#include "trace.h"
33
34#define NVME_MINORS (1U << MINORBITS)
35
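/*
 * Per-namespace information gathered from the various Identify data
 * structures while scanning a namespace; it is used to (re)configure the
 * corresponding block device.
 */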
36struct nvme_ns_info {
37 struct nvme_ns_ids ids;
38 u32 nsid;
39 __le32 anagrpid;
40 u8 pi_offset;
41 u16 endgid;
42 u64 runs;
43 bool is_shared;
44 bool is_readonly;
45 bool is_ready;
46 bool is_removed;
47 bool is_rotational;
48 bool no_vwc;
49};
50
51unsigned int admin_timeout = 60;
52module_param(admin_timeout, uint, 0644);
53MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
54EXPORT_SYMBOL_GPL(admin_timeout);
55
56unsigned int nvme_io_timeout = 30;
57module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
58MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
59EXPORT_SYMBOL_GPL(nvme_io_timeout);
60
61static unsigned char shutdown_timeout = 5;
62module_param(shutdown_timeout, byte, 0644);
63MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
64
65static u8 nvme_max_retries = 5;
66module_param_named(max_retries, nvme_max_retries, byte, 0644);
67MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
68
69static unsigned long default_ps_max_latency_us = 100000;
70module_param(default_ps_max_latency_us, ulong, 0644);
71MODULE_PARM_DESC(default_ps_max_latency_us,
72 "max power saving latency for new devices; use PM QOS to change per device");
73
74static bool force_apst;
75module_param(force_apst, bool, 0644);
76MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
77
78static unsigned long apst_primary_timeout_ms = 100;
79module_param(apst_primary_timeout_ms, ulong, 0644);
80MODULE_PARM_DESC(apst_primary_timeout_ms,
81 "primary APST timeout in ms");
82
83static unsigned long apst_secondary_timeout_ms = 2000;
84module_param(apst_secondary_timeout_ms, ulong, 0644);
85MODULE_PARM_DESC(apst_secondary_timeout_ms,
86 "secondary APST timeout in ms");
87
88static unsigned long apst_primary_latency_tol_us = 15000;
89module_param(apst_primary_latency_tol_us, ulong, 0644);
90MODULE_PARM_DESC(apst_primary_latency_tol_us,
91 "primary APST latency tolerance in us");
92
93static unsigned long apst_secondary_latency_tol_us = 100000;
94module_param(apst_secondary_latency_tol_us, ulong, 0644);
95MODULE_PARM_DESC(apst_secondary_latency_tol_us,
96 "secondary APST latency tolerance in us");
97
98/*
99 * Older kernels didn't enable protection information if it was at an offset.
100 * Newer kernels do, so it breaks reads on the upgrade if such formats were
101 * used in prior kernels since the metadata written did not contain a valid
102 * checksum.
103 */
104static bool disable_pi_offsets = false;
105module_param(disable_pi_offsets, bool, 0444);
106MODULE_PARM_DESC(disable_pi_offsets,
107 "disable protection information if it has an offset");
108
109/*
110 * nvme_wq - hosts nvme related works that are not reset or delete
111 * nvme_reset_wq - hosts nvme reset works
112 * nvme_delete_wq - hosts nvme delete works
113 *
114 * nvme_wq will host works such as scan, aen handling, fw activation,
115 * keep-alive, periodic reconnects etc. nvme_reset_wq
116 * runs reset works which also flush works hosted on nvme_wq for
117 * serialization purposes. nvme_delete_wq hosts controller deletion
118 * works which flush reset works for serialization.
119 */
120struct workqueue_struct *nvme_wq;
121EXPORT_SYMBOL_GPL(nvme_wq);
122
123struct workqueue_struct *nvme_reset_wq;
124EXPORT_SYMBOL_GPL(nvme_reset_wq);
125
126struct workqueue_struct *nvme_delete_wq;
127EXPORT_SYMBOL_GPL(nvme_delete_wq);
128
129static LIST_HEAD(nvme_subsystems);
130DEFINE_MUTEX(nvme_subsystems_lock);
131
132static DEFINE_IDA(nvme_instance_ida);
133static dev_t nvme_ctrl_base_chr_devt;
134static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env);
135static const struct class nvme_class = {
136 .name = "nvme",
137 .dev_uevent = nvme_class_uevent,
138};
139
140static const struct class nvme_subsys_class = {
141 .name = "nvme-subsystem",
142};
143
144static DEFINE_IDA(nvme_ns_chr_minor_ida);
145static dev_t nvme_ns_chr_devt;
146static const struct class nvme_ns_chr_class = {
147 .name = "nvme-generic",
148};
149
150static void nvme_put_subsystem(struct nvme_subsystem *subsys);
151static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
152 unsigned nsid);
153static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
154 struct nvme_command *cmd);
155static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page,
156 u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi);
157
158void nvme_queue_scan(struct nvme_ctrl *ctrl)
159{
160 /*
161 * Only queue new scan work when admin and IO queues are both alive
162 */
163 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset)
164 queue_work(nvme_wq, &ctrl->scan_work);
165}
166
167/*
168 * Use this function to proceed with scheduling reset_work for a controller
169 * that had previously been set to the resetting state. This is intended for
170 * code paths that can't be interrupted by other reset attempts. A hot removal
171 * may prevent this from succeeding.
172 */
173int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
174{
175 if (nvme_ctrl_state(ctrl) != NVME_CTRL_RESETTING)
176 return -EBUSY;
177 if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
178 return -EBUSY;
179 return 0;
180}
181EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
182
183static void nvme_failfast_work(struct work_struct *work)
184{
185 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
186 struct nvme_ctrl, failfast_work);
187
188 if (nvme_ctrl_state(ctrl) != NVME_CTRL_CONNECTING)
189 return;
190
191 set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
192 dev_info(ctrl->device, "failfast expired\n");
193 nvme_kick_requeue_lists(ctrl);
194}
195
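/*
 * Arm the fail-fast timer when a reconnect starts: if fast_io_fail_tmo
 * seconds elapse without the controller becoming LIVE again, the expired
 * flag is set and the requeue lists are kicked so that held I/O is not
 * stuck waiting for the full reconnect attempt.
 */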
196static inline void nvme_start_failfast_work(struct nvme_ctrl *ctrl)
197{
198 if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1)
199 return;
200
201 schedule_delayed_work(&ctrl->failfast_work,
202 ctrl->opts->fast_io_fail_tmo * HZ);
203}
204
205static inline void nvme_stop_failfast_work(struct nvme_ctrl *ctrl)
206{
207 if (!ctrl->opts)
208 return;
209
210 cancel_delayed_work_sync(&ctrl->failfast_work);
211 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
212}
213
214
215int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
216{
217 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
218 return -EBUSY;
219 if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
220 return -EBUSY;
221 return 0;
222}
223EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
224
225int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
226{
227 int ret;
228
229 ret = nvme_reset_ctrl(ctrl);
230 if (!ret) {
231 flush_work(&ctrl->reset_work);
232 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
233 ret = -ENETRESET;
234 }
235
236 return ret;
237}
238
239static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
240{
241 dev_info(ctrl->device,
242 "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
243
244 flush_work(&ctrl->reset_work);
245 nvme_stop_ctrl(ctrl);
246 nvme_remove_namespaces(ctrl);
247 ctrl->ops->delete_ctrl(ctrl);
248 nvme_uninit_ctrl(ctrl);
249}
250
251static void nvme_delete_ctrl_work(struct work_struct *work)
252{
253 struct nvme_ctrl *ctrl =
254 container_of(work, struct nvme_ctrl, delete_work);
255
256 nvme_do_delete_ctrl(ctrl);
257}
258
259int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
260{
261 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
262 return -EBUSY;
263 if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
264 return -EBUSY;
265 return 0;
266}
267EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
268
269void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
270{
271 /*
272 * Keep a reference until nvme_do_delete_ctrl() completes,
273 * since ->delete_ctrl can free the controller.
274 */
275 nvme_get_ctrl(ctrl);
276 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
277 nvme_do_delete_ctrl(ctrl);
278 nvme_put_ctrl(ctrl);
279}
280
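/*
 * Translate an NVMe completion status (status code type and status code)
 * into the generic block layer status used to end the request.
 */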
281static blk_status_t nvme_error_status(u16 status)
282{
283 switch (status & NVME_SCT_SC_MASK) {
284 case NVME_SC_SUCCESS:
285 return BLK_STS_OK;
286 case NVME_SC_CAP_EXCEEDED:
287 return BLK_STS_NOSPC;
288 case NVME_SC_LBA_RANGE:
289 case NVME_SC_CMD_INTERRUPTED:
290 case NVME_SC_NS_NOT_READY:
291 return BLK_STS_TARGET;
292 case NVME_SC_BAD_ATTRIBUTES:
293 case NVME_SC_INVALID_OPCODE:
294 case NVME_SC_INVALID_FIELD:
295 case NVME_SC_INVALID_NS:
296 return BLK_STS_NOTSUPP;
297 case NVME_SC_WRITE_FAULT:
298 case NVME_SC_READ_ERROR:
299 case NVME_SC_UNWRITTEN_BLOCK:
300 case NVME_SC_ACCESS_DENIED:
301 case NVME_SC_READ_ONLY:
302 case NVME_SC_COMPARE_FAILED:
303 return BLK_STS_MEDIUM;
304 case NVME_SC_GUARD_CHECK:
305 case NVME_SC_APPTAG_CHECK:
306 case NVME_SC_REFTAG_CHECK:
307 case NVME_SC_INVALID_PI:
308 return BLK_STS_PROTECTION;
309 case NVME_SC_RESERVATION_CONFLICT:
310 return BLK_STS_RESV_CONFLICT;
311 case NVME_SC_HOST_PATH_ERROR:
312 return BLK_STS_TRANSPORT;
313 case NVME_SC_ZONE_TOO_MANY_ACTIVE:
314 return BLK_STS_ZONE_ACTIVE_RESOURCE;
315 case NVME_SC_ZONE_TOO_MANY_OPEN:
316 return BLK_STS_ZONE_OPEN_RESOURCE;
317 default:
318 return BLK_STS_IOERR;
319 }
320}
321
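/*
 * Requeue a failed command, optionally after a delay.  The CRD field of the
 * completion status selects one of the three Command Retry Delay Times the
 * controller reported (crdt[]), which are specified in units of 100
 * milliseconds, hence the multiplication below.
 */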
322static void nvme_retry_req(struct request *req)
323{
324 unsigned long delay = 0;
325 u16 crd;
326
327 /* The mask and shift result must be <= 3 */
328 crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11;
329 if (crd)
330 delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
331
332 nvme_req(req)->retries++;
333 blk_mq_requeue_request(req, false);
334 blk_mq_delay_kick_requeue_list(req->q, delay);
335}
336
337static void nvme_log_error(struct request *req)
338{
339 struct nvme_ns *ns = req->q->queuedata;
340 struct nvme_request *nr = nvme_req(req);
341
342 if (ns) {
343 pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
344 ns->disk ? ns->disk->disk_name : "?",
345 nvme_get_opcode_str(nr->cmd->common.opcode),
346 nr->cmd->common.opcode,
347 nvme_sect_to_lba(ns->head, blk_rq_pos(req)),
348 blk_rq_bytes(req) >> ns->head->lba_shift,
349 nvme_get_error_status_str(nr->status),
350 NVME_SCT(nr->status), /* Status Code Type */
351 nr->status & NVME_SC_MASK, /* Status Code */
352 nr->status & NVME_STATUS_MORE ? "MORE " : "",
353 nr->status & NVME_STATUS_DNR ? "DNR " : "");
354 return;
355 }
356
357 pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s\n",
358 dev_name(nr->ctrl->device),
359 nvme_get_admin_opcode_str(nr->cmd->common.opcode),
360 nr->cmd->common.opcode,
361 nvme_get_error_status_str(nr->status),
362 NVME_SCT(nr->status), /* Status Code Type */
363 nr->status & NVME_SC_MASK, /* Status Code */
364 nr->status & NVME_STATUS_MORE ? "MORE " : "",
365 nr->status & NVME_STATUS_DNR ? "DNR " : "");
366}
367
368static void nvme_log_err_passthru(struct request *req)
369{
370 struct nvme_ns *ns = req->q->queuedata;
371 struct nvme_request *nr = nvme_req(req);
372
373 pr_err_ratelimited("%s: %s(0x%x), %s (sct 0x%x / sc 0x%x) %s%s"
374 "cdw10=0x%x cdw11=0x%x cdw12=0x%x cdw13=0x%x cdw14=0x%x cdw15=0x%x\n",
375 ns ? ns->disk->disk_name : dev_name(nr->ctrl->device),
376 ns ? nvme_get_opcode_str(nr->cmd->common.opcode) :
377 nvme_get_admin_opcode_str(nr->cmd->common.opcode),
378 nr->cmd->common.opcode,
379 nvme_get_error_status_str(nr->status),
380 NVME_SCT(nr->status), /* Status Code Type */
381 nr->status & NVME_SC_MASK, /* Status Code */
382 nr->status & NVME_STATUS_MORE ? "MORE " : "",
383 nr->status & NVME_STATUS_DNR ? "DNR " : "",
384 le32_to_cpu(nr->cmd->common.cdw10),
385 le32_to_cpu(nr->cmd->common.cdw11),
386 le32_to_cpu(nr->cmd->common.cdw12),
387 le32_to_cpu(nr->cmd->common.cdw13),
388 le32_to_cpu(nr->cmd->common.cdw14),
389 le32_to_cpu(nr->cmd->common.cdw15));
390}
391
392enum nvme_disposition {
393 COMPLETE,
394 RETRY,
395 FAILOVER,
396 AUTHENTICATE,
397};
398
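/*
 * Decide how to handle a completed request: complete it, retry it on the
 * same path, fail it over to another path (multipath), or trigger
 * re-authentication first.  Requests with DNR set, an exhausted retry
 * budget or failfast semantics are always completed as-is.
 */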
399static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
400{
401 if (likely(nvme_req(req)->status == 0))
402 return COMPLETE;
403
404 if (blk_noretry_request(req) ||
405 (nvme_req(req)->status & NVME_STATUS_DNR) ||
406 nvme_req(req)->retries >= nvme_max_retries)
407 return COMPLETE;
408
409 if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED)
410 return AUTHENTICATE;
411
412 if (req->cmd_flags & REQ_NVME_MPATH) {
413 if (nvme_is_path_error(nvme_req(req)->status) ||
414 blk_queue_dying(req->q))
415 return FAILOVER;
416 } else {
417 if (blk_queue_dying(req->q))
418 return COMPLETE;
419 }
420
421 return RETRY;
422}
423
424static inline void nvme_end_req_zoned(struct request *req)
425{
426 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
427 req_op(req) == REQ_OP_ZONE_APPEND) {
428 struct nvme_ns *ns = req->q->queuedata;
429
430 req->__sector = nvme_lba_to_sect(ns->head,
431 le64_to_cpu(nvme_req(req)->result.u64));
432 }
433}
434
435static inline void __nvme_end_req(struct request *req)
436{
437 if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
438 if (blk_rq_is_passthrough(req))
439 nvme_log_err_passthru(req);
440 else
441 nvme_log_error(req);
442 }
443 nvme_end_req_zoned(req);
444 nvme_trace_bio_complete(req);
445 if (req->cmd_flags & REQ_NVME_MPATH)
446 nvme_mpath_end_request(req);
447}
448
449void nvme_end_req(struct request *req)
450{
451 blk_status_t status = nvme_error_status(nvme_req(req)->status);
452
453 __nvme_end_req(req);
454 blk_mq_end_request(req, status);
455}
456
457static void __nvme_complete_rq(struct request *req)
458{
459 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
460
461 nvme_cleanup_cmd(req);
462
463 /*
464 * Completions of long-running commands should not be able to
465 * defer sending of periodic keep alives, since the controller
466 * may have completed processing such commands a long time ago
467 * (arbitrarily close to command submission time).
468 * req->deadline - req->timeout is the command submission time
469 * in jiffies.
470 */
471 if (ctrl->kas &&
472 req->deadline - req->timeout >= ctrl->ka_last_check_time)
473 ctrl->comp_seen = true;
474
475 switch (nvme_decide_disposition(req)) {
476 case COMPLETE:
477 nvme_end_req(req);
478 return;
479 case RETRY:
480 nvme_retry_req(req);
481 return;
482 case FAILOVER:
483 nvme_failover_req(req);
484 return;
485 case AUTHENTICATE:
486#ifdef CONFIG_NVME_HOST_AUTH
487 queue_work(nvme_wq, &ctrl->dhchap_auth_work);
488 nvme_retry_req(req);
489#else
490 nvme_end_req(req);
491#endif
492 return;
493 }
494}
495
496void nvme_complete_rq(struct request *req)
497{
498 trace_nvme_complete_rq(req);
499 __nvme_complete_rq(req);
500}
501EXPORT_SYMBOL_GPL(nvme_complete_rq);
502
503void nvme_complete_batch_req(struct request *req)
504{
505 trace_nvme_complete_rq(req);
506 nvme_cleanup_cmd(req);
507 __nvme_end_req(req);
508}
509EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
510
511/*
512 * Called to unwind from ->queue_rq on a failed command submission so that the
513 * multipathing code gets called to potentially failover to another path.
514 * The caller needs to unwind all transport specific resource allocations and
515 * must propagate the return value.
516 */
517blk_status_t nvme_host_path_error(struct request *req)
518{
519 nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
520 blk_mq_set_request_complete(req);
521 __nvme_complete_rq(req);
522 return BLK_STS_OK;
523}
524EXPORT_SYMBOL_GPL(nvme_host_path_error);
525
526bool nvme_cancel_request(struct request *req, void *data)
527{
528 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
529 "Cancelling I/O %d", req->tag);
530
531 /* don't abort one completed or idle request */
532 if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
533 return true;
534
535 nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
536 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
537 blk_mq_complete_request(req);
538 return true;
539}
540EXPORT_SYMBOL_GPL(nvme_cancel_request);
541
542void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
543{
544 if (ctrl->tagset) {
545 blk_mq_tagset_busy_iter(ctrl->tagset,
546 nvme_cancel_request, ctrl);
547 blk_mq_tagset_wait_completed_request(ctrl->tagset);
548 }
549}
550EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
551
552void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
553{
554 if (ctrl->admin_tagset) {
555 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
556 nvme_cancel_request, ctrl);
557 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
558 }
559}
560EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
561
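/*
 * Controller state machine: returns true if the transition from the current
 * state to @new_state is legal and was applied.  For example a fabrics
 * reconnect walks RESETTING -> CONNECTING -> LIVE; transitions outside the
 * allowed set are rejected so concurrent resets and deletions cannot race.
 */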
562bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
563 enum nvme_ctrl_state new_state)
564{
565 enum nvme_ctrl_state old_state;
566 unsigned long flags;
567 bool changed = false;
568
569 spin_lock_irqsave(&ctrl->lock, flags);
570
571 old_state = nvme_ctrl_state(ctrl);
572 switch (new_state) {
573 case NVME_CTRL_LIVE:
574 switch (old_state) {
575 case NVME_CTRL_CONNECTING:
576 changed = true;
577 fallthrough;
578 default:
579 break;
580 }
581 break;
582 case NVME_CTRL_RESETTING:
583 switch (old_state) {
584 case NVME_CTRL_NEW:
585 case NVME_CTRL_LIVE:
586 changed = true;
587 fallthrough;
588 default:
589 break;
590 }
591 break;
592 case NVME_CTRL_CONNECTING:
593 switch (old_state) {
594 case NVME_CTRL_NEW:
595 case NVME_CTRL_RESETTING:
596 changed = true;
597 fallthrough;
598 default:
599 break;
600 }
601 break;
602 case NVME_CTRL_DELETING:
603 switch (old_state) {
604 case NVME_CTRL_LIVE:
605 case NVME_CTRL_RESETTING:
606 case NVME_CTRL_CONNECTING:
607 changed = true;
608 fallthrough;
609 default:
610 break;
611 }
612 break;
613 case NVME_CTRL_DELETING_NOIO:
614 switch (old_state) {
615 case NVME_CTRL_DELETING:
616 case NVME_CTRL_DEAD:
617 changed = true;
618 fallthrough;
619 default:
620 break;
621 }
622 break;
623 case NVME_CTRL_DEAD:
624 switch (old_state) {
625 case NVME_CTRL_DELETING:
626 changed = true;
627 fallthrough;
628 default:
629 break;
630 }
631 break;
632 default:
633 break;
634 }
635
636 if (changed) {
637 WRITE_ONCE(ctrl->state, new_state);
638 wake_up_all(&ctrl->state_wq);
639 }
640
641 spin_unlock_irqrestore(&ctrl->lock, flags);
642 if (!changed)
643 return false;
644
645 if (new_state == NVME_CTRL_LIVE) {
646 if (old_state == NVME_CTRL_CONNECTING)
647 nvme_stop_failfast_work(ctrl);
648 nvme_kick_requeue_lists(ctrl);
649 } else if (new_state == NVME_CTRL_CONNECTING &&
650 old_state == NVME_CTRL_RESETTING) {
651 nvme_start_failfast_work(ctrl);
652 }
653 return changed;
654}
655EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
656
657/*
658 * Waits for the controller state to be resetting, or returns false if it is
659 * not possible to ever transition to that state.
660 */
661bool nvme_wait_reset(struct nvme_ctrl *ctrl)
662{
663 wait_event(ctrl->state_wq,
664 nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
665 nvme_state_terminal(ctrl));
666 return nvme_ctrl_state(ctrl) == NVME_CTRL_RESETTING;
667}
668EXPORT_SYMBOL_GPL(nvme_wait_reset);
669
670static void nvme_free_ns_head(struct kref *ref)
671{
672 struct nvme_ns_head *head =
673 container_of(ref, struct nvme_ns_head, ref);
674
675 nvme_mpath_put_disk(head);
676 ida_free(&head->subsys->ns_ida, head->instance);
677 cleanup_srcu_struct(&head->srcu);
678 nvme_put_subsystem(head->subsys);
679 kfree(head->plids);
680 kfree(head);
681}
682
683bool nvme_tryget_ns_head(struct nvme_ns_head *head)
684{
685 return kref_get_unless_zero(&head->ref);
686}
687
688void nvme_put_ns_head(struct nvme_ns_head *head)
689{
690 kref_put(&head->ref, nvme_free_ns_head);
691}
692
693static void nvme_free_ns(struct kref *kref)
694{
695 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
696
697 put_disk(ns->disk);
698 nvme_put_ns_head(ns->head);
699 nvme_put_ctrl(ns->ctrl);
700 kfree(ns);
701}
702
703bool nvme_get_ns(struct nvme_ns *ns)
704{
705 return kref_get_unless_zero(&ns->kref);
706}
707
708void nvme_put_ns(struct nvme_ns *ns)
709{
710 kref_put(&ns->kref, nvme_free_ns);
711}
712EXPORT_SYMBOL_NS_GPL(nvme_put_ns, "NVME_TARGET_PASSTHRU");
713
714static inline void nvme_clear_nvme_request(struct request *req)
715{
716 nvme_req(req)->status = 0;
717 nvme_req(req)->retries = 0;
718 nvme_req(req)->flags = 0;
719 req->rq_flags |= RQF_DONTPREP;
720}
721
722/* initialize a passthrough request */
723void nvme_init_request(struct request *req, struct nvme_command *cmd)
724{
725 struct nvme_request *nr = nvme_req(req);
726 bool logging_enabled;
727
728 if (req->q->queuedata) {
729 struct nvme_ns *ns = req->q->disk->private_data;
730
731 logging_enabled = ns->head->passthru_err_log_enabled;
732 req->timeout = NVME_IO_TIMEOUT;
733 } else { /* no queuedata implies admin queue */
734 logging_enabled = nr->ctrl->passthru_err_log_enabled;
735 req->timeout = NVME_ADMIN_TIMEOUT;
736 }
737
738 if (!logging_enabled)
739 req->rq_flags |= RQF_QUIET;
740
741 /* passthru commands should let the driver set the SGL flags */
742 cmd->common.flags &= ~NVME_CMD_SGL_ALL;
743
744 req->cmd_flags |= REQ_FAILFAST_DRIVER;
745 if (req->mq_hctx->type == HCTX_TYPE_POLL)
746 req->cmd_flags |= REQ_POLLED;
747 nvme_clear_nvme_request(req);
748 memcpy(nr->cmd, cmd, sizeof(*cmd));
749}
750EXPORT_SYMBOL_GPL(nvme_init_request);
751
752/*
753 * For a command we're not in a state to send to the device, the default action
754 * is to busy it and retry it after the controller state is recovered. However,
755 * if the controller is deleting or if anything is marked for failfast or
756 * nvme multipath it is immediately failed.
757 *
758 * Note: commands used to initialize the controller will be marked for failfast.
759 * Note: nvme cli/ioctl commands are marked for failfast.
760 */
761blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
762 struct request *rq)
763{
764 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
765
766 if (state != NVME_CTRL_DELETING_NOIO &&
767 state != NVME_CTRL_DELETING &&
768 state != NVME_CTRL_DEAD &&
769 !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
770 !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
771 return BLK_STS_RESOURCE;
772
773 if (!(rq->rq_flags & RQF_DONTPREP))
774 nvme_clear_nvme_request(rq);
775
776 return nvme_host_path_error(rq);
777}
778EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
779
780bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
781 bool queue_live, enum nvme_ctrl_state state)
782{
783 struct nvme_request *req = nvme_req(rq);
784
785 /*
786 * currently we have a problem sending passthru commands
787 * on the admin_q if the controller is not LIVE because we can't
788 * make sure that they are going out after the admin connect,
789 * controller enable and/or other commands in the initialization
790 * sequence. Until the controller is LIVE, fail with
791 * BLK_STS_RESOURCE so that they will be rescheduled.
792 */
793 if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
794 return false;
795
796 if (ctrl->ops->flags & NVME_F_FABRICS) {
797 /*
798 * Only allow commands on a live queue, except for the connect
799 * command, which is required to set the queue live in the
800 * appropriate states.
801 */
802 switch (state) {
803 case NVME_CTRL_CONNECTING:
804 if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
805 (req->cmd->fabrics.fctype == nvme_fabrics_type_connect ||
806 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send ||
807 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive))
808 return true;
809 break;
810 default:
811 break;
812 case NVME_CTRL_DEAD:
813 return false;
814 }
815 }
816
817 return queue_live;
818}
819EXPORT_SYMBOL_GPL(__nvme_check_ready);
820
821static inline void nvme_setup_flush(struct nvme_ns *ns,
822 struct nvme_command *cmnd)
823{
824 memset(cmnd, 0, sizeof(*cmnd));
825 cmnd->common.opcode = nvme_cmd_flush;
826 cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
827}
828
829static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
830 struct nvme_command *cmnd)
831{
832 unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
833 struct nvme_dsm_range *range;
834 struct bio *bio;
835
836 /*
837 * Some devices do not consider the DSM 'Number of Ranges' field when
838 * determining how much data to DMA. Always allocate memory for the
839 * maximum number of segments to prevent the device reading beyond the buffer.
840 */
841 static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;
842
843 range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
844 if (!range) {
845 /*
846 * If we fail to allocate our range, fall back to the controller
847 * discard page. If that's also busy, it's safe to return
848 * busy, as we know we can make progress once that's freed.
849 */
850 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
851 return BLK_STS_RESOURCE;
852
853 range = page_address(ns->ctrl->discard_page);
854 }
855
856 if (queue_max_discard_segments(req->q) == 1) {
857 u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req));
858 u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9);
859
860 range[0].cattr = cpu_to_le32(0);
861 range[0].nlb = cpu_to_le32(nlb);
862 range[0].slba = cpu_to_le64(slba);
863 n = 1;
864 } else {
865 __rq_for_each_bio(bio, req) {
866 u64 slba = nvme_sect_to_lba(ns->head,
867 bio->bi_iter.bi_sector);
868 u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift;
869
870 if (n < segments) {
871 range[n].cattr = cpu_to_le32(0);
872 range[n].nlb = cpu_to_le32(nlb);
873 range[n].slba = cpu_to_le64(slba);
874 }
875 n++;
876 }
877 }
878
879 if (WARN_ON_ONCE(n != segments)) {
880 if (virt_to_page(range) == ns->ctrl->discard_page)
881 clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
882 else
883 kfree(range);
884 return BLK_STS_IOERR;
885 }
886
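 /*
  * Build the DSM command.  The Number of Ranges (NR) field is zero's based,
  * and the Attribute Deallocate (AD) bit asks the controller to deallocate
  * the listed LBA ranges.
  */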
887 memset(cmnd, 0, sizeof(*cmnd));
888 cmnd->dsm.opcode = nvme_cmd_dsm;
889 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
890 cmnd->dsm.nr = cpu_to_le32(segments - 1);
891 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
892
893 bvec_set_virt(&req->special_vec, range, alloc_size);
894 req->rq_flags |= RQF_SPECIAL_PAYLOAD;
895
896 return BLK_STS_OK;
897}
898
899static void nvme_set_app_tag(struct request *req, struct nvme_command *cmnd)
900{
901 cmnd->rw.lbat = cpu_to_le16(bio_integrity(req->bio)->app_tag);
902 cmnd->rw.lbatm = cpu_to_le16(0xffff);
903}
904
905static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
906 struct request *req)
907{
908 u32 upper, lower;
909 u64 ref48;
910
911 /* only type 1 and type 2 PI formats have a reftag */
912 switch (ns->head->pi_type) {
913 case NVME_NS_DPS_PI_TYPE1:
914 case NVME_NS_DPS_PI_TYPE2:
915 break;
916 default:
917 return;
918 }
919
920 /* both rw and write zeroes share the same reftag format */
921 switch (ns->head->guard_type) {
922 case NVME_NVM_NS_16B_GUARD:
923 cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
924 break;
925 case NVME_NVM_NS_64B_GUARD:
926 ref48 = ext_pi_ref_tag(req);
927 lower = lower_32_bits(ref48);
928 upper = upper_32_bits(ref48);
929
930 cmnd->rw.reftag = cpu_to_le32(lower);
931 cmnd->rw.cdw3 = cpu_to_le32(upper);
932 break;
933 default:
934 break;
935 }
936}
937
938static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
939 struct request *req, struct nvme_command *cmnd)
940{
941 memset(cmnd, 0, sizeof(*cmnd));
942
943 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
944 return nvme_setup_discard(ns, req, cmnd);
945
946 cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
947 cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
948 cmnd->write_zeroes.slba =
949 cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
950 cmnd->write_zeroes.length =
951 cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
952
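 /*
  * Unless the caller asked to keep the blocks mapped (REQ_NOUNMAP), also
  * set the Deallocate (DEAC) bit when the namespace supports deallocating
  * the range being zeroed.
  */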
953 if (!(req->cmd_flags & REQ_NOUNMAP) &&
954 (ns->head->features & NVME_NS_DEAC))
955 cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
956
957 if (nvme_ns_has_pi(ns->head)) {
958 cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
959 nvme_set_ref_tag(ns, cmnd, req);
960 }
961
962 return BLK_STS_OK;
963}
964
965/*
966 * NVMe does not support a dedicated command to issue an atomic write. A write
967 * which does not adhere to the device atomic limits will silently be executed
968 * non-atomically. The request issuer should ensure that the write is within
969 * the queue's atomic write limits, but just validate this in case it is not.
970 */
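/*
 * For example, with a reported 64 KiB atomic write boundary an 8 KiB write
 * starting 4 KiB below a 64 KiB aligned address would cross that boundary
 * and is rejected; the mask comparison below checks that the first and last
 * byte of the request fall within the same boundary-sized window.
 */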
971static bool nvme_valid_atomic_write(struct request *req)
972{
973 struct request_queue *q = req->q;
974 u32 boundary_bytes = queue_atomic_write_boundary_bytes(q);
975
976 if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q))
977 return false;
978
979 if (boundary_bytes) {
980 u64 mask = boundary_bytes - 1, imask = ~mask;
981 u64 start = blk_rq_pos(req) << SECTOR_SHIFT;
982 u64 end = start + blk_rq_bytes(req) - 1;
983
984 /* If greater than the boundary size, it must be crossing a boundary */
985 if (blk_rq_bytes(req) > boundary_bytes)
986 return false;
987
988 if ((start & imask) != (end & imask))
989 return false;
990 }
991
992 return true;
993}
994
995static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
996 struct request *req, struct nvme_command *cmnd,
997 enum nvme_opcode op)
998{
999 u16 control = 0;
1000 u32 dsmgmt = 0;
1001
1002 if (req->cmd_flags & REQ_FUA)
1003 control |= NVME_RW_FUA;
1004 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
1005 control |= NVME_RW_LR;
1006
1007 if (req->cmd_flags & REQ_RAHEAD)
1008 dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
1009
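 /*
  * If the namespace exposes placement identifiers, translate the bio's
  * write stream into a placement directive: the directive type is set in
  * the control field and the placement ID is carried in the DSPEC field
  * (the upper 16 bits of CDW13).
  */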
1010 if (op == nvme_cmd_write && ns->head->nr_plids) {
1011 u16 write_stream = req->bio->bi_write_stream;
1012
1013 if (WARN_ON_ONCE(write_stream > ns->head->nr_plids))
1014 return BLK_STS_INVAL;
1015
1016 if (write_stream) {
1017 dsmgmt |= ns->head->plids[write_stream - 1] << 16;
1018 control |= NVME_RW_DTYPE_DPLCMT;
1019 }
1020 }
1021
1022 if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req))
1023 return BLK_STS_INVAL;
1024
1025 cmnd->rw.opcode = op;
1026 cmnd->rw.flags = 0;
1027 cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
1028 cmnd->rw.cdw2 = 0;
1029 cmnd->rw.cdw3 = 0;
1030 cmnd->rw.metadata = 0;
1031 cmnd->rw.slba =
1032 cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
1033 cmnd->rw.length =
1034 cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
1035 cmnd->rw.reftag = 0;
1036 cmnd->rw.lbat = 0;
1037 cmnd->rw.lbatm = 0;
1038
1039 if (ns->head->ms) {
1040 /*
1041 * If formatted with metadata, the block layer always provides a
1042 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
1043 * we enable the PRACT bit for protection information or set the
1044 * namespace capacity to zero to prevent any I/O.
1045 */
1046 if (!blk_integrity_rq(req)) {
1047 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
1048 return BLK_STS_NOTSUPP;
1049 control |= NVME_RW_PRINFO_PRACT;
1050 nvme_set_ref_tag(ns, cmnd, req);
1051 }
1052
1053 if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
1054 control |= NVME_RW_PRINFO_PRCHK_GUARD;
1055 if (bio_integrity_flagged(req->bio, BIP_CHECK_REFTAG)) {
1056 control |= NVME_RW_PRINFO_PRCHK_REF;
1057 if (op == nvme_cmd_zone_append)
1058 control |= NVME_RW_APPEND_PIREMAP;
1059 nvme_set_ref_tag(ns, cmnd, req);
1060 }
1061 if (bio_integrity_flagged(req->bio, BIP_CHECK_APPTAG)) {
1062 control |= NVME_RW_PRINFO_PRCHK_APP;
1063 nvme_set_app_tag(req, cmnd);
1064 }
1065 }
1066
1067 cmnd->rw.control = cpu_to_le16(control);
1068 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
1069 return 0;
1070}
1071
1072void nvme_cleanup_cmd(struct request *req)
1073{
1074 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
1075 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
1076
1077 if (req->special_vec.bv_page == ctrl->discard_page)
1078 clear_bit_unlock(0, &ctrl->discard_page_busy);
1079 else
1080 kfree(bvec_virt(&req->special_vec));
1081 req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
1082 }
1083}
1084EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
1085
1086blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
1087{
1088 struct nvme_command *cmd = nvme_req(req)->cmd;
1089 blk_status_t ret = BLK_STS_OK;
1090
1091 if (!(req->rq_flags & RQF_DONTPREP))
1092 nvme_clear_nvme_request(req);
1093
1094 switch (req_op(req)) {
1095 case REQ_OP_DRV_IN:
1096 case REQ_OP_DRV_OUT:
1097 /* these are set up prior to execution in nvme_init_request() */
1098 break;
1099 case REQ_OP_FLUSH:
1100 nvme_setup_flush(ns, cmd);
1101 break;
1102 case REQ_OP_ZONE_RESET_ALL:
1103 case REQ_OP_ZONE_RESET:
1104 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
1105 break;
1106 case REQ_OP_ZONE_OPEN:
1107 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
1108 break;
1109 case REQ_OP_ZONE_CLOSE:
1110 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
1111 break;
1112 case REQ_OP_ZONE_FINISH:
1113 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
1114 break;
1115 case REQ_OP_WRITE_ZEROES:
1116 ret = nvme_setup_write_zeroes(ns, req, cmd);
1117 break;
1118 case REQ_OP_DISCARD:
1119 ret = nvme_setup_discard(ns, req, cmd);
1120 break;
1121 case REQ_OP_READ:
1122 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
1123 break;
1124 case REQ_OP_WRITE:
1125 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
1126 break;
1127 case REQ_OP_ZONE_APPEND:
1128 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
1129 break;
1130 default:
1131 WARN_ON_ONCE(1);
1132 return BLK_STS_IOERR;
1133 }
1134
1135 cmd->common.command_id = nvme_cid(req);
1136 trace_nvme_setup_cmd(req, cmd);
1137 return ret;
1138}
1139EXPORT_SYMBOL_GPL(nvme_setup_cmd);
1140
1141/*
1142 * Return values:
1143 * 0: success
1144 * >0: nvme controller's cqe status response
1145 * <0: kernel error in lieu of controller response
1146 */
1147int nvme_execute_rq(struct request *rq, bool at_head)
1148{
1149 blk_status_t status;
1150
1151 status = blk_execute_rq(rq, at_head);
1152 if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
1153 return -EINTR;
1154 if (nvme_req(rq)->status)
1155 return nvme_req(rq)->status;
1156 return blk_status_to_errno(status);
1157}
1158EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, "NVME_TARGET_PASSTHRU");
1159
1160/*
1161 * Returns 0 on success. If the result is negative, it's a Linux error code;
1162 * if the result is positive, it's an NVM Express status code
1163 */
1164int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
1165 union nvme_result *result, void *buffer, unsigned bufflen,
1166 int qid, nvme_submit_flags_t flags)
1167{
1168 struct request *req;
1169 int ret;
1170 blk_mq_req_flags_t blk_flags = 0;
1171
1172 if (flags & NVME_SUBMIT_NOWAIT)
1173 blk_flags |= BLK_MQ_REQ_NOWAIT;
1174 if (flags & NVME_SUBMIT_RESERVED)
1175 blk_flags |= BLK_MQ_REQ_RESERVED;
1176 if (qid == NVME_QID_ANY)
1177 req = blk_mq_alloc_request(q, nvme_req_op(cmd), blk_flags);
1178 else
1179 req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), blk_flags,
1180 qid - 1);
1181
1182 if (IS_ERR(req))
1183 return PTR_ERR(req);
1184 nvme_init_request(req, cmd);
1185 if (flags & NVME_SUBMIT_RETRY)
1186 req->cmd_flags &= ~REQ_FAILFAST_DRIVER;
1187
1188 if (buffer && bufflen) {
1189 ret = blk_rq_map_kern(req, buffer, bufflen, GFP_KERNEL);
1190 if (ret)
1191 goto out;
1192 }
1193
1194 ret = nvme_execute_rq(req, flags & NVME_SUBMIT_AT_HEAD);
1195 if (result && ret >= 0)
1196 *result = nvme_req(req)->result;
1197 out:
1198 blk_mq_free_request(req);
1199 return ret;
1200}
1201EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
1202
1203int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
1204 void *buffer, unsigned bufflen)
1205{
1206 return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
1207 NVME_QID_ANY, 0);
1208}
1209EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
1210
1211u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
1212{
1213 u32 effects = 0;
1214
1215 if (ns) {
1216 effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
1217 if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1218 dev_warn_once(ctrl->device,
1219 "IO command:%02x has unusual effects:%08x\n",
1220 opcode, effects);
1221
1222 /*
1223 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
1224 * which would deadlock when done on an I/O command. Note that
1225 * we already warn about an unusual effect above.
1226 */
1227 effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
1228 } else {
1229 effects = le32_to_cpu(ctrl->effects->acs[opcode]);
1230
1231 /* Ignore execution restrictions if any relaxation bits are set */
1232 if (effects & NVME_CMD_EFFECTS_CSER_MASK)
1233 effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
1234 }
1235
1236 return effects;
1237}
1238EXPORT_SYMBOL_NS_GPL(nvme_command_effects, "NVME_TARGET_PASSTHRU");
1239
1240u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
1241{
1242 u32 effects = nvme_command_effects(ctrl, ns, opcode);
1243
1244 /*
1245 * For simplicity, IO to all namespaces is quiesced even if the command
1246 * effects say only one namespace is affected.
1247 */
1248 if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1249 mutex_lock(&ctrl->scan_lock);
1250 mutex_lock(&ctrl->subsys->lock);
1251 nvme_mpath_start_freeze(ctrl->subsys);
1252 nvme_mpath_wait_freeze(ctrl->subsys);
1253 nvme_start_freeze(ctrl);
1254 nvme_wait_freeze(ctrl);
1255 }
1256 return effects;
1257}
1258EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, "NVME_TARGET_PASSTHRU");
1259
1260void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
1261 struct nvme_command *cmd, int status)
1262{
1263 if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
1264 nvme_unfreeze(ctrl);
1265 nvme_mpath_unfreeze(ctrl->subsys);
1266 mutex_unlock(&ctrl->subsys->lock);
1267 mutex_unlock(&ctrl->scan_lock);
1268 }
1269 if (effects & NVME_CMD_EFFECTS_CCC) {
1270 if (!test_and_set_bit(NVME_CTRL_DIRTY_CAPABILITY,
1271 &ctrl->flags)) {
1272 dev_info(ctrl->device,
1273"controller capabilities changed, reset may be required to take effect.\n");
1274 }
1275 }
1276 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
1277 nvme_queue_scan(ctrl);
1278 flush_work(&ctrl->scan_work);
1279 }
1280 if (ns)
1281 return;
1282
1283 switch (cmd->common.opcode) {
1284 case nvme_admin_set_features:
1285 switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) {
1286 case NVME_FEAT_KATO:
1287 /*
1288 * The keep alive command interval on the host should be
1289 * updated when KATO is modified by Set Features
1290 * commands.
1291 */
1292 if (!status)
1293 nvme_update_keep_alive(ctrl, cmd);
1294 break;
1295 default:
1296 break;
1297 }
1298 break;
1299 default:
1300 break;
1301 }
1302}
1303EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, "NVME_TARGET_PASSTHRU");
1304
1305/*
1306 * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
1307 *
1308 * The host should send Keep Alive commands at half of the Keep Alive Timeout
1309 * accounting for transport roundtrip times [..].
1310 */
1311static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
1312{
1313 unsigned long delay = ctrl->kato * HZ / 2;
1314
1315 /*
1316 * When using Traffic Based Keep Alive, we need to run
1317 * nvme_keep_alive_work at twice the normal frequency, as one
1318 * command completion can postpone sending a keep alive command
1319 * by up to twice the delay between runs.
1320 */
1321 if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
1322 delay /= 2;
1323 return delay;
1324}
1325
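/*
 * Schedule the next keep-alive run.  With KATO = 10 seconds, for example,
 * the work runs every 5 seconds, or every 2.5 seconds when Traffic Based
 * Keep Alive is in effect; if the last check is already older than that,
 * run it immediately.
 */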
1326static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
1327{
1328 unsigned long now = jiffies;
1329 unsigned long delay = nvme_keep_alive_work_period(ctrl);
1330 unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay;
1331
1332 if (time_after(now, ka_next_check_tm))
1333 delay = 0;
1334 else
1335 delay = ka_next_check_tm - now;
1336
1337 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
1338}
1339
1340static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
1341 blk_status_t status,
1342 const struct io_comp_batch *iob)
1343{
1344 struct nvme_ctrl *ctrl = rq->end_io_data;
1345 unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
1346 unsigned long delay = nvme_keep_alive_work_period(ctrl);
1347 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
1348
1349 /*
1350 * Subtract off the keepalive RTT so nvme_keep_alive_work runs
1351 * at the desired frequency.
1352 */
1353 if (rtt <= delay) {
1354 delay -= rtt;
1355 } else {
1356 dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
1357 jiffies_to_msecs(rtt));
1358 delay = 0;
1359 }
1360
1361 blk_mq_free_request(rq);
1362
1363 if (status) {
1364 dev_err(ctrl->device,
1365 "failed nvme_keep_alive_end_io error=%d\n",
1366 status);
1367 return RQ_END_IO_NONE;
1368 }
1369
1370 ctrl->ka_last_check_time = jiffies;
1371 ctrl->comp_seen = false;
1372 if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING)
1373 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
1374 return RQ_END_IO_NONE;
1375}
1376
1377static void nvme_keep_alive_work(struct work_struct *work)
1378{
1379 struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
1380 struct nvme_ctrl, ka_work);
1381 bool comp_seen = ctrl->comp_seen;
1382 struct request *rq;
1383
1384 ctrl->ka_last_check_time = jiffies;
1385
1386 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
1387 dev_dbg(ctrl->device,
1388 "reschedule traffic based keep-alive timer\n");
1389 ctrl->comp_seen = false;
1390 nvme_queue_keep_alive_work(ctrl);
1391 return;
1392 }
1393
1394 rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
1395 BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
1396 if (IS_ERR(rq)) {
1397 /* allocation failure, reset the controller */
1398 dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
1399 nvme_reset_ctrl(ctrl);
1400 return;
1401 }
1402 nvme_init_request(rq, &ctrl->ka_cmd);
1403
1404 rq->timeout = ctrl->kato * HZ;
1405 rq->end_io = nvme_keep_alive_end_io;
1406 rq->end_io_data = ctrl;
1407 blk_execute_rq_nowait(rq, false);
1408}
1409
1410static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
1411{
1412 if (unlikely(ctrl->kato == 0))
1413 return;
1414
1415 nvme_queue_keep_alive_work(ctrl);
1416}
1417
1418void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
1419{
1420 if (unlikely(ctrl->kato == 0))
1421 return;
1422
1423 cancel_delayed_work_sync(&ctrl->ka_work);
1424}
1425EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
1426
1427static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
1428 struct nvme_command *cmd)
1429{
1430 unsigned int new_kato =
1431 DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000);
1432
1433 dev_info(ctrl->device,
1434 "keep alive interval updated from %u ms to %u ms\n",
1435 ctrl->kato * 1000 / 2, new_kato * 1000 / 2);
1436
1437 nvme_stop_keep_alive(ctrl);
1438 ctrl->kato = new_kato;
1439 nvme_start_keep_alive(ctrl);
1440}
1441
1442static bool nvme_id_cns_ok(struct nvme_ctrl *ctrl, u8 cns)
1443{
1444 /*
1445 * The CNS field occupies a full byte starting with NVMe 1.2
1446 */
1447 if (ctrl->vs >= NVME_VS(1, 2, 0))
1448 return true;
1449
1450 /*
1451 * NVMe 1.1 expanded the CNS value to two bits, which means values
1452 * larger than that could get truncated and treated as an incorrect
1453 * value.
1454 *
1455 * Qemu implemented 1.0 behavior for controllers claiming 1.1
1456 * compliance, so they need to be quirked here.
1457 */
1458 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1459 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS))
1460 return cns <= 3;
1461
1462 /*
1463 * NVMe 1.0 used a single bit for the CNS value.
1464 */
1465 return cns <= 1;
1466}
1467
1468static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
1469{
1470 struct nvme_command c = { };
1471 int error;
1472
1473 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1474 c.identify.opcode = nvme_admin_identify;
1475 c.identify.cns = NVME_ID_CNS_CTRL;
1476
1477 *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
1478 if (!*id)
1479 return -ENOMEM;
1480
1481 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
1482 sizeof(struct nvme_id_ctrl));
1483 if (error) {
1484 kfree(*id);
1485 *id = NULL;
1486 }
1487 return error;
1488}
1489
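/*
 * Parse a single entry of the Namespace Identification Descriptor list.
 * Each descriptor consists of a type byte (NIDT) and a length byte (NIDL)
 * followed by the identifier itself; the return value is the identifier
 * length so the caller can advance to the next descriptor (it adds the
 * header size itself), or -1 if the controller reported a bogus length.
 */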
1490static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
1491 struct nvme_ns_id_desc *cur, bool *csi_seen)
1492{
1493 const char *warn_str = "ctrl returned bogus length:";
1494 void *data = cur;
1495
1496 switch (cur->nidt) {
1497 case NVME_NIDT_EUI64:
1498 if (cur->nidl != NVME_NIDT_EUI64_LEN) {
1499 dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
1500 warn_str, cur->nidl);
1501 return -1;
1502 }
1503 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1504 return NVME_NIDT_EUI64_LEN;
1505 memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
1506 return NVME_NIDT_EUI64_LEN;
1507 case NVME_NIDT_NGUID:
1508 if (cur->nidl != NVME_NIDT_NGUID_LEN) {
1509 dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
1510 warn_str, cur->nidl);
1511 return -1;
1512 }
1513 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1514 return NVME_NIDT_NGUID_LEN;
1515 memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
1516 return NVME_NIDT_NGUID_LEN;
1517 case NVME_NIDT_UUID:
1518 if (cur->nidl != NVME_NIDT_UUID_LEN) {
1519 dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
1520 warn_str, cur->nidl);
1521 return -1;
1522 }
1523 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
1524 return NVME_NIDT_UUID_LEN;
1525 uuid_copy(&ids->uuid, data + sizeof(*cur));
1526 return NVME_NIDT_UUID_LEN;
1527 case NVME_NIDT_CSI:
1528 if (cur->nidl != NVME_NIDT_CSI_LEN) {
1529 dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
1530 warn_str, cur->nidl);
1531 return -1;
1532 }
1533 memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
1534 *csi_seen = true;
1535 return NVME_NIDT_CSI_LEN;
1536 default:
1537 /* Skip unknown types */
1538 return cur->nidl;
1539 }
1540}
1541
1542static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl,
1543 struct nvme_ns_info *info)
1544{
1545 struct nvme_command c = { };
1546 bool csi_seen = false;
1547 int status, pos, len;
1548 void *data;
1549
1550 if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
1551 return 0;
1552 if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
1553 return 0;
1554
1555 c.identify.opcode = nvme_admin_identify;
1556 c.identify.nsid = cpu_to_le32(info->nsid);
1557 c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
1558
1559 data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
1560 if (!data)
1561 return -ENOMEM;
1562
1563 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
1564 NVME_IDENTIFY_DATA_SIZE);
1565 if (status) {
1566 dev_warn(ctrl->device,
1567 "Identify Descriptors failed (nsid=%u, status=0x%x)\n",
1568 info->nsid, status);
1569 goto free_data;
1570 }
1571
1572 for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
1573 struct nvme_ns_id_desc *cur = data + pos;
1574
1575 if (cur->nidl == 0)
1576 break;
1577
1578 len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen);
1579 if (len < 0)
1580 break;
1581
1582 len += sizeof(*cur);
1583 }
1584
1585 if (nvme_multi_css(ctrl) && !csi_seen) {
1586 dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
1587 info->nsid);
1588 status = -EINVAL;
1589 }
1590
1591free_data:
1592 kfree(data);
1593 return status;
1594}
1595
1596int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
1597 struct nvme_id_ns **id)
1598{
1599 struct nvme_command c = { };
1600 int error;
1601
1602 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
1603 c.identify.opcode = nvme_admin_identify;
1604 c.identify.nsid = cpu_to_le32(nsid);
1605 c.identify.cns = NVME_ID_CNS_NS;
1606
1607 *id = kmalloc(sizeof(**id), GFP_KERNEL);
1608 if (!*id)
1609 return -ENOMEM;
1610
1611 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
1612 if (error) {
1613 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
1614 kfree(*id);
1615 *id = NULL;
1616 }
1617 return error;
1618}
1619
1620static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
1621 struct nvme_ns_info *info)
1622{
1623 struct nvme_ns_ids *ids = &info->ids;
1624 struct nvme_id_ns *id;
1625 int ret;
1626
1627 ret = nvme_identify_ns(ctrl, info->nsid, &id);
1628 if (ret)
1629 return ret;
1630
1631 if (id->ncap == 0) {
1632 /* namespace not allocated or attached */
1633 info->is_removed = true;
1634 ret = -ENODEV;
1635 goto error;
1636 }
1637
1638 info->anagrpid = id->anagrpid;
1639 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
1640 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
1641 info->is_ready = true;
1642 info->endgid = le16_to_cpu(id->endgid);
1643 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
1644 dev_info(ctrl->device,
1645 "Ignoring bogus Namespace Identifiers\n");
1646 } else {
1647 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
1648 !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
1649 memcpy(ids->eui64, id->eui64, sizeof(ids->eui64));
1650 if (ctrl->vs >= NVME_VS(1, 2, 0) &&
1651 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
1652 memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
1653 }
1654
1655error:
1656 kfree(id);
1657 return ret;
1658}
1659
1660static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
1661 struct nvme_ns_info *info)
1662{
1663 struct nvme_id_ns_cs_indep *id;
1664 struct nvme_command c = {
1665 .identify.opcode = nvme_admin_identify,
1666 .identify.nsid = cpu_to_le32(info->nsid),
1667 .identify.cns = NVME_ID_CNS_NS_CS_INDEP,
1668 };
1669 int ret;
1670
1671 id = kmalloc(sizeof(*id), GFP_KERNEL);
1672 if (!id)
1673 return -ENOMEM;
1674
1675 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
1676 if (!ret) {
1677 info->anagrpid = id->anagrpid;
1678 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
1679 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
1680 info->is_ready = id->nstat & NVME_NSTAT_NRDY;
1681 info->is_rotational = id->nsfeat & NVME_NS_ROTATIONAL;
1682 info->no_vwc = id->nsfeat & NVME_NS_VWC_NOT_PRESENT;
1683 info->endgid = le16_to_cpu(id->endgid);
1684 }
1685 kfree(id);
1686 return ret;
1687}
1688
1689static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
1690 unsigned int dword11, void *buffer, size_t buflen, u32 *result)
1691{
1692 union nvme_result res = { 0 };
1693 struct nvme_command c = { };
1694 int ret;
1695
1696 c.features.opcode = op;
1697 c.features.fid = cpu_to_le32(fid);
1698 c.features.dword11 = cpu_to_le32(dword11);
1699
1700 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
1701 buffer, buflen, NVME_QID_ANY, 0);
1702 if (ret >= 0 && result)
1703 *result = le32_to_cpu(res.u32);
1704 return ret;
1705}
1706
1707int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
1708 unsigned int dword11, void *buffer, size_t buflen,
1709 void *result)
1710{
1711 return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
1712 buflen, result);
1713}
1714EXPORT_SYMBOL_GPL(nvme_set_features);
1715
1716int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
1717 unsigned int dword11, void *buffer, size_t buflen,
1718 void *result)
1719{
1720 return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
1721 buflen, result);
1722}
1723EXPORT_SYMBOL_GPL(nvme_get_features);
1724
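/*
 * Request the number of I/O queues via the Number of Queues feature.  Both
 * the submission (lower 16 bits) and completion (upper 16 bits) queue counts
 * in dword 11 are zero's based, so asking for 4 queues of each type encodes
 * as 0x00030003.  The controller replies with the counts it actually
 * allocated and *count is clamped accordingly.
 */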
1725int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
1726{
1727 u32 q_count = (*count - 1) | ((*count - 1) << 16);
1728 u32 result;
1729 int status, nr_io_queues;
1730
1731 status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
1732 &result);
1733
1734 /*
1735 * It's either a kernel error or the host observed a lost
1736 * connection. In either case it's not possible to communicate with the
1737 * controller and thus we enter the error code path.
1738 */
1739 if (status < 0 || status == NVME_SC_HOST_PATH_ERROR)
1740 return status;
1741
1742 /*
1743 * Degraded controllers might return an error when setting the queue
1744 * count. We still want to be able to bring them online and offer
1745 * access to the admin queue, as that might be the only way to fix them up.
1746 */
1747 if (status > 0) {
1748 dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
1749 *count = 0;
1750 } else {
1751 nr_io_queues = min(result & 0xffff, result >> 16) + 1;
1752 *count = min(*count, nr_io_queues);
1753 }
1754
1755 return 0;
1756}
1757EXPORT_SYMBOL_GPL(nvme_set_queue_count);
1758
1759#define NVME_AEN_SUPPORTED \
1760 (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
1761 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)
1762
1763static void nvme_enable_aen(struct nvme_ctrl *ctrl)
1764{
1765 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
1766 int status;
1767
1768 if (!supported_aens)
1769 return;
1770
1771 status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
1772 NULL, 0, &result);
1773 if (status)
1774 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
1775 supported_aens);
1776
1777 queue_work(nvme_wq, &ctrl->async_event_work);
1778}
1779
1780static int nvme_ns_open(struct nvme_ns *ns)
1781{
1782
1783 /* should never be called due to GENHD_FL_HIDDEN */
1784 if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
1785 goto fail;
1786 if (!nvme_get_ns(ns))
1787 goto fail;
1788 if (!try_module_get(ns->ctrl->ops->module))
1789 goto fail_put_ns;
1790
1791 return 0;
1792
1793fail_put_ns:
1794 nvme_put_ns(ns);
1795fail:
1796 return -ENXIO;
1797}
1798
1799static void nvme_ns_release(struct nvme_ns *ns)
1800{
1801
1802 module_put(ns->ctrl->ops->module);
1803 nvme_put_ns(ns);
1804}
1805
1806static int nvme_open(struct gendisk *disk, blk_mode_t mode)
1807{
1808 return nvme_ns_open(disk->private_data);
1809}
1810
1811static void nvme_release(struct gendisk *disk)
1812{
1813 nvme_ns_release(disk->private_data);
1814}
1815
1816int nvme_getgeo(struct gendisk *disk, struct hd_geometry *geo)
1817{
1818 /* some standard values */
1819 geo->heads = 1 << 6;
1820 geo->sectors = 1 << 5;
1821 geo->cylinders = get_capacity(disk) >> 11;
1822 return 0;
1823}
1824
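/*
 * Describe the namespace metadata and protection information format to the
 * block layer integrity framework: the guard type selects the checksum
 * (CRC16 for the 16-byte tuple, CRC64 for the 64-byte tuple), and Type 1/2
 * formats additionally enable reference tag checking.
 */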
1825static bool nvme_init_integrity(struct nvme_ns_head *head,
1826 struct queue_limits *lim, struct nvme_ns_info *info)
1827{
1828 struct blk_integrity *bi = &lim->integrity;
1829
1830 memset(bi, 0, sizeof(*bi));
1831
1832 if (!head->ms)
1833 return true;
1834
1835 /*
1836 * PI can always be supported as we can ask the controller to simply
1837 * insert/strip it, which is not possible for other kinds of metadata.
1838 */
1839 if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) ||
1840 !(head->features & NVME_NS_METADATA_SUPPORTED))
1841 return nvme_ns_has_pi(head);
1842
1843 switch (head->pi_type) {
1844 case NVME_NS_DPS_PI_TYPE3:
1845 switch (head->guard_type) {
1846 case NVME_NVM_NS_16B_GUARD:
1847 bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
1848 bi->tag_size = sizeof(u16) + sizeof(u32);
1849 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1850 break;
1851 case NVME_NVM_NS_64B_GUARD:
1852 bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
1853 bi->tag_size = sizeof(u16) + 6;
1854 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
1855 break;
1856 default:
1857 break;
1858 }
1859 break;
1860 case NVME_NS_DPS_PI_TYPE1:
1861 case NVME_NS_DPS_PI_TYPE2:
1862 switch (head->guard_type) {
1863 case NVME_NVM_NS_16B_GUARD:
1864 bi->csum_type = BLK_INTEGRITY_CSUM_CRC;
1865 bi->tag_size = sizeof(u16);
1866 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
1867 BLK_INTEGRITY_REF_TAG;
1868 break;
1869 case NVME_NVM_NS_64B_GUARD:
1870 bi->csum_type = BLK_INTEGRITY_CSUM_CRC64;
1871 bi->tag_size = sizeof(u16);
1872 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE |
1873 BLK_INTEGRITY_REF_TAG;
1874 break;
1875 default:
1876 break;
1877 }
1878 break;
1879 default:
1880 break;
1881 }
1882
1883 bi->flags |= BLK_SPLIT_INTERVAL_CAPABLE;
1884 bi->metadata_size = head->ms;
1885 if (bi->csum_type) {
1886 bi->pi_tuple_size = head->pi_size;
1887 bi->pi_offset = info->pi_offset;
1888 }
1889 return true;
1890}
1891
1892static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
1893{
1894 return uuid_equal(&a->uuid, &b->uuid) &&
1895 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
1896 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
1897 a->csi == b->csi;
1898}
1899
1900static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid,
1901 struct nvme_id_ns_nvm **nvmp)
1902{
1903 struct nvme_command c = {
1904 .identify.opcode = nvme_admin_identify,
1905 .identify.nsid = cpu_to_le32(nsid),
1906 .identify.cns = NVME_ID_CNS_CS_NS,
1907 .identify.csi = NVME_CSI_NVM,
1908 };
1909 struct nvme_id_ns_nvm *nvm;
1910 int ret;
1911
1912 nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
1913 if (!nvm)
1914 return -ENOMEM;
1915
1916 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
1917 if (ret)
1918 kfree(nvm);
1919 else
1920 *nvmp = nvm;
1921 return ret;
1922}
1923
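/*
 * Pick the protection information guard type (16-bit or 64-bit CRC) from the
 * extended LBA format descriptor of the format currently in use, and size the
 * PI tuple accordingly.  Formats with a storage tag are not supported.
 */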
1924static void nvme_configure_pi_elbas(struct nvme_ns_head *head,
1925 struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm)
1926{
1927 u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]);
1928 u8 guard_type;
1929
1930 /* no support for storage tag formats right now */
1931 if (nvme_elbaf_sts(elbaf))
1932 return;
1933
1934 guard_type = nvme_elbaf_guard_type(elbaf);
1935 if ((nvm->pic & NVME_ID_NS_NVM_QPIFS) &&
1936 guard_type == NVME_NVM_NS_QTYPE_GUARD)
1937 guard_type = nvme_elbaf_qualified_guard_type(elbaf);
1938
1939 head->guard_type = guard_type;
1940 switch (head->guard_type) {
1941 case NVME_NVM_NS_64B_GUARD:
1942 head->pi_size = sizeof(struct crc64_pi_tuple);
1943 break;
1944 case NVME_NVM_NS_16B_GUARD:
1945 head->pi_size = sizeof(struct t10_pi_tuple);
1946 break;
1947 default:
1948 break;
1949 }
1950}
1951
1952static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
1953 struct nvme_ns_head *head, struct nvme_id_ns *id,
1954 struct nvme_id_ns_nvm *nvm, struct nvme_ns_info *info)
1955{
1956 head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
1957 head->pi_type = 0;
1958 head->pi_size = 0;
1959 head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms);
1960 if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
1961 return;
1962
1963 if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
1964 nvme_configure_pi_elbas(head, id, nvm);
1965 } else {
1966 head->pi_size = sizeof(struct t10_pi_tuple);
1967 head->guard_type = NVME_NVM_NS_16B_GUARD;
1968 }
1969
1970 if (head->pi_size && head->ms >= head->pi_size)
1971 head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
1972 if (!(id->dps & NVME_NS_DPS_PI_FIRST)) {
1973 if (disable_pi_offsets)
1974 head->pi_type = 0;
1975 else
1976 info->pi_offset = head->ms - head->pi_size;
1977 }
1978
1979 if (ctrl->ops->flags & NVME_F_FABRICS) {
1980 /*
1981 * The NVMe over Fabrics specification only supports metadata as
1982 * part of the extended data LBA. We rely on HCA/HBA support to
1983 * remap the separate metadata buffer from the block layer.
1984 */
1985 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
1986 return;
1987
1988 head->features |= NVME_NS_EXT_LBAS;
1989
1990 /*
1991 * The current fabrics transport drivers support namespace
1992 * metadata formats only if nvme_ns_has_pi() returns true.
1993 * Suppress support for all other formats so the namespace will
1994 * have a 0 capacity and not be usable through the block stack.
1995 *
1996 * Note, this check will need to be modified if any drivers
1997 * gain the ability to use other metadata formats.
1998 */
1999 if (ctrl->max_integrity_segments && nvme_ns_has_pi(head))
2000 head->features |= NVME_NS_METADATA_SUPPORTED;
2001 } else {
2002 /*
2003 * For PCIe controllers, we can't easily remap the separate
2004 * metadata buffer from the block layer and thus require a
2005 * separate metadata buffer for block layer metadata/PI support.
2006 * We allow extended LBAs for the passthrough interface, though.
2007 */
2008 if (id->flbas & NVME_NS_FLBAS_META_EXT)
2009 head->features |= NVME_NS_EXT_LBAS;
2010 else
2011 head->features |= NVME_NS_METADATA_SUPPORTED;
2012 }
2013}
2014
2015
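/*
 * Derive the atomic write limits from the namespace atomic parameters
 * (NAWUPF/NABSPF).  If the namespace does not report them, fall back to a
 * single logical block; the controller-wide AWUPF is deliberately not used.
 */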
2016static u32 nvme_configure_atomic_write(struct nvme_ns *ns,
2017 struct nvme_id_ns *id, struct queue_limits *lim, u32 bs)
2018{
2019 u32 atomic_bs, boundary = 0;
2020
2021 /*
2022 * We do not support an offset for the atomic boundaries.
2023 */
2024 if (id->nabo)
2025 return bs;
2026
2027 if ((id->nsfeat & NVME_NS_FEAT_ATOMICS) && id->nawupf) {
2028 /*
2029 * Use the per-namespace atomic write unit when available.
2030 */
2031 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
2032 if (id->nabspf)
2033 boundary = (le16_to_cpu(id->nabspf) + 1) * bs;
2034 } else {
2035 if (ns->ctrl->awupf)
2036 dev_info_once(ns->ctrl->device,
2037 "AWUPF ignored, only NAWUPF accepted\n");
2038 atomic_bs = bs;
2039 }
2040
2041 lim->atomic_write_hw_max = atomic_bs;
2042 lim->atomic_write_hw_boundary = boundary;
2043 lim->atomic_write_hw_unit_min = bs;
2044 lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
2045 lim->features |= BLK_FEAT_ATOMIC_WRITES;
2046 return atomic_bs;
2047}
2048
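/*
 * Worst-case number of segments needed to map a max_hw_sectors sized transfer
 * in NVME_CTRL_PAGE_SIZE chunks, plus one for an unaligned start.
 */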
2049static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
2050{
2051 return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
2052}
2053
2054static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
2055 struct queue_limits *lim, bool is_admin)
2056{
2057 lim->max_hw_sectors = ctrl->max_hw_sectors;
2058 lim->max_segments = min_t(u32, USHRT_MAX,
2059 min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments));
2060 lim->max_integrity_segments = ctrl->max_integrity_segments;
2061 lim->virt_boundary_mask = ctrl->ops->get_virt_boundary(ctrl, is_admin);
2062 lim->max_segment_size = UINT_MAX;
2063 lim->dma_alignment = 3;
2064}
2065
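/*
 * Translate the Identify Namespace data into queue limits: logical and
 * physical block sizes, optimal I/O sizes, write-zeroes and discard limits.
 * Returns false if the LBA size cannot be supported by the block layer, in
 * which case the caller forces the capacity to zero.
 */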
2066static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
2067 struct nvme_id_ns_nvm *nvm, struct queue_limits *lim)
2068{
2069 struct nvme_ns_head *head = ns->head;
2070 struct nvme_ctrl *ctrl = ns->ctrl;
2071 u32 bs = 1U << head->lba_shift;
2072 u32 atomic_bs, phys_bs, io_opt = 0;
2073 u32 npdg = 1, npda = 1;
2074 bool valid = true;
2075 u8 optperf;
2076
2077 /*
2078 * The block layer can't support LBA sizes larger than the page size
2079 * or smaller than a sector size yet, so catch this early and don't
2080 * allow block I/O.
2081 */
2082 if (blk_validate_block_size(bs)) {
2083 bs = (1 << 9);
2084 valid = false;
2085 }
2086
2087 phys_bs = bs;
2088 atomic_bs = nvme_configure_atomic_write(ns, id, lim, bs);
2089
2090 optperf = id->nsfeat >> NVME_NS_FEAT_OPTPERF_SHIFT;
2091 if (ctrl->vs >= NVME_VS(2, 1, 0))
2092 optperf &= NVME_NS_FEAT_OPTPERF_MASK_2_1;
2093 else
2094 optperf &= NVME_NS_FEAT_OPTPERF_MASK;
2095 if (optperf) {
2096 /* NPWG = Namespace Preferred Write Granularity */
2097 phys_bs = bs * (1 + le16_to_cpu(id->npwg));
2098 /* NOWS = Namespace Optimal Write Size */
2099 if (id->nows)
2100 io_opt = bs * (1 + le16_to_cpu(id->nows));
2101 }
2102
2103 /*
2104 * Linux filesystems assume writing a single physical block is
2105 * an atomic operation. Hence limit the physical block size to the
2106 * value of the Atomic Write Unit Power Fail parameter.
2107 */
2108 lim->logical_block_size = bs;
2109 lim->physical_block_size = min(phys_bs, atomic_bs);
2110 lim->io_min = phys_bs;
2111 lim->io_opt = io_opt;
2112 if ((ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) &&
2113 (ctrl->oncs & NVME_CTRL_ONCS_DSM))
2114 lim->max_write_zeroes_sectors = UINT_MAX;
2115 else
2116 lim->max_write_zeroes_sectors = ctrl->max_zeroes_sectors;
2117
2118 if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
2119 lim->max_hw_discard_sectors =
2120 nvme_lba_to_sect(ns->head, ctrl->dmrsl);
2121 else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
2122 lim->max_hw_discard_sectors = UINT_MAX;
2123 else
2124 lim->max_hw_discard_sectors = 0;
2125
2126 /*
2127 * NVMe namespaces advertise both a preferred deallocate granularity
2128 * (for a discard length) and alignment (for a discard starting offset).
2129 * However, Linux block devices advertise a single discard_granularity.
2130 * From NVM Command Set specification 1.1 section 5.2.2, the NPDGL/NPDAL
2131 * fields in the NVM Command Set Specific Identify Namespace structure
2132 * are preferred to NPDG/NPDA in the Identify Namespace structure since
2133 * they can represent larger values. However, NPDGL or NPDAL may be 0 if
2134 * unsupported. NPDG and NPDA are 0's based.
2135 * From Figure 115 of NVM Command Set specification 1.1, NPDGL and NPDAL
2136 * are supported if the high bit of OPTPERF is set. NPDG is supported if
2137 * the low bit of OPTPERF is set. NPDA is supported if either is set.
2138 * NPDG should be a multiple of NPDA, and likewise NPDGL should be a
2139 * multiple of NPDAL, but the spec doesn't say anything about NPDG vs.
2140 * NPDAL or NPDGL vs. NPDA. So compute the maximum instead of assuming
2141 * NPDG(L) is the larger. If neither NPDG, NPDGL, NPDA, nor NPDAL are
2142 * supported, default the discard_granularity to the logical block size.
2143 */
2144 if (optperf & 0x2 && nvm && nvm->npdgl)
2145 npdg = le32_to_cpu(nvm->npdgl);
2146 else if (optperf & 0x1)
2147 npdg = from0based(id->npdg);
2148 if (optperf & 0x2 && nvm && nvm->npdal)
2149 npda = le32_to_cpu(nvm->npdal);
2150 else if (optperf)
2151 npda = from0based(id->npda);
2152 if (check_mul_overflow(max(npdg, npda), lim->logical_block_size,
2153 &lim->discard_granularity))
2154 lim->discard_granularity = lim->logical_block_size;
2155
2156 if (ctrl->dmrl)
2157 lim->max_discard_segments = ctrl->dmrl;
2158 else
2159 lim->max_discard_segments = NVME_DSM_MAX_RANGES;
2160 return valid;
2161}
2162
2163static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
2164{
2165 return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags);
2166}
2167
2168static inline bool nvme_first_scan(struct gendisk *disk)
2169{
2170 /* nvme_alloc_ns() scans the disk prior to adding it */
2171 return !disk_live(disk);
2172}
2173
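/*
 * Expose the namespace optimal I/O boundary (NOIOB), or the quirk-derived
 * stripe size, as the block layer chunk size so requests are split at the
 * boundary.  Non-power-of-two boundaries and zoned namespaces are ignored.
 */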
2174static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id,
2175 struct queue_limits *lim)
2176{
2177 struct nvme_ctrl *ctrl = ns->ctrl;
2178 u32 iob;
2179
2180 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
2181 is_power_of_2(ctrl->max_hw_sectors))
2182 iob = ctrl->max_hw_sectors;
2183 else
2184 iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob));
2185
2186 if (!iob)
2187 return;
2188
2189 if (!is_power_of_2(iob)) {
2190 if (nvme_first_scan(ns->disk))
2191 pr_warn("%s: ignoring unaligned IO boundary:%u\n",
2192 ns->disk->disk_name, iob);
2193 return;
2194 }
2195
2196 if (blk_queue_is_zoned(ns->disk->queue)) {
2197 if (nvme_first_scan(ns->disk))
2198 pr_warn("%s: ignoring zoned namespace IO boundary\n",
2199 ns->disk->disk_name);
2200 return;
2201 }
2202
2203 lim->chunk_sectors = iob;
2204}
2205
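/*
 * Minimal setup for namespaces we cannot drive as a block device: apply the
 * controller-wide limits and return -ENODEV so the caller hides the gendisk
 * while other (passthrough) access remains possible.
 */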
2206static int nvme_update_ns_info_generic(struct nvme_ns *ns,
2207 struct nvme_ns_info *info)
2208{
2209 struct queue_limits lim;
2210 unsigned int memflags;
2211 int ret;
2212
2213 lim = queue_limits_start_update(ns->disk->queue);
2214 nvme_set_ctrl_limits(ns->ctrl, &lim, false);
2215
2216 memflags = blk_mq_freeze_queue(ns->disk->queue);
2217 ret = queue_limits_commit_update(ns->disk->queue, &lim);
2218 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
2219 blk_mq_unfreeze_queue(ns->disk->queue, memflags);
2220
2221 /* Hide the block-interface for these devices */
2222 if (!ret)
2223 ret = -ENODEV;
2224 return ret;
2225}
2226
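/*
 * Read the FDP configurations log page for the namespace's endurance group
 * and extract the Reclaim Unit Nominal Size (RUNS) of the active
 * configuration, which is later reported as the write stream granularity.
 */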
2227static int nvme_query_fdp_granularity(struct nvme_ctrl *ctrl,
2228 struct nvme_ns_info *info, u8 fdp_idx)
2229{
2230 struct nvme_fdp_config_log hdr, *h;
2231 struct nvme_fdp_config_desc *desc;
2232 size_t size = sizeof(hdr);
2233 void *log, *end;
2234 int i, n, ret;
2235
2236 ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0,
2237 NVME_CSI_NVM, &hdr, size, 0, info->endgid);
2238 if (ret) {
2239 dev_warn(ctrl->device,
2240 "FDP configs log header status:0x%x endgid:%d\n", ret,
2241 info->endgid);
2242 return ret;
2243 }
2244
2245 size = le32_to_cpu(hdr.sze);
2246 if (size > PAGE_SIZE * MAX_ORDER_NR_PAGES) {
2247 dev_warn(ctrl->device, "FDP config size too large:%zu\n",
2248 size);
2249 return 0;
2250 }
2251
2252 h = kvmalloc(size, GFP_KERNEL);
2253 if (!h)
2254 return -ENOMEM;
2255
2256 ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0,
2257 NVME_CSI_NVM, h, size, 0, info->endgid);
2258 if (ret) {
2259 dev_warn(ctrl->device,
2260 "FDP configs log status:0x%x endgid:%d\n", ret,
2261 info->endgid);
2262 goto out;
2263 }
2264
2265 n = le16_to_cpu(h->numfdpc) + 1;
2266 if (fdp_idx > n) {
2267 dev_warn(ctrl->device, "FDP index:%d out of range:%d\n",
2268 fdp_idx, n);
2269 /* Proceed without registering FDP streams */
2270 ret = 0;
2271 goto out;
2272 }
2273
2274 log = h + 1;
2275 desc = log;
2276 end = log + size - sizeof(*h);
2277 for (i = 0; i < fdp_idx; i++) {
2278 log += le16_to_cpu(desc->dsze);
2279 desc = log;
2280 if (log >= end) {
2281 dev_warn(ctrl->device,
2282 "FDP invalid config descriptor list\n");
2283 ret = 0;
2284 goto out;
2285 }
2286 }
2287
2288 if (le32_to_cpu(desc->nrg) > 1) {
2289 dev_warn(ctrl->device, "FDP NRG > 1 not supported\n");
2290 ret = 0;
2291 goto out;
2292 }
2293
2294 info->runs = le64_to_cpu(desc->runs);
2295out:
2296 kvfree(h);
2297 return ret;
2298}
2299
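/*
 * If Flexible Data Placement is enabled for this namespace, discover the
 * reclaim unit handles via I/O Management Receive and cache their placement
 * IDs so that writes can be steered to individual streams.
 */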
2300static int nvme_query_fdp_info(struct nvme_ns *ns, struct nvme_ns_info *info)
2301{
2302 struct nvme_ns_head *head = ns->head;
2303 struct nvme_ctrl *ctrl = ns->ctrl;
2304 struct nvme_fdp_ruh_status *ruhs;
2305 struct nvme_fdp_config fdp;
2306 struct nvme_command c = {};
2307 size_t size;
2308 int i, ret;
2309
2310 /*
2311 * The FDP configuration is static for the lifetime of the namespace,
2312 * so return immediately if we've already registered this namespace's
2313 * streams.
2314 */
2315 if (head->nr_plids)
2316 return 0;
2317
2318 ret = nvme_get_features(ctrl, NVME_FEAT_FDP, info->endgid, NULL, 0,
2319 &fdp);
2320 if (ret) {
2321 dev_warn(ctrl->device, "FDP get feature status:0x%x\n", ret);
2322 return ret;
2323 }
2324
2325 if (!(fdp.flags & FDPCFG_FDPE))
2326 return 0;
2327
2328 ret = nvme_query_fdp_granularity(ctrl, info, fdp.fdpcidx);
2329 if (!info->runs)
2330 return ret;
2331
2332 size = struct_size(ruhs, ruhsd, S8_MAX - 1);
2333 ruhs = kzalloc(size, GFP_KERNEL);
2334 if (!ruhs)
2335 return -ENOMEM;
2336
2337 c.imr.opcode = nvme_cmd_io_mgmt_recv;
2338 c.imr.nsid = cpu_to_le32(head->ns_id);
2339 c.imr.mo = NVME_IO_MGMT_RECV_MO_RUHS;
2340 c.imr.numd = cpu_to_le32(nvme_bytes_to_numd(size));
2341 ret = nvme_submit_sync_cmd(ns->queue, &c, ruhs, size);
2342 if (ret) {
2343 dev_warn(ctrl->device, "FDP io-mgmt status:0x%x\n", ret);
2344 goto free;
2345 }
2346
2347 head->nr_plids = le16_to_cpu(ruhs->nruhsd);
2348 if (!head->nr_plids)
2349 goto free;
2350
2351 head->plids = kcalloc(head->nr_plids, sizeof(*head->plids),
2352 GFP_KERNEL);
2353 if (!head->plids) {
2354 dev_warn(ctrl->device,
2355 "failed to allocate %u FDP placement IDs\n",
2356 head->nr_plids);
2357 head->nr_plids = 0;
2358 ret = -ENOMEM;
2359 goto free;
2360 }
2361
2362 for (i = 0; i < head->nr_plids; i++)
2363 head->plids[i] = le16_to_cpu(ruhs->ruhsd[i].pid);
2364free:
2365 kfree(ruhs);
2366 return ret;
2367}
2368
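/*
 * (Re)read the namespace identify data and apply it to the gendisk under a
 * frozen queue: queue limits, metadata/PI setup, zone information, FDP
 * streams, capacity and the read-only state.
 */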
2369static int nvme_update_ns_info_block(struct nvme_ns *ns,
2370 struct nvme_ns_info *info)
2371{
2372 struct queue_limits lim;
2373 struct nvme_id_ns_nvm *nvm = NULL;
2374 struct nvme_zone_info zi = {};
2375 struct nvme_id_ns *id;
2376 unsigned int memflags;
2377 sector_t capacity;
2378 unsigned lbaf;
2379 int ret;
2380
2381 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id);
2382 if (ret)
2383 return ret;
2384
2385 if (id->ncap == 0) {
2386 /* namespace not allocated or attached */
2387 info->is_removed = true;
2388 ret = -ENXIO;
2389 goto out;
2390 }
2391 lbaf = nvme_lbaf_index(id->flbas);
2392
2393 if (nvme_id_cns_ok(ns->ctrl, NVME_ID_CNS_CS_NS)) {
2394 ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
2395 if (ret < 0)
2396 goto out;
2397 }
2398
2399 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
2400 ns->head->ids.csi == NVME_CSI_ZNS) {
2401 ret = nvme_query_zone_info(ns, lbaf, &zi);
2402 if (ret < 0)
2403 goto out;
2404 }
2405
2406 if (ns->ctrl->ctratt & NVME_CTRL_ATTR_FDPS) {
2407 ret = nvme_query_fdp_info(ns, info);
2408 if (ret < 0)
2409 goto out;
2410 }
2411
2412 lim = queue_limits_start_update(ns->disk->queue);
2413
2414 memflags = blk_mq_freeze_queue(ns->disk->queue);
2415 ns->head->lba_shift = id->lbaf[lbaf].ds;
2416 ns->head->nuse = le64_to_cpu(id->nuse);
2417 capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
2418 nvme_set_ctrl_limits(ns->ctrl, &lim, false);
2419 nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
2420 nvme_set_chunk_sectors(ns, id, &lim);
2421 if (!nvme_update_disk_info(ns, id, nvm, &lim))
2422 capacity = 0;
2423
2424 if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
2425 ns->head->ids.csi == NVME_CSI_ZNS)
2426 nvme_update_zone_info(ns, &lim, &zi);
2427
2428 if ((ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) && !info->no_vwc)
2429 lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
2430 else
2431 lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
2432
2433 if (info->is_rotational)
2434 lim.features |= BLK_FEAT_ROTATIONAL;
2435
2436 /*
2437 * Register a metadata profile for PI, or the plain non-integrity NVMe
2438 * metadata masquerading as Type 0 if supported, otherwise reject block
2439 * I/O to namespaces with metadata except when the namespace supports
2440 * PI, as it can strip/insert in that case.
2441 */
2442 if (!nvme_init_integrity(ns->head, &lim, info))
2443 capacity = 0;
2444
2445 lim.max_write_streams = ns->head->nr_plids;
2446 if (lim.max_write_streams)
2447 lim.write_stream_granularity = min(info->runs, U32_MAX);
2448 else
2449 lim.write_stream_granularity = 0;
2450
2451 /*
2452 * Only set the DEAC bit if the device guarantees that reads from
2453 * deallocated data return zeroes. While the DEAC bit does not
2454 * require that, it must be a no-op if reads from deallocated data
2455 * do not return zeroes.
2456 */
2457 if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) {
2458 ns->head->features |= NVME_NS_DEAC;
2459 lim.max_hw_wzeroes_unmap_sectors = lim.max_write_zeroes_sectors;
2460 }
2461
2462 ret = queue_limits_commit_update(ns->disk->queue, &lim);
2463 if (ret) {
2464 blk_mq_unfreeze_queue(ns->disk->queue, memflags);
2465 goto out;
2466 }
2467
2468 set_capacity_and_notify(ns->disk, capacity);
2469 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
2470 set_bit(NVME_NS_READY, &ns->flags);
2471 blk_mq_unfreeze_queue(ns->disk->queue, memflags);
2472
2473 if (blk_queue_is_zoned(ns->queue)) {
2474 ret = blk_revalidate_disk_zones(ns->disk);
2475 if (ret && !nvme_first_scan(ns->disk))
2476 goto out;
2477 }
2478
2479 ret = 0;
2480out:
2481 kfree(nvm);
2482 kfree(id);
2483 return ret;
2484}
2485
2486static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
2487{
2488 bool unsupported = false;
2489 int ret;
2490
2491 switch (info->ids.csi) {
2492 case NVME_CSI_ZNS:
2493 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
2494 dev_info(ns->ctrl->device,
2495 "block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
2496 info->nsid);
2497 ret = nvme_update_ns_info_generic(ns, info);
2498 break;
2499 }
2500 ret = nvme_update_ns_info_block(ns, info);
2501 break;
2502 case NVME_CSI_NVM:
2503 ret = nvme_update_ns_info_block(ns, info);
2504 break;
2505 default:
2506 dev_info(ns->ctrl->device,
2507 "block device for nsid %u not supported (csi %u)\n",
2508 info->nsid, info->ids.csi);
2509 ret = nvme_update_ns_info_generic(ns, info);
2510 break;
2511 }
2512
2513 /*
2514	 * If probing fails due to an unsupported feature, hide the block device,
2515 * but still allow other access.
2516 */
2517 if (ret == -ENODEV) {
2518 ns->disk->flags |= GENHD_FL_HIDDEN;
2519 set_bit(NVME_NS_READY, &ns->flags);
2520 unsupported = true;
2521 ret = 0;
2522 }
2523
2524 if (!ret && nvme_ns_head_multipath(ns->head)) {
2525 struct queue_limits *ns_lim = &ns->disk->queue->limits;
2526 struct queue_limits lim;
2527 unsigned int memflags;
2528
2529 lim = queue_limits_start_update(ns->head->disk->queue);
2530 memflags = blk_mq_freeze_queue(ns->head->disk->queue);
2531 /*
2532 * queue_limits mixes values that are the hardware limitations
2533 * for bio splitting with what is the device configuration.
2534 *
2535 * For NVMe the device configuration can change after e.g. a
2536 * Format command, and we really want to pick up the new format
2537 * value here. But we must still stack the queue limits to the
2538 * least common denominator for multipathing to split the bios
2539 * properly.
2540 *
2541 * To work around this, we explicitly set the device
2542 * configuration to those that we just queried, but only stack
2543 * the splitting limits in to make sure we still obey possibly
2544 * lower limitations of other controllers.
2545 */
2546 lim.logical_block_size = ns_lim->logical_block_size;
2547 lim.physical_block_size = ns_lim->physical_block_size;
2548 lim.io_min = ns_lim->io_min;
2549 lim.io_opt = ns_lim->io_opt;
2550 queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
2551 ns->head->disk->disk_name);
2552 if (unsupported)
2553 ns->head->disk->flags |= GENHD_FL_HIDDEN;
2554 else
2555 nvme_init_integrity(ns->head, &lim, info);
2556 lim.max_write_streams = ns_lim->max_write_streams;
2557 lim.write_stream_granularity = ns_lim->write_stream_granularity;
2558 ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
2559
2560 set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
2561 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
2562 nvme_mpath_revalidate_paths(ns);
2563
2564 blk_mq_unfreeze_queue(ns->head->disk->queue, memflags);
2565 }
2566
2567 return ret;
2568}
2569
2570int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16],
2571 enum blk_unique_id type)
2572{
2573 struct nvme_ns_ids *ids = &ns->head->ids;
2574
2575 if (type != BLK_UID_EUI64)
2576 return -EINVAL;
2577
2578 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) {
2579 memcpy(id, &ids->nguid, sizeof(ids->nguid));
2580 return sizeof(ids->nguid);
2581 }
2582 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) {
2583 memcpy(id, &ids->eui64, sizeof(ids->eui64));
2584 return sizeof(ids->eui64);
2585 }
2586
2587 return -EINVAL;
2588}
2589
2590static int nvme_get_unique_id(struct gendisk *disk, u8 id[16],
2591 enum blk_unique_id type)
2592{
2593 return nvme_ns_get_unique_id(disk->private_data, id, type);
2594}
2595
2596#ifdef CONFIG_BLK_SED_OPAL
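/*
 * sed-opal callback: tunnel Security Send / Security Receive commands to the
 * admin queue on behalf of the Opal core.
 */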
2597static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
2598 bool send)
2599{
2600 struct nvme_ctrl *ctrl = data;
2601 struct nvme_command cmd = { };
2602
2603 if (send)
2604 cmd.common.opcode = nvme_admin_security_send;
2605 else
2606 cmd.common.opcode = nvme_admin_security_recv;
2607 cmd.common.nsid = 0;
2608 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
2609 cmd.common.cdw11 = cpu_to_le32(len);
2610
2611 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
2612 NVME_QID_ANY, NVME_SUBMIT_AT_HEAD);
2613}
2614
2615static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
2616{
2617 if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) {
2618 if (!ctrl->opal_dev)
2619 ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit);
2620 else if (was_suspended)
2621 opal_unlock_from_suspend(ctrl->opal_dev);
2622 } else {
2623 free_opal_dev(ctrl->opal_dev);
2624 ctrl->opal_dev = NULL;
2625 }
2626}
2627#else
2628static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
2629{
2630}
2631#endif /* CONFIG_BLK_SED_OPAL */
2632
2633#ifdef CONFIG_BLK_DEV_ZONED
2634static int nvme_report_zones(struct gendisk *disk, sector_t sector,
2635 unsigned int nr_zones, struct blk_report_zones_args *args)
2636{
2637 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, args);
2638}
2639#else
2640#define nvme_report_zones NULL
2641#endif /* CONFIG_BLK_DEV_ZONED */
2642
2643const struct block_device_operations nvme_bdev_ops = {
2644 .owner = THIS_MODULE,
2645 .ioctl = nvme_ioctl,
2646 .compat_ioctl = blkdev_compat_ptr_ioctl,
2647 .open = nvme_open,
2648 .release = nvme_release,
2649 .getgeo = nvme_getgeo,
2650 .get_unique_id = nvme_get_unique_id,
2651 .report_zones = nvme_report_zones,
2652 .pr_ops = &nvme_pr_ops,
2653};
2654
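/*
 * Poll CSTS until the masked bits match the expected value.  Gives up when
 * the register reads all ones (device gone), a fatal signal is pending, or
 * the timeout (in seconds) expires.
 */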
2655static int nvme_wait_ready(struct nvme_ctrl *ctrl, u32 mask, u32 val,
2656 u32 timeout, const char *op)
2657{
2658 unsigned long timeout_jiffies = jiffies + timeout * HZ;
2659 u32 csts;
2660 int ret;
2661
2662 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
2663 if (csts == ~0)
2664 return -ENODEV;
2665 if ((csts & mask) == val)
2666 break;
2667
2668 usleep_range(1000, 2000);
2669 if (fatal_signal_pending(current))
2670 return -EINTR;
2671 if (time_after(jiffies, timeout_jiffies)) {
2672 dev_err(ctrl->device,
2673 "Device not ready; aborting %s, CSTS=0x%x\n",
2674 op, csts);
2675 return -ENODEV;
2676 }
2677 }
2678
2679 return ret;
2680}
2681
2682int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2683{
2684 int ret;
2685
2686 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
2687 if (shutdown)
2688 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
2689 else
2690 ctrl->ctrl_config &= ~NVME_CC_ENABLE;
2691
2692 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2693 if (ret)
2694 return ret;
2695
2696 if (shutdown) {
2697 return nvme_wait_ready(ctrl, NVME_CSTS_SHST_MASK,
2698 NVME_CSTS_SHST_CMPLT,
2699 ctrl->shutdown_timeout, "shutdown");
2700 }
2701 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
2702 msleep(NVME_QUIRK_DELAY_AMOUNT);
2703 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, 0,
2704 (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset");
2705}
2706EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
2707
2708int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
2709{
2710 unsigned dev_page_min;
2711 u32 timeout;
2712 int ret;
2713
2714 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2715 if (ret) {
2716 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
2717 return ret;
2718 }
2719 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12;
2720
2721 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) {
2722 dev_err(ctrl->device,
2723 "Minimum device page size %u too large for host (%u)\n",
2724 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT);
2725 return -ENODEV;
2726 }
2727
2728 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI)
2729 ctrl->ctrl_config = NVME_CC_CSS_CSI;
2730 else
2731 ctrl->ctrl_config = NVME_CC_CSS_NVM;
2732
2733 /*
2734 * Setting CRIME results in CSTS.RDY before the media is ready. This
2735 * makes it possible for media related commands to return the error
2736 * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is
2737 * restructured to handle retries, disable CC.CRIME.
2738 */
2739 ctrl->ctrl_config &= ~NVME_CC_CRIME;
2740
2741 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
2742 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
2743 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
2744 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2745 if (ret)
2746 return ret;
2747
2748 /* CAP value may change after initial CC write */
2749 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
2750 if (ret)
2751 return ret;
2752
2753 timeout = NVME_CAP_TIMEOUT(ctrl->cap);
2754 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) {
2755 u32 crto, ready_timeout;
2756
2757 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto);
2758 if (ret) {
2759 dev_err(ctrl->device, "Reading CRTO failed (%d)\n",
2760 ret);
2761 return ret;
2762 }
2763
2764 /*
2765		 * CRTO should always be greater than or equal to CAP.TO, but some
2766 * devices are known to get this wrong. Use the larger of the
2767 * two values.
2768 */
2769 ready_timeout = NVME_CRTO_CRWMT(crto);
2770
2771 if (ready_timeout < timeout)
2772 dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
2773 crto, ctrl->cap);
2774 else
2775 timeout = ready_timeout;
2776 }
2777
2778 ctrl->ctrl_config |= NVME_CC_ENABLE;
2779 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
2780 if (ret)
2781 return ret;
2782 return nvme_wait_ready(ctrl, NVME_CSTS_RDY, NVME_CSTS_RDY,
2783 (timeout + 1) / 2, "initialisation");
2784}
2785EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
2786
2787static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
2788{
2789 __le64 ts;
2790 int ret;
2791
2792 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP))
2793 return 0;
2794
2795 ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
2796 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts),
2797 NULL);
2798 if (ret)
2799 dev_warn_once(ctrl->device,
2800 "could not set timestamp (%d)\n", ret);
2801 return ret;
2802}
2803
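/*
 * Enable the host behavior support features we can make use of: advanced
 * command retry (if the controller reports retry delay times) and extended
 * LBA formats (if the controller supports ELBAS).
 */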
2804static int nvme_configure_host_options(struct nvme_ctrl *ctrl)
2805{
2806 struct nvme_feat_host_behavior *host;
2807 u8 acre = 0, lbafee = 0;
2808 int ret;
2809
2810 /* Don't bother enabling the feature if retry delay is not reported */
2811 if (ctrl->crdt[0])
2812 acre = NVME_ENABLE_ACRE;
2813 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)
2814 lbafee = NVME_ENABLE_LBAFEE;
2815
2816 if (!acre && !lbafee)
2817 return 0;
2818
2819 host = kzalloc_obj(*host);
2820 if (!host)
2821 return 0;
2822
2823 host->acre = acre;
2824 host->lbafee = lbafee;
2825 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
2826 host, sizeof(*host), NULL);
2827 kfree(host);
2828 return ret;
2829}
2830
2831/*
2832 * The function checks whether the given total (exlat + enlat) latency of
2833 * a power state allows the latter to be used as an APST transition target.
2834 * It does so by comparing the latency to the primary and secondary latency
2835 * tolerances defined by module params. If there's a match, the corresponding
2836 * timeout value is passed back through *transition_time and the matching
2837 * tolerance index (1 or 2) is recorded in *last_index.
2838 */
2839static bool nvme_apst_get_transition_time(u64 total_latency,
2840 u64 *transition_time, unsigned *last_index)
2841{
2842 if (total_latency <= apst_primary_latency_tol_us) {
2843 if (*last_index == 1)
2844 return false;
2845 *last_index = 1;
2846 *transition_time = apst_primary_timeout_ms;
2847 return true;
2848 }
2849 if (apst_secondary_timeout_ms &&
2850 total_latency <= apst_secondary_latency_tol_us) {
2851 if (*last_index <= 2)
2852 return false;
2853 *last_index = 2;
2854 *transition_time = apst_secondary_timeout_ms;
2855 return true;
2856 }
2857 return false;
2858}
2859
2860/*
2861 * APST (Autonomous Power State Transition) lets us program a table of power
2862 * state transitions that the controller will perform automatically.
2863 *
2864 * Depending on module params, one of the two supported techniques will be used:
2865 *
2866 * - If the parameters provide explicit timeouts and tolerances, they will be
2867 * used to build a table with up to 2 non-operational states to transition to.
2868 * The default parameter values were selected based on the values used by
2869 * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic
2870 * regeneration of the APST table in the event of switching between external
2871 * and battery power, the timeouts and tolerances reflect a compromise
2872 * between values used by Microsoft for AC and battery scenarios.
2873 * - If not, we'll configure the table with a simple heuristic: we are willing
2874 * to spend at most 2% of the time transitioning between power states.
2875 * Therefore, when running in any given state, we will enter the next
2876 * lower-power non-operational state after waiting 50 * (enlat + exlat)
2877 * microseconds, as long as that state's exit latency is under the requested
2878 * maximum latency.
2879 *
2880 * We will not autonomously enter any non-operational state for which the total
2881 * latency exceeds ps_max_latency_us.
2882 *
2883 * Users can set ps_max_latency_us to zero to turn off APST.
2884 */
2885static int nvme_configure_apst(struct nvme_ctrl *ctrl)
2886{
2887 struct nvme_feat_auto_pst *table;
2888 unsigned apste = 0;
2889 u64 max_lat_us = 0;
2890 __le64 target = 0;
2891 int max_ps = -1;
2892 int state;
2893 int ret;
2894 unsigned last_lt_index = UINT_MAX;
2895
2896 /*
2897 * If APST isn't supported or if we haven't been initialized yet,
2898 * then don't do anything.
2899 */
2900 if (!ctrl->apsta)
2901 return 0;
2902
2903 if (ctrl->npss > 31) {
2904 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
2905 return 0;
2906 }
2907
2908 table = kzalloc_obj(*table);
2909 if (!table)
2910 return 0;
2911
2912 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
2913 /* Turn off APST. */
2914 dev_dbg(ctrl->device, "APST disabled\n");
2915 goto done;
2916 }
2917
2918 /*
2919 * Walk through all states from lowest- to highest-power.
2920 * According to the spec, lower-numbered states use more power. NPSS,
2921 * despite the name, is the index of the lowest-power state, not the
2922 * number of states.
2923 */
2924 for (state = (int)ctrl->npss; state >= 0; state--) {
2925 u64 total_latency_us, exit_latency_us, transition_ms;
2926
2927 if (target)
2928 table->entries[state] = target;
2929
2930 /*
2931 * Don't allow transitions to the deepest state if it's quirked
2932 * off.
2933 */
2934 if (state == ctrl->npss &&
2935 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
2936 continue;
2937
2938 /*
2939 * Is this state a useful non-operational state for higher-power
2940 * states to autonomously transition to?
2941 */
2942 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE))
2943 continue;
2944
2945 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
2946 if (exit_latency_us > ctrl->ps_max_latency_us)
2947 continue;
2948
2949 total_latency_us = exit_latency_us +
2950 le32_to_cpu(ctrl->psd[state].entry_lat);
2951
2952 /*
2953 * This state is good. It can be used as the APST idle target
2954 * for higher power states.
2955 */
2956 if (apst_primary_timeout_ms && apst_primary_latency_tol_us) {
2957 if (!nvme_apst_get_transition_time(total_latency_us,
2958 &transition_ms, &last_lt_index))
2959 continue;
2960 } else {
2961 transition_ms = total_latency_us + 19;
2962 do_div(transition_ms, 20);
2963 if (transition_ms > (1 << 24) - 1)
2964 transition_ms = (1 << 24) - 1;
2965 }
2966
2967 target = cpu_to_le64((state << 3) | (transition_ms << 8));
2968 if (max_ps == -1)
2969 max_ps = state;
2970 if (total_latency_us > max_lat_us)
2971 max_lat_us = total_latency_us;
2972 }
2973
2974 if (max_ps == -1)
2975 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
2976 else
2977 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
2978 max_ps, max_lat_us, (int)sizeof(*table), table);
2979 apste = 1;
2980
2981done:
2982 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
2983 table, sizeof(*table), NULL);
2984 if (ret)
2985 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
2986 kfree(table);
2987 return ret;
2988}
2989
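/*
 * PM QoS latency tolerance notifier: translate the requested value into
 * ps_max_latency_us and reprogram the APST table if the controller is live.
 */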
2990static void nvme_set_latency_tolerance(struct device *dev, s32 val)
2991{
2992 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
2993 u64 latency;
2994
2995 switch (val) {
2996 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
2997 case PM_QOS_LATENCY_ANY:
2998 latency = U64_MAX;
2999 break;
3000
3001 default:
3002 latency = val;
3003 }
3004
3005 if (ctrl->ps_max_latency_us != latency) {
3006 ctrl->ps_max_latency_us = latency;
3007 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
3008 nvme_configure_apst(ctrl);
3009 }
3010}
3011
3012struct nvme_core_quirk_entry {
3013 /*
3014 * NVMe model and firmware strings are padded with spaces. For
3015 * simplicity, strings in the quirk table are padded with NULLs
3016 * instead.
3017 */
3018 u16 vid;
3019 const char *mn;
3020 const char *fr;
3021 unsigned long quirks;
3022};
3023
3024static const struct nvme_core_quirk_entry core_quirks[] = {
3025 {
3026 /*
3027 * This Toshiba device seems to die using any APST states. See:
3028 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
3029 */
3030 .vid = 0x1179,
3031 .mn = "THNSF5256GPUK TOSHIBA",
3032 .quirks = NVME_QUIRK_NO_APST,
3033 },
3034 {
3035 /*
3036 * This LiteON CL1-3D*-Q11 firmware version has a race
3037		 * condition associated with actions related to suspend to idle.
3038		 * LiteON has resolved the problem in a later firmware release.
3039 */
3040 .vid = 0x14a4,
3041 .fr = "22301111",
3042 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
3043 },
3044 {
3045 /*
3046 * This Kioxia CD6-V Series / HPE PE8030 device times out and
3047 * aborts I/O during any load, but more easily reproducible
3048 * with discards (fstrim).
3049 *
3050 * The device is left in a state where it is also not possible
3051 * to use "nvme set-feature" to disable APST, but booting with
3052 * nvme_core.default_ps_max_latency_us=0 works.
3053 */
3054 .vid = 0x1e0f,
3055 .mn = "KCD6XVUL6T40",
3056 .quirks = NVME_QUIRK_NO_APST,
3057 },
3058 {
3059 /*
3060 * The external Samsung X5 SSD fails initialization without a
3061 * delay before checking if it is ready and has a whole set of
3062 * other problems. To make this even more interesting, it
3063 * shares the PCI ID with internal Samsung 970 Evo Plus that
3064 * does not need or want these quirks.
3065 */
3066 .vid = 0x144d,
3067 .mn = "Samsung Portable SSD X5",
3068 .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
3069 NVME_QUIRK_NO_DEEPEST_PS |
3070 NVME_QUIRK_IGNORE_DEV_SUBNQN,
3071 }
3072};
3073
3074/* match is null-terminated but idstr is space-padded. */
3075static bool string_matches(const char *idstr, const char *match, size_t len)
3076{
3077 size_t matchlen;
3078
3079 if (!match)
3080 return true;
3081
3082 matchlen = strlen(match);
3083 WARN_ON_ONCE(matchlen > len);
3084
3085 if (memcmp(idstr, match, matchlen))
3086 return false;
3087
3088 for (; matchlen < len; matchlen++)
3089 if (idstr[matchlen] != ' ')
3090 return false;
3091
3092 return true;
3093}
3094
3095static bool quirk_matches(const struct nvme_id_ctrl *id,
3096 const struct nvme_core_quirk_entry *q)
3097{
3098 return q->vid == le16_to_cpu(id->vid) &&
3099 string_matches(id->mn, q->mn, sizeof(id->mn)) &&
3100 string_matches(id->fr, q->fr, sizeof(id->fr));
3101}
3102
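/*
 * Copy the controller-reported subsystem NQN, or synthesise one from the
 * VID/SSVID, serial number and model strings when the controller does not
 * report a usable SUBNQN (or is quirked to ignore it).
 */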
3103static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
3104 struct nvme_id_ctrl *id)
3105{
3106 size_t nqnlen;
3107 int off;
3108
3109	if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
3110 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
3111 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
3112 strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
3113 return;
3114 }
3115
3116 if (ctrl->vs >= NVME_VS(1, 2, 1))
3117 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
3118 }
3119
3120 /*
3121 * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe
3122 * Base Specification 2.0. It is slightly different from the format
3123 * specified there due to historic reasons, and we can't change it now.
3124 */
3125 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
3126 "nqn.2014.08.org.nvmexpress:%04x%04x",
3127 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
3128 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
3129 off += sizeof(id->sn);
3130 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
3131 off += sizeof(id->mn);
3132 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
3133}
3134
3135static void nvme_release_subsystem(struct device *dev)
3136{
3137 struct nvme_subsystem *subsys =
3138 container_of(dev, struct nvme_subsystem, dev);
3139
3140 if (subsys->instance >= 0)
3141 ida_free(&nvme_instance_ida, subsys->instance);
3142 kfree(subsys);
3143}
3144
3145static void nvme_destroy_subsystem(struct kref *ref)
3146{
3147 struct nvme_subsystem *subsys =
3148 container_of(ref, struct nvme_subsystem, ref);
3149
3150 mutex_lock(&nvme_subsystems_lock);
3151 list_del(&subsys->entry);
3152 mutex_unlock(&nvme_subsystems_lock);
3153
3154 ida_destroy(&subsys->ns_ida);
3155 device_del(&subsys->dev);
3156 put_device(&subsys->dev);
3157}
3158
3159static void nvme_put_subsystem(struct nvme_subsystem *subsys)
3160{
3161 kref_put(&subsys->ref, nvme_destroy_subsystem);
3162}
3163
3164static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
3165{
3166 struct nvme_subsystem *subsys;
3167
3168 lockdep_assert_held(&nvme_subsystems_lock);
3169
3170 /*
3171 * Fail matches for discovery subsystems. This results
3172 * in each discovery controller bound to a unique subsystem.
3173 * This avoids issues with validating controller values
3174 * that can only be true when there is a single unique subsystem.
3175 * There may be multiple and completely independent entities
3176 * that provide discovery controllers.
3177 */
3178 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME))
3179 return NULL;
3180
3181 list_for_each_entry(subsys, &nvme_subsystems, entry) {
3182 if (strcmp(subsys->subnqn, subsysnqn))
3183 continue;
3184 if (!kref_get_unless_zero(&subsys->ref))
3185 continue;
3186 return subsys;
3187 }
3188
3189 return NULL;
3190}
3191
3192static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
3193{
3194 return ctrl->opts && ctrl->opts->discovery_nqn;
3195}
3196
3197static inline bool nvme_admin_ctrl(struct nvme_ctrl *ctrl)
3198{
3199 return ctrl->cntrltype == NVME_CTRL_ADMIN;
3200}
3201
3202static inline bool nvme_is_io_ctrl(struct nvme_ctrl *ctrl)
3203{
3204 return !nvme_discovery_ctrl(ctrl) && !nvme_admin_ctrl(ctrl);
3205}
3206
3207static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
3208 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
3209{
3210 struct nvme_ctrl *tmp;
3211
3212 lockdep_assert_held(&nvme_subsystems_lock);
3213
3214 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
3215 if (nvme_state_terminal(tmp))
3216 continue;
3217
3218 if (tmp->cntlid == ctrl->cntlid) {
3219 dev_err(ctrl->device,
3220 "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
3221 ctrl->cntlid, dev_name(tmp->device),
3222 subsys->subnqn);
3223 return false;
3224 }
3225
3226 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
3227 nvme_discovery_ctrl(ctrl))
3228 continue;
3229
3230 dev_err(ctrl->device,
3231 "Subsystem does not support multiple controllers\n");
3232 return false;
3233 }
3234
3235 return true;
3236}
3237
3238static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
3239{
3240 struct nvme_subsystem *subsys, *found;
3241 int ret;
3242
3243 subsys = kzalloc_obj(*subsys);
3244 if (!subsys)
3245 return -ENOMEM;
3246
3247 subsys->instance = -1;
3248 mutex_init(&subsys->lock);
3249 kref_init(&subsys->ref);
3250 INIT_LIST_HEAD(&subsys->ctrls);
3251 INIT_LIST_HEAD(&subsys->nsheads);
3252 nvme_init_subnqn(subsys, ctrl, id);
3253 memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
3254 memcpy(subsys->model, id->mn, sizeof(subsys->model));
3255 subsys->vendor_id = le16_to_cpu(id->vid);
3256 subsys->cmic = id->cmic;
3257
3258 /* Versions prior to 1.4 don't necessarily report a valid type */
3259 if (id->cntrltype == NVME_CTRL_DISC ||
3260 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
3261 subsys->subtype = NVME_NQN_DISC;
3262 else
3263 subsys->subtype = NVME_NQN_NVME;
3264
3265 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
3266 dev_err(ctrl->device,
3267 "Subsystem %s is not a discovery controller",
3268 subsys->subnqn);
3269 kfree(subsys);
3270 return -EINVAL;
3271 }
3272 nvme_mpath_default_iopolicy(subsys);
3273
3274 subsys->dev.class = &nvme_subsys_class;
3275 subsys->dev.release = nvme_release_subsystem;
3276 subsys->dev.groups = nvme_subsys_attrs_groups;
3277 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
3278 device_initialize(&subsys->dev);
3279
3280 mutex_lock(&nvme_subsystems_lock);
3281 found = __nvme_find_get_subsystem(subsys->subnqn);
3282 if (found) {
3283 put_device(&subsys->dev);
3284 subsys = found;
3285
3286 if (!nvme_validate_cntlid(subsys, ctrl, id)) {
3287 ret = -EINVAL;
3288 goto out_put_subsystem;
3289 }
3290 } else {
3291 ret = device_add(&subsys->dev);
3292 if (ret) {
3293 dev_err(ctrl->device,
3294 "failed to register subsystem device.\n");
3295 put_device(&subsys->dev);
3296 goto out_unlock;
3297 }
3298 ida_init(&subsys->ns_ida);
3299 list_add_tail(&subsys->entry, &nvme_subsystems);
3300 }
3301
3302 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
3303 dev_name(ctrl->device));
3304 if (ret) {
3305 dev_err(ctrl->device,
3306 "failed to create sysfs link from subsystem.\n");
3307 goto out_put_subsystem;
3308 }
3309
3310 if (!found)
3311 subsys->instance = ctrl->instance;
3312 ctrl->subsys = subsys;
3313 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
3314 mutex_unlock(&nvme_subsystems_lock);
3315 return 0;
3316
3317out_put_subsystem:
3318 nvme_put_subsystem(subsys);
3319out_unlock:
3320 mutex_unlock(&nvme_subsystems_lock);
3321 return ret;
3322}
3323
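/*
 * Build and submit a Get Log Page command, splitting the dword count across
 * NUMDL/NUMDU and the byte offset across LPOL/LPOU.
 */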
3324static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page,
3325 u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi)
3326{
3327 struct nvme_command c = { };
3328 u32 dwlen = nvme_bytes_to_numd(size);
3329
3330 c.get_log_page.opcode = nvme_admin_get_log_page;
3331 c.get_log_page.nsid = cpu_to_le32(nsid);
3332 c.get_log_page.lid = log_page;
3333 c.get_log_page.lsp = lsp;
3334 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1));
3335 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16);
3336 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset));
3337 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset));
3338 c.get_log_page.csi = csi;
3339 c.get_log_page.lsi = cpu_to_le16(lsi);
3340
3341 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
3342}
3343
3344int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
3345 void *log, size_t size, u64 offset)
3346{
3347 return nvme_get_log_lsi(ctrl, nsid, log_page, lsp, csi, log, size,
3348 offset, 0);
3349}
3350
3351static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
3352 struct nvme_effects_log **log)
3353{
3354 struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi);
3355 int ret;
3356
3357 if (cel)
3358 goto out;
3359
3360 cel = kzalloc_obj(*cel);
3361 if (!cel)
3362 return -ENOMEM;
3363
3364 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
3365 cel, sizeof(*cel), 0);
3366 if (ret) {
3367 kfree(cel);
3368 return ret;
3369 }
3370
3371 old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
3372 if (xa_is_err(old)) {
3373 kfree(cel);
3374 return xa_err(old);
3375 }
3376out:
3377 *log = cel;
3378 return 0;
3379}
3380
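/*
 * Convert a power-of-two size field expressed in units of the controller's
 * minimum memory page size (CAP.MPSMIN) into 512-byte sectors, saturating at
 * UINT_MAX on overflow.
 */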
3381static inline u32 nvme_mps_to_sectors(struct nvme_ctrl *ctrl, u32 units)
3382{
3383 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val;
3384
3385 if (check_shl_overflow(1U, units + page_shift - 9, &val))
3386 return UINT_MAX;
3387 return val;
3388}
3389
3390static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
3391{
3392 struct nvme_command c = { };
3393 struct nvme_id_ctrl_nvm *id;
3394 int ret;
3395
3396 /*
3397	 * Even though the NVMe spec explicitly states that MDTS is not applicable
3398	 * to write-zeroes, we are cautious and limit the size to the
3399	 * controller's max_hw_sectors value, which is based on the MDTS field
3400 * and possibly other limiting factors.
3401 */
3402 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
3403 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
3404 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors;
3405 else
3406 ctrl->max_zeroes_sectors = 0;
3407
3408 if (!nvme_is_io_ctrl(ctrl) ||
3409 !nvme_id_cns_ok(ctrl, NVME_ID_CNS_CS_CTRL) ||
3410 test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
3411 return 0;
3412
3413 id = kzalloc_obj(*id);
3414 if (!id)
3415 return -ENOMEM;
3416
3417 c.identify.opcode = nvme_admin_identify;
3418 c.identify.cns = NVME_ID_CNS_CS_CTRL;
3419 c.identify.csi = NVME_CSI_NVM;
3420
3421 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
3422 if (ret)
3423 goto free_data;
3424
3425 ctrl->dmrl = id->dmrl;
3426 ctrl->dmrsl = le32_to_cpu(id->dmrsl);
3427 if (id->wzsl && !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
3428 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl);
3429
3430free_data:
3431 if (ret > 0)
3432 set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags);
3433 kfree(id);
3434 return ret;
3435}
3436
3437static int nvme_init_effects_log(struct nvme_ctrl *ctrl,
3438 u8 csi, struct nvme_effects_log **log)
3439{
3440 struct nvme_effects_log *effects, *old;
3441
3442 effects = kzalloc_obj(*effects);
3443 if (!effects)
3444 return -ENOMEM;
3445
3446 old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL);
3447 if (xa_is_err(old)) {
3448 kfree(effects);
3449 return xa_err(old);
3450 }
3451
3452 *log = effects;
3453 return 0;
3454}
3455
3456static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
3457{
3458 struct nvme_effects_log *log = ctrl->effects;
3459
3460 log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
3461 NVME_CMD_EFFECTS_NCC |
3462 NVME_CMD_EFFECTS_CSE_MASK);
3463 log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
3464 NVME_CMD_EFFECTS_CSE_MASK);
3465
3466 /*
3467 * The spec says the result of a security receive command depends on
3468 * the previous security send command. As such, many vendors log this
3469	 * command as one to be submitted only when no other commands to the same
3470 * namespace are outstanding. The intention is to tell the host to
3471 * prevent mixing security send and receive.
3472 *
3473 * This driver can only enforce such exclusive access against IO
3474 * queues, though. We are not readily able to enforce such a rule for
3475 * two commands to the admin queue, which is the only queue that
3476 * matters for this command.
3477 *
3478 * Rather than blindly freezing the IO queues for this effect that
3479 * doesn't even apply to IO, mask it off.
3480 */
3481 log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK);
3482
3483 log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
3484 log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
3485 log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
3486}
3487
3488static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
3489{
3490 int ret = 0;
3491
3492 if (ctrl->effects)
3493 return 0;
3494
3495 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
3496 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
3497 if (ret < 0)
3498 return ret;
3499 }
3500
3501 if (!ctrl->effects) {
3502 ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
3503 if (ret < 0)
3504 return ret;
3505 }
3506
3507 nvme_init_known_nvm_effects(ctrl);
3508 return 0;
3509}
3510
3511static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
3512{
3513 /*
3514 * In fabrics we need to verify the cntlid matches the
3515 * admin connect
3516 */
3517 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
3518 dev_err(ctrl->device,
3519 "Mismatching cntlid: Connect %u vs Identify %u, rejecting\n",
3520 ctrl->cntlid, le16_to_cpu(id->cntlid));
3521 return -EINVAL;
3522 }
3523
3524 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
3525 dev_err(ctrl->device,
3526 "keep-alive support is mandatory for fabrics\n");
3527 return -EINVAL;
3528 }
3529
3530 if (nvme_is_io_ctrl(ctrl) && ctrl->ioccsz < 4) {
3531 dev_err(ctrl->device,
3532 "I/O queue command capsule supported size %d < 4\n",
3533 ctrl->ioccsz);
3534 return -EINVAL;
3535 }
3536
3537 if (nvme_is_io_ctrl(ctrl) && ctrl->iorcsz < 1) {
3538 dev_err(ctrl->device,
3539 "I/O queue response capsule supported size %d < 1\n",
3540 ctrl->iorcsz);
3541 return -EINVAL;
3542 }
3543
3544 if (!ctrl->maxcmd) {
3545 dev_warn(ctrl->device,
3546 "Firmware bug: maximum outstanding commands is 0\n");
3547 ctrl->maxcmd = ctrl->sqsize + 1;
3548 }
3549
3550 return 0;
3551}
3552
3553static int nvme_init_identify(struct nvme_ctrl *ctrl)
3554{
3555 struct queue_limits lim;
3556 struct nvme_id_ctrl *id;
3557 u32 max_hw_sectors;
3558 bool prev_apst_enabled;
3559 int ret;
3560
3561 ret = nvme_identify_ctrl(ctrl, &id);
3562 if (ret) {
3563 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
3564 return -EIO;
3565 }
3566
3567 if (!(ctrl->ops->flags & NVME_F_FABRICS))
3568 ctrl->cntlid = le16_to_cpu(id->cntlid);
3569
3570 if (!ctrl->identified) {
3571 unsigned int i;
3572
3573 /*
3574		 * Check for quirks. Quirks can depend on the firmware version,
3575 * so, in principle, the set of quirks present can change
3576 * across a reset. As a possible future enhancement, we
3577 * could re-scan for quirks every time we reinitialize
3578 * the device, but we'd have to make sure that the driver
3579 * behaves intelligently if the quirks change.
3580 */
3581 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
3582 if (quirk_matches(id, &core_quirks[i]))
3583 ctrl->quirks |= core_quirks[i].quirks;
3584 }
3585
3586 ret = nvme_init_subsystem(ctrl, id);
3587 if (ret)
3588 goto out_free;
3589
3590 ret = nvme_init_effects(ctrl, id);
3591 if (ret)
3592 goto out_free;
3593 }
3594 memcpy(ctrl->subsys->firmware_rev, id->fr,
3595 sizeof(ctrl->subsys->firmware_rev));
3596
3597 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
3598 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
3599 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
3600 }
3601
3602 ctrl->crdt[0] = le16_to_cpu(id->crdt1);
3603 ctrl->crdt[1] = le16_to_cpu(id->crdt2);
3604 ctrl->crdt[2] = le16_to_cpu(id->crdt3);
3605
3606 ctrl->oacs = le16_to_cpu(id->oacs);
3607 ctrl->oncs = le16_to_cpu(id->oncs);
3608 ctrl->mtfa = le16_to_cpu(id->mtfa);
3609 ctrl->oaes = le32_to_cpu(id->oaes);
3610 ctrl->wctemp = le16_to_cpu(id->wctemp);
3611 ctrl->cctemp = le16_to_cpu(id->cctemp);
3612
3613 atomic_set(&ctrl->abort_limit, id->acl + 1);
3614 ctrl->vwc = id->vwc;
3615 if (id->mdts)
3616 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts);
3617 else
3618 max_hw_sectors = UINT_MAX;
3619 ctrl->max_hw_sectors =
3620 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
3621
3622 lim = queue_limits_start_update(ctrl->admin_q);
3623 nvme_set_ctrl_limits(ctrl, &lim, true);
3624 ret = queue_limits_commit_update(ctrl->admin_q, &lim);
3625 if (ret)
3626 goto out_free;
3627
3628 ctrl->sgls = le32_to_cpu(id->sgls);
3629 ctrl->kas = le16_to_cpu(id->kas);
3630 ctrl->max_namespaces = le32_to_cpu(id->mnan);
3631 ctrl->ctratt = le32_to_cpu(id->ctratt);
3632
3633 ctrl->cntrltype = id->cntrltype;
3634 ctrl->dctype = id->dctype;
3635
3636 if (id->rtd3e) {
3637 /* us -> s */
3638 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC;
3639
3640 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time,
3641 shutdown_timeout, 60);
3642
3643 if (ctrl->shutdown_timeout != shutdown_timeout)
3644 dev_info(ctrl->device,
3645 "D3 entry latency set to %u seconds\n",
3646 ctrl->shutdown_timeout);
3647 } else
3648 ctrl->shutdown_timeout = shutdown_timeout;
3649
3650 ctrl->npss = id->npss;
3651 ctrl->apsta = id->apsta;
3652 prev_apst_enabled = ctrl->apst_enabled;
3653 if (ctrl->quirks & NVME_QUIRK_NO_APST) {
3654 if (force_apst && id->apsta) {
3655 dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
3656 ctrl->apst_enabled = true;
3657 } else {
3658 ctrl->apst_enabled = false;
3659 }
3660 } else {
3661 ctrl->apst_enabled = id->apsta;
3662 }
3663 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));
3664
3665 if (ctrl->ops->flags & NVME_F_FABRICS) {
3666 ctrl->icdoff = le16_to_cpu(id->icdoff);
3667 ctrl->ioccsz = le32_to_cpu(id->ioccsz);
3668 ctrl->iorcsz = le32_to_cpu(id->iorcsz);
3669 ctrl->maxcmd = le16_to_cpu(id->maxcmd);
3670
3671 ret = nvme_check_ctrl_fabric_info(ctrl, id);
3672 if (ret)
3673 goto out_free;
3674 } else {
3675 ctrl->hmpre = le32_to_cpu(id->hmpre);
3676 ctrl->hmmin = le32_to_cpu(id->hmmin);
3677 ctrl->hmminds = le32_to_cpu(id->hmminds);
3678 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
3679 }
3680
3681 ret = nvme_mpath_init_identify(ctrl, id);
3682 if (ret < 0)
3683 goto out_free;
3684
3685 if (ctrl->apst_enabled && !prev_apst_enabled)
3686 dev_pm_qos_expose_latency_tolerance(ctrl->device);
3687 else if (!ctrl->apst_enabled && prev_apst_enabled)
3688 dev_pm_qos_hide_latency_tolerance(ctrl->device);
3689 ctrl->awupf = le16_to_cpu(id->awupf);
3690out_free:
3691 kfree(id);
3692 return ret;
3693}
3694
3695/*
3696 * Initialize the cached copies of the Identify data and various controller
3697 * register in our nvme_ctrl structure. This should be called as soon as
3698 * registers in our nvme_ctrl structure. This should be called as soon as
3699 */
3700int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
3701{
3702 int ret;
3703
3704 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
3705 if (ret) {
3706 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
3707 return ret;
3708 }
3709
3710 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
3711
3712 if (ctrl->vs >= NVME_VS(1, 1, 0))
3713 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
3714
3715 ret = nvme_init_identify(ctrl);
3716 if (ret)
3717 return ret;
3718
3719 if (nvme_admin_ctrl(ctrl)) {
3720 /*
3721 * An admin controller has one admin queue, but no I/O queues.
3722 * Override queue_count so it only creates an admin queue.
3723 */
3724 dev_dbg(ctrl->device,
3725 "Subsystem %s is an administrative controller",
3726 ctrl->subsys->subnqn);
3727 ctrl->queue_count = 1;
3728 }
3729
3730 ret = nvme_configure_apst(ctrl);
3731 if (ret < 0)
3732 return ret;
3733
3734 ret = nvme_configure_timestamp(ctrl);
3735 if (ret < 0)
3736 return ret;
3737
3738 ret = nvme_configure_host_options(ctrl);
3739 if (ret < 0)
3740 return ret;
3741
3742 nvme_configure_opal(ctrl, was_suspended);
3743
3744 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
3745 /*
3746		 * Do not return errors unless we are in a controller reset;
3747 * the controller works perfectly fine without hwmon.
3748 */
3749 ret = nvme_hwmon_init(ctrl);
3750 if (ret == -EINTR)
3751 return ret;
3752 }
3753
3754 clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
3755 ctrl->identified = true;
3756
3757 nvme_start_keep_alive(ctrl);
3758
3759 return 0;
3760}
3761EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
3762
3763static int nvme_dev_open(struct inode *inode, struct file *file)
3764{
3765 struct nvme_ctrl *ctrl =
3766 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3767
3768 switch (nvme_ctrl_state(ctrl)) {
3769 case NVME_CTRL_LIVE:
3770 break;
3771 default:
3772 return -EWOULDBLOCK;
3773 }
3774
3775 nvme_get_ctrl(ctrl);
3776 if (!try_module_get(ctrl->ops->module)) {
3777 nvme_put_ctrl(ctrl);
3778 return -EINVAL;
3779 }
3780
3781 file->private_data = ctrl;
3782 return 0;
3783}
3784
3785static int nvme_dev_release(struct inode *inode, struct file *file)
3786{
3787 struct nvme_ctrl *ctrl =
3788 container_of(inode->i_cdev, struct nvme_ctrl, cdev);
3789
3790 module_put(ctrl->ops->module);
3791 nvme_put_ctrl(ctrl);
3792 return 0;
3793}
3794
3795static const struct file_operations nvme_dev_fops = {
3796 .owner = THIS_MODULE,
3797 .open = nvme_dev_open,
3798 .release = nvme_dev_release,
3799 .unlocked_ioctl = nvme_dev_ioctl,
3800 .compat_ioctl = compat_ptr_ioctl,
3801 .uring_cmd = nvme_dev_uring_cmd,
3802};
3803
3804static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl,
3805 unsigned nsid)
3806{
3807 struct nvme_ns_head *h;
3808
3809 lockdep_assert_held(&ctrl->subsys->lock);
3810
3811 list_for_each_entry(h, &ctrl->subsys->nsheads, entry) {
3812 /*
3813 * Private namespaces can share NSIDs under some conditions.
3814 * In that case we can't use the same ns_head for namespaces
3815 * with the same NSID.
3816 */
3817 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h))
3818 continue;
3819 if (nvme_tryget_ns_head(h))
3820 return h;
3821 }
3822
3823 return NULL;
3824}
3825
3826static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys,
3827 struct nvme_ns_ids *ids)
3828{
3829 bool has_uuid = !uuid_is_null(&ids->uuid);
3830 bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid));
3831 bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
3832 struct nvme_ns_head *h;
3833
3834 lockdep_assert_held(&subsys->lock);
3835
3836 list_for_each_entry(h, &subsys->nsheads, entry) {
3837 if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid))
3838 return -EINVAL;
3839 if (has_nguid &&
3840 memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0)
3841 return -EINVAL;
3842 if (has_eui64 &&
3843 memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0)
3844 return -EINVAL;
3845 }
3846
3847 return 0;
3848}
3849
3850static void nvme_cdev_rel(struct device *dev)
3851{
3852 ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
3853}
3854
3855void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
3856{
3857 cdev_device_del(cdev, cdev_device);
3858 put_device(cdev_device);
3859}
3860
3861int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
3862 const struct file_operations *fops, struct module *owner)
3863{
3864 int minor, ret;
3865
3866 minor = ida_alloc(&nvme_ns_chr_minor_ida, GFP_KERNEL);
3867 if (minor < 0)
3868 return minor;
3869 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
3870 cdev_device->class = &nvme_ns_chr_class;
3871 cdev_device->release = nvme_cdev_rel;
3872 device_initialize(cdev_device);
3873 cdev_init(cdev, fops);
3874 cdev->owner = owner;
3875 ret = cdev_device_add(cdev, cdev_device);
3876 if (ret)
3877 put_device(cdev_device);
3878
3879 return ret;
3880}
3881
3882static int nvme_ns_chr_open(struct inode *inode, struct file *file)
3883{
3884 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev));
3885}
3886
3887static int nvme_ns_chr_release(struct inode *inode, struct file *file)
3888{
3889 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev));
3890 return 0;
3891}
3892
3893static const struct file_operations nvme_ns_chr_fops = {
3894 .owner = THIS_MODULE,
3895 .open = nvme_ns_chr_open,
3896 .release = nvme_ns_chr_release,
3897 .unlocked_ioctl = nvme_ns_chr_ioctl,
3898 .compat_ioctl = compat_ptr_ioctl,
3899 .uring_cmd = nvme_ns_chr_uring_cmd,
3900 .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
3901};
3902
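/*
 * Register the per-namespace generic character device (/dev/ngXnY), named to
 * mirror the block device (/dev/nvmeXnY).  It provides ioctl/passthrough
 * access to the namespace independent of the block device.
 */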
3903static int nvme_add_ns_cdev(struct nvme_ns *ns)
3904{
3905 int ret;
3906
3907 ns->cdev_device.parent = ns->ctrl->device;
3908 ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
3909 ns->ctrl->instance, ns->head->instance);
3910 if (ret)
3911 return ret;
3912
3913 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
3914 ns->ctrl->ops->module);
3915}
3916
3917static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
3918 struct nvme_ns_info *info)
3919{
3920 struct nvme_ns_head *head;
3921 size_t size = sizeof(*head);
3922 int ret = -ENOMEM;
3923
3924#ifdef CONFIG_NVME_MULTIPATH
3925 size += num_possible_nodes() * sizeof(struct nvme_ns *);
3926#endif
3927
3928 head = kzalloc(size, GFP_KERNEL);
3929 if (!head)
3930 goto out;
3931 ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL);
3932 if (ret < 0)
3933 goto out_free_head;
3934 head->instance = ret;
3935 INIT_LIST_HEAD(&head->list);
3936 ret = init_srcu_struct(&head->srcu);
3937 if (ret)
3938 goto out_ida_remove;
3939 head->subsys = ctrl->subsys;
3940 head->ns_id = info->nsid;
3941 head->ids = info->ids;
3942 head->shared = info->is_shared;
3943 head->rotational = info->is_rotational;
3944 ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1);
3945 ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE);
3946 kref_init(&head->ref);
3947
3948 if (head->ids.csi) {
3949 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects);
3950 if (ret)
3951 goto out_cleanup_srcu;
3952 } else
3953 head->effects = ctrl->effects;
3954
3955 ret = nvme_mpath_alloc_disk(ctrl, head);
3956 if (ret)
3957 goto out_cleanup_srcu;
3958
3959 list_add_tail(&head->entry, &ctrl->subsys->nsheads);
3960
3961 kref_get(&ctrl->subsys->ref);
3962
3963 return head;
3964out_cleanup_srcu:
3965 cleanup_srcu_struct(&head->srcu);
3966out_ida_remove:
3967 ida_free(&ctrl->subsys->ns_ida, head->instance);
3968out_free_head:
3969 kfree(head);
3970out:
3971 if (ret > 0)
3972 ret = blk_status_to_errno(nvme_error_status(ret));
3973 return ERR_PTR(ret);
3974}
3975
3976static int nvme_global_check_duplicate_ids(struct nvme_subsystem *this,
3977 struct nvme_ns_ids *ids)
3978{
3979 struct nvme_subsystem *s;
3980 int ret = 0;
3981
3982 /*
3983 * Note that this check is racy as we try to avoid holding the global
3984 * lock over the whole ns_head creation. But it is only intended as
3985 * a sanity check anyway.
3986 */
3987 mutex_lock(&nvme_subsystems_lock);
3988 list_for_each_entry(s, &nvme_subsystems, entry) {
3989 if (s == this)
3990 continue;
3991 mutex_lock(&s->lock);
3992 ret = nvme_subsys_check_duplicate_ids(s, ids);
3993 mutex_unlock(&s->lock);
3994 if (ret)
3995 break;
3996 }
3997 mutex_unlock(&nvme_subsystems_lock);
3998
3999 return ret;
4000}
4001
4002static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
4003{
4004 struct nvme_ctrl *ctrl = ns->ctrl;
4005 struct nvme_ns_head *head = NULL;
4006 int ret;
4007
4008 ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids);
4009 if (ret) {
4010 /*
4011 * We've found two different namespaces on two different
4012 * subsystems that report the same ID. This is pretty nasty
4013 * for anything that actually requires unique device
4014 * identification. In the kernel we need this for multipathing,
4015 * and in user space the /dev/disk/by-id/ links rely on it.
4016 *
4017		 * If the device also claims to be multi-path capable, back off
4018		 * here now and refuse to probe the second device, as this is a
4019		 * recipe for data corruption. If not, this is probably a
4020		 * cheap consumer device on the PCIe bus, so let the user
4021		 * proceed and use the shiny toy, but warn that with a changing
4022		 * probing order (which due to our async probing could just be
4023		 * the device taking longer to start up) the other device could
4024		 * show up at any time.
4025 */
4026 nvme_print_device_info(ctrl);
4027 if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */
4028 ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) &&
4029 info->is_shared)) {
4030 dev_err(ctrl->device,
4031 "ignoring nsid %d because of duplicate IDs\n",
4032 info->nsid);
4033 return ret;
4034 }
4035
4036 dev_err(ctrl->device,
4037 "clearing duplicate IDs for nsid %d\n", info->nsid);
4038 dev_err(ctrl->device,
4039 "use of /dev/disk/by-id/ may cause data corruption\n");
4040 memset(&info->ids.nguid, 0, sizeof(info->ids.nguid));
4041 memset(&info->ids.uuid, 0, sizeof(info->ids.uuid));
4042 memset(&info->ids.eui64, 0, sizeof(info->ids.eui64));
4043 ctrl->quirks |= NVME_QUIRK_BOGUS_NID;
4044 }
4045
4046 mutex_lock(&ctrl->subsys->lock);
4047 head = nvme_find_ns_head(ctrl, info->nsid);
4048 if (!head) {
4049 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids);
4050 if (ret) {
4051 dev_err(ctrl->device,
4052 "duplicate IDs in subsystem for nsid %d\n",
4053 info->nsid);
4054 goto out_unlock;
4055 }
4056 head = nvme_alloc_ns_head(ctrl, info);
4057 if (IS_ERR(head)) {
4058 ret = PTR_ERR(head);
4059 goto out_unlock;
4060 }
4061 } else {
4062 ret = -EINVAL;
4063 if ((!info->is_shared || !head->shared) &&
4064 !list_empty(&head->list)) {
4065 dev_err(ctrl->device,
4066 "Duplicate unshared namespace %d\n",
4067 info->nsid);
4068 goto out_put_ns_head;
4069 }
4070 if (!nvme_ns_ids_equal(&head->ids, &info->ids)) {
4071 dev_err(ctrl->device,
4072 "IDs don't match for shared namespace %d\n",
4073 info->nsid);
4074 goto out_put_ns_head;
4075 }
4076
4077 if (!multipath) {
4078 dev_warn(ctrl->device,
4079 "Found shared namespace %d, but multipathing not supported.\n",
4080 info->nsid);
4081 dev_warn_once(ctrl->device,
4082				"Shared namespace support requires nvme_core.multipath=Y.\n");
4083 }
4084 }
4085
4086 list_add_tail_rcu(&ns->siblings, &head->list);
4087 ns->head = head;
4088 mutex_unlock(&ctrl->subsys->lock);
4089
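	/*
	 * A new path just joined this head; if a delayed removal of the head
	 * is still pending from when the last path went away, cancel it and
	 * drop the module reference taken when that work was queued.
	 */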
4090#ifdef CONFIG_NVME_MULTIPATH
4091 if (cancel_delayed_work(&head->remove_work))
4092 module_put(THIS_MODULE);
4093#endif
4094 return 0;
4095
4096out_put_ns_head:
4097 nvme_put_ns_head(head);
4098out_unlock:
4099 mutex_unlock(&ctrl->subsys->lock);
4100 return ret;
4101}
4102
4103struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
4104{
4105 struct nvme_ns *ns, *ret = NULL;
4106 int srcu_idx;
4107
4108 srcu_idx = srcu_read_lock(&ctrl->srcu);
4109 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
4110 srcu_read_lock_held(&ctrl->srcu)) {
4111 if (ns->head->ns_id == nsid) {
4112 if (!nvme_get_ns(ns))
4113 continue;
4114 ret = ns;
4115 break;
4116 }
4117 if (ns->head->ns_id > nsid)
4118 break;
4119 }
4120 srcu_read_unlock(&ctrl->srcu, srcu_idx);
4121 return ret;
4122}
4123EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, "NVME_TARGET_PASSTHRU");
4124
4125/*
4126 * Add the namespace to the controller list while keeping the list ordered.
4127 */
4128static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
4129{
4130 struct nvme_ns *tmp;
4131
4132 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
4133 if (tmp->head->ns_id < ns->head->ns_id) {
4134 list_add_rcu(&ns->list, &tmp->list);
4135 return;
4136 }
4137 }
4138 list_add_rcu(&ns->list, &ns->ctrl->namespaces);
4139}
4140
4141static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
4142{
4143 struct queue_limits lim = { };
4144 struct nvme_ns *ns;
4145 struct gendisk *disk;
4146 int node = ctrl->numa_node;
4147 bool last_path = false;
4148
4149 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
4150 if (!ns)
4151 return;
4152
4153 if (ctrl->opts && ctrl->opts->data_digest)
4154 lim.features |= BLK_FEAT_STABLE_WRITES;
4155 if (ctrl->ops->supports_pci_p2pdma &&
4156 ctrl->ops->supports_pci_p2pdma(ctrl))
4157 lim.features |= BLK_FEAT_PCI_P2PDMA;
4158
4159 disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns);
4160 if (IS_ERR(disk))
4161 goto out_free_ns;
4162 disk->fops = &nvme_bdev_ops;
4163 disk->private_data = ns;
4164
4165 ns->disk = disk;
4166 ns->queue = disk->queue;
4167 ns->ctrl = ctrl;
4168 kref_init(&ns->kref);
4169
4170 if (nvme_init_ns_head(ns, info))
4171 goto out_cleanup_disk;
4172
4173 /*
4174 * If multipathing is enabled, the device name for all disks and not
4175 * just those that represent shared namespaces needs to be based on the
4176 * subsystem instance. Using the controller instance for private
4177 * namespaces could lead to naming collisions between shared and private
4178 * namespaces if they don't use a common numbering scheme.
4179 *
4180 * If multipathing is not enabled, disk names must use the controller
4181 * instance as shared namespaces will show up as multiple block
4182 * devices.
4183 */
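	/*
	 * For example, with subsystem instance 0, controller instance 1 and
	 * namespace head instance 2:
	 *   nvme0c1n2 - hidden per-path device when the head is multipathed
	 *   nvme0n2   - named by subsystem instance when multipath is enabled
	 *   nvme1n2   - named by controller instance when multipath is disabled
	 */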
4184 if (nvme_ns_head_multipath(ns->head)) {
4185 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
4186 ctrl->instance, ns->head->instance);
4187 disk->flags |= GENHD_FL_HIDDEN;
4188 } else if (multipath) {
4189 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
4190 ns->head->instance);
4191 } else {
4192 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
4193 ns->head->instance);
4194 }
4195
4196 if (nvme_update_ns_info(ns, info))
4197 goto out_unlink_ns;
4198
4199 mutex_lock(&ctrl->namespaces_lock);
4200 /*
4201 * Ensure that no namespaces are added to the ctrl list after the queues
4202 * are frozen, thereby avoiding a deadlock between scan and reset.
4203 */
4204 if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) {
4205 mutex_unlock(&ctrl->namespaces_lock);
4206 goto out_unlink_ns;
4207 }
4208 nvme_ns_add_to_ctrl_list(ns);
4209 mutex_unlock(&ctrl->namespaces_lock);
4210 synchronize_srcu(&ctrl->srcu);
4211 nvme_get_ctrl(ctrl);
4212
4213 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups))
4214 goto out_cleanup_ns_from_list;
4215
4216 if (!nvme_ns_head_multipath(ns->head))
4217 nvme_add_ns_cdev(ns);
4218
4219 nvme_mpath_add_disk(ns, info->anagrpid);
4220 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
4221
4222 return;
4223
4224 out_cleanup_ns_from_list:
4225 nvme_put_ctrl(ctrl);
4226 mutex_lock(&ctrl->namespaces_lock);
4227 list_del_rcu(&ns->list);
4228 mutex_unlock(&ctrl->namespaces_lock);
4229 synchronize_srcu(&ctrl->srcu);
4230 out_unlink_ns:
4231 mutex_lock(&ctrl->subsys->lock);
4232 list_del_rcu(&ns->siblings);
4233 if (list_empty(&ns->head->list)) {
4234 list_del_init(&ns->head->entry);
4235 /*
4236 * If multipath is not configured, we still create a namespace
4237 * head (nshead), but head->disk is not initialized in that
4238 * case. As a result, only a single reference to nshead is held
4239 * (via kref_init()) when it is created. Therefore, ensure that
4240 * we do not release the reference to nshead twice if head->disk
4241 * is not present.
4242 */
4243 if (ns->head->disk)
4244 last_path = true;
4245 }
4246 mutex_unlock(&ctrl->subsys->lock);
4247 if (last_path)
4248 nvme_put_ns_head(ns->head);
4249 nvme_put_ns_head(ns->head);
4250 out_cleanup_disk:
4251 put_disk(disk);
4252 out_free_ns:
4253 kfree(ns);
4254}
4255
4256static void nvme_ns_remove(struct nvme_ns *ns)
4257{
4258 bool last_path = false;
4259
4260 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
4261 return;
4262
4263 clear_bit(NVME_NS_READY, &ns->flags);
4264 set_capacity(ns->disk, 0);
4265 nvme_fault_inject_fini(&ns->fault_inject);
4266
4267 /*
4268 * Ensure that !NVME_NS_READY is seen by other threads to prevent
4269 * this ns going back into current_path.
4270 */
4271 synchronize_srcu(&ns->head->srcu);
4272
4273 /* wait for concurrent submissions */
4274 if (nvme_mpath_clear_current_path(ns))
4275 synchronize_srcu(&ns->head->srcu);
4276
4277 mutex_lock(&ns->ctrl->subsys->lock);
4278 list_del_rcu(&ns->siblings);
4279 if (list_empty(&ns->head->list)) {
4280 if (!nvme_mpath_queue_if_no_path(ns->head))
4281 list_del_init(&ns->head->entry);
4282 last_path = true;
4283 }
4284 mutex_unlock(&ns->ctrl->subsys->lock);
4285
4286 /* guarantee not available in head->list */
4287 synchronize_srcu(&ns->head->srcu);
4288
4289 if (!nvme_ns_head_multipath(ns->head))
4290 nvme_cdev_del(&ns->cdev, &ns->cdev_device);
4291
4292 nvme_mpath_remove_sysfs_link(ns);
4293
4294 del_gendisk(ns->disk);
4295
4296 mutex_lock(&ns->ctrl->namespaces_lock);
4297 list_del_rcu(&ns->list);
4298 mutex_unlock(&ns->ctrl->namespaces_lock);
4299 synchronize_srcu(&ns->ctrl->srcu);
4300
4301 if (last_path)
4302 nvme_mpath_remove_disk(ns->head);
4303 nvme_put_ns(ns);
4304}
4305
4306static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
4307{
4308 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid);
4309
4310 if (ns) {
4311 nvme_ns_remove(ns);
4312 nvme_put_ns(ns);
4313 }
4314}
4315
4316static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info)
4317{
4318 int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
4319
4320 if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) {
4321 dev_err(ns->ctrl->device,
4322 "identifiers changed for nsid %d\n", ns->head->ns_id);
4323 goto out;
4324 }
4325
4326 ret = nvme_update_ns_info(ns, info);
4327out:
4328 /*
4329 * Only remove the namespace if we got a fatal error back from the
4330 * device, otherwise ignore the error and just move on.
4331 *
4332 * TODO: we should probably schedule a delayed retry here.
4333 */
4334 if (ret > 0 && (ret & NVME_STATUS_DNR))
4335 nvme_ns_remove(ns);
4336}
4337
4338static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
4339{
4340 struct nvme_ns_info info = { .nsid = nsid };
4341 struct nvme_ns *ns;
4342 int ret = 1;
4343
4344 if (nvme_identify_ns_descs(ctrl, &info))
4345 return;
4346
4347 if (info.ids.csi != NVME_CSI_NVM && !nvme_multi_css(ctrl)) {
4348 dev_warn(ctrl->device,
4349 "command set not reported for nsid: %d\n", nsid);
4350 return;
4351 }
4352
4353 /*
4354	 * If available, try to use the Command Set Independent Identify Namespace
4355	 * data structure to find all the generic information that is needed to
4356	 * set up a namespace. If not, fall back to the legacy version.
4357 */
4358 if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
4359 (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS) ||
4360 ctrl->vs >= NVME_VS(2, 0, 0))
4361 ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
4362 if (ret > 0)
4363 ret = nvme_ns_info_from_identify(ctrl, &info);
4364
4365 if (info.is_removed)
4366 nvme_ns_remove_by_nsid(ctrl, nsid);
4367
4368 /*
4369 * Ignore the namespace if it is not ready. We will get an AEN once it
4370 * becomes ready and restart the scan.
4371 */
4372 if (ret || !info.is_ready)
4373 return;
4374
4375 ns = nvme_find_get_ns(ctrl, nsid);
4376 if (ns) {
4377 nvme_validate_ns(ns, &info);
4378 nvme_put_ns(ns);
4379 } else {
4380 nvme_alloc_ns(ctrl, &info);
4381 }
4382}
4383
4384/**
4385 * struct async_scan_info - keeps track of controller & NSIDs to scan
4386 * @ctrl: Controller on which namespaces are being scanned
4387 * @next_nsid: Index of next NSID to scan in ns_list
4388 * @ns_list: Pointer to list of NSIDs to scan
4389 *
4390 * Note: There is a single async_scan_info structure shared by all instances
4391 * of nvme_scan_ns_async() scanning a given controller, so the atomic
4392 * operations on next_nsid are critical to ensure each instance scans a unique
4393 * NSID.
4394 */
4395struct async_scan_info {
4396 struct nvme_ctrl *ctrl;
4397 atomic_t next_nsid;
4398 __le32 *ns_list;
4399};
4400
4401static void nvme_scan_ns_async(void *data, async_cookie_t cookie)
4402{
4403 struct async_scan_info *scan_info = data;
4404 int idx;
4405 u32 nsid;
4406
4407 idx = (u32)atomic_fetch_inc(&scan_info->next_nsid);
4408 nsid = le32_to_cpu(scan_info->ns_list[idx]);
4409
4410 nvme_scan_ns(scan_info->ctrl, nsid);
4411}
4412
4413static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
4414 unsigned nsid)
4415{
4416 struct nvme_ns *ns, *next;
4417 LIST_HEAD(rm_list);
4418
4419 mutex_lock(&ctrl->namespaces_lock);
4420 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
4421 if (ns->head->ns_id > nsid) {
4422 list_del_rcu(&ns->list);
4423 synchronize_srcu(&ctrl->srcu);
4424 list_add_tail_rcu(&ns->list, &rm_list);
4425 }
4426 }
4427 mutex_unlock(&ctrl->namespaces_lock);
4428
4429 list_for_each_entry_safe(ns, next, &rm_list, list)
4430 nvme_ns_remove(ns);
4431}
4432
4433static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
4434{
4435 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32);
4436 __le32 *ns_list;
4437 u32 prev = 0;
4438 int ret = 0, i;
4439 ASYNC_DOMAIN(domain);
4440 struct async_scan_info scan_info;
4441
4442 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
4443 if (!ns_list)
4444 return -ENOMEM;
4445
4446 scan_info.ctrl = ctrl;
4447 scan_info.ns_list = ns_list;
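	/*
	 * Identify with CNS 0x02 (Active Namespace ID list) returns up to
	 * 1024 active NSIDs greater than the NSID in CDW1, in increasing
	 * order.  Walk the list page by page, scanning each returned NSID and
	 * removing any namespaces that fall into the gaps between them.
	 */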
4448 for (;;) {
4449 struct nvme_command cmd = {
4450 .identify.opcode = nvme_admin_identify,
4451 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST,
4452 .identify.nsid = cpu_to_le32(prev),
4453 };
4454
4455 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list,
4456 NVME_IDENTIFY_DATA_SIZE);
4457 if (ret) {
4458 dev_warn(ctrl->device,
4459 "Identify NS List failed (status=0x%x)\n", ret);
4460 goto free;
4461 }
4462
4463 atomic_set(&scan_info.next_nsid, 0);
4464 for (i = 0; i < nr_entries; i++) {
4465 u32 nsid = le32_to_cpu(ns_list[i]);
4466
4467 if (!nsid) /* end of the list? */
4468 goto out;
4469 async_schedule_domain(nvme_scan_ns_async, &scan_info,
4470 &domain);
4471 while (++prev < nsid)
4472 nvme_ns_remove_by_nsid(ctrl, prev);
4473 }
4474 async_synchronize_full_domain(&domain);
4475 }
4476 out:
4477 nvme_remove_invalid_namespaces(ctrl, prev);
4478 free:
4479 async_synchronize_full_domain(&domain);
4480 kfree(ns_list);
4481 return ret;
4482}
4483
4484static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
4485{
4486 struct nvme_id_ctrl *id;
4487 u32 nn, i;
4488
4489 if (nvme_identify_ctrl(ctrl, &id))
4490 return;
4491 nn = le32_to_cpu(id->nn);
4492 kfree(id);
4493
4494 for (i = 1; i <= nn; i++)
4495 nvme_scan_ns(ctrl, i);
4496
4497 nvme_remove_invalid_namespaces(ctrl, nn);
4498}
4499
4500static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
4501{
4502 size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
4503 __le32 *log;
4504 int error;
4505
4506 log = kzalloc(log_size, GFP_KERNEL);
4507 if (!log)
4508 return;
4509
4510 /*
4511 * We need to read the log to clear the AEN, but we don't want to rely
4512 * on it for the changed namespace information as userspace could have
4513 * raced with us in reading the log page, which could cause us to miss
4514 * updates.
4515 */
4516 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
4517 NVME_CSI_NVM, log, log_size, 0);
4518 if (error)
4519 dev_warn(ctrl->device,
4520 "reading changed ns log failed: %d\n", error);
4521
4522 kfree(log);
4523}
4524
4525static void nvme_scan_work(struct work_struct *work)
4526{
4527 struct nvme_ctrl *ctrl =
4528 container_of(work, struct nvme_ctrl, scan_work);
4529 int ret;
4530
4531	/* No tagset on a live ctrl means the IO queues could not be created */
4532 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset)
4533 return;
4534
4535 /*
4536	 * Identify controller limits can change at controller reset due to a
4537	 * new firmware download; even though that is not common, we cannot ignore
4538	 * such a scenario. The controller's non-MDTS limits are reported in units
4539	 * of logical blocks, which depend on the format of the attached
4540	 * namespace. Hence re-read the limits at the time of ns allocation.
4541 */
4542 ret = nvme_init_non_mdts_limits(ctrl);
4543 if (ret < 0) {
4544 dev_warn(ctrl->device,
4545 "reading non-mdts-limits failed: %d\n", ret);
4546 return;
4547 }
4548
4549 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
4550 dev_info(ctrl->device, "rescanning namespaces.\n");
4551 nvme_clear_changed_ns_log(ctrl);
4552 }
4553
4554 mutex_lock(&ctrl->scan_lock);
4555 if (!nvme_id_cns_ok(ctrl, NVME_ID_CNS_NS_ACTIVE_LIST)) {
4556 nvme_scan_ns_sequential(ctrl);
4557 } else {
4558 /*
4559		 * Fall back to a sequential scan if the list scan fails with DNR
4560		 * set, to handle broken devices which should support Identify NS
4561		 * List (as per the VS they report) but don't actually support it.
4562 */
4563 ret = nvme_scan_ns_list(ctrl);
4564 if (ret > 0 && ret & NVME_STATUS_DNR)
4565 nvme_scan_ns_sequential(ctrl);
4566 }
4567 mutex_unlock(&ctrl->scan_lock);
4568
4569 /* Requeue if we have missed AENs */
4570 if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
4571 nvme_queue_scan(ctrl);
4572#ifdef CONFIG_NVME_MULTIPATH
4573 else if (ctrl->ana_log_buf)
4574 /* Re-read the ANA log page to not miss updates */
4575 queue_work(nvme_wq, &ctrl->ana_work);
4576#endif
4577}
4578
4579/*
4580 * This function iterates the namespace list unlocked to allow recovery from
4581 * controller failure. It is up to the caller to ensure the namespace list is
4582 * not modified by scan work while this function is executing.
4583 */
4584void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
4585{
4586 struct nvme_ns *ns, *next;
4587 LIST_HEAD(ns_list);
4588
4589 /*
4590	 * Make sure to requeue I/O to all namespaces, as it
4591	 * might result from the scan itself and must complete
4592	 * for the scan_work to make progress.
4593 */
4594 nvme_mpath_clear_ctrl_paths(ctrl);
4595
4596 /*
4597 * Unquiesce io queues so any pending IO won't hang, especially
4598 * those submitted from scan work
4599 */
4600 nvme_unquiesce_io_queues(ctrl);
4601
4602 /* prevent racing with ns scanning */
4603 flush_work(&ctrl->scan_work);
4604
4605 /*
4606	 * The dead state indicates that the controller was not gracefully
4607 * disconnected. In that case, we won't be able to flush any data while
4608 * removing the namespaces' disks; fail all the queues now to avoid
4609 * potentially having to clean up the failed sync later.
4610 */
4611 if (nvme_ctrl_state(ctrl) == NVME_CTRL_DEAD)
4612 nvme_mark_namespaces_dead(ctrl);
4613
4614 /* this is a no-op when called from the controller reset handler */
4615 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO);
4616
4617 mutex_lock(&ctrl->namespaces_lock);
4618 list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu);
4619 mutex_unlock(&ctrl->namespaces_lock);
4620 synchronize_srcu(&ctrl->srcu);
4621
4622 list_for_each_entry_safe(ns, next, &ns_list, list)
4623 nvme_ns_remove(ns);
4624}
4625EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
4626
4627static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env)
4628{
4629 const struct nvme_ctrl *ctrl =
4630 container_of(dev, struct nvme_ctrl, ctrl_device);
4631 struct nvmf_ctrl_options *opts = ctrl->opts;
4632 int ret;
4633
4634 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name);
4635 if (ret)
4636 return ret;
4637
4638 if (opts) {
4639 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr);
4640 if (ret)
4641 return ret;
4642
4643 ret = add_uevent_var(env, "NVME_TRSVCID=%s",
4644 opts->trsvcid ?: "none");
4645 if (ret)
4646 return ret;
4647
4648 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s",
4649 opts->host_traddr ?: "none");
4650 if (ret)
4651 return ret;
4652
4653 ret = add_uevent_var(env, "NVME_HOST_IFACE=%s",
4654 opts->host_iface ?: "none");
4655 }
4656 return ret;
4657}
4658
4659static void nvme_change_uevent(struct nvme_ctrl *ctrl, char *envdata)
4660{
4661 char *envp[2] = { envdata, NULL };
4662
4663 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4664}
4665
4666static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
4667{
4668 char *envp[2] = { NULL, NULL };
4669 u32 aen_result = ctrl->aen_result;
4670
4671 ctrl->aen_result = 0;
4672 if (!aen_result)
4673 return;
4674
4675 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
4676 if (!envp[0])
4677 return;
4678 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
4679 kfree(envp[0]);
4680}
4681
4682static void nvme_async_event_work(struct work_struct *work)
4683{
4684 struct nvme_ctrl *ctrl =
4685 container_of(work, struct nvme_ctrl, async_event_work);
4686
4687 nvme_aen_uevent(ctrl);
4688
4689 /*
4690 * The transport drivers must guarantee AER submission here is safe by
4691 * flushing ctrl async_event_work after changing the controller state
4692 * from LIVE and before freeing the admin queue.
4693 */
4694 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE)
4695 ctrl->ops->submit_async_event(ctrl);
4696}
4697
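/*
 * CSTS.PP (Processing Paused) is set while the controller pauses command
 * processing to activate new firmware.  Used below to wait for a firmware
 * activation to complete.
 */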
4698static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
4699{
4700
4701 u32 csts;
4702
4703 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
4704 return false;
4705
4706 if (csts == ~0)
4707 return false;
4708
4709 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
4710}
4711
4712static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
4713{
4714 struct nvme_fw_slot_info_log *log;
4715 u8 next_fw_slot, cur_fw_slot;
4716
4717	log = kmalloc(sizeof(*log), GFP_KERNEL);
4718 if (!log)
4719 return;
4720
4721 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
4722 log, sizeof(*log), 0)) {
4723 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
4724 goto out_free_log;
4725 }
4726
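	/*
	 * Active Firmware Info (AFI): bits 2:0 are the slot the controller is
	 * currently running from, bits 6:4 the slot that will be activated at
	 * the next controller level reset (0 if no activation is pending).
	 */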
4727 cur_fw_slot = log->afi & 0x7;
4728 next_fw_slot = (log->afi & 0x70) >> 4;
4729 if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) {
4730 dev_info(ctrl->device,
4731 "Firmware is activated after next Controller Level Reset\n");
4732 goto out_free_log;
4733 }
4734
4735 memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1],
4736 sizeof(ctrl->subsys->firmware_rev));
4737
4738out_free_log:
4739 kfree(log);
4740}
4741
4742static void nvme_fw_act_work(struct work_struct *work)
4743{
4744 struct nvme_ctrl *ctrl = container_of(work,
4745 struct nvme_ctrl, fw_act_work);
4746 unsigned long fw_act_timeout;
4747
4748 nvme_auth_stop(ctrl);
4749
4750 if (ctrl->mtfa)
4751 fw_act_timeout = jiffies + msecs_to_jiffies(ctrl->mtfa * 100);
4752 else
4753 fw_act_timeout = jiffies + secs_to_jiffies(admin_timeout);
4754
4755 nvme_quiesce_io_queues(ctrl);
4756 while (nvme_ctrl_pp_status(ctrl)) {
4757 if (time_after(jiffies, fw_act_timeout)) {
4758 dev_warn(ctrl->device,
4759 "Fw activation timeout, reset controller\n");
4760 nvme_try_sched_reset(ctrl);
4761 return;
4762 }
4763 msleep(100);
4764 }
4765
4766 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
4767 !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
4768 return;
4769
4770 nvme_unquiesce_io_queues(ctrl);
4771 /* read FW slot information to clear the AER */
4772 nvme_get_fw_slot_info(ctrl);
4773
4774 queue_work(nvme_wq, &ctrl->async_event_work);
4775}
4776
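/*
 * Layout of the Asynchronous Event Request completion result (dword 0):
 * bits 2:0 are the event type, bits 15:8 the event information (subtype)
 * and bits 23:16 the associated log page identifier.
 */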
4777static u32 nvme_aer_type(u32 result)
4778{
4779 return result & 0x7;
4780}
4781
4782static u32 nvme_aer_subtype(u32 result)
4783{
4784 return (result & 0xff00) >> 8;
4785}
4786
4787static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
4788{
4789 u32 aer_notice_type = nvme_aer_subtype(result);
4790 bool requeue = true;
4791
4792 switch (aer_notice_type) {
4793 case NVME_AER_NOTICE_NS_CHANGED:
4794 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
4795 nvme_queue_scan(ctrl);
4796 break;
4797 case NVME_AER_NOTICE_FW_ACT_STARTING:
4798 /*
4799 * We are (ab)using the RESETTING state to prevent subsequent
4800 * recovery actions from interfering with the controller's
4801 * firmware activation.
4802 */
4803 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
4804 requeue = false;
4805 queue_work(nvme_wq, &ctrl->fw_act_work);
4806 }
4807 break;
4808#ifdef CONFIG_NVME_MULTIPATH
4809 case NVME_AER_NOTICE_ANA:
4810 if (!ctrl->ana_log_buf)
4811 break;
4812 queue_work(nvme_wq, &ctrl->ana_work);
4813 break;
4814#endif
4815 case NVME_AER_NOTICE_DISC_CHANGED:
4816 ctrl->aen_result = result;
4817 break;
4818 default:
4819 dev_warn(ctrl->device, "async event result %08x\n", result);
4820 }
4821 return requeue;
4822}
4823
4824static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
4825{
4826 dev_warn(ctrl->device,
4827 "resetting controller due to persistent internal error\n");
4828 nvme_reset_ctrl(ctrl);
4829}
4830
4831void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
4832 volatile union nvme_result *res)
4833{
4834 u32 result = le32_to_cpu(res->u32);
4835 u32 aer_type = nvme_aer_type(result);
4836 u32 aer_subtype = nvme_aer_subtype(result);
4837 bool requeue = true;
4838
4839 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
4840 return;
4841
4842 trace_nvme_async_event(ctrl, result);
4843 switch (aer_type) {
4844 case NVME_AER_NOTICE:
4845 requeue = nvme_handle_aen_notice(ctrl, result);
4846 break;
4847 case NVME_AER_ERROR:
4848 /*
4849 * For a persistent internal error, don't run async_event_work
4850 * to submit a new AER. The controller reset will do it.
4851 */
4852 if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) {
4853 nvme_handle_aer_persistent_error(ctrl);
4854 return;
4855 }
4856 fallthrough;
4857 case NVME_AER_SMART:
4858 case NVME_AER_CSS:
4859 case NVME_AER_VS:
4860 ctrl->aen_result = result;
4861 break;
4862 default:
4863 break;
4864 }
4865
4866 if (requeue)
4867 queue_work(nvme_wq, &ctrl->async_event_work);
4868}
4869EXPORT_SYMBOL_GPL(nvme_complete_async_event);
4870
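/*
 * Tag set helpers for the transport drivers: nvme_alloc_admin_tag_set() sets
 * up the admin tag set plus the admin queue (and the fabrics queue for fabrics
 * transports); nvme_alloc_io_tag_set() sets up the I/O tag set plus the
 * connect queue for fabrics transports.  Tags are reserved so that fabric
 * connect and keep-alive commands can always be issued.
 */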
4871int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
4872 const struct blk_mq_ops *ops, unsigned int cmd_size)
4873{
4874 int ret;
4875
4876 memset(set, 0, sizeof(*set));
4877 set->ops = ops;
4878 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
4879 if (ctrl->ops->flags & NVME_F_FABRICS)
4880 /* Reserved for fabric connect and keep alive */
4881 set->reserved_tags = 2;
4882 set->numa_node = ctrl->numa_node;
4883 if (ctrl->ops->flags & NVME_F_BLOCKING)
4884 set->flags |= BLK_MQ_F_BLOCKING;
4885 set->cmd_size = cmd_size;
4886 set->driver_data = ctrl;
4887 set->nr_hw_queues = 1;
4888 set->timeout = NVME_ADMIN_TIMEOUT;
4889 ret = blk_mq_alloc_tag_set(set);
4890 if (ret)
4891 return ret;
4892
4893 /*
4894 * If a previous admin queue exists (e.g., from before a reset),
4895 * put it now before allocating a new one to avoid orphaning it.
4896 */
4897 if (ctrl->admin_q)
4898 blk_put_queue(ctrl->admin_q);
4899
4900 ctrl->admin_q = blk_mq_alloc_queue(set, NULL, NULL);
4901 if (IS_ERR(ctrl->admin_q)) {
4902 ret = PTR_ERR(ctrl->admin_q);
4903 goto out_free_tagset;
4904 }
4905
4906 if (ctrl->ops->flags & NVME_F_FABRICS) {
4907 ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL);
4908 if (IS_ERR(ctrl->fabrics_q)) {
4909 ret = PTR_ERR(ctrl->fabrics_q);
4910 goto out_cleanup_admin_q;
4911 }
4912 }
4913
4914 ctrl->admin_tagset = set;
4915 return 0;
4916
4917out_cleanup_admin_q:
4918 blk_mq_destroy_queue(ctrl->admin_q);
4919 blk_put_queue(ctrl->admin_q);
4920out_free_tagset:
4921 blk_mq_free_tag_set(set);
4922 ctrl->admin_q = NULL;
4923 ctrl->fabrics_q = NULL;
4924 return ret;
4925}
4926EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
4927
4928void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
4929{
4930 /*
4931	 * As we're about to destroy the queue and free the tagset,
4932	 * we cannot have keep-alive work running.
4933 */
4934 nvme_stop_keep_alive(ctrl);
4935 blk_mq_destroy_queue(ctrl->admin_q);
4936 if (ctrl->ops->flags & NVME_F_FABRICS) {
4937 blk_mq_destroy_queue(ctrl->fabrics_q);
4938 blk_put_queue(ctrl->fabrics_q);
4939 }
4940 blk_mq_free_tag_set(ctrl->admin_tagset);
4941}
4942EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
4943
4944int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
4945 const struct blk_mq_ops *ops, unsigned int nr_maps,
4946 unsigned int cmd_size)
4947{
4948 int ret;
4949
4950 memset(set, 0, sizeof(*set));
4951 set->ops = ops;
4952 set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
4953 /*
4954	 * Some Apple controllers require tags to be unique across admin and
4955 * the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
4956 */
4957 if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
4958 set->reserved_tags = NVME_AQ_DEPTH;
4959 else if (ctrl->ops->flags & NVME_F_FABRICS)
4960 /* Reserved for fabric connect */
4961 set->reserved_tags = 1;
4962 set->numa_node = ctrl->numa_node;
4963 if (ctrl->ops->flags & NVME_F_BLOCKING)
4964 set->flags |= BLK_MQ_F_BLOCKING;
4965 set->cmd_size = cmd_size;
4966 set->driver_data = ctrl;
4967 set->nr_hw_queues = ctrl->queue_count - 1;
4968 set->timeout = NVME_IO_TIMEOUT;
4969 set->nr_maps = nr_maps;
4970 ret = blk_mq_alloc_tag_set(set);
4971 if (ret)
4972 return ret;
4973
4974 if (ctrl->ops->flags & NVME_F_FABRICS) {
4975 struct queue_limits lim = {
4976 .features = BLK_FEAT_SKIP_TAGSET_QUIESCE,
4977 };
4978
4979 ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL);
4980 if (IS_ERR(ctrl->connect_q)) {
4981 ret = PTR_ERR(ctrl->connect_q);
4982 goto out_free_tag_set;
4983 }
4984 }
4985
4986 ctrl->tagset = set;
4987 return 0;
4988
4989out_free_tag_set:
4990 blk_mq_free_tag_set(set);
4991 ctrl->connect_q = NULL;
4992 return ret;
4993}
4994EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
4995
4996void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
4997{
4998 if (ctrl->ops->flags & NVME_F_FABRICS) {
4999 blk_mq_destroy_queue(ctrl->connect_q);
5000 blk_put_queue(ctrl->connect_q);
5001 }
5002 blk_mq_free_tag_set(ctrl->tagset);
5003}
5004EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
5005
5006void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
5007{
5008 nvme_mpath_stop(ctrl);
5009 nvme_auth_stop(ctrl);
5010 nvme_stop_failfast_work(ctrl);
5011 flush_work(&ctrl->async_event_work);
5012 cancel_work_sync(&ctrl->fw_act_work);
5013 if (ctrl->ops->stop_ctrl)
5014 ctrl->ops->stop_ctrl(ctrl);
5015}
5016EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
5017
5018void nvme_start_ctrl(struct nvme_ctrl *ctrl)
5019{
5020 nvme_enable_aen(ctrl);
5021
5022 /*
5023	 * Persistent discovery controllers need to send an indication to userspace
5024	 * to re-read the discovery log page and learn about possible changes
5025	 * that were missed. We identify persistent discovery controllers by
5026	 * checking that they have started once before, i.e. they are reconnecting.
5027 */
5028 if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
5029 nvme_discovery_ctrl(ctrl)) {
5030 if (!ctrl->kato) {
5031 nvme_stop_keep_alive(ctrl);
5032 ctrl->kato = NVME_DEFAULT_KATO;
5033 nvme_start_keep_alive(ctrl);
5034 }
5035 nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
5036 }
5037
5038 if (ctrl->queue_count > 1) {
5039 nvme_queue_scan(ctrl);
5040 nvme_unquiesce_io_queues(ctrl);
5041 nvme_mpath_update(ctrl);
5042 }
5043
5044 nvme_change_uevent(ctrl, "NVME_EVENT=connected");
5045 set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
5046}
5047EXPORT_SYMBOL_GPL(nvme_start_ctrl);
5048
5049void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
5050{
5051 nvme_stop_keep_alive(ctrl);
5052 nvme_hwmon_exit(ctrl);
5053 nvme_fault_inject_fini(&ctrl->fault_inject);
5054 dev_pm_qos_hide_latency_tolerance(ctrl->device);
5055 cdev_device_del(&ctrl->cdev, ctrl->device);
5056 nvme_put_ctrl(ctrl);
5057}
5058EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
5059
5060static void nvme_free_cels(struct nvme_ctrl *ctrl)
5061{
5062 struct nvme_effects_log *cel;
5063 unsigned long i;
5064
5065 xa_for_each(&ctrl->cels, i, cel) {
5066 xa_erase(&ctrl->cels, i);
5067 kfree(cel);
5068 }
5069
5070 xa_destroy(&ctrl->cels);
5071}
5072
5073static void nvme_free_ctrl(struct device *dev)
5074{
5075 struct nvme_ctrl *ctrl =
5076 container_of(dev, struct nvme_ctrl, ctrl_device);
5077 struct nvme_subsystem *subsys = ctrl->subsys;
5078
5079 if (ctrl->admin_q)
5080 blk_put_queue(ctrl->admin_q);
5081 if (!subsys || ctrl->instance != subsys->instance)
5082 ida_free(&nvme_instance_ida, ctrl->instance);
5083 nvme_free_cels(ctrl);
5084 nvme_mpath_uninit(ctrl);
5085 cleanup_srcu_struct(&ctrl->srcu);
5086 nvme_auth_stop(ctrl);
5087 nvme_auth_free(ctrl);
5088 __free_page(ctrl->discard_page);
5089 free_opal_dev(ctrl->opal_dev);
5090
5091 if (subsys) {
5092 mutex_lock(&nvme_subsystems_lock);
5093 list_del(&ctrl->subsys_entry);
5094 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
5095 mutex_unlock(&nvme_subsystems_lock);
5096 }
5097
5098 ctrl->ops->free_ctrl(ctrl);
5099
5100 if (subsys)
5101 nvme_put_subsystem(subsys);
5102}
5103
5104/*
5105 * Initialize an NVMe controller's structures. This needs to be called during
5106 * the earliest initialization so that the initialized structures are around
5107 * during probing.
5108 *
5109 * On success, the caller must use nvme_put_ctrl() to release the controller
5110 * when needed, which also invokes the ops->free_ctrl() callback.
5111 */
5112int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
5113 const struct nvme_ctrl_ops *ops, unsigned long quirks)
5114{
5115 int ret;
5116
5117 WRITE_ONCE(ctrl->state, NVME_CTRL_NEW);
5118 ctrl->passthru_err_log_enabled = false;
5119 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
5120 spin_lock_init(&ctrl->lock);
5121 mutex_init(&ctrl->namespaces_lock);
5122
5123 ret = init_srcu_struct(&ctrl->srcu);
5124 if (ret)
5125 return ret;
5126
5127 mutex_init(&ctrl->scan_lock);
5128 INIT_LIST_HEAD(&ctrl->namespaces);
5129 xa_init(&ctrl->cels);
5130 ctrl->dev = dev;
5131 ctrl->ops = ops;
5132 ctrl->quirks = quirks;
5133 ctrl->numa_node = NUMA_NO_NODE;
5134 INIT_WORK(&ctrl->scan_work, nvme_scan_work);
5135 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
5136 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
5137 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
5138 init_waitqueue_head(&ctrl->state_wq);
5139
5140 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
5141 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
5142 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
5143 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
5144 ctrl->ka_last_check_time = jiffies;
5145
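	/*
	 * Preallocate one page to hold DSM ranges; the discard path falls back
	 * to this page when it cannot allocate a payload, so NVME_DSM_MAX_RANGES
	 * ranges must fit into it (checked below).
	 */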
5146 BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
5147 PAGE_SIZE);
5148 ctrl->discard_page = alloc_page(GFP_KERNEL);
5149 if (!ctrl->discard_page) {
5150 ret = -ENOMEM;
5151 goto out;
5152 }
5153
5154 ret = ida_alloc(&nvme_instance_ida, GFP_KERNEL);
5155 if (ret < 0)
5156 goto out;
5157 ctrl->instance = ret;
5158
5159 ret = nvme_auth_init_ctrl(ctrl);
5160 if (ret)
5161 goto out_release_instance;
5162
5163 nvme_mpath_init_ctrl(ctrl);
5164
5165 device_initialize(&ctrl->ctrl_device);
5166 ctrl->device = &ctrl->ctrl_device;
5167 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
5168 ctrl->instance);
5169 ctrl->device->class = &nvme_class;
5170 ctrl->device->parent = ctrl->dev;
5171 if (ops->dev_attr_groups)
5172 ctrl->device->groups = ops->dev_attr_groups;
5173 else
5174 ctrl->device->groups = nvme_dev_attr_groups;
5175 ctrl->device->release = nvme_free_ctrl;
5176 dev_set_drvdata(ctrl->device, ctrl);
5177
5178 return ret;
5179
5180out_release_instance:
5181 ida_free(&nvme_instance_ida, ctrl->instance);
5182out:
5183 if (ctrl->discard_page)
5184 __free_page(ctrl->discard_page);
5185 cleanup_srcu_struct(&ctrl->srcu);
5186 return ret;
5187}
5188EXPORT_SYMBOL_GPL(nvme_init_ctrl);
5189
5190/*
5191 * On success, returns with an elevated controller reference and caller must
5192 * use nvme_uninit_ctrl() to properly free resources associated with the ctrl.
5193 */
5194int nvme_add_ctrl(struct nvme_ctrl *ctrl)
5195{
5196 int ret;
5197
5198 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
5199 if (ret)
5200 return ret;
5201
5202 cdev_init(&ctrl->cdev, &nvme_dev_fops);
5203 ctrl->cdev.owner = ctrl->ops->module;
5204 ret = cdev_device_add(&ctrl->cdev, ctrl->device);
5205 if (ret)
5206 return ret;
5207
5208 /*
5209 * Initialize latency tolerance controls. The sysfs files won't
5210 * be visible to userspace unless the device actually supports APST.
5211 */
5212 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
5213 dev_pm_qos_update_user_latency_tolerance(ctrl->device,
5214 min(default_ps_max_latency_us, (unsigned long)S32_MAX));
5215
5216 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
5217 nvme_get_ctrl(ctrl);
5218
5219 return 0;
5220}
5221EXPORT_SYMBOL_GPL(nvme_add_ctrl);
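/*
 * Illustrative sketch (not lifted from any particular transport) of how a
 * transport driver is expected to use the controller setup helpers above:
 *
 *	ret = nvme_init_ctrl(ctrl, dev, &my_transport_ctrl_ops, quirks);
 *	if (ret)
 *		return ret;
 *	ret = nvme_add_ctrl(ctrl);
 *	if (ret)
 *		goto out_put_ctrl;
 *	// transport specific admin queue setup, then:
 *	ret = nvme_init_ctrl_finish(ctrl, false);
 *	if (ret)
 *		goto out_uninit_ctrl;
 *	nvme_start_ctrl(ctrl);
 *
 * "my_transport_ctrl_ops" and the error labels are placeholders; see the pci,
 * tcp and rdma transports for the real call sequences.
 */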
5222
5223/* let I/O to all namespaces fail in preparation for surprise removal */
5224void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl)
5225{
5226 struct nvme_ns *ns;
5227 int srcu_idx;
5228
5229 srcu_idx = srcu_read_lock(&ctrl->srcu);
5230 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5231 srcu_read_lock_held(&ctrl->srcu))
5232 blk_mark_disk_dead(ns->disk);
5233 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5234}
5235EXPORT_SYMBOL_GPL(nvme_mark_namespaces_dead);
5236
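/*
 * Queue freeze helpers used around controller resets: nvme_start_freeze()
 * marks all namespace queues as frozen so no new requests enter the driver,
 * nvme_wait_freeze()/nvme_wait_freeze_timeout() wait for outstanding requests
 * to drain, and nvme_unfreeze() lifts the freeze again once the queues (and
 * possibly new limits) have been set up.
 */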
5237void nvme_unfreeze(struct nvme_ctrl *ctrl)
5238{
5239 struct nvme_ns *ns;
5240 int srcu_idx;
5241
5242 srcu_idx = srcu_read_lock(&ctrl->srcu);
5243 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5244 srcu_read_lock_held(&ctrl->srcu))
5245 blk_mq_unfreeze_queue_non_owner(ns->queue);
5246 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5247 clear_bit(NVME_CTRL_FROZEN, &ctrl->flags);
5248}
5249EXPORT_SYMBOL_GPL(nvme_unfreeze);
5250
5251int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
5252{
5253 struct nvme_ns *ns;
5254 int srcu_idx;
5255
5256 srcu_idx = srcu_read_lock(&ctrl->srcu);
5257 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5258 srcu_read_lock_held(&ctrl->srcu)) {
5259 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
5260 if (timeout <= 0)
5261 break;
5262 }
5263 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5264 return timeout;
5265}
5266EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
5267
5268void nvme_wait_freeze(struct nvme_ctrl *ctrl)
5269{
5270 struct nvme_ns *ns;
5271 int srcu_idx;
5272
5273 srcu_idx = srcu_read_lock(&ctrl->srcu);
5274 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5275 srcu_read_lock_held(&ctrl->srcu))
5276 blk_mq_freeze_queue_wait(ns->queue);
5277 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5278}
5279EXPORT_SYMBOL_GPL(nvme_wait_freeze);
5280
5281void nvme_start_freeze(struct nvme_ctrl *ctrl)
5282{
5283 struct nvme_ns *ns;
5284 int srcu_idx;
5285
5286 set_bit(NVME_CTRL_FROZEN, &ctrl->flags);
5287 srcu_idx = srcu_read_lock(&ctrl->srcu);
5288 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5289 srcu_read_lock_held(&ctrl->srcu))
5290 /*
5291		 * The typical non_owner use case is the PCI driver, in which
5292		 * start_freeze is called from the timeout work function but
5293		 * unfreeze is done in the reset work context.
5294 */
5295 blk_freeze_queue_start_non_owner(ns->queue);
5296 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5297}
5298EXPORT_SYMBOL_GPL(nvme_start_freeze);
5299
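/*
 * Quiescing, unlike freezing, only stops the block layer from dispatching
 * requests to the driver; already allocated requests stay queued and are
 * dispatched again after nvme_unquiesce_io_queues().
 */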
5300void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl)
5301{
5302 if (!ctrl->tagset)
5303 return;
5304 if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags))
5305 blk_mq_quiesce_tagset(ctrl->tagset);
5306 else
5307 blk_mq_wait_quiesce_done(ctrl->tagset);
5308}
5309EXPORT_SYMBOL_GPL(nvme_quiesce_io_queues);
5310
5311void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl)
5312{
5313 if (!ctrl->tagset)
5314 return;
5315 if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags))
5316 blk_mq_unquiesce_tagset(ctrl->tagset);
5317}
5318EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues);
5319
5320void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl)
5321{
5322 if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
5323 blk_mq_quiesce_queue(ctrl->admin_q);
5324 else
5325 blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
5326}
5327EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue);
5328
5329void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl)
5330{
5331 if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
5332 blk_mq_unquiesce_queue(ctrl->admin_q);
5333}
5334EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
5335
5336void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
5337{
5338 struct nvme_ns *ns;
5339 int srcu_idx;
5340
5341 srcu_idx = srcu_read_lock(&ctrl->srcu);
5342 list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
5343 srcu_read_lock_held(&ctrl->srcu))
5344 blk_sync_queue(ns->queue);
5345 srcu_read_unlock(&ctrl->srcu, srcu_idx);
5346}
5347EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
5348
5349void nvme_sync_queues(struct nvme_ctrl *ctrl)
5350{
5351 nvme_sync_io_queues(ctrl);
5352 if (ctrl->admin_q)
5353 blk_sync_queue(ctrl->admin_q);
5354}
5355EXPORT_SYMBOL_GPL(nvme_sync_queues);
5356
5357struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
5358{
5359 if (file->f_op != &nvme_dev_fops)
5360 return NULL;
5361 return file->private_data;
5362}
5363EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, "NVME_TARGET_PASSTHRU");
5364
5365/*
5366 * Check we didn't inadvertently grow the command structure sizes:
5367 */
5368static inline void _nvme_check_size(void)
5369{
5370 BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
5371 BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
5372 BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
5373 BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
5374 BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
5375 BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
5376 BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
5377 BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
5378 BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
5379 BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
5380 BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
5381 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
5382 BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
5383 BUILD_BUG_ON(sizeof(struct nvme_id_ns_cs_indep) !=
5384 NVME_IDENTIFY_DATA_SIZE);
5385 BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
5386 BUILD_BUG_ON(sizeof(struct nvme_id_ns_nvm) != NVME_IDENTIFY_DATA_SIZE);
5387 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
5388 BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE);
5389 BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
5390 BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
5391 BUILD_BUG_ON(sizeof(struct nvme_endurance_group_log) != 512);
5392 BUILD_BUG_ON(sizeof(struct nvme_rotational_media_log) != 512);
5393 BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
5394 BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
5395 BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512);
5396}
5397
5398
5399static int __init nvme_core_init(void)
5400{
5401 unsigned int wq_flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS;
5402 int result = -ENOMEM;
5403
5404 _nvme_check_size();
5405
5406 nvme_wq = alloc_workqueue("nvme-wq", wq_flags, 0);
5407 if (!nvme_wq)
5408 goto out;
5409
5410 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", wq_flags, 0);
5411 if (!nvme_reset_wq)
5412 goto destroy_wq;
5413
5414 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", wq_flags, 0);
5415 if (!nvme_delete_wq)
5416 goto destroy_reset_wq;
5417
5418 result = alloc_chrdev_region(&nvme_ctrl_base_chr_devt, 0,
5419 NVME_MINORS, "nvme");
5420 if (result < 0)
5421 goto destroy_delete_wq;
5422
5423 result = class_register(&nvme_class);
5424 if (result)
5425 goto unregister_chrdev;
5426
5427 result = class_register(&nvme_subsys_class);
5428 if (result)
5429 goto destroy_class;
5430
5431 result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
5432 "nvme-generic");
5433 if (result < 0)
5434 goto destroy_subsys_class;
5435
5436 result = class_register(&nvme_ns_chr_class);
5437 if (result)
5438 goto unregister_generic_ns;
5439
5440 result = nvme_init_auth();
5441 if (result)
5442 goto destroy_ns_chr;
5443 return 0;
5444
5445destroy_ns_chr:
5446 class_unregister(&nvme_ns_chr_class);
5447unregister_generic_ns:
5448 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
5449destroy_subsys_class:
5450 class_unregister(&nvme_subsys_class);
5451destroy_class:
5452 class_unregister(&nvme_class);
5453unregister_chrdev:
5454 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
5455destroy_delete_wq:
5456 destroy_workqueue(nvme_delete_wq);
5457destroy_reset_wq:
5458 destroy_workqueue(nvme_reset_wq);
5459destroy_wq:
5460 destroy_workqueue(nvme_wq);
5461out:
5462 return result;
5463}
5464
5465static void __exit nvme_core_exit(void)
5466{
5467 nvme_exit_auth();
5468 class_unregister(&nvme_ns_chr_class);
5469 class_unregister(&nvme_subsys_class);
5470 class_unregister(&nvme_class);
5471 unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
5472 unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
5473 destroy_workqueue(nvme_delete_wq);
5474 destroy_workqueue(nvme_reset_wq);
5475 destroy_workqueue(nvme_wq);
5476 ida_destroy(&nvme_ns_chr_minor_ida);
5477 ida_destroy(&nvme_instance_ida);
5478}
5479
5480MODULE_LICENSE("GPL");
5481MODULE_VERSION("1.0");
5482MODULE_DESCRIPTION("NVMe host core framework");
5483module_init(nvme_core_init);
5484module_exit(nvme_core_exit);