// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/part_stat.h>

#include <generated/utsrelease.h>
#include <linux/unaligned.h>
#include "nvmet.h"

static void nvmet_execute_delete_sq(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
	u16 status;

	if (!nvmet_is_pci_ctrl(ctrl)) {
		status = nvmet_report_invalid_opcode(req);
		goto complete;
	}

	if (!sqid) {
		status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
		goto complete;
	}

	status = nvmet_check_sqid(ctrl, sqid, false);
	if (status != NVME_SC_SUCCESS)
		goto complete;

	status = ctrl->ops->delete_sq(ctrl, sqid);

complete:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_create_sq(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_command *cmd = req->cmd;
	u16 sqid = le16_to_cpu(cmd->create_sq.sqid);
	u16 cqid = le16_to_cpu(cmd->create_sq.cqid);
	u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags);
	u16 qsize = le16_to_cpu(cmd->create_sq.qsize);
	u64 prp1 = le64_to_cpu(cmd->create_sq.prp1);
	u16 status;

	if (!nvmet_is_pci_ctrl(ctrl)) {
		status = nvmet_report_invalid_opcode(req);
		goto complete;
	}

	if (!sqid) {
		status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
		goto complete;
	}

	status = nvmet_check_sqid(ctrl, sqid, true);
	if (status != NVME_SC_SUCCESS)
		goto complete;

	status = nvmet_check_io_cqid(ctrl, cqid, false);
	if (status != NVME_SC_SUCCESS) {
		pr_err("SQ %u: Invalid CQID %u\n", sqid, cqid);
		goto complete;
	}

	if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
		status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
		goto complete;
	}

	status = ctrl->ops->create_sq(ctrl, sqid, cqid, sq_flags, qsize, prp1);

complete:
	nvmet_req_complete(req, status);
}

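/*
 * Worked example (added for illustration): in nvmet_execute_create_sq()
 * above, both the QSIZE field of the Create SQ command and the MQES field
 * of the CAP register are 0's based values, so the qsize check compares
 * like with like. Assuming MQES = 1023 (i.e. up to 1024 entries per
 * queue), QSIZE = 1023 passes the check, QSIZE = 1024 fails it, and
 * QSIZE = 0 is always rejected.
 */
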
static void nvmet_execute_delete_cq(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid);
	u16 status;

	if (!nvmet_is_pci_ctrl(ctrl)) {
		status = nvmet_report_invalid_opcode(req);
		goto complete;
	}

	status = nvmet_check_io_cqid(ctrl, cqid, false);
	if (status != NVME_SC_SUCCESS)
		goto complete;

	if (!ctrl->cqs[cqid] || nvmet_cq_in_use(ctrl->cqs[cqid])) {
		/* Some SQs are still using this CQ */
		status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
		goto complete;
	}

	status = ctrl->ops->delete_cq(ctrl, cqid);

complete:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_create_cq(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_command *cmd = req->cmd;
	u16 cqid = le16_to_cpu(cmd->create_cq.cqid);
	u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags);
	u16 qsize = le16_to_cpu(cmd->create_cq.qsize);
	u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector);
	u64 prp1 = le64_to_cpu(cmd->create_cq.prp1);
	u16 status;

	if (!nvmet_is_pci_ctrl(ctrl)) {
		status = nvmet_report_invalid_opcode(req);
		goto complete;
	}

	status = nvmet_check_io_cqid(ctrl, cqid, true);
	if (status != NVME_SC_SUCCESS)
		goto complete;

	if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) {
		status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR;
		goto complete;
	}

	status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize,
				      prp1, irq_vector);

complete:
	nvmet_req_complete(req, status);
}

u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

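/*
 * Worked example (added for illustration): the Number of Dwords (NUMD)
 * field is split across NUMDU (upper 16 bits) and NUMDL (lower 16 bits)
 * and is 0's based. For NUMDU = 0x0001 and NUMDL = 0x0003,
 * nvmet_get_log_page_len() above computes
 * ((0x0001 << 16) + 0x0003 + 1) * 4 = 0x40010 bytes.
 */
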
static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		return sizeof(req->sq->ctrl->hostid);
	default:
		return 0;
	}
}

u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{
	return le64_to_cpu(cmd->get_log_page.lpo);
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
}

static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	unsigned long flags;
	off_t offset = 0;
	u64 slot;
	u64 i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				      sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
	nvmet_req_complete(req, 0);
}

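/*
 * Worked example (added for illustration): err_counter only ever grows,
 * and the most recently written entry lives at slot
 * (err_counter % NVMET_ERROR_LOG_SLOTS). Assuming
 * NVMET_ERROR_LOG_SLOTS = 128 and err_counter = 130, the copy loop in
 * nvmet_execute_get_log_page_error() above starts at slot 2 and walks
 * backwards (2, 1, 0, 127, 126, ...), so the host sees the newest error
 * entry first.
 */
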
static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req)
{
	struct nvme_supported_log *logs;
	u16 status;

	logs = kzalloc(sizeof(*logs), GFP_KERNEL);
	if (!logs) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP);
	logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP);

	status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs));
	kfree(logs);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
				    struct nvme_smart_log *slog)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	/* we don't have the right data for file-backed namespaces */
	if (!req->ns->bdev)
		return NVME_SC_SUCCESS;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
				   struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;
	unsigned long idx;

	ctrl = req->sq->ctrl;
	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
		/* we don't have the right data for file-backed namespaces */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
	}

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

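/*
 * Worked example (added for illustration): the SMART "data units" fields
 * are reported in thousands of 512-byte units, which is why the sector
 * counts above are divided by 1000 with DIV_ROUND_UP(). A namespace that
 * has read 1,500,000 sectors reports data_units_read = 1500; one that has
 * read 1,500 sectors reports 2 (rounded up).
 */
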
static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req)
{
	struct nvme_rotational_media_log *log;
	struct gendisk *disk;
	u16 status;

	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
					    req->cmd->get_log_page.lsi));
	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	if (req->transfer_len != sizeof(*log)) {
		status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR;
		goto out;
	}

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	log->endgid = req->cmd->get_log_page.lsi;
	disk = req->ns->bdev->bd_disk;
	if (disk && disk->ia_ranges)
		log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges);
	else
		log->numa = cpu_to_le16(1);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			   &log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_get_cmd_effects_admin(struct nvmet_ctrl *ctrl,
					struct nvme_effects_log *log)
{
	/*
	 * For a PCI target controller, advertise support for the I/O queue
	 * creation and deletion admin commands.
	 */
	if (nvmet_is_pci_ctrl(ctrl)) {
		log->acs[nvme_admin_delete_sq] =
		log->acs[nvme_admin_create_sq] =
		log->acs[nvme_admin_delete_cq] =
		log->acs[nvme_admin_create_cq] =
			cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
	}

	log->acs[nvme_admin_get_log_page] =
	log->acs[nvme_admin_identify] =
	log->acs[nvme_admin_abort_cmd] =
	log->acs[nvme_admin_set_features] =
	log->acs[nvme_admin_get_features] =
	log->acs[nvme_admin_async_event] =
	log->acs[nvme_admin_keep_alive] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_read] =
	log->iocs[nvme_cmd_flush] =
	log->iocs[nvme_cmd_dsm] =
	log->iocs[nvme_cmd_resv_acquire] =
	log->iocs[nvme_cmd_resv_register] =
	log->iocs[nvme_cmd_resv_release] =
	log->iocs[nvme_cmd_resv_report] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
	log->iocs[nvme_cmd_write] =
	log->iocs[nvme_cmd_write_zeroes] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
}

static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
	log->iocs[nvme_cmd_zone_append] =
	log->iocs[nvme_cmd_zone_mgmt_send] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
	log->iocs[nvme_cmd_zone_mgmt_recv] =
		cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}

static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_effects_log *log;
	u16 status = NVME_SC_SUCCESS;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_admin(ctrl, log);
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_admin(ctrl, log);
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
free:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
				  struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
		}
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return struct_size(desc, nsids, count);
}

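/*
 * Worked example (added for illustration): struct_size(desc, nsids, count)
 * above evaluates to sizeof(*desc) + count * sizeof(desc->nsids[0]),
 * saturating at SIZE_MAX on overflow, so the returned length covers the
 * fixed descriptor header plus exactly the NSID entries that were filled
 * in, and nothing more.
 */
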
static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req)
{
	u64 host_reads, host_writes, data_units_read, data_units_written;
	struct nvme_endurance_group_log *log;
	u16 status;

	/*
	 * The target driver emulates each endurance group as its own
	 * namespace, reusing the nsid as the endurance group identifier.
	 */
	req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
					    req->cmd->get_log_page.lsi));
	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	if (!req->ns->bdev)
		goto copy;

	host_reads = part_stat_read(req->ns->bdev, ios[READ]);
	data_units_read =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
	host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
	data_units_written =
		DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);

	put_unaligned_le64(host_reads, &log->hrc[0]);
	put_unaligned_le64(data_units_read, &log->dur[0]);
	put_unaligned_le64(host_writes, &log->hwc[0]);
	put_unaligned_le64(data_units_written, &log->duw[0]);
copy:
	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
		       GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page_features(struct nvmet_req *req)
{
	struct nvme_supported_features_log *features;
	u16 status;

	features = kzalloc(sizeof(*features), GFP_KERNEL);
	if (!features) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	features->fis[NVME_FEAT_NUM_QUEUES] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_KATO] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_ASYNC_EVENT] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_HOST_ID] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE);
	features->fis[NVME_FEAT_WRITE_PROTECT] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);
	features->fis[NVME_FEAT_RESV_MASK] =
		cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE);

	status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features));
	kfree(features);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_page(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
		return;

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_SUPPORTED:
		return nvmet_execute_get_supported_log_pages(req);
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/*
		 * We only support a single firmware slot which always is
		 * active, so we can zero out the whole firmware slot log and
		 * still claim to fully implement this mandatory log page.
		 */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ENDURANCE_GROUP:
		return nvmet_execute_get_log_page_endgrp(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	case NVME_LOG_FEATURES:
		return nvmet_execute_get_log_page_features(req);
	case NVME_LOG_RMI:
		return nvmet_execute_get_log_page_rmi(req);
	case NVME_LOG_RESERVATION:
		return nvmet_execute_get_log_page_resv(req);
	}
	pr_debug("unhandled lid %d on qid %d\n",
		 req->cmd->get_log_page.lid, req->sq->qid);
	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvme_id_ctrl *id;
	u32 cmd_capsule_size, ctratt;
	u16 status = 0;

	if (!subsys->subsys_discovered) {
		mutex_lock(&subsys->lock);
		subsys->subsys_discovered = true;
		mutex_unlock(&subsys->lock);
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	id->vid = cpu_to_le16(subsys->vendor_id);
	id->ssvid = cpu_to_le16(subsys->subsys_vendor_id);

	memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
	memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number,
		       strlen(subsys->model_number), ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       subsys->firmware_rev, strlen(subsys->firmware_rev), ' ');

	put_unaligned_le24(subsys->ieee_oui, id->ieee);

	id->rab = 6;

	if (nvmet_is_disc_subsys(ctrl->subsys))
		id->cntrltype = NVME_CTRL_DISC;
	else
		id->cntrltype = NVME_CTRL_IO;

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
		NVME_CTRL_CMIC_ANA;

	/* Limit MDTS according to port config or transport capability */
	id->mdts = nvmet_ctrl_mdts(req);
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	ctratt = NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS;
	if (nvmet_is_pci_ctrl(ctrl))
		ctratt |= NVME_CTRL_ATTR_RHII;
	id->ctratt = cpu_to_le32(ctratt);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands. But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);

	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforced soft-limit for maxcmd - pick an arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));

	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES |
			NVME_CTRL_ONCS_RESERVATIONS);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than a LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	/* we always support SGLs */
	id->sgls = cpu_to_le32(NVME_CTRL_SGLS_BYTE_ALIGNED);
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_KSDBDS);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_SAOS);

	strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	/*
	 * Max command capsule size is sqe + in-capsule data size.
	 * Disable in-capsule data for Metadata capable controllers.
	 */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (!ctrl->pi_support)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);

	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/*
	 * Endurance group identifier is 16 bits, so we can't let namespaces
	 * overflow that since we reuse the nsid
	 */
	BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
	id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* arbitrary value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state. Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

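/*
 * Worked example (added for illustration): SQES and CQES encode the
 * minimum (lower nibble) and maximum (upper nibble) entry sizes as powers
 * of two. (0x6 << 4) | 0x6 above advertises SQ entries of exactly
 * 2^6 = 64 bytes, and (0x4 << 4) | 0x4 advertises CQ entries of exactly
 * 2^4 = 16 bytes, matching sizeof(struct nvme_command) and
 * sizeof(struct nvme_completion).
 */
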
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvme_id_ns *id;
	u16 status;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	status = nvmet_req_find_ns(req);
	if (status) {
		status = 0;
		goto done;
	}

	if (nvmet_ns_revalidate(req->ns)) {
		mutex_lock(&req->ns->subsys->lock);
		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
		mutex_unlock(&req->ns->subsys->lock);
	}

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	if (req->ns->bdev)
		nvmet_bdev_set_limits(req->ns->bdev, id);

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared. Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = NVME_NS_NMIC_SHARED;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);

	if (req->ns->pr.enable)
		id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE |
			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS |
			NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY |
			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY |
			NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS |
			NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS |
			NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF;

	/*
	 * Since we don't know any better, every namespace is its own endurance
	 * group.
	 */
	id->endgid = cpu_to_le16(req->ns->nsid);

	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = req->ns->blksize_shift;

	if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
			  NVME_NS_DPC_PI_TYPE3;
		id->mc = NVME_MC_EXTENDED_LBA;
		id->dps = req->ns->pi_type;
		id->flbas = NVME_NS_FLBAS_META_EXT;
		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
	}

	if (req->ns->readonly)
		id->nsattr |= NVME_NS_ATTR_RO;
done:
	if (!status)
		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
{
	u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid);
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	__le16 *list;
	u16 status;
	int i = 1;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_endgid)
			continue;

		list[i++] = cpu_to_le16(ns->nsid);
		if (i == buf_size / sizeof(__le16))
			break;
	}

	list[0] = cpu_to_le16(i - 1);
	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	unsigned long idx;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	/*
	 * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid.
	 * See NVMe Base Specification, Active Namespace ID list (CNS 02h).
	 */
	if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);
		status = NVME_SC_INVALID_NS | NVME_STATUS_DNR;
		goto out;
	}

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		if (match_css && ns->csi != req->cmd->identify.csi)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}

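/*
 * Layout example (added for illustration): each namespace identification
 * descriptor written above is a 4-byte header followed by the identifier
 * itself, so a UUID descriptor occupies 4 + NVME_NIDT_UUID_LEN = 20 bytes:
 *
 *   byte 0: NIDT (type)     byte 1: NIDL (length)
 *   bytes 2-3: reserved     bytes 4..19: the UUID
 */
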
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	off_t off = 0;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &req->ns->uuid, &off);
		if (status)
			goto out;
	}
	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &req->ns->nguid, &off);
		if (status)
			goto out;
	}

	status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
					  NVME_NIDT_CSI_LEN,
					  &req->ns->csi, &off);
	if (status)
		goto out;

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			   off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_STATUS_DNR;

out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
{
	/* Not supported: return zeroes */
	nvmet_req_complete(req,
		   nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
}

static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
{
	u16 status;
	struct nvme_id_ns_nvm *id;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}
	if (req->ns->bdev)
		nvmet_bdev_set_nvm_limits(req->ns->bdev, id);
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_id_cs_indep(struct nvmet_req *req)
{
	struct nvme_id_ns_cs_indep *id;
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		goto out;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	id->nstat = NVME_NSTAT_NRDY;
	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
	id->nmic = NVME_NS_NMIC_SHARED;
	if (req->ns->readonly)
		id->nsattr |= NVME_NS_ATTR_RO;
	if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
		id->nsfeat |= NVME_NS_ROTATIONAL;
	/*
	 * We need the flush command to flush the file's metadata, so report
	 * VWC as present for file-backed namespaces even when buffered_io
	 * is disabled.
	 */
	if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
		id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		nvmet_execute_identify_ns(req);
		return;
	case NVME_ID_CNS_CTRL:
		nvmet_execute_identify_ctrl(req);
		return;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		nvmet_execute_identify_nslist(req, false);
		return;
	case NVME_ID_CNS_NS_DESC_LIST:
		nvmet_execute_identify_desclist(req);
		return;
	case NVME_ID_CNS_CS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			nvme_execute_identify_ns_nvm(req);
			return;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ns_zns(req);
				return;
			}
			break;
		}
		break;
	case NVME_ID_CNS_CS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			nvmet_execute_identify_ctrl_nvm(req);
			return;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ctrl_zns(req);
				return;
			}
			break;
		}
		break;
	case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
		nvmet_execute_identify_nslist(req, true);
		return;
	case NVME_ID_CNS_NS_CS_INDEP:
		nvmet_execute_id_cs_indep(req);
		return;
	case NVME_ID_CNS_ENDGRP_LIST:
		nvmet_execute_identify_endgrp_list(req);
		return;
	}

	pr_debug("unhandled identify cns %d on qid %d\n",
		 req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work. Since we cannot
 * abort anything in a useful way, don't bother waiting for the command to
 * be executed; return immediately and indicate that the command to abort
 * was not found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}

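/*
 * Result example (added for illustration): for Abort, bit 0 of the
 * completion queue entry's Dword 0 indicates whether the command was
 * aborted. nvmet_set_result(req, 1) above sets that bit, telling the host
 * the specified command was not aborted, which is a spec-compliant way of
 * doing no work at all.
 */
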
static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}

static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u16 status;

	status = nvmet_req_find_ns(req);
	if (status)
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	nvmet_stop_keep_alive_timer(req->sq->ctrl);
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

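/*
 * Worked example (added for illustration): KATO arrives in cdw11 in
 * milliseconds while ctrl->kato is kept in seconds, hence the
 * DIV_ROUND_UP() above. A host-requested KATO of 2500 ms is rounded up to
 * 3 seconds, and that rounded value is what is handed back to the host in
 * the command result.
 */
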
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);

	return 0;
}

static u16 nvmet_set_feat_host_id(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_is_pci_ctrl(ctrl))
		return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;

	/*
	 * The NVMe Base Specification v2.1 recommends supporting 128-bit host
	 * IDs (section 5.1.25.1.28.1). However, that same section also says
	 * that "The controller may support a 64-bit Host Identifier and/or an
	 * extended 128-bit Host Identifier". So simplify this support and do
	 * not support 64-bit host IDs to avoid needing to check that all
	 * controllers associated with the same subsystem all use the same host
	 * ID size.
	 */
	if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid,
				   sizeof(req->sq->ctrl->hostid));
}

static u16 nvmet_set_feat_irq_coalesce(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_feat_irq_coalesce irqc = {
		.time = (cdw11 >> 8) & 0xff,
		.thr = cdw11 & 0xff,
	};

	/*
	 * This feature is not supported for fabrics controllers and is
	 * mandatory for PCI controllers.
	 */
	if (!nvmet_is_pci_ctrl(ctrl)) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
}

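/*
 * Layout example (added for illustration, per the NVMe Interrupt
 * Coalescing feature): bits 7:0 of cdw11 carry the aggregation threshold
 * (THR) and bits 15:8 the aggregation time (TIME, in 100 microsecond
 * increments), so cdw11 = 0x2004 decoded above requests a threshold of 4
 * completions and a time of 0x20 (32) increments.
 */
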
static u16 nvmet_set_feat_irq_config(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_feat_irq_config irqcfg = {
		.iv = cdw11 & 0xffff,
		.cd = (cdw11 >> 16) & 0x1,
	};

	/*
	 * This feature is not supported for fabrics controllers and is
	 * mandatory for PCI controllers.
	 */
	if (!nvmet_is_pci_ctrl(ctrl)) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
}

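/*
 * Layout example (added for illustration, per the NVMe Interrupt Vector
 * Configuration feature): bits 15:0 of cdw11 select the interrupt vector
 * (IV) and bit 16 is Coalescing Disable (CD), so cdw11 = 0x00010003
 * decoded above disables coalescing for vector 3.
 */
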
static u16 nvmet_set_feat_arbitration(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	struct nvmet_feat_arbitration arb = {
		.hpw = (cdw11 >> 24) & 0xff,
		.mpw = (cdw11 >> 16) & 0xff,
		.lpw = (cdw11 >> 8) & 0xff,
		.ab = cdw11 & 0x3,
	};

	if (!ctrl->ops->set_feature) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
}

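/*
 * Layout example (added for illustration, per the NVMe Arbitration
 * feature): the high, medium and low priority weights (HPW/MPW/LPW) live
 * in bits 31:24, 23:16 and 15:8 of cdw11, and the arbitration burst (AB)
 * in the low bits (masked to two bits above). cdw11 = 0x07030102 thus
 * requests HPW = 7, MPW = 3, LPW = 1 and AB = 2, i.e. a burst of
 * 2^2 = 4 commands.
 */
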
void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u16 status = 0;
	u16 nsqr;
	u16 ncqr;

	if (!nvmet_check_data_len_lte(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_ARBITRATION:
		status = nvmet_set_feat_arbitration(req);
		break;
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_IRQ_COALESCE:
		status = nvmet_set_feat_irq_coalesce(req);
		break;
	case NVME_FEAT_IRQ_CONFIG:
		status = nvmet_set_feat_irq_config(req);
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = nvmet_set_feat_host_id(req);
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	case NVME_FEAT_RESV_MASK:
		status = nvmet_set_feat_resv_notif_mask(req, cdw11);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 result;

	result = nvmet_req_find_ns(req);
	if (result)
		return result;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

static u16 nvmet_get_feat_irq_coalesce(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_feat_irq_coalesce irqc = { };
	u16 status;

	/*
	 * This feature is not supported for fabrics controllers and is
	 * mandatory for PCI controllers.
	 */
	if (!nvmet_is_pci_ctrl(ctrl)) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc);
	if (status != NVME_SC_SUCCESS)
		return status;

	nvmet_set_result(req, ((u32)irqc.time << 8) | (u32)irqc.thr);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_feat_irq_config(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff;
	struct nvmet_feat_irq_config irqcfg = { .iv = iv };
	u16 status;

	/*
	 * This feature is not supported for fabrics controllers and is
	 * mandatory for PCI controllers.
	 */
	if (!nvmet_is_pci_ctrl(ctrl)) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg);
	if (status != NVME_SC_SUCCESS)
		return status;

	nvmet_set_result(req, ((u32)irqcfg.cd << 16) | iv);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_feat_arbitration(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_feat_arbitration arb = { };
	u16 status;

	if (!ctrl->ops->get_feature) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb);
	if (status != NVME_SC_SUCCESS)
		return status;

	nvmet_set_result(req,
			 ((u32)arb.hpw << 24) |
			 ((u32)arb.mpw << 16) |
			 ((u32)arb.lpw << 8) |
			 (arb.ab & 0x3));

	return NVME_SC_SUCCESS;
}

void nvmet_get_feat_kato(struct nvmet_req *req)
{
	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
}

void nvmet_get_feat_async_event(struct nvmet_req *req)
{
	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
}

void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = nvmet_req_subsys(req);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
		return;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them. We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ARBITRATION:
		status = nvmet_get_feat_arbitration(req);
		break;
	case NVME_FEAT_IRQ_COALESCE:
		status = nvmet_get_feat_irq_coalesce(req);
		break;
	case NVME_FEAT_IRQ_CONFIG:
		status = nvmet_get_feat_irq_config(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
					   sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	case NVME_FEAT_RESV_MASK:
		status = nvmet_get_feat_resv_notif_mask(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		 ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_percpu_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);
}

u32 nvmet_admin_cmd_data_len(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (nvme_is_fabrics(cmd))
		return nvmet_fabrics_admin_cmd_data_len(req);
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_discovery_cmd_data_len(req);

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		return nvmet_get_log_page_len(cmd);
	case nvme_admin_identify:
		return NVME_IDENTIFY_DATA_SIZE;
	case nvme_admin_get_features:
		return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10));
	default:
		return 0;
	}
}

u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_admin_cmd(req);
	if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
		return nvmet_parse_discovery_cmd(req);

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	/* For PCI controllers, admin commands shall not use SGL. */
	if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid &&
	    cmd->common.flags & NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_admin_cmd(req);

	switch (cmd->common.opcode) {
	case nvme_admin_delete_sq:
		req->execute = nvmet_execute_delete_sq;
		return 0;
	case nvme_admin_create_sq:
		req->execute = nvmet_execute_create_sq;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_delete_cq:
		req->execute = nvmet_execute_delete_cq;
		return 0;
	case nvme_admin_create_cq:
		req->execute = nvmet_execute_create_cq;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}
}