Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Sysfs interface for the NVMe core driver.
4 *
5 * Copyright (c) 2011-2014, Intel Corporation.
6 */
7
8#include <linux/nvme-auth.h>
9
10#include "nvme.h"
11#include "fabrics.h"
12
13static ssize_t nvme_sysfs_reset(struct device *dev,
14 struct device_attribute *attr, const char *buf,
15 size_t count)
16{
17 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
18 int ret;
19
20 ret = nvme_reset_ctrl_sync(ctrl);
21 if (ret < 0)
22 return ret;
23 return count;
24}
25static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
26
27static ssize_t nvme_sysfs_rescan(struct device *dev,
28 struct device_attribute *attr, const char *buf,
29 size_t count)
30{
31 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
32
33 nvme_queue_scan(ctrl);
34 return count;
35}
36static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
37
38static ssize_t nvme_adm_passthru_err_log_enabled_show(struct device *dev,
39 struct device_attribute *attr, char *buf)
40{
41 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
42
43 return sysfs_emit(buf,
44 ctrl->passthru_err_log_enabled ? "on\n" : "off\n");
45}
46
47static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
48 struct device_attribute *attr, const char *buf, size_t count)
49{
50 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
51 bool passthru_err_log_enabled;
52 int err;
53
54 err = kstrtobool(buf, &passthru_err_log_enabled);
55 if (err)
56 return -EINVAL;
57
58 ctrl->passthru_err_log_enabled = passthru_err_log_enabled;
59
60 return count;
61}
62
63static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
64{
65 struct gendisk *disk = dev_to_disk(dev);
66
67 if (nvme_disk_is_ns_head(disk))
68 return disk->private_data;
69 return nvme_get_ns_from_dev(dev)->head;
70}
71
72static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
73 struct device_attribute *attr, char *buf)
74{
75 struct nvme_ns_head *head = dev_to_ns_head(dev);
76
77 return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
78}
79
80static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
81 struct device_attribute *attr, const char *buf, size_t count)
82{
83 struct nvme_ns_head *head = dev_to_ns_head(dev);
84 bool passthru_err_log_enabled;
85 int err;
86
87 err = kstrtobool(buf, &passthru_err_log_enabled);
88 if (err)
89 return -EINVAL;
90 head->passthru_err_log_enabled = passthru_err_log_enabled;
91
92 return count;
93}
94
95static struct device_attribute dev_attr_adm_passthru_err_log_enabled = \
96 __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
97 nvme_adm_passthru_err_log_enabled_show, nvme_adm_passthru_err_log_enabled_store);
98
99static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
100 __ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
101 nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);
102
/*
 * "wwid": a stable unique identifier for the namespace.  Preference order
 * is UUID, then NGUID, then EUI-64; if none was reported, synthesize an
 * identifier from vendor id, serial, model and nsid.
 */
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns_head *head = dev_to_ns_head(dev);
	struct nvme_ns_ids *ids = &head->ids;
	struct nvme_subsystem *subsys = head->subsys;
	int serial_len = sizeof(subsys->serial);
	int model_len = sizeof(subsys->model);

	if (!uuid_is_null(&ids->uuid))
		return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid);

	/* all-zeroes means the identifier was not reported */
	if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
		return sysfs_emit(buf, "eui.%16phN\n", ids->nguid);

	if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
		return sysfs_emit(buf, "eui.%8phN\n", ids->eui64);

	/* trim trailing spaces/NULs from the fixed-width serial and model */
	while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
				  subsys->serial[serial_len - 1] == '\0'))
		serial_len--;
	while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
				 subsys->model[model_len - 1] == '\0'))
		model_len--;

	return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
		serial_len, subsys->serial, model_len, subsys->model,
		head->ns_id);
}
static DEVICE_ATTR_RO(wwid);
133
134static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
135 char *buf)
136{
137 return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
138}
139static DEVICE_ATTR_RO(nguid);
140
141static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
142 char *buf)
143{
144 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
145
146 /* For backward compatibility expose the NGUID to userspace if
147 * we have no UUID set
148 */
149 if (uuid_is_null(&ids->uuid)) {
150 dev_warn_once(dev,
151 "No UUID available providing old NGUID\n");
152 return sysfs_emit(buf, "%pU\n", ids->nguid);
153 }
154 return sysfs_emit(buf, "%pU\n", &ids->uuid);
155}
156static DEVICE_ATTR_RO(uuid);
157
158static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
159 char *buf)
160{
161 return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
162}
163static DEVICE_ATTR_RO(eui);
164
165static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
166 char *buf)
167{
168 return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
169}
170static DEVICE_ATTR_RO(nsid);
171
172static ssize_t csi_show(struct device *dev, struct device_attribute *attr,
173 char *buf)
174{
175 return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ids.csi);
176}
177static DEVICE_ATTR_RO(csi);
178
179static ssize_t metadata_bytes_show(struct device *dev,
180 struct device_attribute *attr, char *buf)
181{
182 return sysfs_emit(buf, "%u\n", dev_to_ns_head(dev)->ms);
183}
184static DEVICE_ATTR_RO(metadata_bytes);
185
/*
 * Refresh head->nuse for a multipath ns_head by issuing Identify Namespace
 * on any available path.  Returns 0 on success or when rate-limited,
 * -EWOULDBLOCK when no path is available, or the Identify error code.
 */
static int ns_head_update_nuse(struct nvme_ns_head *head)
{
	struct nvme_id_ns *id;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	/* Avoid issuing commands too often by rate limiting the update */
	if (!__ratelimit(&head->rs_nuse))
		return 0;

	/* paths are protected by SRCU; hold the read lock while using ns */
	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (!ns)
		goto out_unlock;

	ret = nvme_identify_ns(ns->ctrl, head->ns_id, &id);
	if (ret)
		goto out_unlock;

	head->nuse = le64_to_cpu(id->nuse);
	kfree(id);

out_unlock:
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
212
213static int ns_update_nuse(struct nvme_ns *ns)
214{
215 struct nvme_id_ns *id;
216 int ret;
217
218 /* Avoid issuing commands too often by rate limiting the update. */
219 if (!__ratelimit(&ns->head->rs_nuse))
220 return 0;
221
222 ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
223 if (ret)
224 return ret;
225
226 ns->head->nuse = le64_to_cpu(id->nuse);
227 kfree(id);
228 return 0;
229}
230
231static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
232 char *buf)
233{
234 struct nvme_ns_head *head = dev_to_ns_head(dev);
235 struct gendisk *disk = dev_to_disk(dev);
236 int ret;
237
238 if (nvme_disk_is_ns_head(disk))
239 ret = ns_head_update_nuse(head);
240 else
241 ret = ns_update_nuse(disk->private_data);
242 if (ret)
243 return ret;
244
245 return sysfs_emit(buf, "%llu\n", head->nuse);
246}
247static DEVICE_ATTR_RO(nuse);
248
/* Per-namespace attributes; visibility filtered by nvme_ns_attrs_are_visible(). */
static struct attribute *nvme_ns_attrs[] = {
	&dev_attr_wwid.attr,
	&dev_attr_uuid.attr,
	&dev_attr_nguid.attr,
	&dev_attr_eui.attr,
	&dev_attr_csi.attr,
	&dev_attr_nsid.attr,
	&dev_attr_metadata_bytes.attr,
	&dev_attr_nuse.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&dev_attr_ana_grpid.attr,
	&dev_attr_ana_state.attr,
	&dev_attr_queue_depth.attr,
	&dev_attr_numa_nodes.attr,
	&dev_attr_delayed_removal_secs.attr,
#endif
	&dev_attr_io_passthru_err_log_enabled.attr,
	NULL,
};
268
/*
 * Hide identifier attributes the device did not report, and multipath
 * attributes on objects they do not apply to.
 */
static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;

	if (a == &dev_attr_uuid.attr) {
		/* uuid_show() falls back to NGUID, so keep the file if either exists */
		if (uuid_is_null(&ids->uuid) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_nguid.attr) {
		if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			return 0;
	}
	if (a == &dev_attr_eui.attr) {
		if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			return 0;
	}
#ifdef CONFIG_NVME_MULTIPATH
	if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) {
		/* per-path attr */
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
			return 0;
	}
	if (a == &dev_attr_queue_depth.attr || a == &dev_attr_numa_nodes.attr) {
		/* also per-path only: hidden on the shared ns_head node */
		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
			return 0;
	}
	if (a == &dev_attr_delayed_removal_secs.attr) {
		struct gendisk *disk = dev_to_disk(dev);

		/* only meaningful on the shared ns_head node */
		if (!nvme_disk_is_ns_head(disk))
			return 0;
	}
#endif
	return a->mode;
}
309
/* Default (unnamed) attribute group attached to every namespace device. */
static const struct attribute_group nvme_ns_attr_group = {
	.attrs = nvme_ns_attrs,
	.is_visible = nvme_ns_attrs_are_visible,
};
314
#ifdef CONFIG_NVME_MULTIPATH
/*
 * NOTE: The dummy attribute does not appear in sysfs. It exists solely to allow
 * control over the visibility of the multipath sysfs node. Without at least one
 * attribute defined in nvme_ns_mpath_attrs[], the sysfs implementation does not
 * invoke the multipath_sysfs_group_visible() method. As a result, we would not
 * be able to control the visibility of the multipath sysfs node.
 */
static struct attribute dummy_attr = {
	.name = "dummy",
};

static struct attribute *nvme_ns_mpath_attrs[] = {
	&dummy_attr,
	NULL,
};

/* The "multipath" group is shown only on the shared ns_head disk node. */
static bool multipath_sysfs_group_visible(struct kobject *kobj)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	return nvme_disk_is_ns_head(dev_to_disk(dev));
}

/* Hide every individual attribute (only the dummy one exists anyway). */
static bool multipath_sysfs_attr_visible(struct kobject *kobj,
		struct attribute *attr, int n)
{
	return false;
}

DEFINE_SYSFS_GROUP_VISIBLE(multipath_sysfs)

const struct attribute_group nvme_ns_mpath_attr_group = {
	.name = "multipath",
	.attrs = nvme_ns_mpath_attrs,
	.is_visible = SYSFS_GROUP_VISIBLE(multipath_sysfs),
};
#endif
353
/* All attribute groups registered on namespace devices. */
const struct attribute_group *nvme_ns_attr_groups[] = {
	&nvme_ns_attr_group,
#ifdef CONFIG_NVME_MULTIPATH
	&nvme_ns_mpath_attr_group,
#endif
	NULL,
};
361
/*
 * Generate a read-only controller attribute printing a fixed-width string
 * field of the owning subsystem; "%.*s" bounds the print because these
 * fields are space-padded and not necessarily NUL-terminated.
 */
#define nvme_show_str_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sysfs_emit(buf, "%.*s\n",					\
		(int)sizeof(ctrl->subsys->field), ctrl->subsys->field);		\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_str_function(model);
nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
375
/* Generate a read-only controller attribute printing an integer field. */
#define nvme_show_int_function(field)						\
static ssize_t  field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)		\
{										\
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);				\
	return sysfs_emit(buf, "%d\n", ctrl->field);				\
}										\
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);

nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
nvme_show_int_function(sqsize);
nvme_show_int_function(kato);
390
/*
 * "delete_controller": any write tears down the controller.  The attribute
 * removes its own sysfs file first via device_remove_file_self(); only the
 * writer that wins that removal performs the synchronous delete, which
 * avoids deadlocking against the sysfs entry being torn down underneath us.
 */
static ssize_t nvme_sysfs_delete(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	/* refuse deletion before the controller ever started (see flag name) */
	if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
		return -EBUSY;

	if (device_remove_file_self(dev, attr))
		nvme_delete_ctrl_sync(ctrl);
	return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
405
406static ssize_t nvme_sysfs_show_transport(struct device *dev,
407 struct device_attribute *attr,
408 char *buf)
409{
410 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
411
412 return sysfs_emit(buf, "%s\n", ctrl->ops->name);
413}
414static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
415
416static ssize_t nvme_sysfs_show_state(struct device *dev,
417 struct device_attribute *attr,
418 char *buf)
419{
420 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
421 unsigned state = (unsigned)nvme_ctrl_state(ctrl);
422 static const char *const state_name[] = {
423 [NVME_CTRL_NEW] = "new",
424 [NVME_CTRL_LIVE] = "live",
425 [NVME_CTRL_RESETTING] = "resetting",
426 [NVME_CTRL_CONNECTING] = "connecting",
427 [NVME_CTRL_DELETING] = "deleting",
428 [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)",
429 [NVME_CTRL_DEAD] = "dead",
430 };
431
432 if (state < ARRAY_SIZE(state_name) && state_name[state])
433 return sysfs_emit(buf, "%s\n", state_name[state]);
434
435 return sysfs_emit(buf, "unknown state\n");
436}
437
438static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
439
440static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
441 struct device_attribute *attr,
442 char *buf)
443{
444 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
445
446 return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
447}
448static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
449
450static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
451 struct device_attribute *attr,
452 char *buf)
453{
454 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
455
456 return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
457}
458static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
459
460static ssize_t nvme_sysfs_show_hostid(struct device *dev,
461 struct device_attribute *attr,
462 char *buf)
463{
464 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
465
466 return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
467}
468static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
469
470static ssize_t nvme_sysfs_show_address(struct device *dev,
471 struct device_attribute *attr,
472 char *buf)
473{
474 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
475
476 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
477}
478static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
479
480static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev,
481 struct device_attribute *attr, char *buf)
482{
483 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
484 struct nvmf_ctrl_options *opts = ctrl->opts;
485
486 if (ctrl->opts->max_reconnects == -1)
487 return sysfs_emit(buf, "off\n");
488 return sysfs_emit(buf, "%d\n",
489 opts->max_reconnects * opts->reconnect_delay);
490}
491
492static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev,
493 struct device_attribute *attr, const char *buf, size_t count)
494{
495 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
496 struct nvmf_ctrl_options *opts = ctrl->opts;
497 int ctrl_loss_tmo, err;
498
499 err = kstrtoint(buf, 10, &ctrl_loss_tmo);
500 if (err)
501 return -EINVAL;
502
503 if (ctrl_loss_tmo < 0)
504 opts->max_reconnects = -1;
505 else
506 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
507 opts->reconnect_delay);
508 return count;
509}
510static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR,
511 nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store);
512
513static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev,
514 struct device_attribute *attr, char *buf)
515{
516 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
517
518 if (ctrl->opts->reconnect_delay == -1)
519 return sysfs_emit(buf, "off\n");
520 return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay);
521}
522
523static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev,
524 struct device_attribute *attr, const char *buf, size_t count)
525{
526 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
527 unsigned int v;
528 int err;
529
530 err = kstrtou32(buf, 10, &v);
531 if (err)
532 return err;
533
534 ctrl->opts->reconnect_delay = v;
535 return count;
536}
537static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR,
538 nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store);
539
540static ssize_t nvme_ctrl_fast_io_fail_tmo_show(struct device *dev,
541 struct device_attribute *attr, char *buf)
542{
543 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
544
545 if (ctrl->opts->fast_io_fail_tmo == -1)
546 return sysfs_emit(buf, "off\n");
547 return sysfs_emit(buf, "%d\n", ctrl->opts->fast_io_fail_tmo);
548}
549
550static ssize_t nvme_ctrl_fast_io_fail_tmo_store(struct device *dev,
551 struct device_attribute *attr, const char *buf, size_t count)
552{
553 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
554 struct nvmf_ctrl_options *opts = ctrl->opts;
555 int fast_io_fail_tmo, err;
556
557 err = kstrtoint(buf, 10, &fast_io_fail_tmo);
558 if (err)
559 return -EINVAL;
560
561 if (fast_io_fail_tmo < 0)
562 opts->fast_io_fail_tmo = -1;
563 else
564 opts->fast_io_fail_tmo = fast_io_fail_tmo;
565 return count;
566}
567static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR,
568 nvme_ctrl_fast_io_fail_tmo_show, nvme_ctrl_fast_io_fail_tmo_store);
569
570static ssize_t cntrltype_show(struct device *dev,
571 struct device_attribute *attr, char *buf)
572{
573 static const char * const type[] = {
574 [NVME_CTRL_IO] = "io\n",
575 [NVME_CTRL_DISC] = "discovery\n",
576 [NVME_CTRL_ADMIN] = "admin\n",
577 };
578 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
579
580 if (ctrl->cntrltype > NVME_CTRL_ADMIN || !type[ctrl->cntrltype])
581 return sysfs_emit(buf, "reserved\n");
582
583 return sysfs_emit(buf, type[ctrl->cntrltype]);
584}
585static DEVICE_ATTR_RO(cntrltype);
586
587static ssize_t dctype_show(struct device *dev,
588 struct device_attribute *attr, char *buf)
589{
590 static const char * const type[] = {
591 [NVME_DCTYPE_NOT_REPORTED] = "none\n",
592 [NVME_DCTYPE_DDC] = "ddc\n",
593 [NVME_DCTYPE_CDC] = "cdc\n",
594 };
595 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
596
597 if (ctrl->dctype > NVME_DCTYPE_CDC || !type[ctrl->dctype])
598 return sysfs_emit(buf, "reserved\n");
599
600 return sysfs_emit(buf, type[ctrl->dctype]);
601}
602static DEVICE_ATTR_RO(dctype);
603
604static ssize_t quirks_show(struct device *dev, struct device_attribute *attr,
605 char *buf)
606{
607 int count = 0, i;
608 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
609 unsigned long quirks = ctrl->quirks;
610
611 if (!quirks)
612 return sysfs_emit(buf, "none\n");
613
614 for (i = 0; quirks; ++i) {
615 if (quirks & 1) {
616 count += sysfs_emit_at(buf, count, "%s\n",
617 nvme_quirk_name(BIT(i)));
618 }
619 quirks >>= 1;
620 }
621
622 return count;
623}
624static DEVICE_ATTR_RO(quirks);
625
626#ifdef CONFIG_NVME_HOST_AUTH
627static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
628 struct device_attribute *attr, char *buf)
629{
630 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
631 struct nvmf_ctrl_options *opts = ctrl->opts;
632
633 if (!opts->dhchap_secret)
634 return sysfs_emit(buf, "none\n");
635 return sysfs_emit(buf, "%s\n", opts->dhchap_secret);
636}
637
/*
 * Replace the host DH-HMAC-CHAP secret and kick off re-authentication.
 * Only updating an already-configured secret is allowed; input must carry
 * the "DHHC-1:" transport encoding prefix.
 */
static ssize_t nvme_ctrl_dhchap_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	/* +1 so the unterminated sysfs input becomes a C string */
	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	/* only swap keys when the secret actually changed */
	if (strcmp(dhchap_secret, opts->dhchap_secret)) {
		struct nvme_dhchap_key *key, *host_key;
		int ret;

		ret = nvme_auth_parse_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_secret);
		opts->dhchap_secret = dhchap_secret;	/* ownership moves to opts */
		/*
		 * NOTE(review): host_key is read before taking
		 * dhchap_auth_mutex — confirm no concurrent writer can
		 * change ctrl->host_key here.
		 */
		host_key = ctrl->host_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->host_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(host_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_secret_show, nvme_ctrl_dhchap_secret_store);
684
685static ssize_t nvme_ctrl_dhchap_ctrl_secret_show(struct device *dev,
686 struct device_attribute *attr, char *buf)
687{
688 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
689 struct nvmf_ctrl_options *opts = ctrl->opts;
690
691 if (!opts->dhchap_ctrl_secret)
692 return sysfs_emit(buf, "none\n");
693 return sysfs_emit(buf, "%s\n", opts->dhchap_ctrl_secret);
694}
695
/*
 * Replace the controller-side DH-HMAC-CHAP secret and trigger
 * re-authentication; mirrors nvme_ctrl_dhchap_secret_store().
 */
static ssize_t nvme_ctrl_dhchap_ctrl_secret_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	struct nvmf_ctrl_options *opts = ctrl->opts;
	char *dhchap_secret;

	if (!ctrl->opts->dhchap_ctrl_secret)
		return -EINVAL;
	if (count < 7)
		return -EINVAL;
	if (memcmp(buf, "DHHC-1:", 7))
		return -EINVAL;

	/* +1 so the unterminated sysfs input becomes a C string */
	dhchap_secret = kzalloc(count + 1, GFP_KERNEL);
	if (!dhchap_secret)
		return -ENOMEM;
	memcpy(dhchap_secret, buf, count);
	nvme_auth_stop(ctrl);
	/* only swap keys when the secret actually changed */
	if (strcmp(dhchap_secret, opts->dhchap_ctrl_secret)) {
		struct nvme_dhchap_key *key, *ctrl_key;
		int ret;

		ret = nvme_auth_parse_key(dhchap_secret, &key);
		if (ret) {
			kfree(dhchap_secret);
			return ret;
		}
		kfree(opts->dhchap_ctrl_secret);
		opts->dhchap_ctrl_secret = dhchap_secret;	/* ownership moves to opts */
		/*
		 * NOTE(review): ctrl_key is read before taking
		 * dhchap_auth_mutex — confirm no concurrent writer can
		 * change ctrl->ctrl_key here.
		 */
		ctrl_key = ctrl->ctrl_key;
		mutex_lock(&ctrl->dhchap_auth_mutex);
		ctrl->ctrl_key = key;
		mutex_unlock(&ctrl->dhchap_auth_mutex);
		nvme_auth_free_key(ctrl_key);
	} else
		kfree(dhchap_secret);
	/* Start re-authentication */
	dev_info(ctrl->device, "re-authenticating controller\n");
	queue_work(nvme_wq, &ctrl->dhchap_auth_work);

	return count;
}

static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR,
	nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store);
742#endif
743
/* Controller device attributes; visibility filtered below. */
static struct attribute *nvme_dev_attrs[] = {
	&dev_attr_reset_controller.attr,
	&dev_attr_rescan_controller.attr,
	&dev_attr_model.attr,
	&dev_attr_serial.attr,
	&dev_attr_firmware_rev.attr,
	&dev_attr_cntlid.attr,
	&dev_attr_delete_controller.attr,
	&dev_attr_transport.attr,
	&dev_attr_subsysnqn.attr,
	&dev_attr_address.attr,
	&dev_attr_state.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_queue_count.attr,
	&dev_attr_sqsize.attr,
	&dev_attr_hostnqn.attr,
	&dev_attr_hostid.attr,
	&dev_attr_ctrl_loss_tmo.attr,
	&dev_attr_reconnect_delay.attr,
	&dev_attr_fast_io_fail_tmo.attr,
	&dev_attr_kato.attr,
	&dev_attr_cntrltype.attr,
	&dev_attr_dctype.attr,
	&dev_attr_quirks.attr,
#ifdef CONFIG_NVME_HOST_AUTH
	&dev_attr_dhchap_secret.attr,
	&dev_attr_dhchap_ctrl_secret.attr,
#endif
	&dev_attr_adm_passthru_err_log_enabled.attr,
	NULL
};
775
776static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
777 struct attribute *a, int n)
778{
779 struct device *dev = container_of(kobj, struct device, kobj);
780 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
781
782 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl)
783 return 0;
784 if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
785 return 0;
786 if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
787 return 0;
788 if (a == &dev_attr_hostid.attr && !ctrl->opts)
789 return 0;
790 if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
791 return 0;
792 if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
793 return 0;
794 if (a == &dev_attr_fast_io_fail_tmo.attr && !ctrl->opts)
795 return 0;
796#ifdef CONFIG_NVME_HOST_AUTH
797 if (a == &dev_attr_dhchap_secret.attr && !ctrl->opts)
798 return 0;
799 if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts)
800 return 0;
801#endif
802
803 return a->mode;
804}
805
/* Exported so transport drivers can attach this group to their devices. */
const struct attribute_group nvme_dev_attrs_group = {
	.attrs		= nvme_dev_attrs,
	.is_visible	= nvme_dev_attrs_are_visible,
};
EXPORT_SYMBOL_GPL(nvme_dev_attrs_group);
811
812#ifdef CONFIG_NVME_TCP_TLS
813static ssize_t tls_key_show(struct device *dev,
814 struct device_attribute *attr, char *buf)
815{
816 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
817
818 if (!ctrl->tls_pskid)
819 return 0;
820 return sysfs_emit(buf, "%08x\n", ctrl->tls_pskid);
821}
822static DEVICE_ATTR_RO(tls_key);
823
824static ssize_t tls_configured_key_show(struct device *dev,
825 struct device_attribute *attr, char *buf)
826{
827 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
828 struct key *key = ctrl->opts->tls_key;
829
830 return sysfs_emit(buf, "%08x\n", key_serial(key));
831}
832
/*
 * Writing "0" regenerates the TLS key for a secure-concatenation setup:
 * re-run DH-HMAC-CHAP negotiation on the admin queue, then reset the
 * controller so the new TLS key takes effect.  The controller is reset
 * even on negotiation failure to get back to a consistent state.
 */
static ssize_t tls_configured_key_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	int error, qid;

	error = kstrtoint(buf, 10, &qid);
	if (error)
		return error;

	/*
	 * We currently only allow userspace to write a `0` indicating
	 * generate a new key.
	 */
	if (qid)
		return -EINVAL;

	if (!ctrl->opts || !ctrl->opts->concat)
		return -EOPNOTSUPP;

	/* qid 0: authenticate on the admin queue */
	error = nvme_auth_negotiate(ctrl, 0);
	if (error < 0) {
		nvme_reset_ctrl(ctrl);
		return error;
	}

	error = nvme_auth_wait(ctrl, 0);
	if (error < 0) {
		nvme_reset_ctrl(ctrl);
		return error;
	}

	/*
	 * We need to reset the TLS connection, so let's just
	 * reset the controller.
	 */
	nvme_reset_ctrl(ctrl);

	return count;
}
static DEVICE_ATTR_RW(tls_configured_key);
875
876static ssize_t tls_keyring_show(struct device *dev,
877 struct device_attribute *attr, char *buf)
878{
879 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
880 struct key *keyring = ctrl->opts->keyring;
881
882 return sysfs_emit(buf, "%s\n", keyring->description);
883}
884static DEVICE_ATTR_RO(tls_keyring);
885
886static ssize_t tls_mode_show(struct device *dev,
887 struct device_attribute *attr, char *buf)
888{
889 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
890 const char *mode;
891
892 if (ctrl->opts->tls)
893 mode = "tls";
894 else
895 mode = "concat";
896
897 return sysfs_emit(buf, "%s\n", mode);
898}
899static DEVICE_ATTR_RO(tls_mode);
900
/* TLS-related attributes; visibility filtered below. */
static struct attribute *nvme_tls_attrs[] = {
	&dev_attr_tls_key.attr,
	&dev_attr_tls_configured_key.attr,
	&dev_attr_tls_keyring.attr,
	&dev_attr_tls_mode.attr,
	NULL,
};
908
909static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
910 struct attribute *a, int n)
911{
912 struct device *dev = container_of(kobj, struct device, kobj);
913 struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
914
915 if (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp"))
916 return 0;
917
918 if (a == &dev_attr_tls_key.attr &&
919 !ctrl->opts->tls && !ctrl->opts->concat)
920 return 0;
921 if (a == &dev_attr_tls_configured_key.attr &&
922 !ctrl->opts->concat)
923 return 0;
924 if (a == &dev_attr_tls_keyring.attr &&
925 !ctrl->opts->keyring)
926 return 0;
927 if (a == &dev_attr_tls_mode.attr &&
928 !ctrl->opts->tls && !ctrl->opts->concat)
929 return 0;
930
931 return a->mode;
932}
933
/* Group containing the TLS attributes (TCP transport only). */
static const struct attribute_group nvme_tls_attrs_group = {
	.attrs = nvme_tls_attrs,
	.is_visible = nvme_tls_attrs_are_visible,
};
938#endif
939
/* All attribute groups registered on controller devices. */
const struct attribute_group *nvme_dev_attr_groups[] = {
	&nvme_dev_attrs_group,
#ifdef CONFIG_NVME_TCP_TLS
	&nvme_tls_attrs_group,
#endif
	NULL,
};
947
/* Declare a read-only sysfs attribute for the subsystem device. */
#define SUBSYS_ATTR_RO(_name, _mode, _show)			\
	struct device_attribute subsys_attr_##_name = \
		__ATTR(_name, _mode, _show, NULL)
951
952static ssize_t nvme_subsys_show_nqn(struct device *dev,
953 struct device_attribute *attr,
954 char *buf)
955{
956 struct nvme_subsystem *subsys =
957 container_of(dev, struct nvme_subsystem, dev);
958
959 return sysfs_emit(buf, "%s\n", subsys->subnqn);
960}
961static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
962
963static ssize_t nvme_subsys_show_type(struct device *dev,
964 struct device_attribute *attr,
965 char *buf)
966{
967 struct nvme_subsystem *subsys =
968 container_of(dev, struct nvme_subsystem, dev);
969
970 switch (subsys->subtype) {
971 case NVME_NQN_DISC:
972 return sysfs_emit(buf, "discovery\n");
973 case NVME_NQN_NVME:
974 return sysfs_emit(buf, "nvm\n");
975 default:
976 return sysfs_emit(buf, "reserved\n");
977 }
978}
979static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
980
/*
 * Generate a read-only subsystem attribute printing a fixed-width string
 * field; "%.*s" bounds the print since the field is not NUL-terminated.
 */
#define nvme_subsys_show_str_function(field)				\
static ssize_t subsys_##field##_show(struct device *dev,		\
				     struct device_attribute *attr,	\
				     char *buf)				\
{									\
	struct nvme_subsystem *subsys =					\
		container_of(dev, struct nvme_subsystem, dev);		\
	return sysfs_emit(buf, "%.*s\n",				\
			   (int)sizeof(subsys->field), subsys->field);	\
}									\
static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);

nvme_subsys_show_str_function(model);
nvme_subsys_show_str_function(serial);
nvme_subsys_show_str_function(firmware_rev);
995
/* Subsystem device attributes (all unconditionally visible). */
static struct attribute *nvme_subsys_attrs[] = {
	&subsys_attr_model.attr,
	&subsys_attr_serial.attr,
	&subsys_attr_firmware_rev.attr,
	&subsys_attr_subsysnqn.attr,
	&subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
	&subsys_attr_iopolicy.attr,
#endif
	NULL,
};
1007
/* Group and group list registered on the subsystem device. */
static const struct attribute_group nvme_subsys_attrs_group = {
	.attrs = nvme_subsys_attrs,
};

const struct attribute_group *nvme_subsys_attrs_groups[] = {
	&nvme_subsys_attrs_group,
	NULL,
};