/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>
#include <linux/kfifo.h>

#define NVMET_DEFAULT_VS NVME_VS(2, 1, 0)

#define NVMET_NS_ENABLED XA_MARK_1
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
#define NVMET_NO_ERROR_LOC ((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL "Linux"
#define NVMET_MN_MAX_SIZE 40
#define NVMET_SN_MAX_SIZE 20
#define NVMET_FR_MAX_SIZE 8
#define NVMET_PR_LOG_QUEUE_SIZE 64

#define nvmet_for_each_ns(xa, index, entry) \
	xa_for_each(xa, index, entry)

#define nvmet_for_each_enabled_ns(xa, index, entry) \
	xa_for_each_marked(xa, index, entry, NVMET_NS_ENABLED)
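
/*
 * Usage sketch (assumes the caller serializes against namespace
 * changes, e.g. by holding subsys->lock):
 *
 *	struct nvmet_ns *ns;
 *	unsigned long idx;
 *
 *	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns)
 *		pr_debug("nsid %u enabled\n", ns->nsid);
 */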

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR field to 1, meaning the offending
 * offset is in the data portion of the Connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x) \
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x) \
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
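
/*
 * Usage sketch: a Connect handler reporting a bad hostid would point
 * the error completion at the offending field like this:
 *
 *	req->error_loc = offsetof(struct nvmf_connect_data, hostid);
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostid);
 */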

struct nvmet_pr_registrant {
	u64 rkey;
	uuid_t hostid;
	enum nvme_pr_type rtype;
	struct list_head entry;
	struct rcu_head rcu;
};

struct nvmet_pr {
	bool enable;
	unsigned long notify_mask;
	atomic_t generation;
	struct nvmet_pr_registrant __rcu *holder;
	/*
	 * Reservation commands require mutual exclusion for their entire
	 * execution. The 'preempt and abort' command, however, waits
	 * asynchronously for the per-controller percpu_refs to drop
	 * before it completes, so the lock may be released from a
	 * different context than the one that acquired it. A semaphore
	 * permits that; a mutex does not.
	 */
	struct semaphore pr_sem;
	struct list_head registrant_list;
};
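
/*
 * Illustrative sketch of the pattern described above (names other
 * than pr_sem are hypothetical):
 *
 *	down(&pr->pr_sem);
 *	start_async_abort(...);	// kicks off percpu_ref teardown
 *	...
 *	// later, from the asynchronous completion path:
 *	up(&pr->pr_sem);
 */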

struct nvmet_pr_per_ctrl_ref {
	struct percpu_ref ref;
	struct completion free_done;
	struct completion confirm_done;
	uuid_t hostid;
};

struct nvmet_ns {
	struct percpu_ref ref;
	struct file *bdev_file;
	struct block_device *bdev;
	struct file *file;
	bool readonly;
	u32 nsid;
	u32 blksize_shift;
	loff_t size;
	u8 nguid[16];
	uuid_t uuid;
	u32 anagrpid;

	bool buffered_io;
	bool enabled;
	struct nvmet_subsys *subsys;
	const char *device_path;

	struct config_group device_group;
	struct config_group group;

	struct completion disable_done;
	mempool_t *bvec_pool;

	struct pci_dev *p2p_dev;
	int use_p2pmem;
	int pi_type;
	int metadata_size;
	u8 csi;
	struct nvmet_pr pr;
	struct xarray pr_per_ctrl_refs;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	struct nvmet_ctrl *ctrl;
	u16 qid;
	u16 size;
	refcount_t ref;
};

struct nvmet_sq {
	struct nvmet_ctrl *ctrl;
	struct percpu_ref ref;
	struct nvmet_cq *cq;
	u16 qid;
	u16 size;
	u32 sqhd;
	bool sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	bool authenticated;
	struct delayed_work auth_expired_work;
	u16 dhchap_tid;
	u8 sc_c;
	u8 dhchap_status;
	u8 dhchap_step;
	u8 *dhchap_c1;
	u8 *dhchap_c2;
	u32 dhchap_s1;
	u32 dhchap_s2;
	u8 *dhchap_skey;
	int dhchap_skey_len;
#endif
#ifdef CONFIG_NVME_TARGET_TCP_TLS
	struct key *tls_key;
#endif
	struct completion free_done;
	struct completion confirm_done;
};

struct nvmet_ana_group {
	struct config_group group;
	struct nvmet_port *port;
	u32 grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port - Common structure to keep port
 * information for the target.
 * @entry: Entry into referrals or transport list.
 * @disc_addr: Address information is stored in a format defined
 * for a discovery log page entry.
 * @group: ConfigFS group for this element's folder.
 * @priv: Private data for the transport.
 */
struct nvmet_port {
	struct list_head entry;
	struct nvmf_disc_rsp_page_entry disc_addr;
	struct config_group group;
	struct config_group subsys_group;
	struct list_head subsystems;
	struct config_group referrals_group;
	struct list_head referrals;
	struct list_head global_entry;
	struct config_group ana_groups_group;
	struct nvmet_ana_group ana_default_group;
	enum nvme_ana_state *ana_state;
	struct key *keyring;
	void *priv;
	bool enabled;
	int inline_data_size;
	int max_queue_size;
	int mdts;
	const struct nvmet_fabrics_ops *tr_ops;
	bool pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
{
	return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
}

static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
{
	return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}

struct nvmet_pr_log_mgr {
	struct mutex lock;
	u64 lost_count;
	u64 counter;
	DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE);
};

struct nvmet_ctrl {
	struct nvmet_subsys *subsys;
	struct nvmet_sq **sqs;
	struct nvmet_cq **cqs;

	void *drvdata;

	bool reset_tbkas;

	struct mutex lock;
	u64 cap;
	u32 cc;
	u32 csts;

	uuid_t hostid;
	u16 cntlid;
	u32 kato;

	struct nvmet_port *port;

	u32 aen_enabled;
	unsigned long aen_masked;
	struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int nr_async_event_cmds;
	struct list_head async_events;
	struct work_struct async_event_work;

	struct list_head subsys_entry;
	struct kref ref;
	struct delayed_work ka_work;
	struct work_struct fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32 *changed_ns_list;
	u32 nr_changed_ns;

	char hostnqn[NVMF_NQN_FIELD_LEN];

	struct device *p2p_client;
	struct radix_tree_root p2p_ns_map;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry *debugfs_dir;
#endif
	spinlock_t error_lock;
	u64 err_counter;
	struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
	bool pi_support;
	bool concat;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key *host_key;
	struct nvme_dhchap_key *ctrl_key;
	u8 shash_id;
	struct crypto_kpp *dh_tfm;
	u8 dh_gid;
	u8 *dh_key;
	size_t dh_keysize;
#endif
#ifdef CONFIG_NVME_TARGET_TCP_TLS
	struct key *tls_key;
#endif
	struct nvmet_pr_log_mgr pr_log_mgr;
};

struct nvmet_subsys {
	enum nvme_subsys_type type;

	struct mutex lock;
	struct kref ref;

	struct xarray namespaces;
	unsigned int nr_namespaces;
	u32 max_nsid;
	u16 cntlid_min;
	u16 cntlid_max;

	struct list_head ctrls;

	struct list_head hosts;
	bool allow_any_host;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry *debugfs_dir;
#endif
	u16 max_qid;

	u64 ver;
	char serial[NVMET_SN_MAX_SIZE];
	bool subsys_discovered;
	char *subsysnqn;
	bool pi_support;

	struct config_group group;

	struct config_group namespaces_group;
	struct config_group allowed_hosts_group;

	u16 vendor_id;
	u16 subsys_vendor_id;
	char *model_number;
	u32 ieee_oui;
	char *firmware_rev;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl *passthru_ctrl;
	char *passthru_ctrl_path;
	struct config_group passthru_group;
	unsigned int admin_timeout;
	unsigned int io_timeout;
	unsigned int clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8 zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group group;
	u8 *dhchap_secret;
	u8 *dhchap_ctrl_secret;
	u8 dhchap_key_hash;
	u8 dhchap_ctrl_key_hash;
	u8 dhchap_hash_id;
	u8 dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head entry;
	struct nvmet_host *host;
};

struct nvmet_subsys_link {
	struct list_head entry;
	struct nvmet_subsys *subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS (1 << 0)
#define NVMF_METADATA_SUPPORTED (1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl,
			char *traddr, size_t traddr_len);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);

	/* Operations mandatory for PCI target controllers */
	u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 cqid, u16 flags,
			u16 qsize, u64 prp1);
	u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid);
	u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags,
			u16 qsize, u64 prp1, u16 irq_vector);
	u16 (*delete_cq)(struct nvmet_ctrl *ctrl, u16 cqid);
	u16 (*set_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
			void *feat_data);
	u16 (*get_feature)(const struct nvmet_ctrl *ctrl, u8 feat,
			void *feat_data);
};

#define NVMET_MAX_INLINE_BIOVEC 8
#define NVMET_MAX_INLINE_DATA_LEN (NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command *cmd;
	struct nvme_completion *cqe;
	struct nvmet_sq *sq;
	struct nvmet_cq *cq;
	struct nvmet_ns *ns;
	struct scatterlist *sg;
	struct scatterlist *metadata_sg;
	struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio inline_bio;
		} b;
		struct {
			bool mpool_alloc;
			struct kiocb iocb;
			struct bio_vec *bvec;
			struct work_struct work;
		} f;
		struct {
			struct bio inline_bio;
			struct request *rq;
			struct work_struct work;
			bool use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio inline_bio;
			struct work_struct zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
		struct {
			struct work_struct abort_work;
		} r;
	};
	int sg_cnt;
	int metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t transfer_len;
	size_t metadata_len;

	struct nvmet_port *port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev *p2p_dev;
	struct device *p2p_client;
	u16 error_loc;
	u64 error_slba;
	struct nvmet_pr_per_ctrl_ref *pc_ref;
};

#define NVMET_MAX_MPOOL_BVEC 16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;
extern struct workqueue_struct *nvmet_aen_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
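
/*
 * Usage sketch (dev is a hypothetical transport device): a host write
 * is mapped DMA_FROM_DEVICE so the data can land in target memory:
 *
 *	int nents = dma_map_sg(dev, req->sg, req->sg_cnt,
 *			       nvmet_data_dir(req));
 */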

struct nvmet_async_event {
	struct list_head entry;
	u8 event_type;
	u8 event_info;
	u8 log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
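
/*
 * Sketch of the re-arm flow: a Get Log Page handler clears the mask
 * bit unless the host set RAE (Retain Asynchronous Event, cdw10 bit
 * 15), allowing the next matching event to be reported again:
 *
 *	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
 */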

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u32 nvmet_connect_cmd_data_len(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
void nvmet_bdev_set_nvm_limits(struct block_device *bdev,
		struct nvme_id_ns_nvm *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u32 nvmet_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq,
		const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
size_t nvmet_req_transfer_len(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create);
u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create);
void nvmet_cq_init(struct nvmet_cq *cq);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_cq_destroy(struct nvmet_cq *cq);
bool nvmet_cq_get(struct nvmet_cq *cq);
void nvmet_cq_put(struct nvmet_cq *cq);
bool nvmet_cq_in_use(struct nvmet_cq *cq);
u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, bool create);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		struct nvmet_cq *cq, u16 qid, u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);

struct nvmet_alloc_ctrl_args {
	struct nvmet_port *port;
	struct nvmet_sq *sq;
	char *subsysnqn;
	char *hostnqn;
	uuid_t *hostid;
	const struct nvmet_fabrics_ops *ops;
	struct device *p2p_client;
	u32 kato;
	__le32 result;
	u16 error_loc;
	u16 status;
};

struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
		const char *hostnqn, u16 cntlid,
		struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);
ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_len);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
		struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_MIN_QUEUE_SIZE 16
#define NVMET_MAX_QUEUE_SIZE 1024
#define NVMET_NR_QUEUES 128
#define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1)
#define NVMET_MAX_MDTS 255

/*
 * Nice round number that makes a list of nsids fit into a page
 * (1024 nsids * sizeof(__le32) = 4096 bytes, i.e. one 4 KiB page).
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES 1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS 128
#define NVMET_DEFAULT_ANA_GRPID 1

#define NVMET_KAS 10
#define NVMET_DISC_KATO_MS 120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
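
/*
 * Worked example: rw.length is 0's based, so length == 7 means 8
 * blocks; with blksize_shift == 12 (4 KiB blocks) that is
 * 8 << 12 = 32768 bytes.
 */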

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl)
{
	return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI;
}

/* Limit MDTS according to port config or transport capability */
static inline u8 nvmet_ctrl_mdts(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u8 mdts = req->port->mdts;

	if (!ctrl->ops->get_mdts)
		return mdts;
	return min_not_zero(ctrl->ops->get_mdts(ctrl), mdts);
}
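
/*
 * Example: a port configured with mdts == 5 and a transport whose
 * get_mdts() returns 7 advertises MDTS 5; min_not_zero() also treats
 * 0 from either side as "no limit" and picks the other value.
 */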

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

static inline bool nvmet_cc_en(u32 cc)
{
	return (cc & NVME_CC_ENABLE) >> NVME_CC_EN_SHIFT;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc & NVME_CC_CSS_MASK) >> NVME_CC_CSS_SHIFT;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc & NVME_CC_MPS_MASK) >> NVME_CC_MPS_SHIFT;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc & NVME_CC_AMS_MASK) >> NVME_CC_AMS_SHIFT;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc & NVME_CC_SHN_MASK) >> NVME_CC_SHN_SHIFT;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc & NVME_CC_IOSQES_MASK) >> NVME_CC_IOSQES_SHIFT;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc & NVME_CC_IOCQES_MASK) >> NVME_CC_IOCQES_SHIFT;
}

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(clamp(a, 1U, 1U << 16) - 1);
}
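
/*
 * Examples: to0based(1) == 0 and to0based(65536) == 65535; out-of-range
 * inputs are clamped first, so to0based(0) == 0 as well.
 */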

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
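
/*
 * Example: with 4096-byte blocks (blksize_shift == 12) and 512-byte
 * kernel sectors (SECTOR_SHIFT == 9), LBA 2 maps to sector
 * 2 << (12 - 9) == 16, and sector 16 maps back to LBA 2.
 */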

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}
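
/*
 * With 4 KiB pages, NVMET_MAX_INLINE_DATA_LEN is 8 * 4096 = 32768
 * bytes, so requests up to 32 KiB in at most 8 segments can use the
 * inline bio_vec array instead of an allocated scatterlist.
 */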

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
	else
		bio_uninit(bio);
}

#ifdef CONFIG_NVME_TARGET_TCP_TLS
static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq)
{
	return sq->tls_key ? key_serial(sq->tls_key) : 0;
}
static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq)
{
	if (sq->tls_key) {
		key_put(sq->tls_key);
		sq->tls_key = NULL;
	}
}
#else
static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq) { return 0; }
static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq) {}
#endif
#ifdef CONFIG_NVME_TARGET_AUTH
u32 nvmet_auth_send_data_len(struct nvmet_req *req);
void nvmet_execute_auth_send(struct nvmet_req *req);
u32 nvmet_auth_receive_data_len(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, bool reset);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
		unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
		unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
{
	return ctrl->host_key != NULL && !nvmet_queue_tls_keyid(sq);
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
		u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
		const u8 *pkey, int pkey_size);
void nvmet_auth_insert_psk(struct nvmet_sq *sq);
#else
static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl,
		struct nvmet_sq *sq, bool reset)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl,
		struct nvmet_sq *sq)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
static inline void nvmet_auth_insert_psk(struct nvmet_sq *sq) {}
#endif

int nvmet_pr_init_ns(struct nvmet_ns *ns);
u16 nvmet_parse_pr_cmd(struct nvmet_req *req);
u16 nvmet_pr_check_cmd_access(struct nvmet_req *req);
int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl);
void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl);
void nvmet_pr_exit_ns(struct nvmet_ns *ns);
void nvmet_execute_get_log_page_resv(struct nvmet_req *req);
u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask);
u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req);
u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req);
static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref)
{
	percpu_ref_put(&pc_ref->ref);
}

/*
 * Data for the get_feature() and set_feature() operations of PCI target
 * controllers.
 */
struct nvmet_feat_irq_coalesce {
	u8 thr;
	u8 time;
};

struct nvmet_feat_irq_config {
	u16 iv;
	bool cd;
};

struct nvmet_feat_arbitration {
	u8 hpw;
	u8 mpw;
	u8 lpw;
	u8 ab;
};

#endif /* _NVMET_H */