Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Virtio SCSI HBA driver
4 *
5 * Copyright IBM Corp. 2010
6 * Copyright Red Hat, Inc. 2011
7 *
8 * Authors:
9 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
10 * Paolo Bonzini <pbonzini@redhat.com>
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/mempool.h>
18#include <linux/interrupt.h>
19#include <linux/virtio.h>
20#include <linux/virtio_ids.h>
21#include <linux/virtio_config.h>
22#include <linux/virtio_scsi.h>
23#include <linux/cpu.h>
24#include <linux/blkdev.h>
25#include <linux/blk-integrity.h>
26#include <scsi/scsi_host.h>
27#include <scsi/scsi_device.h>
28#include <scsi/scsi_cmnd.h>
29#include <scsi/scsi_tcq.h>
30#include <scsi/scsi_devinfo.h>
31#include <linux/seqlock.h>
32#include <linux/dma-mapping.h>
33
34#include "sd.h"
35
/* Mempool size for TMF commands; guarantees forward progress in EH paths. */
#define VIRTIO_SCSI_MEMPOOL_SZ 64
/* Number of event buffers kept posted on the event virtqueue. */
#define VIRTIO_SCSI_EVENT_LEN 8
/* Index of the first request virtqueue: vq 0 is control, vq 1 is event. */
#define VIRTIO_SCSI_VQ_BASE 2

/* Number of request virtqueues dedicated to polled (interrupt-less) I/O. */
static unsigned int virtscsi_poll_queues;
module_param(virtscsi_poll_queues, uint, 0644);
MODULE_PARM_DESC(virtscsi_poll_queues,
		 "The number of dedicated virtqueues for polling I/O");
44
/* Command queue element */
struct virtio_scsi_cmd {
	/* SCSI midlayer command being serviced (NULL for standalone TMFs). */
	struct scsi_cmnd *sc;
	/* Signalled by virtscsi_complete_free() when a ctrl request finishes. */
	struct completion *comp;
	/* Request header, placed in the device-readable part of the buffer. */
	union {
		struct virtio_scsi_cmd_req cmd;
		struct virtio_scsi_cmd_req_pi cmd_pi;
		struct virtio_scsi_ctrl_tmf_req tmf;
		struct virtio_scsi_ctrl_an_req an;
	} req;
	/* Response, written back by the device. */
	union {
		struct virtio_scsi_cmd_resp cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp an;
		struct virtio_scsi_event evt;
	} resp;
} ____cacheline_aligned_in_smp;
62
/* One posted event buffer plus the work item that processes it. */
struct virtio_scsi_event_node {
	/* Owning driver instance. */
	struct virtio_scsi *vscsi;
	/* Device-written event payload (points into vscsi->events[]). */
	struct virtio_scsi_event *event;
	/* Queued by virtscsi_complete_event(), runs virtscsi_handle_event(). */
	struct work_struct work;
};
68
/* A virtqueue together with the lock that serializes access to it. */
struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};
75
/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	/* Number of request virtqueues; also used as nr_hw_queues. */
	u32 num_queues;
	/* Per-map queue counts (default/read/poll), set in virtscsi_init(). */
	int io_queues[HCTX_MAX_TYPES];

	/* NOTE(review): no visible user of this list node in this file. */
	struct hlist_node node;

	/* Protected by event_vq lock */
	bool stop_events;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;

	/* Event payloads written by the device, grouped for DMA-from-device. */
	__dma_from_device_group_begin();
	struct virtio_scsi_event events[VIRTIO_SCSI_EVENT_LEN];
	__dma_from_device_group_end();

	/* One entry per request virtqueue; sized at scsi_host_alloc() time. */
	struct virtio_scsi_vq req_vqs[];
};
100
/* Slab cache and mempool backing TMF/abort commands (see virtscsi_tmf()). */
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;
103
104static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
105{
106 return vdev->priv;
107}
108
109static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
110{
111 if (resid)
112 scsi_set_resid(sc, min(resid, scsi_bufflen(sc)));
113}
114
/*
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Translates the virtio-scsi response code into SCSI host/status bytes,
 * copies back the residual count and sense data, then completes the command.
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	/* The SAM status byte comes straight from the device. */
	sc->result = resp->status;
	virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		/* Reported via the status byte as a reservation conflict. */
		set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);
		break;
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		fallthrough;
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
		VIRTIO_SCSI_SENSE_SIZE);
	if (resp->sense_len) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32,
			     virtio32_to_cpu(vscsi->vdev, resp->sense_len),
			     VIRTIO_SCSI_SENSE_SIZE));
	}

	scsi_done(sc);
}
180
/*
 * Drain completed buffers from a virtqueue and hand each to @fn.
 *
 * Callbacks are disabled while draining and the loop repeats until
 * virtqueue_enable_cb() succeeds with no new buffers pending, so no
 * completion can be missed between the final get_buf and re-enabling
 * interrupts.  Runs with vq_lock held (irqsave).
 */
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);

	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}
199
200static void virtscsi_req_done(struct virtqueue *vq)
201{
202 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
203 struct virtio_scsi *vscsi = shost_priv(sh);
204 int index = vq->index - VIRTIO_SCSI_VQ_BASE;
205 struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
206
207 virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
208};
209
210static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
211{
212 int i, num_vqs;
213
214 num_vqs = vscsi->num_queues;
215 for (i = 0; i < num_vqs; i++)
216 virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
217 virtscsi_complete_cmd);
218}
219
220static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
221{
222 struct virtio_scsi_cmd *cmd = buf;
223
224 if (cmd->comp)
225 complete(cmd->comp);
226}
227
228static void virtscsi_ctrl_done(struct virtqueue *vq)
229{
230 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
231 struct virtio_scsi *vscsi = shost_priv(sh);
232
233 virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
234};
235
236
/*
 * (Re-)post one event buffer on the event virtqueue so the device can
 * report hotplug/parameter-change events.
 *
 * Returns 0 on success or the negative error from virtqueue add.
 */
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	sg_init_one(&sg, event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf_cache_clean(vscsi->event_vq.vq, &sg, 1, event_node,
					      GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}
257
258static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
259{
260 int i;
261
262 for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
263 vscsi->event_list[i].vscsi = vscsi;
264 vscsi->event_list[i].event = &vscsi->events[i];
265 virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
266 }
267
268 return 0;
269}
270
/*
 * Stop event processing on device removal: set stop_events under the
 * event_vq lock so virtscsi_complete_event() stops queueing new work,
 * then synchronously cancel any work already queued.
 */
static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	/* Stop scheduling work before calling cancel_work_sync. */
	spin_lock_irq(&vscsi->event_vq.vq_lock);
	vscsi->stop_events = true;
	spin_unlock_irq(&vscsi->event_vq.vq_lock);

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}
283
284static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
285 struct virtio_scsi_event *event)
286{
287 struct scsi_device *sdev;
288 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
289 unsigned int target = event->lun[1];
290 unsigned int lun = (event->lun[2] << 8) | event->lun[3];
291
292 switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
293 case VIRTIO_SCSI_EVT_RESET_RESCAN:
294 if (lun == 0) {
295 scsi_scan_target(&shost->shost_gendev, 0, target,
296 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
297 } else {
298 scsi_add_device(shost, 0, target, lun);
299 }
300 break;
301 case VIRTIO_SCSI_EVT_RESET_REMOVED:
302 sdev = scsi_device_lookup(shost, 0, target, lun);
303 if (sdev) {
304 scsi_remove_device(sdev);
305 scsi_device_put(sdev);
306 } else {
307 pr_err("SCSI device %d 0 %d %d not found\n",
308 shost->host_no, target, lun);
309 }
310 break;
311 default:
312 pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
313 }
314}
315
/*
 * Handle a PARAM_CHANGE event.  The sense ASC/ASCQ pair is packed into
 * the event's reason field (ASC in the low byte, ASCQ in the next).
 * A capacity or mode-parameter change triggers a device rescan.
 */
static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
	u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
		       shost->host_no, target, lun);
		return;
	}

	/* Handle "Parameters changed", "Mode parameters changed", and
	   "Capacity data has changed". */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(sdev);

	scsi_device_put(sdev);
}
340
/*
 * Probe every known device with INQUIRY and remove those that are gone.
 * Used when the device reports EVENTS_MISSED and individual hot-unplug
 * events may have been lost.
 *
 * Returns 0 on success, -ENOMEM if the INQUIRY buffer cannot be allocated.
 */
static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int result, inquiry_len, inq_result_len = 256;
	char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);

	if (!inq_result)
		return -ENOMEM;

	shost_for_each_device(sdev, shost) {
		/* Fall back to the 36-byte minimum before first INQUIRY. */
		inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;

		memset(scsi_cmd, 0, sizeof(scsi_cmd));
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) inquiry_len;

		memset(inq_result, 0, inq_result_len);

		result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
					  inq_result, inquiry_len,
					  SD_TIMEOUT, SD_MAX_RETRIES, NULL);

		if (result == 0 && inq_result[0] >> 5) {
			/* PQ indicates the LUN is not attached */
			scsi_remove_device(sdev);
		} else if (result > 0 && host_byte(result) == DID_BAD_TARGET) {
			/*
			 * If all LUNs of a virtio-scsi device are unplugged
			 * it will respond with BAD TARGET on any INQUIRY
			 * command.
			 * Remove the device in this case as well.
			 */
			scsi_remove_device(sdev);
		}
	}

	kfree(inq_result);
	return 0;
}
382
383static void virtscsi_handle_event(struct work_struct *work)
384{
385 struct virtio_scsi_event_node *event_node =
386 container_of(work, struct virtio_scsi_event_node, work);
387 struct virtio_scsi *vscsi = event_node->vscsi;
388 struct virtio_scsi_event *event = event_node->event;
389
390 if (event->event &
391 cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
392 int ret;
393
394 event->event &= ~cpu_to_virtio32(vscsi->vdev,
395 VIRTIO_SCSI_T_EVENTS_MISSED);
396 ret = virtscsi_rescan_hotunplug(vscsi);
397 if (ret)
398 return;
399 scsi_scan_host(virtio_scsi_host(vscsi->vdev));
400 }
401
402 switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
403 case VIRTIO_SCSI_T_NO_EVENT:
404 break;
405 case VIRTIO_SCSI_T_TRANSPORT_RESET:
406 virtscsi_handle_transport_reset(vscsi, event);
407 break;
408 case VIRTIO_SCSI_T_PARAM_CHANGE:
409 virtscsi_handle_param_change(vscsi, event);
410 break;
411 default:
412 pr_err("Unsupported virtio scsi event %x\n", event->event);
413 }
414 virtscsi_kick_event(vscsi, event_node);
415}
416
/*
 * Event virtqueue completion: defer processing to a workqueue.  Runs
 * under the event_vq lock, so the stop_events check is race-free
 * against virtscsi_cancel_event_work().
 */
static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_event_node *event_node = buf;

	if (!vscsi->stop_events)
		queue_work(system_freezable_wq, &event_node->work);
}
424
425static void virtscsi_event_done(struct virtqueue *vq)
426{
427 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
428 struct virtio_scsi *vscsi = shost_priv(sh);
429
430 virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
431};
432
/*
 * Build the scatterlist layout for one command and add it to the vq.
 *
 * The order is fixed by the virtio-scsi spec: request header, [WRITE
 * protection SGLs], data-out, then response header, [READ protection
 * SGLs], data-in.  At most 6 top-level sg entries are ever needed.
 *
 * Caller must hold the vq_lock.  Returns the virtqueue_add_sgs() result.
 */
static int __virtscsi_add_cmd(struct virtqueue *vq,
			      struct virtio_scsi_cmd *cmd,
			      size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[6], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &sc->sdb.table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &sc->sdb.table;
	}

	/* Request header. */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer. */
	if (out) {
		/* Place WRITE protection SGLs before Data OUT payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num++] = scsi_prot_sglist(sc);
		sgs[out_num++] = out->sgl;
	}

	/* Response header. */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in) {
		/* Place READ protection SGLs before Data IN payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
		sgs[out_num + in_num++] = in->sgl;
	}

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}
477
478static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
479{
480 bool needs_kick;
481 unsigned long flags;
482
483 spin_lock_irqsave(&vq->vq_lock, flags);
484 needs_kick = virtqueue_kick_prepare(vq->vq);
485 spin_unlock_irqrestore(&vq->vq_lock, flags);
486
487 if (needs_kick)
488 virtqueue_notify(vq->vq);
489}
490
/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
 * @vq : the struct virtqueue we're talking about
 * @cmd : command structure
 * @req_size : size of the request buffer
 * @resp_size : size of the response buffer
 * @kick : whether to kick the virtqueue immediately
 *
 * Returns 0 on success, a negative virtqueue_add_sgs() error otherwise.
 * The device notification, if requested, is issued outside the vq lock.
 */
static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size,
			    bool kick)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err && kick)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}
519
/*
 * Fill in the common request header: single-level LUN addressing
 * (0x40 flags LUNs above 255), the command pointer as a unique tag,
 * and simple task attributes.
 */
static void virtio_scsi_init_hdr(struct virtio_device *vdev,
				 struct virtio_scsi_cmd_req *cmd,
				 struct scsi_cmnd *sc)
{
	cmd->lun[0] = 1;
	cmd->lun[1] = sc->device->id;
	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
	cmd->lun[3] = sc->device->lun & 0xff;
	cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
	cmd->prio = 0;
	cmd->crn = 0;
}
533
#ifdef CONFIG_BLK_DEV_INTEGRITY
/*
 * Fill in the T10-PI request header: the common fields plus the number
 * of protection-information bytes flowing in each direction.  The pi
 * byte counts stay zero when the command carries no protection SGLs.
 */
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
				    struct virtio_scsi_cmd_req_pi *cmd_pi,
				    struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);
	struct blk_integrity *bi;

	/* cmd_req_pi begins with the same layout as cmd_req. */
	virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);

	if (!rq || !scsi_prot_sg_count(sc))
		return;

	bi = blk_get_integrity(rq->q->disk);

	if (sc->sc_data_direction == DMA_TO_DEVICE)
		cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
						      bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
						     bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
}
#endif
559
560static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
561 struct scsi_cmnd *sc)
562{
563 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
564 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
565
566 return &vscsi->req_vqs[hwq];
567}
568
/*
 * Queue one SCSI command on its hw queue's virtqueue.
 *
 * -EIO from the virtqueue is reported to the midlayer as BAD_TARGET by
 * completing the command inline (under the vq lock, as the completion
 * path expects); any other add failure asks the midlayer to retry via
 * SCSI_MLQUEUE_HOST_BUSY.  The queue is only kicked for SCMD_LAST;
 * earlier commands are batched and flushed by virtscsi_commit_rqs().
 */
static enum scsi_qc_status virtscsi_queuecommand(struct Scsi_Host *shost,
						 struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
	bool kick;
	unsigned long flags;
	int req_size;
	int ret;

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported? */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	cmd->sc = sc;

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
		virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd_pi);
	} else
#endif
	{
		virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd);
	}

	kick = (sc->flags & SCMD_LAST) != 0;
	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
	if (ret == -EIO) {
		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
		spin_lock_irqsave(&req_vq->vq_lock, flags);
		virtscsi_complete_cmd(vscsi, cmd);
		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
	} else if (ret != 0) {
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	return 0;
}
617
/*
 * Submit a task-management request on the control queue and wait for it.
 *
 * @cmd must come from virtscsi_cmd_pool and is always freed here,
 * whether or not submission succeeds.  Returns SUCCESS or FAILED for
 * the SCSI error-handling callbacks.
 */
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
			     sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

	/*
	 * The spec guarantees that all requests related to the TMF have
	 * been completed, but the callback might not have run yet if
	 * we're using independent interrupts (e.g. MSI).  Poll the
	 * virtqueues once.
	 *
	 * In the abort case, scsi_done() will do nothing, because the
	 * command timed out and hence SCMD_STATE_COMPLETE has been set.
	 */
	virtscsi_poll_requests(vscsi);

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}
648
/*
 * eh_device_reset_handler: issue a LOGICAL UNIT RESET TMF for sc's LUN.
 * Returns SUCCESS or FAILED; the TMF command is mempool-backed so this
 * can make progress under memory pressure.
 */
static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = cpu_to_virtio32(vscsi->vdev,
					   VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
		/* Same single-level LUN encoding as virtio_scsi_init_hdr(). */
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}
671
672static int virtscsi_device_alloc(struct scsi_device *sdevice)
673{
674 /*
675 * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
676 * may have transfer limits which come from the host SCSI
677 * controller or something on the host side other than the
678 * target itself.
679 *
680 * To make this work properly, the hypervisor can adjust the
681 * target's VPD information to advertise these limits. But
682 * for that to work, the guest has to look at the VPD pages,
683 * which we won't do by default if it is an SPC-2 device, even
684 * if it does actually support it.
685 *
686 * So, set the blist to always try to read the VPD pages.
687 */
688 sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;
689
690 return 0;
691}
692
693
694/**
695 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
696 * @sdev: Virtscsi target whose queue depth to change
697 * @qdepth: New queue depth
698 */
699static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
700{
701 struct Scsi_Host *shost = sdev->host;
702 int max_depth = shost->cmd_per_lun;
703
704 return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
705}
706
/*
 * eh_abort_handler: issue an ABORT TASK TMF for the timed-out command,
 * identified by the same tag used at submission (the scsi_cmnd pointer).
 * Returns SUCCESS or FAILED.
 */
static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		/* Must match the tag set in virtio_scsi_init_hdr(). */
		.tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
	};
	return virtscsi_tmf(vscsi, cmd);
}
729
/*
 * Distribute hw queues across the host's blk-mq maps using the counts
 * computed in virtscsi_init(), assigning contiguous queue offsets.
 */
static void virtscsi_map_queues(struct Scsi_Host *shost)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	int i, qoff;

	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = vscsi->io_queues[i];
		map->queue_offset = qoff;
		qoff += map->nr_queues;

		if (map->nr_queues == 0)
			continue;

		/*
		 * Regular queues have interrupts and hence CPU affinity is
		 * defined by the core virtio code, but polling queues have
		 * no interrupts so we let the block layer assign CPU affinity.
		 */
		if (i == HCTX_TYPE_POLL)
			blk_mq_map_queues(map);
		else
			blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
	}
}
756
757static int virtscsi_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
758{
759 struct virtio_scsi *vscsi = shost_priv(shost);
760 struct virtio_scsi_vq *virtscsi_vq = &vscsi->req_vqs[queue_num];
761 unsigned long flags;
762 unsigned int len;
763 int found = 0;
764 void *buf;
765
766 spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
767
768 while ((buf = virtqueue_get_buf(virtscsi_vq->vq, &len)) != NULL) {
769 virtscsi_complete_cmd(vscsi, buf);
770 found++;
771 }
772
773 spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
774
775 return found;
776}
777
778static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
779{
780 struct virtio_scsi *vscsi = shost_priv(shost);
781
782 virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
783}
784
/*
 * The host guarantees to respond to each command, although I/O
 * latencies might be higher than on bare metal.  Reset the timer
 * unconditionally to give the host a chance to perform EH.
 */
static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return SCSI_EH_RESET_TIMER;
}
794
/* SCSI host template; cmd_size reserves per-command driver state. */
static const struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand,
	.mq_poll = virtscsi_mq_poll,
	.commit_rqs = virtscsi_commit_rqs,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,
	.eh_timed_out = virtscsi_eh_timed_out,
	.sdev_init = virtscsi_device_alloc,

	.dma_boundary = UINT_MAX,
	.map_queues = virtscsi_map_queues,
	.track_queue_depth = 1,
};
814
/* Read one field of struct virtio_scsi_config in native endianness. */
#define virtscsi_config_get(vdev, fld) \
	({ \
		__virtio_native_type(struct virtio_scsi_config, fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

/* Write one field of struct virtio_scsi_config. */
#define virtscsi_config_set(vdev, fld, val) \
	do { \
		__virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while(0)
827
828static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
829 struct virtqueue *vq)
830{
831 spin_lock_init(&virtscsi_vq->vq_lock);
832 virtscsi_vq->vq = vq;
833}
834
/* Reset the device (stopping all virtqueues) and then delete them. */
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all the virtqueues. */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}
841
/*
 * Discover all virtqueues (control, event, and num_queues request vqs,
 * the tail of which are interrupt-less polling queues), initialize the
 * driver's vq wrappers, and advertise CDB/sense sizes to the device.
 *
 * Returns 0 on success; on failure any created vqs are torn down.
 */
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs, num_poll_vqs, num_req_vqs;
	struct virtqueue_info *vqs_info;
	struct virtqueue **vqs;
	/* Keep ctrl/event vectors out of the automatic affinity spread. */
	struct irq_affinity desc = { .pre_vectors = 2 };

	num_req_vqs = vscsi->num_queues;
	num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc_objs(struct virtqueue *, num_vqs);
	vqs_info = kzalloc_objs(*vqs_info, num_vqs);

	if (!vqs || !vqs_info) {
		err = -ENOMEM;
		goto out;
	}

	/* At least one request queue must keep its interrupt. */
	num_poll_vqs = min_t(unsigned int, virtscsi_poll_queues,
			     num_req_vqs - 1);
	vscsi->io_queues[HCTX_TYPE_DEFAULT] = num_req_vqs - num_poll_vqs;
	vscsi->io_queues[HCTX_TYPE_READ] = 0;
	vscsi->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;

	dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
		 vscsi->io_queues[HCTX_TYPE_DEFAULT],
		 vscsi->io_queues[HCTX_TYPE_READ],
		 vscsi->io_queues[HCTX_TYPE_POLL]);

	vqs_info[0].callback = virtscsi_ctrl_done;
	vqs_info[0].name = "control";
	vqs_info[1].callback = virtscsi_event_done;
	vqs_info[1].name = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) {
		vqs_info[i].callback = virtscsi_req_done;
		vqs_info[i].name = "request";
	}

	/* Poll queues get no callback (vqs_info was zero-initialized). */
	for (; i < num_vqs; i++)
		vqs_info[i].name = "request_poll";

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, vqs_info, &desc);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	err = 0;

out:
	kfree(vqs_info);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}
908
/*
 * Probe: read device configuration, allocate a Scsi_Host (with trailing
 * space for the per-queue vq wrappers), set up virtqueues and limits,
 * register the host, then enable the device and start event handling.
 */
static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
	num_queues = blk_mq_num_possible_queues(num_queues);

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	shost = scsi_host_alloc(&virtscsi_host_template,
				struct_size(vscsi, req_vqs, num_queues));
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	shost->nr_maps = 1;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	/* Only expose a poll map when polling queues were configured. */
	if (vscsi->io_queues[HCTX_TYPE_POLL])
		shost->nr_maps = HCTX_TYPE_POLL + 1;

	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/* LUNs > 256 are reported with format 1, so they go in the range
	 * 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	shost->nr_hw_queues = num_queues;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
		int host_prot;

		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

		scsi_host_set_prot(shost, host_prot);
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
	}
#endif

	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	virtio_device_ready(vdev);

	for (int i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		INIT_WORK(&vscsi->event_list[i].work, virtscsi_handle_event);

	virtscsi_kick_event_all(vscsi);

	scsi_scan_host(shost);
	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}
999
/*
 * Device removal: stop event work first so no handler runs against a
 * dying host, then unregister from the SCSI midlayer and tear down the
 * virtqueues before dropping the host reference.
 */
static void virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);
	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}
1011
#ifdef CONFIG_PM_SLEEP
/* Suspend: tear down the virtqueues; host state survives in memory. */
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}

/* Resume: re-create the virtqueues and repost the event buffers. */
static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int err;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	virtscsi_kick_event_all(vscsi);

	return err;
}
#endif
1036
/* Matches any virtio-scsi device. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

/* Feature bits negotiated with the device. */
static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	VIRTIO_SCSI_F_T10_PI,
#endif
};
1049
/* Virtio driver registration for the SCSI HBA. */
static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};
1062
/*
 * Module init: create the TMF command cache and mempool, then register
 * the virtio driver.  On any failure everything created so far is torn
 * down (mempool_destroy/kmem_cache_destroy accept NULL).
 */
static int __init virtio_scsi_init(void)
{
	int ret = -ENOMEM;

	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
		goto error;
	}


	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
		goto error;
	}
	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	mempool_destroy(virtscsi_cmd_pool);
	virtscsi_cmd_pool = NULL;
	kmem_cache_destroy(virtscsi_cmd_cache);
	virtscsi_cmd_cache = NULL;
	return ret;
}
1094
/*
 * Module exit: unregister first so no new commands arrive, then free
 * the command pool and cache.
 */
static void __exit virtio_scsi_fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
module_init(virtio_scsi_init);
module_exit(virtio_scsi_fini);

/* Allow auto-loading when a virtio-scsi device appears. */
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");