// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_transport.h>

/*
 * All wire protocol details (storage protocol between the guest and the host)
 * are consolidated here.
 *
 * Begin protocol definitions.
 */

/*
 * Version history:
 * V1 Beta: 0.1
 * V1 RC < 2008/1/31: 1.0
 * V1 RC > 2008/1/31: 2.0
 * Win7: 4.2
 * Win8: 5.1
 * Win8.1: 6.0
 * Win10: 6.2
 */

#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_)	((((MAJOR_) & 0xff) << 8) | \
						(((MINOR_) & 0xff)))

#define VMSTOR_PROTO_VERSION_WIN6	VMSTOR_PROTO_VERSION(2, 0)
#define VMSTOR_PROTO_VERSION_WIN7	VMSTOR_PROTO_VERSION(4, 2)
#define VMSTOR_PROTO_VERSION_WIN8	VMSTOR_PROTO_VERSION(5, 1)
#define VMSTOR_PROTO_VERSION_WIN8_1	VMSTOR_PROTO_VERSION(6, 0)
#define VMSTOR_PROTO_VERSION_WIN10	VMSTOR_PROTO_VERSION(6, 2)
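/*
 * For example, VMSTOR_PROTO_VERSION(6, 2) evaluates to 0x0602: the major
 * version occupies the high byte and the minor version the low byte of
 * the u16 exchanged during protocol negotiation.
 */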

/* channel callback timeout in ms */
#define CALLBACK_TIMEOUT	2

/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
	VSTOR_OPERATION_COMPLETE_IO		= 1,
	VSTOR_OPERATION_REMOVE_DEVICE		= 2,
	VSTOR_OPERATION_EXECUTE_SRB		= 3,
	VSTOR_OPERATION_RESET_LUN		= 4,
	VSTOR_OPERATION_RESET_ADAPTER		= 5,
	VSTOR_OPERATION_RESET_BUS		= 6,
	VSTOR_OPERATION_BEGIN_INITIALIZATION	= 7,
	VSTOR_OPERATION_END_INITIALIZATION	= 8,
	VSTOR_OPERATION_QUERY_PROTOCOL_VERSION	= 9,
	VSTOR_OPERATION_QUERY_PROPERTIES	= 10,
	VSTOR_OPERATION_ENUMERATE_BUS		= 11,
	VSTOR_OPERATION_FCHBA_DATA		= 12,
	VSTOR_OPERATION_CREATE_SUB_CHANNELS	= 13,
	VSTOR_OPERATION_MAXIMUM			= 13
};

/*
 * WWN packet for Fibre Channel HBA
 */

struct hv_fc_wwn_packet {
	u8	primary_active;
	u8	reserved1[3];
	u8	primary_port_wwn[8];
	u8	primary_node_wwn[8];
	u8	secondary_port_wwn[8];
	u8	secondary_node_wwn[8];
};



/*
 * SRB Flag Bits
 */

#define SRB_FLAGS_QUEUE_ACTION_ENABLE		0x00000002
#define SRB_FLAGS_DISABLE_DISCONNECT		0x00000004
#define SRB_FLAGS_DISABLE_SYNCH_TRANSFER	0x00000008
#define SRB_FLAGS_BYPASS_FROZEN_QUEUE		0x00000010
#define SRB_FLAGS_DISABLE_AUTOSENSE		0x00000020
#define SRB_FLAGS_DATA_IN			0x00000040
#define SRB_FLAGS_DATA_OUT			0x00000080
#define SRB_FLAGS_NO_DATA_TRANSFER		0x00000000
#define SRB_FLAGS_UNSPECIFIED_DIRECTION	(SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT)
#define SRB_FLAGS_NO_QUEUE_FREEZE		0x00000100
#define SRB_FLAGS_ADAPTER_CACHE_ENABLE		0x00000200
#define SRB_FLAGS_FREE_SENSE_BUFFER		0x00000400

/*
 * This flag indicates the request is part of the workflow for processing a D3.
 */
#define SRB_FLAGS_D3_PROCESSING			0x00000800
#define SRB_FLAGS_IS_ACTIVE			0x00010000
#define SRB_FLAGS_ALLOCATED_FROM_ZONE		0x00020000
#define SRB_FLAGS_SGLIST_FROM_POOL		0x00040000
#define SRB_FLAGS_BYPASS_LOCKED_QUEUE		0x00080000
#define SRB_FLAGS_NO_KEEP_AWAKE			0x00100000
#define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE	0x00200000
#define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT	0x00400000
#define SRB_FLAGS_DONT_START_NEXT_PACKET	0x00800000
#define SRB_FLAGS_PORT_DRIVER_RESERVED		0x0F000000
#define SRB_FLAGS_CLASS_DRIVER_RESERVED		0xF0000000

#define SP_UNTAGGED			((unsigned char) ~0)
#define SRB_SIMPLE_TAG_REQUEST		0x20

/*
 * Platform neutral description of a scsi request -
 * this remains the same across the wire regardless of 32/64 bit
 * note: it's patterned off the SCSI_PASS_THROUGH structure
 */
#define STORVSC_MAX_CMD_LEN			0x10

/* Sense buffer size is the same for all versions since Windows 8 */
#define STORVSC_SENSE_BUFFER_SIZE		0x14
#define STORVSC_MAX_BUF_LEN_WITH_PADDING	0x14

/*
 * The storage protocol version is determined during the
 * initial exchange with the host. It will indicate which
 * storage functionality is available in the host.
 */
static int vmstor_proto_version;

static bool hv_dev_is_fc(struct hv_device *hv_dev);

#define STORVSC_LOGGING_NONE	0
#define STORVSC_LOGGING_ERROR	1
#define STORVSC_LOGGING_WARN	2

static int logging_level = STORVSC_LOGGING_ERROR;
module_param(logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(logging_level,
	"Logging level, 0 - None, 1 - Error (default), 2 - Warning.");

static inline bool do_logging(int level)
{
	return logging_level >= level;
}

#define storvsc_log(dev, level, fmt, ...)			\
do {								\
	if (do_logging(level))					\
		dev_warn(&(dev)->device, fmt, ##__VA_ARGS__);	\
} while (0)

#define storvsc_log_ratelimited(dev, level, fmt, ...)				\
do {										\
	if (do_logging(level))							\
		dev_warn_ratelimited(&(dev)->device, fmt, ##__VA_ARGS__);	\
} while (0)

struct vmscsi_request {
	u16 length;
	u8 srb_status;
	u8 scsi_status;

	u8  port_number;
	u8  path_id;
	u8  target_id;
	u8  lun;

	u8  cdb_length;
	u8  sense_info_length;
	u8  data_in;
	u8  reserved;

	u32 data_transfer_length;

	union {
		u8 cdb[STORVSC_MAX_CMD_LEN];
		u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
		u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
	};
	/*
	 * The following was added in win8.
	 */
	u16 reserve;
	u8  queue_tag;
	u8  queue_action;
	u32 srb_flags;
	u32 time_out_value;
	u32 queue_sort_ey;

} __attribute((packed));

/*
 * The list of Windows versions, in order of preference.
 */

static const int protocol_version[] = {
	VMSTOR_PROTO_VERSION_WIN10,
	VMSTOR_PROTO_VERSION_WIN8_1,
	VMSTOR_PROTO_VERSION_WIN8,
};
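/*
 * storvsc_channel_init() walks this array from newest to oldest,
 * offering each version to the host in turn and settling on the first
 * one the host completes with a zero status.
 */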

/*
 * This structure is sent during the initialization phase to get the different
 * properties of the channel.
 */

#define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL		0x1

struct vmstorage_channel_properties {
	u32 reserved;
	u16 max_channel_cnt;
	u16 reserved1;

	u32 flags;
	u32 max_transfer_bytes;

	u64 reserved2;
} __packed;

/* This structure is sent during the storage protocol negotiations. */
struct vmstorage_protocol_version {
	/* Major (MSW) and minor (LSW) version numbers. */
	u16 major_minor;

	/*
	 * Revision number is auto-incremented whenever this file is changed
	 * (See FILL_VMSTOR_REVISION macro above). Mismatch does not
	 * definitely indicate incompatibility--but it does indicate mismatched
	 * builds.
	 * This is only used on the windows side. Just set it to 0.
	 */
	u16 revision;
} __packed;

/* Channel Property Flags */
#define STORAGE_CHANNEL_REMOVABLE_FLAG		0x1
#define STORAGE_CHANNEL_EMULATED_IDE_FLAG	0x2

struct vstor_packet {
	/* Requested operation type */
	enum vstor_packet_operation operation;

	/* Flags - see below for values */
	u32 flags;

	/* Status of the request returned from the server side. */
	u32 status;

	/* Data payload area */
	union {
		/*
		 * Structure used to forward SCSI commands from the
		 * client to the server.
		 */
		struct vmscsi_request vm_srb;

		/* Structure used to query channel properties. */
		struct vmstorage_channel_properties storage_channel_properties;

		/* Used during version negotiations. */
		struct vmstorage_protocol_version version;

		/* Fibre channel address packet */
		struct hv_fc_wwn_packet wwn_packet;

		/* Number of sub-channels to create */
		u16 sub_channel_count;

		/* This will be the maximum of the union members */
		u8  buffer[0x34];
	};
} __packed;

/*
 * Packet Flags:
 *
 * This flag indicates that the server should send back a completion for this
 * packet.
 */

#define REQUEST_COMPLETION_FLAG	0x1

/* Matches Windows-end */
enum storvsc_request_type {
	WRITE_TYPE = 0,
	READ_TYPE,
	UNKNOWN_TYPE,
};

/*
 * SRB status codes and masks. In the 8-bit field, the two high order bits
 * are flags, while the remaining 6 bits are an integer status code. The
 * definitions here include only the subset of the integer status codes that
 * are tested for in this driver.
 */
#define SRB_STATUS_AUTOSENSE_VALID	0x80
#define SRB_STATUS_QUEUE_FROZEN		0x40

/* SRB status integer codes */
#define SRB_STATUS_SUCCESS		0x01
#define SRB_STATUS_ABORTED		0x02
#define SRB_STATUS_ERROR		0x04
#define SRB_STATUS_INVALID_REQUEST	0x06
#define SRB_STATUS_TIMEOUT		0x09
#define SRB_STATUS_SELECTION_TIMEOUT	0x0A
#define SRB_STATUS_BUS_RESET		0x0E
#define SRB_STATUS_DATA_OVERRUN		0x12
#define SRB_STATUS_INVALID_LUN		0x20
#define SRB_STATUS_INTERNAL_ERROR	0x30

#define SRB_STATUS(status) \
	(status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
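/*
 * For example, SRB_STATUS(0x84) yields SRB_STATUS_ERROR (0x04): the
 * SRB_STATUS_AUTOSENSE_VALID flag (0x80) is masked off before the
 * integer status code is compared.
 */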
/*
 * This is the end of Protocol specific defines.
 */

static int storvsc_ringbuffer_size = (128 * 1024);
static int aligned_ringbuffer_size;
static u32 max_outstanding_req_per_channel;
static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);

static int storvsc_vcpus_per_sub_channel = 4;
static unsigned int storvsc_max_hw_queues;

module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");

module_param(storvsc_max_hw_queues, uint, 0644);
MODULE_PARM_DESC(storvsc_max_hw_queues, "Maximum number of hardware queues");

module_param(storvsc_vcpus_per_sub_channel, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");

static int ring_avail_percent_lowater = 10;
module_param(ring_avail_percent_lowater, int, S_IRUGO);
MODULE_PARM_DESC(ring_avail_percent_lowater,
		"Select a channel if available ring size > this in percent");

/*
 * Timeout in seconds for all devices managed by this driver.
 */
static const int storvsc_timeout = 180;

#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
static struct scsi_transport_template *fc_transport_template;
#endif

static struct scsi_host_template scsi_driver;
static void storvsc_on_channel_callback(void *context);

#define STORVSC_MAX_LUNS_PER_TARGET			255
#define STORVSC_MAX_TARGETS				2
#define STORVSC_MAX_CHANNELS				8

#define STORVSC_FC_MAX_LUNS_PER_TARGET			255
#define STORVSC_FC_MAX_TARGETS				128
#define STORVSC_FC_MAX_CHANNELS				8
#define STORVSC_FC_MAX_XFER_SIZE			((u32)(512 * 1024))

#define STORVSC_IDE_MAX_LUNS_PER_TARGET			64
#define STORVSC_IDE_MAX_TARGETS				1
#define STORVSC_IDE_MAX_CHANNELS			1

/*
 * Upper bound on the size of a storvsc packet.
 */
#define STORVSC_MAX_PKT_SIZE (sizeof(struct vmpacket_descriptor) +\
			      sizeof(struct vstor_packet))

struct storvsc_cmd_request {
	struct scsi_cmnd *cmd;

	struct hv_device *device;

	/* Synchronize the request/response if needed */
	struct completion wait_event;

	struct vmbus_channel_packet_multipage_buffer mpb;
	struct vmbus_packet_mpb_array *payload;
	u32 payload_sz;

	struct vstor_packet vstor_packet;
};


/* A storvsc device is a device object that contains a vmbus channel */
struct storvsc_device {
	struct hv_device *device;

	bool	 destroy;
	bool	 drain_notify;
	atomic_t num_outstanding_req;
	struct Scsi_Host *host;

	wait_queue_head_t waiting_to_drain;

	/*
	 * Each unique Port/Path/Target represents 1 channel, i.e. one SCSI
	 * controller. In reality, the path id and target id are always 0
	 * and the port is set by us.
	 */
	unsigned int port_number;
	unsigned char path_id;
	unsigned char target_id;

	/*
	 * Max I/O the device can support.
	 */
	u32   max_transfer_bytes;
	/*
	 * Number of sub-channels we will open.
	 */
	u16 num_sc;
	struct vmbus_channel **stor_chns;
	/*
	 * Mask of CPUs bound to subchannels.
	 */
	struct cpumask alloced_cpus;
	/*
	 * Serializes modifications of stor_chns[] from storvsc_do_io()
	 * and storvsc_change_target_cpu().
	 */
	spinlock_t lock;
	/* Used for vsc/vsp channel reset process */
	struct storvsc_cmd_request init_request;
	struct storvsc_cmd_request reset_request;
	/*
	 * Currently active port and node names for FC devices.
	 */
	u64 node_name;
	u64 port_name;
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
	struct fc_rport *rport;
#endif
};

struct hv_host_device {
	struct hv_device *dev;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	struct workqueue_struct *handle_error_wq;
	struct work_struct host_scan_work;
	struct Scsi_Host *host;
};

struct storvsc_scan_work {
	struct work_struct work;
	struct Scsi_Host *host;
	u8 lun;
	u8 tgt_id;
};

static void storvsc_device_scan(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	struct scsi_device *sdev;

	wrk = container_of(work, struct storvsc_scan_work, work);

	sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
	if (!sdev)
		goto done;
	scsi_rescan_device(sdev);
	scsi_device_put(sdev);

done:
	kfree(wrk);
}

static void storvsc_host_scan(struct work_struct *work)
{
	struct Scsi_Host *host;
	struct scsi_device *sdev;
	struct hv_host_device *host_device =
		container_of(work, struct hv_host_device, host_scan_work);

	host = host_device->host;
	/*
	 * Before scanning the host, first check to see if any of the
	 * currently known devices have been hot removed. We issue a
	 * "unit ready" command against all currently known devices.
	 * This I/O will result in an error for devices that have been
	 * removed. As part of handling the I/O error, we remove the device.
	 *
	 * When a LUN is added or removed, the host sends us a signal to
	 * scan the host. Thus we are forced to discover the LUNs that
	 * may have been removed this way.
	 */
	mutex_lock(&host->scan_mutex);
	shost_for_each_device(sdev, host)
		scsi_test_unit_ready(sdev, 1, 1, NULL);
	mutex_unlock(&host->scan_mutex);
	/*
	 * Now scan the host to discover LUNs that may have been added.
	 */
	scsi_scan_host(host);
}

static void storvsc_remove_lun(struct work_struct *work)
{
	struct storvsc_scan_work *wrk;
	struct scsi_device *sdev;

	wrk = container_of(work, struct storvsc_scan_work, work);
	if (!scsi_host_get(wrk->host))
		goto done;

	sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);

	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
	scsi_host_put(wrk->host);

done:
	kfree(wrk);
}


/*
 * We can get incoming messages from the host that are not in response to
 * messages that we have sent out. An example of this would be messages
 * received by the guest to notify dynamic addition/removal of LUNs. To
 * deal with potential race conditions where the driver may be in the
 * midst of being unloaded when we might receive an unsolicited message
 * from the host, we have implemented a mechanism to guarantee sequential
 * consistency:
 *
 * 1) Once the device is marked as being destroyed, we will fail all
 *    outgoing messages.
 * 2) We permit incoming messages when the device is being destroyed,
 *    only to properly account for messages already sent out.
 */

static inline struct storvsc_device *get_out_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = hv_get_drvdata(device);

	if (stor_device && stor_device->destroy)
		stor_device = NULL;

	return stor_device;
}


static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
{
	dev->drain_notify = true;
	wait_event(dev->waiting_to_drain,
		   atomic_read(&dev->num_outstanding_req) == 0);
	dev->drain_notify = false;
}

static inline struct storvsc_device *get_in_stor_device(
					struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = hv_get_drvdata(device);

	if (!stor_device)
		goto get_in_err;

	/*
	 * If the device is being destroyed, allow incoming
	 * traffic only to clean up outstanding requests.
	 */

	if (stor_device->destroy &&
	    (atomic_read(&stor_device->num_outstanding_req) == 0))
		stor_device = NULL;

get_in_err:
	return stor_device;

}

static void storvsc_change_target_cpu(struct vmbus_channel *channel, u32 old,
				      u32 new)
{
	struct storvsc_device *stor_device;
	struct vmbus_channel *cur_chn;
	bool old_is_alloced = false;
	struct hv_device *device;
	unsigned long flags;
	int cpu;

	device = channel->primary_channel ?
			channel->primary_channel->device_obj
				: channel->device_obj;
	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return;

	/* See storvsc_do_io() -> get_og_chn(). */
	spin_lock_irqsave(&stor_device->lock, flags);

	/*
	 * Determines if the storvsc device has other channels assigned to
	 * the "old" CPU to update the alloced_cpus mask and the stor_chns
	 * array.
	 */
	if (device->channel != channel && device->channel->target_cpu == old) {
		cur_chn = device->channel;
		old_is_alloced = true;
		goto old_is_alloced;
	}
	list_for_each_entry(cur_chn, &device->channel->sc_list, sc_list) {
		if (cur_chn == channel)
			continue;
		if (cur_chn->target_cpu == old) {
			old_is_alloced = true;
			goto old_is_alloced;
		}
	}

old_is_alloced:
	if (old_is_alloced)
		WRITE_ONCE(stor_device->stor_chns[old], cur_chn);
	else
		cpumask_clear_cpu(old, &stor_device->alloced_cpus);

	/* "Flush" the stor_chns array. */
	for_each_possible_cpu(cpu) {
		if (stor_device->stor_chns[cpu] && !cpumask_test_cpu(
					cpu, &stor_device->alloced_cpus))
			WRITE_ONCE(stor_device->stor_chns[cpu], NULL);
	}

	WRITE_ONCE(stor_device->stor_chns[new], channel);
	cpumask_set_cpu(new, &stor_device->alloced_cpus);

	spin_unlock_irqrestore(&stor_device->lock, flags);
}

static u64 storvsc_next_request_id(struct vmbus_channel *channel, u64 rqst_addr)
{
	struct storvsc_cmd_request *request =
		(struct storvsc_cmd_request *)(unsigned long)rqst_addr;

	if (rqst_addr == VMBUS_RQST_INIT)
		return VMBUS_RQST_INIT;
	if (rqst_addr == VMBUS_RQST_RESET)
		return VMBUS_RQST_RESET;

	/*
	 * Cannot return an ID of 0, which is reserved for an unsolicited
	 * message from Hyper-V.
	 */
	return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(request->cmd)) + 1;
}
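/*
 * Note the +1 above: the channel callback reverses this mapping with
 * scsi_host_find_tag(shost, rqst_id - 1) to recover the scsi_cmnd for
 * a completed request.
 */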

static void handle_sc_creation(struct vmbus_channel *new_sc)
{
	struct hv_device *device = new_sc->primary_channel->device_obj;
	struct device *dev = &device->device;
	struct storvsc_device *stor_device;
	struct vmstorage_channel_properties props;
	int ret;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return;

	memset(&props, 0, sizeof(struct vmstorage_channel_properties));
	new_sc->max_pkt_size = STORVSC_MAX_PKT_SIZE;

	new_sc->next_request_id_callback = storvsc_next_request_id;

	ret = vmbus_open(new_sc,
			 aligned_ringbuffer_size,
			 aligned_ringbuffer_size,
			 (void *)&props,
			 sizeof(struct vmstorage_channel_properties),
			 storvsc_on_channel_callback, new_sc);

	/* In case vmbus_open() fails, we don't use the sub-channel. */
	if (ret != 0) {
		dev_err(dev, "Failed to open sub-channel: err=%d\n", ret);
		return;
	}

	new_sc->change_target_cpu_callback = storvsc_change_target_cpu;

	/* Add the sub-channel to the array of available channels. */
	stor_device->stor_chns[new_sc->target_cpu] = new_sc;
	cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
}

static void handle_multichannel_storage(struct hv_device *device, int max_chns)
{
	struct device *dev = &device->device;
	struct storvsc_device *stor_device;
	int num_sc;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	/*
	 * If the number of CPUs is artificially restricted, such as
	 * with maxcpus=1 on the kernel boot line, Hyper-V could offer
	 * sub-channels >= the number of CPUs. These sub-channels
	 * should not be created. The primary channel is already created
	 * and assigned to one CPU, so check against # CPUs - 1.
	 */
	num_sc = min((int)(num_online_cpus() - 1), max_chns);
	if (!num_sc)
		return;
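	/*
	 * For example, with 12 online CPUs and a max_chns of 4, num_sc is 4;
	 * booted with maxcpus=1, num_sc is 0 and no sub-channels are
	 * requested at all.
	 */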

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return;

	stor_device->num_sc = num_sc;
	request = &stor_device->init_request;
	vstor_packet = &request->vstor_packet;

	/*
	 * Establish a handler for dealing with subchannels.
	 */
	vmbus_set_sc_create_callback(device->channel, handle_sc_creation);

	/*
	 * Request the host to create sub-channels.
	 */
	memset(request, 0, sizeof(struct storvsc_cmd_request));
	init_completion(&request->wait_event);
	vstor_packet->operation = VSTOR_OPERATION_CREATE_SUB_CHANNELS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->sub_channel_count = num_sc;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       VMBUS_RQST_INIT,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0) {
		dev_err(dev, "Failed to create sub-channel: err=%d\n", ret);
		return;
	}

	t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
	if (t == 0) {
		dev_err(dev, "Failed to create sub-channel: timed out\n");
		return;
	}

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0) {
		dev_err(dev, "Failed to create sub-channel: op=%d, host=0x%x\n",
			vstor_packet->operation, vstor_packet->status);
		return;
	}

	/*
	 * We need to do nothing here, because vmbus_process_offer()
	 * invokes channel->sc_creation_callback, which will open and use
	 * the sub-channel(s).
	 */
}

static void cache_wwn(struct storvsc_device *stor_device,
		      struct vstor_packet *vstor_packet)
{
	/*
	 * Cache the currently active port and node WWNs.
	 */
	if (vstor_packet->wwn_packet.primary_active) {
		stor_device->node_name =
			wwn_to_u64(vstor_packet->wwn_packet.primary_node_wwn);
		stor_device->port_name =
			wwn_to_u64(vstor_packet->wwn_packet.primary_port_wwn);
	} else {
		stor_device->node_name =
			wwn_to_u64(vstor_packet->wwn_packet.secondary_node_wwn);
		stor_device->port_name =
			wwn_to_u64(vstor_packet->wwn_packet.secondary_port_wwn);
	}
}


static int storvsc_execute_vstor_op(struct hv_device *device,
				    struct storvsc_cmd_request *request,
				    bool status_check)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *vstor_packet;
	int ret, t;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return -ENODEV;

	vstor_packet = &request->vstor_packet;

	init_completion(&request->wait_event);
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       VMBUS_RQST_INIT,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return ret;

	t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
	if (t == 0)
		return -ETIMEDOUT;

	if (!status_check)
		return ret;

	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
	    vstor_packet->status != 0)
		return -EINVAL;

	return ret;
}
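/*
 * The vsc/vsp handshake implemented below proceeds as:
 * BEGIN_INITIALIZATION, then QUERY_PROTOCOL_VERSION (once per candidate
 * version until one is accepted), QUERY_PROPERTIES, FCHBA_DATA (FC
 * devices only) and finally END_INITIALIZATION.
 */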
static int storvsc_channel_init(struct hv_device *device, bool is_fc)
{
	struct storvsc_device *stor_device;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, i;
	int max_chns;
	bool process_sub_channels = false;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return -ENODEV;

	request = &stor_device->init_request;
	vstor_packet = &request->vstor_packet;

	/*
	 * Now, initiate the vsc/vsp initialization protocol on the open
	 * channel
	 */
	memset(request, 0, sizeof(struct storvsc_cmd_request));
	vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
	ret = storvsc_execute_vstor_op(device, request, true);
	if (ret)
		return ret;
	/*
	 * Query host supported protocol version.
	 */

	for (i = 0; i < ARRAY_SIZE(protocol_version); i++) {
		/* reuse the packet for version range supported */
		memset(vstor_packet, 0, sizeof(struct vstor_packet));
		vstor_packet->operation =
			VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;

		vstor_packet->version.major_minor = protocol_version[i];

		/*
		 * The revision number is only used in Windows; set it to 0.
		 */
		vstor_packet->version.revision = 0;
		ret = storvsc_execute_vstor_op(device, request, false);
		if (ret != 0)
			return ret;

		if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO)
			return -EINVAL;

		if (vstor_packet->status == 0) {
			vmstor_proto_version = protocol_version[i];

			break;
		}
	}

	if (vstor_packet->status != 0) {
		dev_err(&device->device, "Obsolete Hyper-V version\n");
		return -EINVAL;
	}


	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
	ret = storvsc_execute_vstor_op(device, request, true);
	if (ret != 0)
		return ret;

	/*
	 * Check to see if multi-channel support is there.
	 * Hosts that implement protocol version of 5.1 and above
	 * support multi-channel.
	 */
	max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;

	/*
	 * Allocate state to manage the sub-channels.
	 * We allocate an array based on the number of CPU ids. This array
	 * is initially sparsely populated for the CPUs assigned to channels:
	 * primary + sub-channels. As I/Os are initiated by different CPUs,
	 * the slots for all online CPUs are populated to evenly distribute
	 * the load across all channels.
	 */
	stor_device->stor_chns = kcalloc(nr_cpu_ids, sizeof(void *),
					 GFP_KERNEL);
	if (stor_device->stor_chns == NULL)
		return -ENOMEM;

	device->channel->change_target_cpu_callback = storvsc_change_target_cpu;

	stor_device->stor_chns[device->channel->target_cpu] = device->channel;
	cpumask_set_cpu(device->channel->target_cpu,
			&stor_device->alloced_cpus);

	if (vstor_packet->storage_channel_properties.flags &
	    STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
		process_sub_channels = true;

	stor_device->max_transfer_bytes =
		vstor_packet->storage_channel_properties.max_transfer_bytes;

	if (!is_fc)
		goto done;

	/*
	 * For FC devices retrieve FC HBA data.
	 */
	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_FCHBA_DATA;
	ret = storvsc_execute_vstor_op(device, request, true);
	if (ret != 0)
		return ret;

	/*
	 * Cache the currently active port and node WWNs.
	 */
	cache_wwn(stor_device, vstor_packet);

done:

	memset(vstor_packet, 0, sizeof(struct vstor_packet));
	vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
	ret = storvsc_execute_vstor_op(device, request, true);
	if (ret != 0)
		return ret;

	if (process_sub_channels)
		handle_multichannel_storage(device, max_chns);

	return ret;
}

static void storvsc_handle_error(struct vmscsi_request *vm_srb,
				 struct scsi_cmnd *scmnd,
				 struct Scsi_Host *host,
				 u8 asc, u8 ascq)
{
	struct storvsc_scan_work *wrk;
	void (*process_err_fn)(struct work_struct *work);
	struct hv_host_device *host_dev = shost_priv(host);

	switch (SRB_STATUS(vm_srb->srb_status)) {
	case SRB_STATUS_ERROR:
	case SRB_STATUS_ABORTED:
	case SRB_STATUS_INVALID_REQUEST:
	case SRB_STATUS_INTERNAL_ERROR:
	case SRB_STATUS_TIMEOUT:
	case SRB_STATUS_SELECTION_TIMEOUT:
	case SRB_STATUS_BUS_RESET:
	case SRB_STATUS_DATA_OVERRUN:
		if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) {
			/* Check for capacity change */
			if ((asc == 0x2a) && (ascq == 0x9)) {
				process_err_fn = storvsc_device_scan;
				/* Retry the I/O that triggered this. */
				set_host_byte(scmnd, DID_REQUEUE);
				goto do_work;
			}

			/*
			 * Check for "Operating parameters have changed"
			 * due to Hyper-V changing the VHD/VHDX BlockSize
			 * when adding/removing a differencing disk. This
			 * causes discard_granularity to change, so do a
			 * rescan to pick up the new granularity. We don't
			 * want scsi_report_sense() to output a message
			 * that a sysadmin wouldn't know what to do with.
			 */
			if ((asc == 0x3f) && (ascq != 0x03) &&
					(ascq != 0x0e)) {
				process_err_fn = storvsc_device_scan;
				set_host_byte(scmnd, DID_REQUEUE);
				goto do_work;
			}

			/*
			 * Otherwise, let the upper layer deal with the
			 * error when sense information is present.
			 */
			return;
		}

		/*
		 * If there is an error, offline the device since all
		 * error recovery strategies would have already been
		 * deployed on the host side. However, if the command
		 * was a pass-through command, deal with it appropriately.
		 */
		switch (scmnd->cmnd[0]) {
		case ATA_16:
		case ATA_12:
			set_host_byte(scmnd, DID_PASSTHROUGH);
			break;
		/*
		 * On some Hyper-V hosts TEST_UNIT_READY command can
		 * return SRB_STATUS_ERROR. Let the upper level code
		 * deal with it based on the sense information.
		 */
		case TEST_UNIT_READY:
			break;
		default:
			set_host_byte(scmnd, DID_ERROR);
		}
		return;

	case SRB_STATUS_INVALID_LUN:
		set_host_byte(scmnd, DID_NO_CONNECT);
		process_err_fn = storvsc_remove_lun;
		goto do_work;

	}
	return;

do_work:
	/*
	 * Schedule work to process this error.
	 */
	wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
	if (!wrk) {
		set_host_byte(scmnd, DID_BAD_TARGET);
		return;
	}

	wrk->host = host;
	wrk->lun = vm_srb->lun;
	wrk->tgt_id = vm_srb->target_id;
	INIT_WORK(&wrk->work, process_err_fn);
	queue_work(host_dev->handle_error_wq, &wrk->work);
}


static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
				       struct storvsc_device *stor_dev)
{
	struct scsi_cmnd *scmnd = cmd_request->cmd;
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;
	u32 data_transfer_length;
	struct Scsi_Host *host;
	u32 payload_sz = cmd_request->payload_sz;
	void *payload = cmd_request->payload;
	bool sense_ok;

	host = stor_dev->host;

	vm_srb = &cmd_request->vstor_packet.vm_srb;
	data_transfer_length = vm_srb->data_transfer_length;

	scmnd->result = vm_srb->scsi_status;

	if (scmnd->result) {
		sense_ok = scsi_normalize_sense(scmnd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_ok && do_logging(STORVSC_LOGGING_WARN))
			scsi_print_sense_hdr(scmnd->device, "storvsc",
					     &sense_hdr);
	}

	if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
		storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
				     sense_hdr.ascq);
		/*
		 * The Windows driver sets data_transfer_length on
		 * SRB_STATUS_DATA_OVERRUN. On other errors, this value
		 * is untouched. In these cases we set it to 0.
		 */
		if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN)
			data_transfer_length = 0;
	}

	/* Validate data_transfer_length (from Hyper-V) */
	if (data_transfer_length > cmd_request->payload->range.len)
		data_transfer_length = cmd_request->payload->range.len;

	scsi_set_resid(scmnd,
		cmd_request->payload->range.len - data_transfer_length);

	scsi_done(scmnd);

	if (payload_sz >
		sizeof(struct vmbus_channel_packet_multipage_buffer))
		kfree(payload);
}
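/*
 * The payload is freed above only when storvsc_queuecommand() allocated
 * it separately (payload_sz larger than the embedded multi-page buffer);
 * otherwise it points into the command request itself.
 */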

/*
 * The current SCSI handling on the host side does not correctly handle:
 * INQUIRY with page code 0x80, MODE_SENSE / MODE_SENSE_10 with cmd[2] == 0x1c,
 * and (for FC) MAINTENANCE_IN / PERSISTENT_RESERVE_IN passthrough.
 */
static bool storvsc_host_mishandles_cmd(u8 opcode, struct hv_device *device)
{
	switch (opcode) {
	case INQUIRY:
	case MODE_SENSE:
	case MODE_SENSE_10:
		return true;
	case MAINTENANCE_IN:
	case PERSISTENT_RESERVE_IN:
		return hv_dev_is_fc(device);
	default:
		return false;
	}
}

static void storvsc_on_io_completion(struct storvsc_device *stor_device,
				     struct vstor_packet *vstor_packet,
				     struct storvsc_cmd_request *request)
{
	struct vstor_packet *stor_pkt;
	struct hv_device *device = stor_device->device;

	stor_pkt = &request->vstor_packet;

	/*
	 * Setup srb and scsi status so this won't be fatal.
	 * We do this so we can distinguish truly fatal failures
	 * (srb status == 0x4) and off-line the device in that case.
	 */

	if (storvsc_host_mishandles_cmd(stor_pkt->vm_srb.cdb[0], device)) {
		vstor_packet->vm_srb.scsi_status = 0;
		vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
	}

	/* Copy over the status...etc */
	stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
	stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;

	/*
	 * Copy over the sense_info_length, but limit to the known max
	 * size if Hyper-V returns a bad value.
	 */
	stor_pkt->vm_srb.sense_info_length = min_t(u8, STORVSC_SENSE_BUFFER_SIZE,
		vstor_packet->vm_srb.sense_info_length);

	if (vstor_packet->vm_srb.scsi_status != 0 ||
	    vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {

		/*
		 * Log TEST_UNIT_READY errors only as warnings. Hyper-V can
		 * return errors when detecting devices using TEST_UNIT_READY,
		 * and logging these as errors produces unhelpful noise.
		 */
		int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ?
			STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;

		storvsc_log_ratelimited(device, loglevel,
			"tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x host 0x%x\n",
			scsi_cmd_to_rq(request->cmd)->tag,
			stor_pkt->vm_srb.cdb[0],
			vstor_packet->vm_srb.scsi_status,
			vstor_packet->vm_srb.srb_status,
			vstor_packet->status);
	}

	if (vstor_packet->vm_srb.scsi_status == SAM_STAT_CHECK_CONDITION &&
	    (vstor_packet->vm_srb.srb_status & SRB_STATUS_AUTOSENSE_VALID))
		memcpy(request->cmd->sense_buffer,
		       vstor_packet->vm_srb.sense_data,
		       stor_pkt->vm_srb.sense_info_length);

	stor_pkt->vm_srb.data_transfer_length =
		vstor_packet->vm_srb.data_transfer_length;

	storvsc_command_completion(request, stor_device);

	if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
	    stor_device->drain_notify)
		wake_up(&stor_device->waiting_to_drain);
}

static void storvsc_on_receive(struct storvsc_device *stor_device,
			       struct vstor_packet *vstor_packet,
			       struct storvsc_cmd_request *request)
{
	struct hv_host_device *host_dev;

	switch (vstor_packet->operation) {
	case VSTOR_OPERATION_COMPLETE_IO:
		storvsc_on_io_completion(stor_device, vstor_packet, request);
		break;

	case VSTOR_OPERATION_REMOVE_DEVICE:
	case VSTOR_OPERATION_ENUMERATE_BUS:
		host_dev = shost_priv(stor_device->host);
		queue_work(
			host_dev->handle_error_wq, &host_dev->host_scan_work);
		break;

	case VSTOR_OPERATION_FCHBA_DATA:
		cache_wwn(stor_device, vstor_packet);
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
		fc_host_node_name(stor_device->host) = stor_device->node_name;
		fc_host_port_name(stor_device->host) = stor_device->port_name;
#endif
		break;
	default:
		break;
	}
}

static void storvsc_on_channel_callback(void *context)
{
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	const struct vmpacket_descriptor *desc;
	struct hv_device *device;
	struct storvsc_device *stor_device;
	struct Scsi_Host *shost;
	unsigned long time_limit = jiffies + msecs_to_jiffies(CALLBACK_TIMEOUT);

	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

	stor_device = get_in_stor_device(device);
	if (!stor_device)
		return;

	shost = stor_device->host;

	foreach_vmbus_pkt(desc, channel) {
		struct vstor_packet *packet = hv_pkt_data(desc);
		struct storvsc_cmd_request *request = NULL;
		u32 pktlen = hv_pkt_datalen(desc);
		u64 rqst_id = desc->trans_id;
		u32 minlen = rqst_id ? sizeof(struct vstor_packet) :
			sizeof(enum vstor_packet_operation);

		if (unlikely(time_after(jiffies, time_limit))) {
			hv_pkt_iter_close(channel);
			return;
		}

		if (pktlen < minlen) {
			dev_err(&device->device,
				"Invalid pkt: id=%llu, len=%u, minlen=%u\n",
				rqst_id, pktlen, minlen);
			continue;
		}

		if (rqst_id == VMBUS_RQST_INIT) {
			request = &stor_device->init_request;
		} else if (rqst_id == VMBUS_RQST_RESET) {
			request = &stor_device->reset_request;
		} else {
			/* Hyper-V can send an unsolicited message with ID of 0 */
			if (rqst_id == 0) {
				/*
				 * storvsc_on_receive() looks at the vstor_packet in the message
				 * from the ring buffer.
				 *
				 * - If the operation in the vstor_packet is COMPLETE_IO, then
				 *   we call storvsc_on_io_completion(), and dereference the
				 *   guest memory address. Make sure we don't call
				 *   storvsc_on_io_completion() with a guest memory address
				 *   that is zero if Hyper-V were to construct and send such
				 *   a bogus packet.
				 *
				 * - If the operation in the vstor_packet is FCHBA_DATA, then
				 *   we call cache_wwn(), and access the data payload area of
				 *   the packet (wwn_packet); however, there is no guarantee
				 *   that the packet is big enough to contain such area.
				 *   Future-proof the code by rejecting such a bogus packet.
				 */
				if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
				    packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
					dev_err(&device->device, "Invalid packet with ID of 0\n");
					continue;
				}
			} else {
				struct scsi_cmnd *scmnd;

				/* Transaction 'rqst_id' corresponds to tag 'rqst_id - 1' */
				scmnd = scsi_host_find_tag(shost, rqst_id - 1);
				if (scmnd == NULL) {
					dev_err(&device->device, "Incorrect transaction ID\n");
					continue;
				}
				request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd);
				scsi_dma_unmap(scmnd);
			}

			storvsc_on_receive(stor_device, packet, request);
			continue;
		}

		memcpy(&request->vstor_packet, packet,
		       sizeof(struct vstor_packet));
		complete(&request->wait_event);
	}
}

static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
				  bool is_fc)
{
	struct vmstorage_channel_properties props;
	int ret;

	memset(&props, 0, sizeof(struct vmstorage_channel_properties));

	device->channel->max_pkt_size = STORVSC_MAX_PKT_SIZE;
	device->channel->next_request_id_callback = storvsc_next_request_id;

	ret = vmbus_open(device->channel,
			 ring_size,
			 ring_size,
			 (void *)&props,
			 sizeof(struct vmstorage_channel_properties),
			 storvsc_on_channel_callback, device->channel);

	if (ret != 0)
		return ret;

	ret = storvsc_channel_init(device, is_fc);
	if (ret)
		vmbus_close(device->channel);

	return ret;
}

static int storvsc_dev_remove(struct hv_device *device)
{
	struct storvsc_device *stor_device;

	stor_device = hv_get_drvdata(device);

	stor_device->destroy = true;

	/* Make sure flag is set before waiting */
	wmb();

	/*
	 * At this point, all outbound traffic should be disabled. We
	 * only allow inbound traffic (responses) to proceed so that
	 * outstanding requests can be completed.
	 */

	storvsc_wait_to_drain(stor_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */
	hv_set_drvdata(device, NULL);

	/* Close the channel */
	vmbus_close(device->channel);

	kfree(stor_device->stor_chns);
	kfree(stor_device);
	return 0;
}

static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
					u16 q_num)
{
	u16 slot = 0;
	u16 hash_qnum;
	const struct cpumask *node_mask;
	int num_channels, tgt_cpu;

	if (stor_device->num_sc == 0) {
		stor_device->stor_chns[q_num] = stor_device->device->channel;
		return stor_device->device->channel;
	}

	/*
	 * Our channel array could be sparsely populated and we
	 * initiated I/O on a processor/hw-q that does not
	 * currently have a designated channel. Fix this.
	 * The strategy is simple:
	 * I. Prefer the channel associated with the current CPU
	 * II. Ensure NUMA locality
	 * III. Distribute evenly (best effort)
	 */

	/* Prefer the channel on the I/O issuing processor/hw-q */
	if (cpumask_test_cpu(q_num, &stor_device->alloced_cpus))
		return stor_device->stor_chns[q_num];

	node_mask = cpumask_of_node(cpu_to_node(q_num));

	num_channels = 0;
	for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
		if (cpumask_test_cpu(tgt_cpu, node_mask))
			num_channels++;
	}
	if (num_channels == 0) {
		stor_device->stor_chns[q_num] = stor_device->device->channel;
		return stor_device->device->channel;
	}

	hash_qnum = q_num;
	while (hash_qnum >= num_channels)
		hash_qnum -= num_channels;

	for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
		if (!cpumask_test_cpu(tgt_cpu, node_mask))
			continue;
		if (slot == hash_qnum)
			break;
		slot++;
	}

	stor_device->stor_chns[q_num] = stor_device->stor_chns[tgt_cpu];

	return stor_device->stor_chns[q_num];
}
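/*
 * For example, hw queue 7 with three channels allocated on its NUMA
 * node hashes to slot 7 % 3 == 1, so the second allocated CPU on that
 * node provides the channel.
 */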

static int storvsc_do_io(struct hv_device *device,
			 struct storvsc_cmd_request *request, u16 q_num)
{
	struct storvsc_device *stor_device;
	struct vstor_packet *vstor_packet;
	struct vmbus_channel *outgoing_channel, *channel;
	unsigned long flags;
	int ret = 0;
	const struct cpumask *node_mask;
	int tgt_cpu;

	vstor_packet = &request->vstor_packet;
	stor_device = get_out_stor_device(device);

	if (!stor_device)
		return -ENODEV;


	request->device = device;
	/*
	 * Select an appropriate channel to send the request out.
	 */
	/* See storvsc_change_target_cpu(). */
	outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
	if (outgoing_channel != NULL) {
		if (hv_get_avail_to_write_percent(&outgoing_channel->outbound)
				> ring_avail_percent_lowater)
			goto found_channel;

		/*
		 * Channel is busy, try to find a channel on the same NUMA node
		 */
		node_mask = cpumask_of_node(cpu_to_node(q_num));
		for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
				  q_num + 1) {
			if (!cpumask_test_cpu(tgt_cpu, node_mask))
				continue;
			channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
			if (!channel)
				continue;
			if (hv_get_avail_to_write_percent(&channel->outbound)
					> ring_avail_percent_lowater) {
				outgoing_channel = channel;
				goto found_channel;
			}
		}

		/*
		 * If we reach here, all the channels on the current
		 * NUMA node are busy. Try to find a channel in
		 * all NUMA nodes
		 */
		for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
				  q_num + 1) {
			channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
			if (!channel)
				continue;
			if (hv_get_avail_to_write_percent(&channel->outbound)
					> ring_avail_percent_lowater) {
				outgoing_channel = channel;
				goto found_channel;
			}
		}
		/*
		 * If we reach here, all the channels are busy. Use the
		 * original channel found.
		 */
	} else {
		spin_lock_irqsave(&stor_device->lock, flags);
		outgoing_channel = stor_device->stor_chns[q_num];
		if (outgoing_channel != NULL) {
			spin_unlock_irqrestore(&stor_device->lock, flags);
			goto found_channel;
		}
		outgoing_channel = get_og_chn(stor_device, q_num);
		spin_unlock_irqrestore(&stor_device->lock, flags);
	}

found_channel:
	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;

	vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);


	vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;


	vstor_packet->vm_srb.data_transfer_length =
		request->payload->range.len;

	vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;

	if (request->payload->range.len) {

		ret = vmbus_sendpacket_mpb_desc(outgoing_channel,
				request->payload, request->payload_sz,
				vstor_packet,
				sizeof(struct vstor_packet),
				(unsigned long)request);
	} else {
		ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret != 0)
		return ret;

	atomic_inc(&stor_device->num_outstanding_req);

	return ret;
}
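/*
 * A channel is treated as busy above when the writable portion of its
 * outbound ring has dropped to ring_avail_percent_lowater (10% by
 * default) or less.
 */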

static int storvsc_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * Set blist flag to permit the reading of the VPD pages even when
	 * the target may claim SPC-2 compliance. MSFT targets currently
	 * claim SPC-2 compliance while they implement post SPC-2 features.
	 * With this flag we can correctly handle WRITE_SAME_16 issues.
	 *
	 * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
	 * still supports REPORT LUN.
	 */
	sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;

	return 0;
}

static int storvsc_sdev_configure(struct scsi_device *sdevice,
				  struct queue_limits *lim)
{
	blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));

	/* storvsc devices don't support MAINTENANCE_IN SCSI cmd */
	sdevice->no_report_opcodes = 1;
	sdevice->no_write_same = 1;

	/*
	 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
	 * if the device is a MSFT virtual device. If the host is
	 * WIN10 or newer, allow write_same.
	 */
	if (!strncmp(sdevice->vendor, "Msft", 4)) {
		switch (vmstor_proto_version) {
		case VMSTOR_PROTO_VERSION_WIN8:
		case VMSTOR_PROTO_VERSION_WIN8_1:
			sdevice->scsi_level = SCSI_SPC_3;
			break;
		}

		if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10)
			sdevice->no_write_same = 0;
	}

	return 0;
}

static int storvsc_get_chs(struct scsi_device *sdev, struct gendisk *unused,
			   sector_t capacity, int *info)
{
	sector_t nsect = capacity;
	sector_t cylinders = nsect;
	int heads, sectors_pt;

	/*
	 * We are making up these values; let us keep it simple.
	 */
	heads = 0xff;
	sectors_pt = 0x3f;	/* Sectors per track */
	sector_div(cylinders, heads * sectors_pt);
	if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
		cylinders = 0xffff;

	info[0] = heads;
	info[1] = sectors_pt;
	info[2] = (int)cylinders;

	return 0;
}
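/*
 * With the made-up geometry above (255 heads, 63 sectors per track), a
 * 100 GiB disk of 209715200 512-byte sectors reports
 * 209715200 / (255 * 63) = 13054 cylinders.
 */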

static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
	struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
	struct hv_device *device = host_dev->dev;

	struct storvsc_device *stor_device;
	struct storvsc_cmd_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;

	stor_device = get_out_stor_device(device);
	if (!stor_device)
		return FAILED;

	request = &stor_device->reset_request;
	vstor_packet = &request->vstor_packet;
	memset(vstor_packet, 0, sizeof(struct vstor_packet));

	init_completion(&request->wait_event);

	vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->vm_srb.path_id = stor_device->path_id;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       VMBUS_RQST_RESET,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return FAILED;

	t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
	if (t == 0)
		return TIMEOUT_ERROR;


	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and returned to us.
	 * There is a potential race here where the host may be in
	 * the process of responding when we return from here.
	 * Just wait for all in-transit packets to be accounted for
	 * before we return from here.
	 */
	storvsc_wait_to_drain(stor_device);

	return SUCCESS;
}

/*
 * The host guarantees to respond to each command, although I/O latencies might
 * be unbounded on Azure. Reset the timer unconditionally to give the host a
 * chance to perform EH.
 */
static enum scsi_timeout_action storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return SCSI_EH_RESET_TIMER;
}

static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
{
	bool allowed = true;
	u8 scsi_op = scmnd->cmnd[0];

	switch (scsi_op) {
	/* the host does not handle WRITE_SAME, log accidental usage */
	case WRITE_SAME:
	/*
	 * smartd sends this command and the host does not handle
	 * this. So, don't send it.
	 */
	case SET_WINDOW:
		set_host_byte(scmnd, DID_ERROR);
		allowed = false;
		break;
	default:
		break;
	}
	return allowed;
}

static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
	int ret;
	struct hv_host_device *host_dev = shost_priv(host);
	struct hv_device *dev = host_dev->dev;
	struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
	struct scatterlist *sgl;
	struct vmscsi_request *vm_srb;
	struct vmbus_packet_mpb_array *payload;
	u32 payload_sz;
	u32 length;

	if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) {
		/*
		 * On legacy hosts filter unimplemented commands.
		 * Future hosts are expected to correctly handle
		 * unsupported commands. Furthermore, it is
		 * possible that some of the currently
		 * unsupported commands may be supported in
		 * future versions of the host.
		 */
		if (!storvsc_scsi_cmd_ok(scmnd)) {
			scsi_done(scmnd);
			return 0;
		}
	}

	/* Setup the cmd request */
	cmd_request->cmd = scmnd;

	memset(&cmd_request->vstor_packet, 0, sizeof(struct vstor_packet));
	vm_srb = &cmd_request->vstor_packet.vm_srb;
	vm_srb->time_out_value = 60;

	vm_srb->srb_flags |=
		SRB_FLAGS_DISABLE_SYNCH_TRANSFER;

	if (scmnd->device->tagged_supported) {
		vm_srb->srb_flags |=
		(SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
		vm_srb->queue_tag = SP_UNTAGGED;
		vm_srb->queue_action = SRB_SIMPLE_TAG_REQUEST;
	}

	/* Build the SRB */
	switch (scmnd->sc_data_direction) {
	case DMA_TO_DEVICE:
		vm_srb->data_in = WRITE_TYPE;
		vm_srb->srb_flags |= SRB_FLAGS_DATA_OUT;
		break;
	case DMA_FROM_DEVICE:
		vm_srb->data_in = READ_TYPE;
		vm_srb->srb_flags |= SRB_FLAGS_DATA_IN;
		break;
	case DMA_NONE:
		vm_srb->data_in = UNKNOWN_TYPE;
		vm_srb->srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
		break;
	default:
		/*
		 * This is DMA_BIDIRECTIONAL or something else we are never
		 * supposed to see here.
		 */
		WARN(1, "Unexpected data direction: %d\n",
		     scmnd->sc_data_direction);
		return -EINVAL;
	}


	vm_srb->port_number = host_dev->port;
	vm_srb->path_id = scmnd->device->channel;
	vm_srb->target_id = scmnd->device->id;
	vm_srb->lun = scmnd->device->lun;

	vm_srb->cdb_length = scmnd->cmd_len;

	memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);

	sgl = (struct scatterlist *)scsi_sglist(scmnd);

	length = scsi_bufflen(scmnd);
	payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
	payload->range.len = 0;
	payload_sz = 0;

	if (scsi_sg_count(scmnd)) {
		unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
		unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
		struct scatterlist *sg;
		unsigned long hvpfn, hvpfns_to_add;
		int j, i = 0, sg_count;

		payload_sz = (hvpg_count * sizeof(u64) +
			      sizeof(struct vmbus_packet_mpb_array));

		if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
			payload = kzalloc(payload_sz, GFP_ATOMIC);
			if (!payload)
				return SCSI_MLQUEUE_DEVICE_BUSY;
		}

		payload->rangecount = 1;
		payload->range.len = length;
		payload->range.offset = offset_in_hvpg;

		sg_count = scsi_dma_map(scmnd);
		if (sg_count < 0) {
			ret = SCSI_MLQUEUE_DEVICE_BUSY;
			goto err_free_payload;
		}

		for_each_sg(sgl, sg, sg_count, j) {
			/*
			 * Init values for the current sgl entry. hvpfns_to_add
			 * is in units of Hyper-V size pages. Handling the
			 * PAGE_SIZE != HV_HYP_PAGE_SIZE case also handles
			 * values of sgl->offset that are larger than PAGE_SIZE.
			 * Such offsets are handled even on other than the first
			 * sgl entry, provided they are a multiple of PAGE_SIZE.
			 */
			hvpfn = HVPFN_DOWN(sg_dma_address(sg));
			hvpfns_to_add = HVPFN_UP(sg_dma_address(sg) +
						 sg_dma_len(sg)) - hvpfn;
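			/*
			 * For example, with a 4 KiB Hyper-V page, an sgl
			 * entry mapped at DMA address 0x1000 with a length
			 * of 8 KiB gives hvpfn = 1 and hvpfns_to_add = 2
			 * (Hyper-V PFNs 1 and 2).
			 */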

			/*
			 * Fill the next portion of the PFN array with
			 * sequential Hyper-V PFNs for the contiguous physical
			 * memory described by the sgl entry. The end of the
			 * last sgl should be reached at the same time that
			 * the PFN array is filled.
			 */
			while (hvpfns_to_add--)
				payload->range.pfn_array[i++] = hvpfn++;
		}
	}

	cmd_request->payload = payload;
	cmd_request->payload_sz = payload_sz;

	/* Invokes the vsc to start an IO */
	migrate_disable();
	ret = storvsc_do_io(dev, cmd_request, smp_processor_id());
	migrate_enable();

	if (ret)
		scsi_dma_unmap(scmnd);

	if (ret == -EAGAIN) {
		/* no more space */
		ret = SCSI_MLQUEUE_DEVICE_BUSY;
		goto err_free_payload;
	}

	return 0;

err_free_payload:
	if (payload_sz > sizeof(cmd_request->mpb))
		kfree(payload);

	return ret;
}

static struct scsi_host_template scsi_driver = {
	.module			= THIS_MODULE,
	.name			= "storvsc_host_t",
	.cmd_size		= sizeof(struct storvsc_cmd_request),
	.bios_param		= storvsc_get_chs,
	.queuecommand		= storvsc_queuecommand,
	.eh_host_reset_handler	= storvsc_host_reset_handler,
	.proc_name		= "storvsc_host",
	.eh_timed_out		= storvsc_eh_timed_out,
	.sdev_init		= storvsc_device_alloc,
	.sdev_configure		= storvsc_sdev_configure,
	.cmd_per_lun		= 2048,
	.this_id		= -1,
	/* Ensure there are no gaps in presented sgls */
	.virt_boundary_mask	= HV_HYP_PAGE_SIZE - 1,
	.no_write_same		= 1,
	.track_queue_depth	= 1,
	.change_queue_depth	= storvsc_change_queue_depth,
};

enum {
	SCSI_GUID,
	IDE_GUID,
	SFC_GUID,
};

static const struct hv_vmbus_device_id id_table[] = {
	/* SCSI guid */
	{ HV_SCSI_GUID,
	  .driver_data = SCSI_GUID
	},
	/* IDE guid */
	{ HV_IDE_GUID,
	  .driver_data = IDE_GUID
	},
	/* Fibre Channel GUID */
	{
	  HV_SYNTHFC_GUID,
	  .driver_data = SFC_GUID
	},
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static const struct { guid_t guid; } fc_guid = { HV_SYNTHFC_GUID };

static bool hv_dev_is_fc(struct hv_device *hv_dev)
{
	return guid_equal(&fc_guid.guid, &hv_dev->dev_type);
}

static int storvsc_probe(struct hv_device *device,
			 const struct hv_vmbus_device_id *dev_id)
{
	int ret;
	int num_cpus = num_online_cpus();
	int num_present_cpus = num_present_cpus();
	struct Scsi_Host *host;
	struct hv_host_device *host_dev;
	bool dev_is_ide = dev_id->driver_data == IDE_GUID;
	bool is_fc = dev_id->driver_data == SFC_GUID;
	int target = 0;
	struct storvsc_device *stor_device;
	int max_sub_channels = 0;
	u32 max_xfer_bytes;

	/*
	 * We support sub-channels for storage on SCSI and FC controllers.
	 * The number of sub-channels offered is based on the number of
	 * VCPUs in the guest.
	 */
	if (!dev_is_ide)
		max_sub_channels =
			(num_cpus - 1) / storvsc_vcpus_per_sub_channel;

	scsi_driver.can_queue = max_outstanding_req_per_channel *
				(max_sub_channels + 1) *
				(100 - ring_avail_percent_lowater) / 100;
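	/*
	 * For example, 16 online CPUs with the default
	 * storvsc_vcpus_per_sub_channel of 4 give (16 - 1) / 4 = 3
	 * sub-channels, so can_queue becomes
	 * max_outstanding_req_per_channel * 4 * 90 / 100 with the
	 * default lowater of 10.
	 */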
	 */
	if (!dev_is_ide)
		max_sub_channels =
			(num_cpus - 1) / storvsc_vcpus_per_sub_channel;

	scsi_driver.can_queue = max_outstanding_req_per_channel *
				(max_sub_channels + 1) *
				(100 - ring_avail_percent_lowater) / 100;

	host = scsi_host_alloc(&scsi_driver,
			       sizeof(struct hv_host_device));
	if (!host)
		return -ENOMEM;

	host_dev = shost_priv(host);
	memset(host_dev, 0, sizeof(struct hv_host_device));

	host_dev->port = host->host_no;
	host_dev->dev = device;
	host_dev->host = host;

	stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
	if (!stor_device) {
		ret = -ENOMEM;
		goto err_out0;
	}

	stor_device->destroy = false;
	init_waitqueue_head(&stor_device->waiting_to_drain);
	stor_device->device = device;
	stor_device->host = host;
	spin_lock_init(&stor_device->lock);
	hv_set_drvdata(device, stor_device);
	dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);

	stor_device->port_number = host->host_no;
	ret = storvsc_connect_to_vsp(device, aligned_ringbuffer_size, is_fc);
	if (ret)
		goto err_out1;

	host_dev->path = stor_device->path_id;
	host_dev->target = stor_device->target_id;

	switch (dev_id->driver_data) {
	case SFC_GUID:
		host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
		host->max_id = STORVSC_FC_MAX_TARGETS;
		host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
		host->transportt = fc_transport_template;
#endif
		break;

	case SCSI_GUID:
		host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
		host->max_id = STORVSC_MAX_TARGETS;
		host->max_channel = STORVSC_MAX_CHANNELS - 1;
		break;

	default:
		host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
		host->max_id = STORVSC_IDE_MAX_TARGETS;
		host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
		break;
	}
	/* max cmd length */
	host->max_cmd_len = STORVSC_MAX_CMD_LEN;
	/*
	 * Any reasonable Hyper-V configuration should provide a
	 * max_transfer_bytes value aligned to HV_HYP_PAGE_SIZE; round
	 * down to protect against any weird value.
	 */
	max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
	if (is_fc)
		max_xfer_bytes = min(max_xfer_bytes, STORVSC_FC_MAX_XFER_SIZE);

	/* max_hw_sectors_kb */
	host->max_sectors = max_xfer_bytes >> 9;
	/*
	 * There are 2 requirements for Hyper-V storvsc sgl segments,
	 * based on which the below calculation for max segments is
	 * done:
	 *
	 * 1. Except for the first and last sgl segment, all sgl segments
	 *    should be aligned to HV_HYP_PAGE_SIZE; that also means the
	 *    maximum number of segments in a sgl can be calculated by
	 *    dividing the total max transfer length by HV_HYP_PAGE_SIZE.
	 *
	 * 2. Except for the first and last, each entry in the SGL must
	 *    have an offset that is a multiple of HV_HYP_PAGE_SIZE.
	 */
	host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;
	/*
	 * For non-IDE disks, the host supports multiple channels.
	 * Set the number of HW queues we are supporting.
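	 *
	 * For example (hypothetical values): with 16 present CPUs and
	 * storvsc_max_hw_queues unset (0), nr_hw_queues becomes 16;
	 * passing storvsc_max_hw_queues=4 caps it at 4 instead.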
	 */
	if (!dev_is_ide) {
		if (storvsc_max_hw_queues > num_present_cpus) {
			storvsc_max_hw_queues = 0;
			storvsc_log(device, STORVSC_LOGGING_WARN,
				"Resetting invalid storvsc_max_hw_queues value to default.\n");
		}
		if (storvsc_max_hw_queues)
			host->nr_hw_queues = storvsc_max_hw_queues;
		else
			host->nr_hw_queues = num_present_cpus;
	}

	/*
	 * Set the error handler work queue.
	 */
	host_dev->handle_error_wq =
			alloc_ordered_workqueue("storvsc_error_wq_%d",
						0,
						host->host_no);
	if (!host_dev->handle_error_wq) {
		ret = -ENOMEM;
		goto err_out2;
	}
	INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan);
	/* Register the HBA and start the scsi bus scan */
	ret = scsi_add_host(host, &device->device);
	if (ret != 0)
		goto err_out3;

	if (!dev_is_ide) {
		scsi_scan_host(host);
	} else {
		target = (device->dev_instance.b[5] << 8 |
			  device->dev_instance.b[4]);
		ret = scsi_add_device(host, 0, target, 0);
		if (ret)
			goto err_out4;
	}
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
	if (host->transportt == fc_transport_template) {
		struct fc_rport_identifiers ids = {
			.roles = FC_PORT_ROLE_FCP_DUMMY_INITIATOR,
		};

		fc_host_node_name(host) = stor_device->node_name;
		fc_host_port_name(host) = stor_device->port_name;
		stor_device->rport = fc_remote_port_add(host, 0, &ids);
		if (!stor_device->rport) {
			ret = -ENOMEM;
			goto err_out4;
		}
	}
#endif
	return 0;

err_out4:
	scsi_remove_host(host);

err_out3:
	destroy_workqueue(host_dev->handle_error_wq);

err_out2:
	/*
	 * Once we have connected with the host, we must invoke
	 * storvsc_dev_remove() to roll back this state. That call also
	 * frees the stor_device, hence the jump around the err_out1 label.
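	 *
	 * (The unwind below mirrors the setup order: err_out4 undoes
	 * scsi_add_host(), err_out3 undoes the workqueue allocation, and
	 * err_out2 undoes the connection to the VSP.)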
	 */
	storvsc_dev_remove(device);
	goto err_out0;

err_out1:
	kfree(stor_device->stor_chns);
	kfree(stor_device);

err_out0:
	scsi_host_put(host);
	return ret;
}

/* Change a scsi target's queue depth */
static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
	if (queue_depth > scsi_driver.can_queue)
		queue_depth = scsi_driver.can_queue;

	return scsi_change_queue_depth(sdev, queue_depth);
}

static void storvsc_remove(struct hv_device *dev)
{
	struct storvsc_device *stor_device = hv_get_drvdata(dev);
	struct Scsi_Host *host = stor_device->host;
	struct hv_host_device *host_dev = shost_priv(host);

#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
	if (host->transportt == fc_transport_template) {
		fc_remote_port_delete(stor_device->rport);
		fc_remove_host(host);
	}
#endif
	destroy_workqueue(host_dev->handle_error_wq);
	scsi_remove_host(host);
	storvsc_dev_remove(dev);
	scsi_host_put(host);
}

static int storvsc_suspend(struct hv_device *hv_dev)
{
	struct storvsc_device *stor_device = hv_get_drvdata(hv_dev);
	struct Scsi_Host *host = stor_device->host;
	struct hv_host_device *host_dev = shost_priv(host);

	storvsc_wait_to_drain(stor_device);

	drain_workqueue(host_dev->handle_error_wq);

	vmbus_close(hv_dev->channel);

	kfree(stor_device->stor_chns);
	stor_device->stor_chns = NULL;

	cpumask_clear(&stor_device->alloced_cpus);

	return 0;
}

static int storvsc_resume(struct hv_device *hv_dev)
{
	int ret;

	ret = storvsc_connect_to_vsp(hv_dev, aligned_ringbuffer_size,
				     hv_dev_is_fc(hv_dev));
	return ret;
}

static struct hv_driver storvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = storvsc_probe,
	.remove = storvsc_remove,
	.suspend = storvsc_suspend,
	.resume = storvsc_resume,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
static struct fc_function_template fc_transport_functions = {
	.show_host_node_name = 1,
	.show_host_port_name = 1,
};
#endif

static int __init storvsc_drv_init(void)
{
	int ret;

	/*
	 * Divide the ring buffer data size (which is 1 page less
	 * than the ring buffer size, since that page is reserved for
	 * the ring buffer indices) by the max request size (which is
	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64).
	 */
	aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size);
	max_outstanding_req_per_channel =
		((aligned_ringbuffer_size - PAGE_SIZE) /
		ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
		      sizeof(struct vstor_packet) + sizeof(u64),
		      sizeof(u64)));

#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
	fc_transport_template = fc_attach_transport(&fc_transport_functions);
	if (!fc_transport_template)
		return -ENODEV;
#endif

	ret = vmbus_driver_register(&storvsc_drv);

#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
	if (ret)
		fc_release_transport(fc_transport_template);
#endif

	return ret;
}

static void __exit storvsc_drv_exit(void)
{
	vmbus_driver_unregister(&storvsc_drv);
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
	fc_release_transport(fc_transport_template);
#endif
}

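/*
 * Rough sanity check of the sizing math above (illustrative numbers only,
 * not necessarily the driver's defaults): if the aligned ring buffer size
 * were 64 KiB with one 4 KiB page reserved for the ring indices, and the
 * aligned per-request size worked out to 512 bytes, each channel could
 * hold (65536 - 4096) / 512 = 120 outstanding requests.
 */
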
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_drv_init);
module_exit(storvsc_drv_exit);