Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'cxl-fixes-7.0-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl fixes from Dave Jiang:

- Fix incorrect usages of decoder flags

- Validate payload size before accessing contents

- Fix race condition when creating nvdimm objects

- Fix deadlock on attach failure

* tag 'cxl-fixes-7.0-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl:
cxl/region: Test CXL_DECODER_F_NORMALIZED_ADDRESSING as a bitmask
cxl: Test CXL_DECODER_F_LOCK as a bitmask
cxl/mbox: validate payload size before accessing contents in cxl_payload_from_user_allowed()
cxl: Fix race of nvdimm_bus object when creating nvdimm objects
cxl: Move devm_cxl_add_nvdimm_bridge() to cxl_pmem.ko
cxl/port: Hold port host lock during dport adding.
cxl/port: Introduce port_to_host() helper
cxl/memdev: fix deadlock in cxl_memdev_autoremove() on attach failure

+117 -54
+18
drivers/cxl/core/core.h
··· 152 152 int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port, 153 153 struct access_coordinate *c); 154 154 155 + static inline struct device *port_to_host(struct cxl_port *port) 156 + { 157 + struct cxl_port *parent = is_cxl_root(port) ? NULL : 158 + to_cxl_port(port->dev.parent); 159 + 160 + /* 161 + * The host of CXL root port and the first level of ports is 162 + * the platform firmware device, the host of all other ports 163 + * is their parent port. 164 + */ 165 + if (!parent) 166 + return port->uport_dev; 167 + else if (is_cxl_root(parent)) 168 + return parent->uport_dev; 169 + else 170 + return &parent->dev; 171 + } 172 + 155 173 static inline struct device *dport_to_host(struct cxl_dport *dport) 156 174 { 157 175 struct cxl_port *port = dport->port;
+1 -1
drivers/cxl/core/hdm.c
··· 904 904 if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) 905 905 return; 906 906 907 - if (test_bit(CXL_DECODER_F_LOCK, &cxld->flags)) 907 + if (cxld->flags & CXL_DECODER_F_LOCK) 908 908 return; 909 909 910 910 if (port->commit_end == id)
+9 -2
drivers/cxl/core/mbox.c
··· 311 311 * cxl_payload_from_user_allowed() - Check contents of in_payload. 312 312 * @opcode: The mailbox command opcode. 313 313 * @payload_in: Pointer to the input payload passed in from user space. 314 + * @in_size: Size of @payload_in in bytes. 314 315 * 315 316 * Return: 316 317 * * true - payload_in passes check for @opcode. ··· 326 325 * 327 326 * The specific checks are determined by the opcode. 328 327 */ 329 - static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in) 328 + static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in, 329 + size_t in_size) 330 330 { 331 331 switch (opcode) { 332 332 case CXL_MBOX_OP_SET_PARTITION_INFO: { 333 333 struct cxl_mbox_set_partition_info *pi = payload_in; 334 334 335 + if (in_size < sizeof(*pi)) 336 + return false; 335 337 if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG) 336 338 return false; 337 339 break; ··· 342 338 case CXL_MBOX_OP_CLEAR_LOG: { 343 339 const uuid_t *uuid = (uuid_t *)payload_in; 344 340 341 + if (in_size < sizeof(uuid_t)) 342 + return false; 345 343 /* 346 344 * Restrict the ‘Clear log’ action to only apply to 347 345 * Vendor debug logs. ··· 371 365 if (IS_ERR(mbox_cmd->payload_in)) 372 366 return PTR_ERR(mbox_cmd->payload_in); 373 367 374 - if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) { 368 + if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in, 369 + in_size)) { 375 370 dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n", 376 371 cxl_mem_opcode_to_name(opcode)); 377 372 kvfree(mbox_cmd->payload_in);
+9 -4
drivers/cxl/core/memdev.c
··· 1089 1089 DEFINE_FREE(put_cxlmd, struct cxl_memdev *, 1090 1090 if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev)) 1091 1091 1092 - static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd) 1092 + static bool cxl_memdev_attach_failed(struct cxl_memdev *cxlmd) 1093 1093 { 1094 - int rc; 1095 - 1096 1094 /* 1097 1095 * If @attach is provided fail if the driver is not attached upon 1098 1096 * return. Note that failure here could be the result of a race to ··· 1098 1100 * succeeded and then cxl_mem unbound before the lock is acquired. 1099 1101 */ 1100 1102 guard(device)(&cxlmd->dev); 1101 - if (cxlmd->attach && !cxlmd->dev.driver) { 1103 + return (cxlmd->attach && !cxlmd->dev.driver); 1104 + } 1105 + 1106 + static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd) 1107 + { 1108 + int rc; 1109 + 1110 + if (cxl_memdev_attach_failed(cxlmd)) { 1102 1111 cxl_memdev_unregister(cxlmd); 1103 1112 return ERR_PTR(-ENXIO); 1104 1113 }
+32 -10
drivers/cxl/core/pmem.c
··· 115 115 device_unregister(&cxl_nvb->dev); 116 116 } 117 117 118 - /** 119 - * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology 120 - * @host: platform firmware root device 121 - * @port: CXL port at the root of a CXL topology 122 - * 123 - * Return: bridge device that can host cxl_nvdimm objects 124 - */ 125 - struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, 126 - struct cxl_port *port) 118 + static bool cxl_nvdimm_bridge_failed_attach(struct cxl_nvdimm_bridge *cxl_nvb) 119 + { 120 + struct device *dev = &cxl_nvb->dev; 121 + 122 + guard(device)(dev); 123 + /* If the device has no driver, then it failed to attach. */ 124 + return dev->driver == NULL; 125 + } 126 + 127 + struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host, 128 + struct cxl_port *port) 127 129 { 128 130 struct cxl_nvdimm_bridge *cxl_nvb; 129 131 struct device *dev; ··· 147 145 if (rc) 148 146 goto err; 149 147 148 + if (cxl_nvdimm_bridge_failed_attach(cxl_nvb)) { 149 + unregister_nvb(cxl_nvb); 150 + return ERR_PTR(-ENODEV); 151 + } 152 + 150 153 rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb); 151 154 if (rc) 152 155 return ERR_PTR(rc); ··· 162 155 put_device(dev); 163 156 return ERR_PTR(rc); 164 157 } 165 - EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL"); 158 + EXPORT_SYMBOL_FOR_MODULES(__devm_cxl_add_nvdimm_bridge, "cxl_pmem"); 166 159 167 160 static void cxl_nvdimm_release(struct device *dev) 168 161 { ··· 261 254 cxl_nvb = cxl_find_nvdimm_bridge(port); 262 255 if (!cxl_nvb) 263 256 return -ENODEV; 257 + 258 + /* 259 + * Take the uport_dev lock to guard against race of nvdimm_bus object. 260 + * cxl_acpi_probe() registers the nvdimm_bus and is done under the 261 + * root port uport_dev lock. 262 + * 263 + * Take the cxl_nvb device lock to ensure that cxl_nvb driver is in a 264 + * consistent state. And the driver registers nvdimm_bus. 265 + */ 266 + guard(device)(cxl_nvb->port->uport_dev); 267 + guard(device)(&cxl_nvb->dev); 268 + if (!cxl_nvb->nvdimm_bus) { 269 + rc = -ENODEV; 270 + goto err_alloc; 271 + } 264 272 265 273 cxl_nvd = cxl_nvdimm_alloc(cxl_nvb, cxlmd); 266 274 if (IS_ERR(cxl_nvd)) {
+18 -34
drivers/cxl/core/port.c
··· 615 615 static void unregister_port(void *_port) 616 616 { 617 617 struct cxl_port *port = _port; 618 - struct cxl_port *parent = parent_port_of(port); 619 - struct device *lock_dev; 620 618 621 - /* 622 - * CXL root port's and the first level of ports are unregistered 623 - * under the platform firmware device lock, all other ports are 624 - * unregistered while holding their parent port lock. 625 - */ 626 - if (!parent) 627 - lock_dev = port->uport_dev; 628 - else if (is_cxl_root(parent)) 629 - lock_dev = parent->uport_dev; 630 - else 631 - lock_dev = &parent->dev; 632 - 633 - device_lock_assert(lock_dev); 619 + device_lock_assert(port_to_host(port)); 634 620 port->dead = true; 635 621 device_unregister(&port->dev); 636 622 } ··· 1413 1427 return NULL; 1414 1428 } 1415 1429 1416 - static struct device *endpoint_host(struct cxl_port *endpoint) 1417 - { 1418 - struct cxl_port *port = to_cxl_port(endpoint->dev.parent); 1419 - 1420 - if (is_cxl_root(port)) 1421 - return port->uport_dev; 1422 - return &port->dev; 1423 - } 1424 - 1425 1430 static void delete_endpoint(void *data) 1426 1431 { 1427 1432 struct cxl_memdev *cxlmd = data; 1428 1433 struct cxl_port *endpoint = cxlmd->endpoint; 1429 - struct device *host = endpoint_host(endpoint); 1434 + struct device *host = port_to_host(endpoint); 1430 1435 1431 1436 scoped_guard(device, host) { 1432 1437 if (host->driver && !endpoint->dead) { ··· 1433 1456 1434 1457 int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint) 1435 1458 { 1436 - struct device *host = endpoint_host(endpoint); 1459 + struct device *host = port_to_host(endpoint); 1437 1460 struct device *dev = &cxlmd->dev; 1438 1461 1439 1462 get_device(host); ··· 1767 1790 { 1768 1791 struct cxl_dport *dport; 1769 1792 1770 - device_lock_assert(&port->dev); 1793 + /* 1794 + * The port is already visible in CXL hierarchy, but it may still 1795 + * be in the process of binding to the CXL port driver at this point. 1796 + * 1797 + * port creation and driver binding are protected by the port's host 1798 + * lock, so acquire the host lock here to ensure the port has completed 1799 + * driver binding before proceeding with dport addition. 1800 + */ 1801 + guard(device)(port_to_host(port)); 1802 + guard(device)(&port->dev); 1771 1803 dport = cxl_find_dport_by_dev(port, dport_dev); 1772 1804 if (!dport) { 1773 1805 dport = probe_dport(port, dport_dev); ··· 1843 1857 * RP port enumerated by cxl_acpi without dport will 1844 1858 * have the dport added here. 1845 1859 */ 1846 - scoped_guard(device, &port->dev) { 1847 - dport = find_or_add_dport(port, dport_dev); 1848 - if (IS_ERR(dport)) { 1849 - if (PTR_ERR(dport) == -EAGAIN) 1850 - goto retry; 1851 - return PTR_ERR(dport); 1852 - } 1860 + dport = find_or_add_dport(port, dport_dev); 1861 + if (IS_ERR(dport)) { 1862 + if (PTR_ERR(dport) == -EAGAIN) 1863 + goto retry; 1864 + return PTR_ERR(dport); 1865 } 1853 1866 1854 1867 rc = cxl_add_ep(dport, &cxlmd->dev);
+2 -2
drivers/cxl/core/region.c
··· 1100 1100 static void cxl_region_setup_flags(struct cxl_region *cxlr, 1101 1101 struct cxl_decoder *cxld) 1102 1102 { 1103 - if (test_bit(CXL_DECODER_F_LOCK, &cxld->flags)) { 1103 + if (cxld->flags & CXL_DECODER_F_LOCK) { 1104 1104 set_bit(CXL_REGION_F_LOCK, &cxlr->flags); 1105 1105 clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); 1106 1106 } 1107 1107 1108 - if (test_bit(CXL_DECODER_F_NORMALIZED_ADDRESSING, &cxld->flags)) 1108 + if (cxld->flags & CXL_DECODER_F_NORMALIZED_ADDRESSING) 1109 1109 set_bit(CXL_REGION_F_NORMALIZED_ADDRESSING, &cxlr->flags); 1110 1110 } 1111 1111
+7
drivers/cxl/cxl.h
··· 574 574 575 575 #define CXL_DEV_ID_LEN 19 576 576 577 + enum { 578 + CXL_NVD_F_INVALIDATED = 0, 579 + }; 580 + 577 581 struct cxl_nvdimm { 578 582 struct device dev; 579 583 struct cxl_memdev *cxlmd; 580 584 u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */ 581 585 u64 dirty_shutdowns; 586 + unsigned long flags; 582 587 }; 583 588 584 589 struct cxl_pmem_region_mapping { ··· 925 920 struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev); 926 921 struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, 927 922 struct cxl_port *port); 923 + struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host, 924 + struct cxl_port *port); 928 925 struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); 929 926 bool is_cxl_nvdimm(struct device *dev); 930 927 int devm_cxl_add_nvdimm(struct device *host, struct cxl_port *port,
+21 -1
drivers/cxl/pmem.c
··· 13 13 14 14 static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX); 15 15 16 + /** 17 + * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology 18 + * @host: platform firmware root device 19 + * @port: CXL port at the root of a CXL topology 20 + * 21 + * Return: bridge device that can host cxl_nvdimm objects 22 + */ 23 + struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, 24 + struct cxl_port *port) 25 + { 26 + return __devm_cxl_add_nvdimm_bridge(host, port); 27 + } 28 + EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL"); 29 + 16 30 static void clear_exclusive(void *mds) 17 31 { 18 32 clear_exclusive_cxl_commands(mds, exclusive_cmds); ··· 142 128 unsigned long flags = 0, cmd_mask = 0; 143 129 struct nvdimm *nvdimm; 144 130 int rc; 131 + 132 + if (test_bit(CXL_NVD_F_INVALIDATED, &cxl_nvd->flags)) 133 + return -EBUSY; 145 134 146 135 set_exclusive_cxl_commands(mds, exclusive_cmds); 147 136 rc = devm_add_action_or_reset(dev, clear_exclusive, mds); ··· 326 309 scoped_guard(device, dev) { 327 310 if (dev->driver) { 328 311 cxl_nvd = to_cxl_nvdimm(dev); 329 - if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data) 312 + if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data) { 330 313 release = true; 314 + set_bit(CXL_NVD_F_INVALIDATED, &cxl_nvd->flags); 315 + } 331 316 } 332 317 } 333 318 if (release) ··· 372 353 .probe = cxl_nvdimm_bridge_probe, 373 354 .id = CXL_DEVICE_NVDIMM_BRIDGE, 374 355 .drv = { 356 + .probe_type = PROBE_FORCE_SYNCHRONOUS, 375 357 .suppress_bind_attrs = true, 376 358 }, 377 359 };