Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'cxl-fixes-6.6-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl fixes from Dan Williams:
"A collection of regression fixes, bug fixes, and some small cleanups
to the Compute Express Link code.

The regressions arrived in the v6.5 dev cycle and missed the v6.6
merge window due to my personal absences this cycle. The most
important fixes are for scenarios where the CXL subsystem fails to
parse valid region configurations established by platform firmware.
This is important because agreement between OS and BIOS on the CXL
configuration is fundamental to implementing "OS native" error
handling, i.e. address translation and component failure
identification.

Other important fixes are a driver load error when the BIOS lets the
Linux PCI core handle AER events, but not CXL memory errors.

The other fixes might have end user impact, but for now are only known
to trigger in our test/emulation environment.

Summary:

- Fix multiple scenarios where platform firmware defined regions fail
to be assembled by the CXL core.

- Fix a spurious driver-load failure on platforms that enable OS
native AER, but not OS native CXL error handling.

- Fix a regression detecting "poison" commands when "security"
commands are also defined.

- Fix a cxl_test regression with the move to centralize CXL port
register enumeration in the CXL core.

- Miscellaneous small fixes and cleanups"

* tag 'cxl-fixes-6.6-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl:
cxl/acpi: Annotate struct cxl_cxims_data with __counted_by
cxl/port: Fix cxl_test register enumeration regression
cxl/region: Refactor granularity select in cxl_port_setup_targets()
cxl/region: Match auto-discovered region decoders by HPA range
cxl/mbox: Fix CEL logic for poison and security commands
cxl/pci: Replace host_bridge->native_aer with pcie_aer_is_native()
PCI/AER: Export pcie_aer_is_native()
cxl/pci: Fix appropriate checking for _OSC while handling CXL RAS registers

+60 -33
+2 -2
drivers/cxl/acpi.c
··· 14 14 15 15 struct cxl_cxims_data { 16 16 int nr_maps; 17 - u64 xormaps[]; 17 + u64 xormaps[] __counted_by(nr_maps); 18 18 }; 19 19 20 20 /* ··· 112 112 GFP_KERNEL); 113 113 if (!cximsd) 114 114 return -ENOMEM; 115 + cximsd->nr_maps = nr_maps; 115 116 memcpy(cximsd->xormaps, cxims->xormap_list, 116 117 nr_maps * sizeof(*cximsd->xormaps)); 117 - cximsd->nr_maps = nr_maps; 118 118 cxlrd->platform_data = cximsd; 119 119 120 120 return 0;
+12 -11
drivers/cxl/core/mbox.c
··· 715 715 for (i = 0; i < cel_entries; i++) { 716 716 u16 opcode = le16_to_cpu(cel_entry[i].opcode); 717 717 struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); 718 + int enabled = 0; 718 719 719 - if (!cmd && (!cxl_is_poison_command(opcode) || 720 - !cxl_is_security_command(opcode))) { 721 - dev_dbg(dev, 722 - "Opcode 0x%04x unsupported by driver\n", opcode); 723 - continue; 720 + if (cmd) { 721 + set_bit(cmd->info.id, mds->enabled_cmds); 722 + enabled++; 724 723 } 725 724 726 - if (cmd) 727 - set_bit(cmd->info.id, mds->enabled_cmds); 728 - 729 - if (cxl_is_poison_command(opcode)) 725 + if (cxl_is_poison_command(opcode)) { 730 726 cxl_set_poison_cmd_enabled(&mds->poison, opcode); 727 + enabled++; 728 + } 731 729 732 - if (cxl_is_security_command(opcode)) 730 + if (cxl_is_security_command(opcode)) { 733 731 cxl_set_security_cmd_enabled(&mds->security, opcode); 732 + enabled++; 733 + } 734 734 735 - dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode); 735 + dev_dbg(dev, "Opcode 0x%04x %s\n", opcode, 736 + enabled ? "enabled" : "unsupported by driver"); 736 737 } 737 738 } 738 739
+9 -4
drivers/cxl/core/port.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ 3 + #include <linux/platform_device.h> 3 4 #include <linux/memregion.h> 4 5 #include <linux/workqueue.h> 5 6 #include <linux/debugfs.h> ··· 707 706 return cxl_setup_regs(map); 708 707 } 709 708 710 - static inline int cxl_port_setup_regs(struct cxl_port *port, 711 - resource_size_t component_reg_phys) 709 + static int cxl_port_setup_regs(struct cxl_port *port, 710 + resource_size_t component_reg_phys) 712 711 { 712 + if (dev_is_platform(port->uport_dev)) 713 + return 0; 713 714 return cxl_setup_comp_regs(&port->dev, &port->comp_map, 714 715 component_reg_phys); 715 716 } 716 717 717 - static inline int cxl_dport_setup_regs(struct cxl_dport *dport, 718 - resource_size_t component_reg_phys) 718 + static int cxl_dport_setup_regs(struct cxl_dport *dport, 719 + resource_size_t component_reg_phys) 719 720 { 721 + if (dev_is_platform(dport->dport_dev)) 722 + return 0; 720 723 return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map, 721 724 component_reg_phys); 722 725 }
+31 -10
drivers/cxl/core/region.c
··· 717 717 return 0; 718 718 } 719 719 720 + static int match_auto_decoder(struct device *dev, void *data) 721 + { 722 + struct cxl_region_params *p = data; 723 + struct cxl_decoder *cxld; 724 + struct range *r; 725 + 726 + if (!is_switch_decoder(dev)) 727 + return 0; 728 + 729 + cxld = to_cxl_decoder(dev); 730 + r = &cxld->hpa_range; 731 + 732 + if (p->res && p->res->start == r->start && p->res->end == r->end) 733 + return 1; 734 + 735 + return 0; 736 + } 737 + 720 738 static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port, 721 739 struct cxl_region *cxlr) 722 740 { 723 741 struct device *dev; 724 742 int id = 0; 725 743 726 - dev = device_find_child(&port->dev, &id, match_free_decoder); 744 + if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) 745 + dev = device_find_child(&port->dev, &cxlr->params, 746 + match_auto_decoder); 747 + else 748 + dev = device_find_child(&port->dev, &id, match_free_decoder); 727 749 if (!dev) 728 750 return NULL; 729 751 /* ··· 1176 1154 } 1177 1155 1178 1156 /* 1179 - * If @parent_port is masking address bits, pick the next unused address 1180 - * bit to route @port's targets. 1157 + * Interleave granularity is a multiple of @parent_port granularity. 1158 + * Multiplier is the parent port interleave ways. 1181 1159 */ 1182 - if (parent_iw > 1 && cxl_rr->nr_targets > 1) { 1183 - u32 address_bit = max(peig + peiw, eiw + peig); 1184 - 1185 - eig = address_bit - eiw + 1; 1186 - } else { 1187 - eiw = peiw; 1188 - eig = peig; 1160 + rc = granularity_to_eig(parent_ig * parent_iw, &eig); 1161 + if (rc) { 1162 + dev_dbg(&cxlr->dev, 1163 + "%s: invalid granularity calculation (%d * %d)\n", 1164 + dev_name(&parent_port->dev), parent_ig, parent_iw); 1165 + return rc; 1189 1166 } 1190 1167 1191 1168 rc = eig_to_granularity(eig, &ig);
+3 -4
drivers/cxl/pci.c
··· 529 529 530 530 static int cxl_pci_ras_unmask(struct pci_dev *pdev) 531 531 { 532 - struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus); 533 532 struct cxl_dev_state *cxlds = pci_get_drvdata(pdev); 534 533 void __iomem *addr; 535 534 u32 orig_val, val, mask; ··· 540 541 return 0; 541 542 } 542 543 543 - /* BIOS has CXL error control */ 544 - if (!host_bridge->native_cxl_error) 545 - return -ENXIO; 544 + /* BIOS has PCIe AER error control */ 545 + if (!pcie_aer_is_native(pdev)) 546 + return 0; 546 547 547 548 rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap); 548 549 if (rc)
+1
drivers/pci/pcie/aer.c
··· 229 229 230 230 return pcie_ports_native || host->native_aer; 231 231 } 232 + EXPORT_SYMBOL_NS_GPL(pcie_aer_is_native, CXL); 232 233 233 234 static int pci_enable_pcie_error_reporting(struct pci_dev *dev) 234 235 {
-2
drivers/pci/pcie/portdrv.h
··· 29 29 30 30 #ifdef CONFIG_PCIEAER 31 31 int pcie_aer_init(void); 32 - int pcie_aer_is_native(struct pci_dev *dev); 33 32 #else 34 33 static inline int pcie_aer_init(void) { return 0; } 35 - static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; } 36 34 #endif 37 35 38 36 #ifdef CONFIG_HOTPLUG_PCI_PCIE
+2
include/linux/aer.h
··· 42 42 43 43 #if defined(CONFIG_PCIEAER) 44 44 int pci_aer_clear_nonfatal_status(struct pci_dev *dev); 45 + int pcie_aer_is_native(struct pci_dev *dev); 45 46 #else 46 47 static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev) 47 48 { 48 49 return -EINVAL; 49 50 } 51 + static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; } 50 52 #endif 51 53 52 54 void cper_print_aer(struct pci_dev *dev, int aer_severity,