Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'cxl/for-6.13/dcd-prep' into cxl-for-next

Add preparation patches for upcoming DCD changes.

- Add range_overlaps()
- Add CDAT/DSMAS shared and read only flag in ACPICA
- Add documentation to struct dev_dax_range
- Delay event buffer allocation in CXL PCI
- Use guard() in cxl_dpa_set_mode()
- Refactor common create region code to reduce redundant code

+56 -47
+6 -15
drivers/cxl/core/hdm.c
··· 424 424 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 425 425 struct cxl_dev_state *cxlds = cxlmd->cxlds; 426 426 struct device *dev = &cxled->cxld.dev; 427 - int rc; 428 427 429 428 switch (mode) { 430 429 case CXL_DECODER_RAM: ··· 434 435 return -EINVAL; 435 436 } 436 437 437 - down_write(&cxl_dpa_rwsem); 438 - if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { 439 - rc = -EBUSY; 440 - goto out; 441 - } 438 + guard(rwsem_write)(&cxl_dpa_rwsem); 439 + if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) 440 + return -EBUSY; 442 441 443 442 /* 444 443 * Only allow modes that are supported by the current partition ··· 444 447 */ 445 448 if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) { 446 449 dev_dbg(dev, "no available pmem capacity\n"); 447 - rc = -ENXIO; 448 - goto out; 450 + return -ENXIO; 449 451 } 450 452 if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) { 451 453 dev_dbg(dev, "no available ram capacity\n"); 452 - rc = -ENXIO; 453 - goto out; 454 + return -ENXIO; 454 455 } 455 456 456 457 cxled->mode = mode; 457 - rc = 0; 458 - out: 459 - up_write(&cxl_dpa_rwsem); 460 - 461 - return rc; 458 + return 0; 462 459 } 463 460 464 461 int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+11 -17
drivers/cxl/core/region.c
··· 2536 2536 return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM); 2537 2537 } 2538 2538 2539 - static ssize_t create_pmem_region_store(struct device *dev, 2540 - struct device_attribute *attr, 2541 - const char *buf, size_t len) 2539 + static ssize_t create_region_store(struct device *dev, const char *buf, 2540 + size_t len, enum cxl_decoder_mode mode) 2542 2541 { 2543 2542 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 2544 2543 struct cxl_region *cxlr; ··· 2547 2548 if (rc != 1) 2548 2549 return -EINVAL; 2549 2550 2550 - cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id); 2551 + cxlr = __create_region(cxlrd, mode, id); 2551 2552 if (IS_ERR(cxlr)) 2552 2553 return PTR_ERR(cxlr); 2553 2554 2554 2555 return len; 2556 + } 2557 + 2558 + static ssize_t create_pmem_region_store(struct device *dev, 2559 + struct device_attribute *attr, 2560 + const char *buf, size_t len) 2561 + { 2562 + return create_region_store(dev, buf, len, CXL_DECODER_PMEM); 2555 2563 } 2556 2564 DEVICE_ATTR_RW(create_pmem_region); 2557 2565 ··· 2566 2560 struct device_attribute *attr, 2567 2561 const char *buf, size_t len) 2568 2562 { 2569 - struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); 2570 - struct cxl_region *cxlr; 2571 - int rc, id; 2572 - 2573 - rc = sscanf(buf, "region%d\n", &id); 2574 - if (rc != 1) 2575 - return -EINVAL; 2576 - 2577 - cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id); 2578 - if (IS_ERR(cxlr)) 2579 - return PTR_ERR(cxlr); 2580 - 2581 - return len; 2563 + return create_region_store(dev, buf, len, CXL_DECODER_RAM); 2582 2564 } 2583 2565 DEVICE_ATTR_RW(create_ram_region); 2584 2566
+4 -4
drivers/cxl/pci.c
··· 777 777 return 0; 778 778 } 779 779 780 - rc = cxl_mem_alloc_event_buf(mds); 781 - if (rc) 782 - return rc; 783 - 784 780 rc = cxl_event_get_int_policy(mds, &policy); 785 781 if (rc) 786 782 return rc; ··· 789 793 "FW still in control of Event Logs despite _OSC settings\n"); 790 794 return -EBUSY; 791 795 } 796 + 797 + rc = cxl_mem_alloc_event_buf(mds); 798 + if (rc) 799 + return rc; 792 800 793 801 rc = cxl_event_irqsetup(mds); 794 802 if (rc)
+20 -6
drivers/dax/dax-private.h
··· 40 40 struct device *youngest; 41 41 }; 42 42 43 + /** 44 + * struct dax_mapping - device to display mapping range attributes 45 + * @dev: device representing this range 46 + * @range_id: index within dev_dax ranges array 47 + * @id: ida of this mapping 48 + */ 43 49 struct dax_mapping { 44 50 struct device dev; 45 51 int range_id; 46 52 int id; 53 + }; 54 + 55 + /** 56 + * struct dev_dax_range - tuple representing a range of memory used by dev_dax 57 + * @pgoff: page offset 58 + * @range: resource-span 59 + * @mapping: reference to the dax_mapping for this range 60 + */ 61 + struct dev_dax_range { 62 + unsigned long pgoff; 63 + struct range range; 64 + struct dax_mapping *mapping; 47 65 }; 48 66 49 67 /** ··· 76 58 * @dev - device core 77 59 * @pgmap - pgmap for memmap setup / lifetime (driver owned) 78 60 * @nr_range: size of @ranges 79 - * @ranges: resource-span + pgoff tuples for the instance 61 + * @ranges: range tuples of memory used 80 62 */ 81 63 struct dev_dax { 82 64 struct dax_region *region; ··· 90 72 struct dev_pagemap *pgmap; 91 73 bool memmap_on_memory; 92 74 int nr_range; 93 - struct dev_dax_range { 94 - unsigned long pgoff; 95 - struct range range; 96 - struct dax_mapping *mapping; 97 - } *ranges; 75 + struct dev_dax_range *ranges; 98 76 }; 99 77 100 78 /*
+5 -5
fs/btrfs/ordered-data.c
··· 111 111 return NULL; 112 112 } 113 113 114 - static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset, 115 - u64 len) 114 + static int btrfs_range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset, 115 + u64 len) 116 116 { 117 117 if (file_offset + len <= entry->file_offset || 118 118 entry->file_offset + entry->num_bytes <= file_offset) ··· 985 985 986 986 while (1) { 987 987 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); 988 - if (range_overlaps(entry, file_offset, len)) 988 + if (btrfs_range_overlaps(entry, file_offset, len)) 989 989 break; 990 990 991 991 if (entry->file_offset >= file_offset + len) { ··· 1114 1114 } 1115 1115 if (prev) { 1116 1116 entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node); 1117 - if (range_overlaps(entry, file_offset, len)) 1117 + if (btrfs_range_overlaps(entry, file_offset, len)) 1118 1118 goto out; 1119 1119 } 1120 1120 if (next) { 1121 1121 entry = rb_entry(next, struct btrfs_ordered_extent, rb_node); 1122 - if (range_overlaps(entry, file_offset, len)) 1122 + if (btrfs_range_overlaps(entry, file_offset, len)) 1123 1123 goto out; 1124 1124 } 1125 1125 /* No ordered extent in the range */
+2
include/acpi/actbl1.h
··· 403 403 /* Flags for subtable above */ 404 404 405 405 #define ACPI_CDAT_DSMAS_NON_VOLATILE (1 << 2) 406 + #define ACPI_CDAT_DSMAS_SHAREABLE (1 << 3) 407 + #define ACPI_CDAT_DSMAS_READ_ONLY (1 << 6) 406 408 407 409 /* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */ 408 410
+8
include/linux/range.h
··· 13 13 return range->end - range->start + 1; 14 14 } 15 15 16 + /* True if r1 completely contains r2 */ 16 17 static inline bool range_contains(const struct range *r1, 17 18 const struct range *r2) 18 19 { 19 20 return r1->start <= r2->start && r1->end >= r2->end; 21 + } 22 + 23 + /* True if any part of r1 overlaps r2 */ 24 + static inline bool range_overlaps(const struct range *r1, 25 + const struct range *r2) 26 + { 27 + return r1->start <= r2->end && r1->end >= r2->start; 20 28 } 21 29 22 30 int add_range(struct range *range, int az, int nr_range,