Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'v7.0-rc5' into next

Sync up with mainline to pull in a fix for an smb compilation error.

+9938 -5519
+3 -1
.mailmap
··· 327 327 Herbert Xu <herbert@gondor.apana.org.au> 328 328 Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com> 329 329 Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn> 330 + Ignat Korchagin <ignat@linux.win> <ignat@cloudflare.com> 330 331 Ike Panhc <ikepanhc@gmail.com> <ike.pan@canonical.com> 331 332 J. Bruce Fields <bfields@fieldses.org> <bfields@redhat.com> 332 333 J. Bruce Fields <bfields@fieldses.org> <bfields@citi.umich.edu> ··· 499 498 Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@linaro.org> 500 499 Loic Poulain <loic.poulain@oss.qualcomm.com> <loic.poulain@intel.com> 501 500 Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com> 502 - Lorenzo Stoakes <lorenzo.stoakes@oracle.com> <lstoakes@gmail.com> 501 + Lorenzo Stoakes <ljs@kernel.org> <lstoakes@gmail.com> 502 + Lorenzo Stoakes <ljs@kernel.org> <lorenzo.stoakes@oracle.com> 503 503 Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net> 504 504 Luca Weiss <luca@lucaweiss.eu> <luca@z3ntu.xyz> 505 505 Lucas De Marchi <demarchi@kernel.org> <lucas.demarchi@intel.com>
+2 -2
Documentation/ABI/testing/sysfs-block-zram
··· 151 151 The algorithm_params file is write-only and is used to set up 152 152 compression algorithm parameters. 153 153 154 - What: /sys/block/zram<id>/writeback_compressed 154 + What: /sys/block/zram<id>/compressed_writeback 155 155 Date: December 2025 156 156 Contact: Richard Chang <richardycc@google.com> 157 157 Description: 158 - The writeback_compressed device attribute toggles the compressed 158 + The compressed_writeback device attribute toggles the compressed 159 159 writeback feature. 160 160 161 161 What: /sys/block/zram<id>/writeback_batch_size
+3 -3
Documentation/admin-guide/blockdev/zram.rst
··· 216 216 writeback_limit_enable RW show and set writeback_limit feature 217 217 writeback_batch_size RW show and set maximum number of in-flight 218 218 writeback operations 219 - writeback_compressed RW show and set compressed writeback feature 219 + compressed_writeback RW show and set compressed writeback feature 220 220 comp_algorithm RW show and change the compression algorithm 221 221 algorithm_params WO setup compression algorithm parameters 222 222 compact WO trigger memory compaction ··· 439 439 By default zram stores written back pages in decompressed (raw) form, which 440 440 means that writeback operation involves decompression of the page before 441 441 writing it to the backing device. This behavior can be changed by enabling 442 - `writeback_compressed` feature, which causes zram to write compressed pages 442 + `compressed_writeback` feature, which causes zram to write compressed pages 443 443 to the backing device, thus avoiding decompression overhead. To enable 444 444 this feature, execute:: 445 445 446 - $ echo yes > /sys/block/zramX/writeback_compressed 446 + $ echo yes > /sys/block/zramX/compressed_writeback 447 447 448 448 Note that this feature should be configured before the `zramX` device is 449 449 initialized.
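The zram.rst hunk above carries the how-to for the renamed toggle. As a hedged companion, here is a minimal userspace C sketch of the same step (the ``zram0`` name follows the documented ``zramX`` pattern; per the hunk, this must happen before the device is initialized)::

    /* Enable compressed writeback on zram0 before disksize is set. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *attr = "/sys/block/zram0/compressed_writeback";
        int fd = open(attr, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, "yes", strlen("yes")) < 0) {
            perror("write");
            close(fd);
            return 1;
        }
        return close(fd);
    }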
+3
Documentation/admin-guide/kernel-parameters.txt
··· 8196 8196 p = USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT 8197 8197 (Reduce timeout of the SET_ADDRESS 8198 8198 request from 5000 ms to 500 ms); 8199 + q = USB_QUIRK_FORCE_ONE_CONFIG (Device 8200 + claims zero configurations, 8201 + forcing to 1); 8199 8202 Example: quirks=0781:5580:bk,0a5c:5834:gij 8200 8203 8201 8204 usbhid.mousepoll=
+2
Documentation/dev-tools/kunit/run_wrapper.rst
··· 336 336 - ``--list_tests_attr``: If set, lists all tests that will be run and all of their 337 337 attributes. 338 338 339 + - ``--list_suites``: If set, lists all suites that will be run. 340 + 339 341 Command-line completion 340 342 ============================== 341 343
+20 -1
Documentation/devicetree/bindings/display/msm/dp-controller.yaml
··· 253 253 enum: 254 254 # these platforms support 2 streams MST on some interfaces, 255 255 # others are SST only 256 - - qcom,glymur-dp 257 256 - qcom,sc8280xp-dp 258 257 - qcom,x1e80100-dp 259 258 then: ··· 308 309 clocks-names: 309 310 minItems: 6 310 311 maxItems: 8 312 + 313 + - if: 314 + properties: 315 + compatible: 316 + contains: 317 + enum: 318 + # these platforms support 2 streams MST on some interfaces, 319 + # others are SST only, but all controllers have 4 ports 320 + - qcom,glymur-dp 321 + then: 322 + properties: 323 + reg: 324 + minItems: 9 325 + maxItems: 9 326 + clocks: 327 + minItems: 5 328 + maxItems: 6 329 + clocks-names: 330 + minItems: 5 331 + maxItems: 6 311 332 312 333 unevaluatedProperties: false 313 334
+10 -6
Documentation/devicetree/bindings/display/msm/qcom,glymur-mdss.yaml
··· 176 176 }; 177 177 }; 178 178 179 - displayport-controller@ae90000 { 179 + displayport-controller@af54000 { 180 180 compatible = "qcom,glymur-dp"; 181 - reg = <0xae90000 0x200>, 182 - <0xae90200 0x200>, 183 - <0xae90400 0x600>, 184 - <0xae91000 0x400>, 185 - <0xae91400 0x400>; 181 + reg = <0xaf54000 0x200>, 182 + <0xaf54200 0x200>, 183 + <0xaf55000 0xc00>, 184 + <0xaf56000 0x400>, 185 + <0xaf57000 0x400>, 186 + <0xaf58000 0x400>, 187 + <0xaf59000 0x400>, 188 + <0xaf5a000 0x600>, 189 + <0xaf5b000 0x600>; 186 190 187 191 interrupt-parent = <&mdss>; 188 192 interrupts = <12>;
+1 -1
Documentation/devicetree/bindings/display/msm/qcom,sm8750-mdss.yaml
··· 10 10 - Krzysztof Kozlowski <krzk@kernel.org> 11 11 12 12 description: 13 - SM8650 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like 13 + SM8750 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like 14 14 DPU display controller, DSI and DP interfaces etc. 15 15 16 16 $ref: /schemas/display/msm/mdss-common.yaml#
+1 -1
Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
··· 7 7 title: Synopsys DesignWare APB I2C Controller 8 8 9 9 maintainers: 10 - - Jarkko Nikula <jarkko.nikula@linux.intel.com> 10 + - Mika Westerberg <mika.westerberg@linux.intel.com> 11 11 12 12 allOf: 13 13 - $ref: /schemas/i2c/i2c-controller.yaml#
+19 -7
Documentation/devicetree/bindings/mtd/st,spear600-smi.yaml
··· 19 19 Flash sub nodes describe the memory range and optional per-flash 20 20 properties. 21 21 22 - allOf: 23 - - $ref: mtd.yaml# 24 - 25 22 properties: 26 23 compatible: 27 24 const: st,spear600-smi ··· 39 42 $ref: /schemas/types.yaml#/definitions/uint32 40 43 description: Functional clock rate of the SMI controller in Hz. 41 44 42 - st,smi-fast-mode: 43 - type: boolean 44 - description: Indicates that the attached flash supports fast read mode. 45 + patternProperties: 46 + "^flash@.*$": 47 + $ref: /schemas/mtd/mtd.yaml# 48 + 49 + properties: 50 + reg: 51 + maxItems: 1 52 + 53 + st,smi-fast-mode: 54 + type: boolean 55 + description: Indicates that the attached flash supports fast read mode. 56 + 57 + unevaluatedProperties: false 58 + 59 + required: 60 + - reg 45 61 46 62 required: 47 63 - compatible 48 64 - reg 49 65 - clock-rate 66 + - "#address-cells" 67 + - "#size-cells" 50 68 51 69 unevaluatedProperties: false 52 70 ··· 76 64 interrupts = <12>; 77 65 clock-rate = <50000000>; /* 50 MHz */ 78 66 79 - flash@f8000000 { 67 + flash@fc000000 { 80 68 reg = <0xfc000000 0x1000>; 81 69 st,smi-fast-mode; 82 70 };
+93
Documentation/devicetree/bindings/powerpc/fsl/fsl,mpc83xx.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/powerpc/fsl/fsl,mpc83xx.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Freescale PowerQUICC II Pro (MPC83xx) platforms 8 + 9 + maintainers: 10 + - J. Neuschäfer <j.ne@posteo.net> 11 + 12 + properties: 13 + $nodename: 14 + const: '/' 15 + compatible: 16 + oneOf: 17 + - description: MPC83xx Reference Design Boards 18 + items: 19 + - enum: 20 + - fsl,mpc8308rdb 21 + - fsl,mpc8315erdb 22 + - fsl,mpc8360rdk 23 + - fsl,mpc8377rdb 24 + - fsl,mpc8377wlan 25 + - fsl,mpc8378rdb 26 + - fsl,mpc8379rdb 27 + 28 + - description: MPC8313E Reference Design Board 29 + items: 30 + - const: MPC8313ERDB 31 + - const: MPC831xRDB 32 + - const: MPC83xxRDB 33 + 34 + - description: MPC8323E Reference Design Board 35 + items: 36 + - const: MPC8323ERDB 37 + - const: MPC832xRDB 38 + - const: MPC83xxRDB 39 + 40 + - description: MPC8349E-mITX(-GP) Reference Design Platform 41 + items: 42 + - enum: 43 + - MPC8349EMITX 44 + - MPC8349EMITXGP 45 + - const: MPC834xMITX 46 + - const: MPC83xxMITX 47 + 48 + - description: Keymile KMETER1 board 49 + const: keymile,KMETER1 50 + 51 + - description: MPC8308 P1M board 52 + const: denx,mpc8308_p1m 53 + 54 + patternProperties: 55 + "^soc@.*$": 56 + type: object 57 + properties: 58 + compatible: 59 + oneOf: 60 + - items: 61 + - enum: 62 + - fsl,mpc8315-immr 63 + - fsl,mpc8308-immr 64 + - const: simple-bus 65 + - items: 66 + - const: fsl,mpc8360-immr 67 + - const: fsl,immr 68 + - const: fsl,soc 69 + - const: simple-bus 70 + - const: simple-bus 71 + 72 + additionalProperties: true 73 + 74 + examples: 75 + - | 76 + / { 77 + compatible = "fsl,mpc8315erdb"; 78 + model = "MPC8315E-RDB"; 79 + #address-cells = <1>; 80 + #size-cells = <1>; 81 + 82 + soc@e0000000 { 83 + compatible = "fsl,mpc8315-immr", "simple-bus"; 84 + reg = <0xe0000000 0x00000200>; 85 + #address-cells = <1>; 86 + #size-cells = <1>; 87 + device_type = "soc"; 88 + ranges = <0 0xe0000000 0x00100000>; 89 + bus-frequency = <0>; 90 + }; 91 + }; 92 + 93 + ...
+2 -2
Documentation/devicetree/bindings/regulator/regulator.yaml
··· 168 168 offset from voltage set to regulator. 169 169 170 170 regulator-uv-protection-microvolt: 171 - description: Set over under voltage protection limit. This is a limit where 171 + description: Set under voltage protection limit. This is a limit where 172 172 hardware performs emergency shutdown. Zero can be passed to disable 173 173 protection and value '1' indicates that protection should be enabled but 174 174 limit setting can be omitted. Limit is given as microvolt offset from ··· 182 182 is given as microvolt offset from voltage set to regulator. 183 183 184 184 regulator-uv-warn-microvolt: 185 - description: Set over under voltage warning limit. This is a limit where 185 + description: Set under voltage warning limit. This is a limit where 186 186 hardware is assumed still to be functional but approaching limit where 187 187 it gets damaged. Recovery actions should be initiated. Zero can be passed 188 188 to disable detection and value '1' indicates that detection should
+24 -5
Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
··· 6 6 7 7 title: Allwinner A31 SPI Controller 8 8 9 - allOf: 10 - - $ref: spi-controller.yaml 11 - 12 9 maintainers: 13 10 - Chen-Yu Tsai <wens@csie.org> 14 11 - Maxime Ripard <mripard@kernel.org> ··· 79 82 80 83 spi-rx-bus-width: 81 84 items: 82 - - const: 1 85 + enum: [0, 1, 2, 4] 83 86 84 87 spi-tx-bus-width: 85 88 items: 86 - - const: 1 89 + enum: [0, 1, 2, 4] 87 90 88 91 required: 89 92 - compatible ··· 91 94 - interrupts 92 95 - clocks 93 96 - clock-names 97 + 98 + allOf: 99 + - $ref: spi-controller.yaml 100 + - if: 101 + not: 102 + properties: 103 + compatible: 104 + contains: 105 + enum: 106 + - allwinner,sun50i-r329-spi 107 + - allwinner,sun55i-a523-spi 108 + then: 109 + patternProperties: 110 + "^.*@[0-9a-f]+": 111 + properties: 112 + spi-rx-bus-width: 113 + items: 114 + enum: [0, 1] 115 + 116 + spi-tx-bus-width: 117 + items: 118 + enum: [0, 1] 94 119 95 120 unevaluatedProperties: false 96 121
+48
Documentation/driver-api/driver-model/binding.rst
··· 99 99 When a driver is removed, the list of devices that it supports is 100 100 iterated over, and the driver's remove callback is called for each 101 101 one. The device is removed from that list and the symlinks removed. 102 + 103 + 104 + Driver Override 105 + ~~~~~~~~~~~~~~~ 106 + 107 + Userspace may override the standard matching by writing a driver name to 108 + a device's ``driver_override`` sysfs attribute. When set, only a driver 109 + whose name matches the override will be considered during binding. This 110 + bypasses all bus-specific matching (OF, ACPI, ID tables, etc.). 111 + 112 + The override may be cleared by writing an empty string, which returns 113 + the device to standard matching rules. Writing to ``driver_override`` 114 + does not automatically unbind the device from its current driver or 115 + make any attempt to load the specified driver. 116 + 117 + Buses opt into this mechanism by setting the ``driver_override`` flag in 118 + their ``struct bus_type``:: 119 + 120 + const struct bus_type example_bus_type = { 121 + ... 122 + .driver_override = true, 123 + }; 124 + 125 + When the flag is set, the driver core automatically creates the 126 + ``driver_override`` sysfs attribute for every device on that bus. 127 + 128 + The bus's ``match()`` callback should check the override before performing 129 + its own matching, using ``device_match_driver_override()``:: 130 + 131 + static int example_match(struct device *dev, const struct device_driver *drv) 132 + { 133 + int ret; 134 + 135 + ret = device_match_driver_override(dev, drv); 136 + if (ret >= 0) 137 + return ret; 138 + 139 + /* Fall through to bus-specific matching... */ 140 + } 141 + 142 + ``device_match_driver_override()`` returns > 0 if the override matches 143 + the given driver, 0 if the override is set but does not match, or < 0 if 144 + no override is set at all. 145 + 146 + Additional helpers are available: 147 + 148 + - ``device_set_driver_override()`` - set or clear the override from kernel code. 149 + - ``device_has_driver_override()`` - check whether an override is set.
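To round out the kernel-side snippets in the hunk above, a hedged sketch of the userspace sequence follows (the PCI address and the ``vfio-pci`` driver name are illustrative assumptions; the device is assumed unbound, since writing the override does not unbind it, and ``drivers_probe`` is the PCI core's reprobe trigger)::

    /* Steer one PCI device to a chosen driver, then reprobe it. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int write_str(const char *path, const char *val)
    {
        int fd = open(path, O_WRONLY);

        if (fd < 0 || write(fd, val, strlen(val)) < 0) {
            perror(path);
            if (fd >= 0)
                close(fd);
            return -1;
        }
        return close(fd);
    }

    int main(void)
    {
        const char *dev = "0000:00:1f.0";   /* hypothetical device */

        /* From here on, only a driver named "vfio-pci" matches it. */
        if (write_str("/sys/bus/pci/devices/0000:00:1f.0/driver_override",
                      "vfio-pci"))
            return 1;
        return write_str("/sys/bus/pci/drivers_probe", dev) ? 1 : 0;
    }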
+6 -6
Documentation/netlink/specs/net_shaper.yaml
··· 247 247 flags: [admin-perm] 248 248 249 249 do: 250 - pre: net-shaper-nl-pre-doit 251 - post: net-shaper-nl-post-doit 250 + pre: net-shaper-nl-pre-doit-write 251 + post: net-shaper-nl-post-doit-write 252 252 request: 253 253 attributes: 254 254 - ifindex ··· 278 278 flags: [admin-perm] 279 279 280 280 do: 281 - pre: net-shaper-nl-pre-doit 282 - post: net-shaper-nl-post-doit 281 + pre: net-shaper-nl-pre-doit-write 282 + post: net-shaper-nl-post-doit-write 283 283 request: 284 284 attributes: *ns-binding 285 285 ··· 309 309 flags: [admin-perm] 310 310 311 311 do: 312 - pre: net-shaper-nl-pre-doit 313 - post: net-shaper-nl-post-doit 312 + pre: net-shaper-nl-pre-doit-write 313 + post: net-shaper-nl-post-doit-write 314 314 request: 315 315 attributes: 316 316 - ifindex
+27 -3
Documentation/scheduler/sched-ext.rst
··· 43 43 CONFIG_DEBUG_INFO_BTF=y 44 44 CONFIG_BPF_JIT_ALWAYS_ON=y 45 45 CONFIG_BPF_JIT_DEFAULT_ON=y 46 - CONFIG_PAHOLE_HAS_BTF_TAG=y 47 46 48 47 sched_ext is used only when the BPF scheduler is loaded and running. 49 48 ··· 57 58 However, when the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is 58 59 set in ``ops->flags``, only tasks with the ``SCHED_EXT`` policy are scheduled 59 60 by sched_ext, while tasks with ``SCHED_NORMAL``, ``SCHED_BATCH`` and 60 - ``SCHED_IDLE`` policies are scheduled by the fair-class scheduler. 61 + ``SCHED_IDLE`` policies are scheduled by the fair-class scheduler which has 62 + higher sched_class precedence than ``SCHED_EXT``. 61 63 62 64 Terminating the sched_ext scheduler program, triggering `SysRq-S`, or 63 65 detection of any internal error including stalled runnable tasks aborts the ··· 345 345 The functions prefixed with ``scx_bpf_`` can be called from the BPF 346 346 scheduler. 347 347 348 + * ``kernel/sched/ext_idle.c`` contains the built-in idle CPU selection policy. 349 + 348 350 * ``tools/sched_ext/`` hosts example BPF scheduler implementations. 349 351 350 352 * ``scx_simple[.bpf].c``: Minimal global FIFO scheduler example using a ··· 355 353 * ``scx_qmap[.bpf].c``: A multi-level FIFO scheduler supporting five 356 354 levels of priority implemented with ``BPF_MAP_TYPE_QUEUE``. 357 355 356 + * ``scx_central[.bpf].c``: A central FIFO scheduler where all scheduling 357 + decisions are made on one CPU, demonstrating ``LOCAL_ON`` dispatching, 358 + tickless operation, and kthread preemption. 359 + 360 + * ``scx_cpu0[.bpf].c``: A scheduler that queues all tasks to a shared DSQ 361 + and only dispatches them on CPU0 in FIFO order. Useful for testing bypass 362 + behavior. 363 + 364 + * ``scx_flatcg[.bpf].c``: A flattened cgroup hierarchy scheduler 365 + implementing hierarchical weight-based cgroup CPU control by compounding 366 + each cgroup's share at every level into a single flat scheduling layer. 367 + 368 + * ``scx_pair[.bpf].c``: A core-scheduling example that always makes 369 + sibling CPU pairs execute tasks from the same CPU cgroup. 370 + 371 + * ``scx_sdt[.bpf].c``: A variation of ``scx_simple`` demonstrating BPF 372 + arena memory management for per-task data. 373 + 374 + * ``scx_userland[.bpf].c``: A minimal scheduler demonstrating user space 375 + scheduling. Tasks with CPU affinity are direct-dispatched in FIFO order; 376 + all others are scheduled in user space by a simple vruntime scheduler. 377 + 358 378 ABI Instability 359 379 =============== 360 380 361 381 The APIs provided by sched_ext to BPF schedulers programs have no stability 362 382 guarantees. This includes the ops table callbacks and constants defined in 363 383 ``include/linux/sched/ext.h``, as well as the ``scx_bpf_`` kfuncs defined in 364 - ``kernel/sched/ext.c``. 384 + ``kernel/sched/ext.c`` and ``kernel/sched/ext_idle.c``. 365 385 366 386 While we will attempt to provide a relatively stable API surface when 367 387 possible, they are subject to change without warning between kernel
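Given the ``SCX_OPS_SWITCH_PARTIAL`` discussion in the hunk above, here is a hedged userspace sketch of opting a task into ``SCHED_EXT`` (the fallback define assumes policy number 7 from include/uapi/linux/sched.h, which older libc headers may not carry)::

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    #ifndef SCHED_EXT
    #define SCHED_EXT 7 /* include/uapi/linux/sched.h */
    #endif

    int main(void)
    {
        /* Priority must be 0, as for SCHED_NORMAL. */
        struct sched_param param = { .sched_priority = 0 };

        /* pid 0 means the calling task. */
        if (sched_setscheduler(0, SCHED_EXT, &param)) {
            perror("sched_setscheduler");
            return 1;
        }
        return 0;
    }

While no BPF scheduler is loaded, such a task keeps behaving like a ``SCHED_NORMAL`` one.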
+107 -99
Documentation/virt/kvm/api.rst
··· 8435 8435 8436 8436 The valid bits in cap.args[0] are: 8437 8437 8438 - =================================== ============================================ 8439 - KVM_X86_QUIRK_LINT0_REENABLED By default, the reset value for the LVT 8440 - LINT0 register is 0x700 (APIC_MODE_EXTINT). 8441 - When this quirk is disabled, the reset value 8442 - is 0x10000 (APIC_LVT_MASKED). 8438 + ======================================== ================================================ 8439 + KVM_X86_QUIRK_LINT0_REENABLED By default, the reset value for the LVT 8440 + LINT0 register is 0x700 (APIC_MODE_EXTINT). 8441 + When this quirk is disabled, the reset value 8442 + is 0x10000 (APIC_LVT_MASKED). 8443 8443 8444 - KVM_X86_QUIRK_CD_NW_CLEARED By default, KVM clears CR0.CD and CR0.NW on 8445 - AMD CPUs to workaround buggy guest firmware 8446 - that runs in perpetuity with CR0.CD, i.e. 8447 - with caches in "no fill" mode. 8444 + KVM_X86_QUIRK_CD_NW_CLEARED By default, KVM clears CR0.CD and CR0.NW on 8445 + AMD CPUs to workaround buggy guest firmware 8446 + that runs in perpetuity with CR0.CD, i.e. 8447 + with caches in "no fill" mode. 8448 8448 8449 - When this quirk is disabled, KVM does not 8450 - change the value of CR0.CD and CR0.NW. 8449 + When this quirk is disabled, KVM does not 8450 + change the value of CR0.CD and CR0.NW. 8451 8451 8452 - KVM_X86_QUIRK_LAPIC_MMIO_HOLE By default, the MMIO LAPIC interface is 8453 - available even when configured for x2APIC 8454 - mode. When this quirk is disabled, KVM 8455 - disables the MMIO LAPIC interface if the 8456 - LAPIC is in x2APIC mode. 8452 + KVM_X86_QUIRK_LAPIC_MMIO_HOLE By default, the MMIO LAPIC interface is 8453 + available even when configured for x2APIC 8454 + mode. When this quirk is disabled, KVM 8455 + disables the MMIO LAPIC interface if the 8456 + LAPIC is in x2APIC mode. 8457 8457 8458 - KVM_X86_QUIRK_OUT_7E_INC_RIP By default, KVM pre-increments %rip before 8459 - exiting to userspace for an OUT instruction 8460 - to port 0x7e. When this quirk is disabled, 8461 - KVM does not pre-increment %rip before 8462 - exiting to userspace. 8458 + KVM_X86_QUIRK_OUT_7E_INC_RIP By default, KVM pre-increments %rip before 8459 + exiting to userspace for an OUT instruction 8460 + to port 0x7e. When this quirk is disabled, 8461 + KVM does not pre-increment %rip before 8462 + exiting to userspace. 8463 8463 8464 - KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT When this quirk is disabled, KVM sets 8465 - CPUID.01H:ECX[bit 3] (MONITOR/MWAIT) if 8466 - IA32_MISC_ENABLE[bit 18] (MWAIT) is set. 8467 - Additionally, when this quirk is disabled, 8468 - KVM clears CPUID.01H:ECX[bit 3] if 8469 - IA32_MISC_ENABLE[bit 18] is cleared. 8464 + KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT When this quirk is disabled, KVM sets 8465 + CPUID.01H:ECX[bit 3] (MONITOR/MWAIT) if 8466 + IA32_MISC_ENABLE[bit 18] (MWAIT) is set. 8467 + Additionally, when this quirk is disabled, 8468 + KVM clears CPUID.01H:ECX[bit 3] if 8469 + IA32_MISC_ENABLE[bit 18] is cleared. 8470 8470 8471 - KVM_X86_QUIRK_FIX_HYPERCALL_INSN By default, KVM rewrites guest 8472 - VMMCALL/VMCALL instructions to match the 8473 - vendor's hypercall instruction for the 8474 - system. When this quirk is disabled, KVM 8475 - will no longer rewrite invalid guest 8476 - hypercall instructions. Executing the 8477 - incorrect hypercall instruction will 8478 - generate a #UD within the guest. 
8471 + KVM_X86_QUIRK_FIX_HYPERCALL_INSN By default, KVM rewrites guest 8472 + VMMCALL/VMCALL instructions to match the 8473 + vendor's hypercall instruction for the 8474 + system. When this quirk is disabled, KVM 8475 + will no longer rewrite invalid guest 8476 + hypercall instructions. Executing the 8477 + incorrect hypercall instruction will 8478 + generate a #UD within the guest. 8479 8479 8480 - KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS By default, KVM emulates MONITOR/MWAIT (if 8481 - they are intercepted) as NOPs regardless of 8482 - whether or not MONITOR/MWAIT are supported 8483 - according to guest CPUID. When this quirk 8484 - is disabled and KVM_X86_DISABLE_EXITS_MWAIT 8485 - is not set (MONITOR/MWAIT are intercepted), 8486 - KVM will inject a #UD on MONITOR/MWAIT if 8487 - they're unsupported per guest CPUID. Note, 8488 - KVM will modify MONITOR/MWAIT support in 8489 - guest CPUID on writes to MISC_ENABLE if 8490 - KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT is 8491 - disabled. 8480 + KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS By default, KVM emulates MONITOR/MWAIT (if 8481 + they are intercepted) as NOPs regardless of 8482 + whether or not MONITOR/MWAIT are supported 8483 + according to guest CPUID. When this quirk 8484 + is disabled and KVM_X86_DISABLE_EXITS_MWAIT 8485 + is not set (MONITOR/MWAIT are intercepted), 8486 + KVM will inject a #UD on MONITOR/MWAIT if 8487 + they're unsupported per guest CPUID. Note, 8488 + KVM will modify MONITOR/MWAIT support in 8489 + guest CPUID on writes to MISC_ENABLE if 8490 + KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT is 8491 + disabled. 8492 8492 8493 - KVM_X86_QUIRK_SLOT_ZAP_ALL By default, for KVM_X86_DEFAULT_VM VMs, KVM 8494 - invalidates all SPTEs in all memslots and 8495 - address spaces when a memslot is deleted or 8496 - moved. When this quirk is disabled (or the 8497 - VM type isn't KVM_X86_DEFAULT_VM), KVM only 8498 - ensures the backing memory of the deleted 8499 - or moved memslot isn't reachable, i.e KVM 8500 - _may_ invalidate only SPTEs related to the 8501 - memslot. 8493 + KVM_X86_QUIRK_SLOT_ZAP_ALL By default, for KVM_X86_DEFAULT_VM VMs, KVM 8494 + invalidates all SPTEs in all memslots and 8495 + address spaces when a memslot is deleted or 8496 + moved. When this quirk is disabled (or the 8497 + VM type isn't KVM_X86_DEFAULT_VM), KVM only 8498 + ensures the backing memory of the deleted 8499 + or moved memslot isn't reachable, i.e KVM 8500 + _may_ invalidate only SPTEs related to the 8501 + memslot. 8502 8502 8503 - KVM_X86_QUIRK_STUFF_FEATURE_MSRS By default, at vCPU creation, KVM sets the 8504 - vCPU's MSR_IA32_PERF_CAPABILITIES (0x345), 8505 - MSR_IA32_ARCH_CAPABILITIES (0x10a), 8506 - MSR_PLATFORM_INFO (0xce), and all VMX MSRs 8507 - (0x480..0x492) to the maximal capabilities 8508 - supported by KVM. KVM also sets 8509 - MSR_IA32_UCODE_REV (0x8b) to an arbitrary 8510 - value (which is different for Intel vs. 8511 - AMD). Lastly, when guest CPUID is set (by 8512 - userspace), KVM modifies select VMX MSR 8513 - fields to force consistency between guest 8514 - CPUID and L2's effective ISA. When this 8515 - quirk is disabled, KVM zeroes the vCPU's MSR 8516 - values (with two exceptions, see below), 8517 - i.e. treats the feature MSRs like CPUID 8518 - leaves and gives userspace full control of 8519 - the vCPU model definition. This quirk does 8520 - not affect VMX MSRs CR0/CR4_FIXED1 (0x487 8521 - and 0x489), as KVM does now allow them to 8522 - be set by userspace (KVM sets them based on 8523 - guest CPUID, for safety purposes). 
8503 + KVM_X86_QUIRK_STUFF_FEATURE_MSRS By default, at vCPU creation, KVM sets the 8504 + vCPU's MSR_IA32_PERF_CAPABILITIES (0x345), 8505 + MSR_IA32_ARCH_CAPABILITIES (0x10a), 8506 + MSR_PLATFORM_INFO (0xce), and all VMX MSRs 8507 + (0x480..0x492) to the maximal capabilities 8508 + supported by KVM. KVM also sets 8509 + MSR_IA32_UCODE_REV (0x8b) to an arbitrary 8510 + value (which is different for Intel vs. 8511 + AMD). Lastly, when guest CPUID is set (by 8512 + userspace), KVM modifies select VMX MSR 8513 + fields to force consistency between guest 8514 + CPUID and L2's effective ISA. When this 8515 + quirk is disabled, KVM zeroes the vCPU's MSR 8516 + values (with two exceptions, see below), 8517 + i.e. treats the feature MSRs like CPUID 8518 + leaves and gives userspace full control of 8519 + the vCPU model definition. This quirk does 8520 + not affect VMX MSRs CR0/CR4_FIXED1 (0x487 8521 + and 0x489), as KVM does now allow them to 8522 + be set by userspace (KVM sets them based on 8523 + guest CPUID, for safety purposes). 8524 8524 8525 - KVM_X86_QUIRK_IGNORE_GUEST_PAT By default, on Intel platforms, KVM ignores 8526 - guest PAT and forces the effective memory 8527 - type to WB in EPT. The quirk is not available 8528 - on Intel platforms which are incapable of 8529 - safely honoring guest PAT (i.e., without CPU 8530 - self-snoop, KVM always ignores guest PAT and 8531 - forces effective memory type to WB). It is 8532 - also ignored on AMD platforms or, on Intel, 8533 - when a VM has non-coherent DMA devices 8534 - assigned; KVM always honors guest PAT in 8535 - such case. The quirk is needed to avoid 8536 - slowdowns on certain Intel Xeon platforms 8537 - (e.g. ICX, SPR) where self-snoop feature is 8538 - supported but UC is slow enough to cause 8539 - issues with some older guests that use 8540 - UC instead of WC to map the video RAM. 8541 - Userspace can disable the quirk to honor 8542 - guest PAT if it knows that there is no such 8543 - guest software, for example if it does not 8544 - expose a bochs graphics device (which is 8545 - known to have had a buggy driver). 8546 - =================================== ============================================ 8525 + KVM_X86_QUIRK_IGNORE_GUEST_PAT By default, on Intel platforms, KVM ignores 8526 + guest PAT and forces the effective memory 8527 + type to WB in EPT. The quirk is not available 8528 + on Intel platforms which are incapable of 8529 + safely honoring guest PAT (i.e., without CPU 8530 + self-snoop, KVM always ignores guest PAT and 8531 + forces effective memory type to WB). It is 8532 + also ignored on AMD platforms or, on Intel, 8533 + when a VM has non-coherent DMA devices 8534 + assigned; KVM always honors guest PAT in 8535 + such case. The quirk is needed to avoid 8536 + slowdowns on certain Intel Xeon platforms 8537 + (e.g. ICX, SPR) where self-snoop feature is 8538 + supported but UC is slow enough to cause 8539 + issues with some older guests that use 8540 + UC instead of WC to map the video RAM. 8541 + Userspace can disable the quirk to honor 8542 + guest PAT if it knows that there is no such 8543 + guest software, for example if it does not 8544 + expose a bochs graphics device (which is 8545 + known to have had a buggy driver). 8546 + 8547 + KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM By default, KVM relaxes the consistency 8548 + check for GUEST_IA32_DEBUGCTL in vmcs12 8549 + to allow FREEZE_IN_SMM to be set. When 8550 + this quirk is disabled, KVM requires this 8551 + bit to be cleared. 
Note that the vmcs02 8552 + bit is still completely controlled by the 8553 + host, regardless of the quirk setting. 8554 + ======================================== ================================================ 8547 8555 8548 8556 7.32 KVM_CAP_MAX_VCPU_ID 8549 8557 ------------------------
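Userspace consumes the quirk table above through ``KVM_ENABLE_CAP(KVM_CAP_DISABLE_QUIRKS2)`` on the VM file descriptor. A hedged sketch (``vm_fd`` is assumed to come from ``KVM_CREATE_VM``; the two quirk bits are arbitrary picks from the table)::

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int disable_quirks(int vm_fd)
    {
        struct kvm_enable_cap cap = {
            .cap = KVM_CAP_DISABLE_QUIRKS2,
            /* Bits to disable; must be a subset of the mask that
             * KVM_CHECK_EXTENSION reports below. */
            .args[0] = KVM_X86_QUIRK_CD_NW_CLEARED |
                       KVM_X86_QUIRK_LAPIC_MMIO_HOLE,
        };
        int supported = ioctl(vm_fd, KVM_CHECK_EXTENSION,
                              KVM_CAP_DISABLE_QUIRKS2);

        if (supported < 0 || (cap.args[0] & ~(__u64)supported)) {
            fprintf(stderr, "quirk mask not supported\n");
            return -1;
        }
        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }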
+2
Documentation/virt/kvm/locking.rst
··· 17 17 18 18 - kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock 19 19 20 + - vcpu->mutex is taken outside kvm->slots_lock and kvm->slots_arch_lock 21 + 20 22 - kvm->slots_lock is taken outside kvm->irq_lock, though acquiring 21 23 them together is quite rare. 22 24
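Because lock-ordering prose is easy to misread, a small illustrative sketch of the newly documented rule (the function is hypothetical; only the nesting order matters)::

    #include <linux/kvm_host.h>

    /* vcpu->mutex (outer) nests outside kvm->slots_lock (inner),
     * matching the ordering documented in the hunk above. */
    static void example_vcpu_slots_op(struct kvm *kvm, struct kvm_vcpu *vcpu)
    {
        mutex_lock(&vcpu->mutex);
        mutex_lock(&kvm->slots_lock);

        /* ... operate on memslots on behalf of this vCPU ... */

        mutex_unlock(&kvm->slots_lock);
        mutex_unlock(&vcpu->mutex);
    }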
+33 -21
MAINTAINERS
··· 4022 4022 ASYMMETRIC KEYS 4023 4023 M: David Howells <dhowells@redhat.com> 4024 4024 M: Lukas Wunner <lukas@wunner.de> 4025 - M: Ignat Korchagin <ignat@cloudflare.com> 4025 + M: Ignat Korchagin <ignat@linux.win> 4026 4026 L: keyrings@vger.kernel.org 4027 4027 L: linux-crypto@vger.kernel.org 4028 4028 S: Maintained ··· 4035 4035 4036 4036 ASYMMETRIC KEYS - ECDSA 4037 4037 M: Lukas Wunner <lukas@wunner.de> 4038 - M: Ignat Korchagin <ignat@cloudflare.com> 4038 + M: Ignat Korchagin <ignat@linux.win> 4039 4039 R: Stefan Berger <stefanb@linux.ibm.com> 4040 4040 L: linux-crypto@vger.kernel.org 4041 4041 S: Maintained ··· 4045 4045 4046 4046 ASYMMETRIC KEYS - GOST 4047 4047 M: Lukas Wunner <lukas@wunner.de> 4048 - M: Ignat Korchagin <ignat@cloudflare.com> 4048 + M: Ignat Korchagin <ignat@linux.win> 4049 4049 L: linux-crypto@vger.kernel.org 4050 4050 S: Odd fixes 4051 4051 F: crypto/ecrdsa* 4052 4052 4053 4053 ASYMMETRIC KEYS - RSA 4054 4054 M: Lukas Wunner <lukas@wunner.de> 4055 - M: Ignat Korchagin <ignat@cloudflare.com> 4055 + M: Ignat Korchagin <ignat@linux.win> 4056 4056 L: linux-crypto@vger.kernel.org 4057 4057 S: Maintained 4058 4058 F: crypto/rsa* ··· 7998 7998 F: drivers/gpu/drm/tiny/hx8357d.c 7999 7999 8000 8000 DRM DRIVER FOR HYPERV SYNTHETIC VIDEO DEVICE 8001 - M: Deepak Rawat <drawat.floss@gmail.com> 8001 + M: Dexuan Cui <decui@microsoft.com> 8002 + M: Long Li <longli@microsoft.com> 8003 + M: Saurabh Sengar <ssengar@linux.microsoft.com> 8002 8004 L: linux-hyperv@vger.kernel.org 8003 8005 L: dri-devel@lists.freedesktop.org 8004 8006 S: Maintained ··· 8628 8626 F: include/uapi/drm/lima_drm.h 8629 8627 8630 8628 DRM DRIVERS FOR LOONGSON 8631 - M: Sui Jingfeng <suijingfeng@loongson.cn> 8632 8629 L: dri-devel@lists.freedesktop.org 8633 - S: Supported 8630 + S: Orphan 8634 8631 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git 8635 8632 F: drivers/gpu/drm/loongson/ 8636 8633 ··· 16359 16358 16360 16359 MEDIATEK T7XX 5G WWAN MODEM DRIVER 16361 16360 M: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com> 16362 - R: Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com> 16363 16361 R: Liu Haijun <haijun.liu@mediatek.com> 16364 16362 R: Ricardo Martinez <ricardo.martinez@linux.intel.com> 16365 16363 L: netdev@vger.kernel.org ··· 16643 16643 MEMORY MANAGEMENT - CORE 16644 16644 M: Andrew Morton <akpm@linux-foundation.org> 16645 16645 M: David Hildenbrand <david@kernel.org> 16646 - R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> 16646 + R: Lorenzo Stoakes <ljs@kernel.org> 16647 16647 R: Liam R. Howlett <Liam.Howlett@oracle.com> 16648 16648 R: Vlastimil Babka <vbabka@kernel.org> 16649 16649 R: Mike Rapoport <rppt@kernel.org> ··· 16773 16773 MEMORY MANAGEMENT - MISC 16774 16774 M: Andrew Morton <akpm@linux-foundation.org> 16775 16775 M: David Hildenbrand <david@kernel.org> 16776 - R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> 16776 + R: Lorenzo Stoakes <ljs@kernel.org> 16777 16777 R: Liam R. 
Howlett <Liam.Howlett@oracle.com> 16778 16778 R: Vlastimil Babka <vbabka@kernel.org> 16779 16779 R: Mike Rapoport <rppt@kernel.org> ··· 16864 16864 R: Michal Hocko <mhocko@kernel.org> 16865 16865 R: Qi Zheng <zhengqi.arch@bytedance.com> 16866 16866 R: Shakeel Butt <shakeel.butt@linux.dev> 16867 - R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> 16867 + R: Lorenzo Stoakes <ljs@kernel.org> 16868 16868 L: linux-mm@kvack.org 16869 16869 S: Maintained 16870 16870 F: mm/vmscan.c ··· 16873 16873 MEMORY MANAGEMENT - RMAP (REVERSE MAPPING) 16874 16874 M: Andrew Morton <akpm@linux-foundation.org> 16875 16875 M: David Hildenbrand <david@kernel.org> 16876 - M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> 16876 + M: Lorenzo Stoakes <ljs@kernel.org> 16877 16877 R: Rik van Riel <riel@surriel.com> 16878 16878 R: Liam R. Howlett <Liam.Howlett@oracle.com> 16879 16879 R: Vlastimil Babka <vbabka@kernel.org> ··· 16918 16918 MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE) 16919 16919 M: Andrew Morton <akpm@linux-foundation.org> 16920 16920 M: David Hildenbrand <david@kernel.org> 16921 - M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> 16921 + M: Lorenzo Stoakes <ljs@kernel.org> 16922 16922 R: Zi Yan <ziy@nvidia.com> 16923 16923 R: Baolin Wang <baolin.wang@linux.alibaba.com> 16924 16924 R: Liam R. Howlett <Liam.Howlett@oracle.com> ··· 16958 16958 16959 16959 MEMORY MANAGEMENT - RUST 16960 16960 M: Alice Ryhl <aliceryhl@google.com> 16961 - R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> 16961 + R: Lorenzo Stoakes <ljs@kernel.org> 16962 16962 R: Liam R. Howlett <Liam.Howlett@oracle.com> 16963 16963 L: linux-mm@kvack.org 16964 16964 L: rust-for-linux@vger.kernel.org ··· 16974 16974 MEMORY MAPPING 16975 16975 M: Andrew Morton <akpm@linux-foundation.org> 16976 16976 M: Liam R. Howlett <Liam.Howlett@oracle.com> 16977 - M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> 16977 + M: Lorenzo Stoakes <ljs@kernel.org> 16978 16978 R: Vlastimil Babka <vbabka@kernel.org> 16979 16979 R: Jann Horn <jannh@google.com> 16980 16980 R: Pedro Falcato <pfalcato@suse.de> ··· 17004 17004 M: Andrew Morton <akpm@linux-foundation.org> 17005 17005 M: Suren Baghdasaryan <surenb@google.com> 17006 17006 M: Liam R. Howlett <Liam.Howlett@oracle.com> 17007 - M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> 17007 + M: Lorenzo Stoakes <ljs@kernel.org> 17008 17008 R: Vlastimil Babka <vbabka@kernel.org> 17009 17009 R: Shakeel Butt <shakeel.butt@linux.dev> 17010 17010 L: linux-mm@kvack.org ··· 17019 17019 MEMORY MAPPING - MADVISE (MEMORY ADVICE) 17020 17020 M: Andrew Morton <akpm@linux-foundation.org> 17021 17021 M: Liam R. 
Howlett <Liam.Howlett@oracle.com> 17022 - M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> 17022 + M: Lorenzo Stoakes <ljs@kernel.org> 17023 17023 M: David Hildenbrand <david@kernel.org> 17024 17024 R: Vlastimil Babka <vbabka@kernel.org> 17025 17025 R: Jann Horn <jannh@google.com> ··· 21938 21938 21939 21939 RADOS BLOCK DEVICE (RBD) 21940 21940 M: Ilya Dryomov <idryomov@gmail.com> 21941 - R: Dongsheng Yang <dongsheng.yang@easystack.cn> 21941 + R: Dongsheng Yang <dongsheng.yang@linux.dev> 21942 21942 L: ceph-devel@vger.kernel.org 21943 21943 S: Supported 21944 21944 W: http://ceph.com/ ··· 22266 22266 L: linux-wireless@vger.kernel.org 22267 22267 S: Orphan 22268 22268 F: drivers/net/wireless/rsi/ 22269 + 22270 + RELAY 22271 + M: Andrew Morton <akpm@linux-foundation.org> 22272 + M: Jens Axboe <axboe@kernel.dk> 22273 + M: Jason Xing <kernelxing@tencent.com> 22274 + L: linux-kernel@vger.kernel.org 22275 + S: Maintained 22276 + F: Documentation/filesystems/relay.rst 22277 + F: include/linux/relay.h 22278 + F: kernel/relay.c 22269 22279 22270 22280 REGISTER MAP ABSTRACTION 22271 22281 M: Mark Brown <broonie@kernel.org> ··· 23166 23156 23167 23157 RUST [ALLOC] 23168 23158 M: Danilo Krummrich <dakr@kernel.org> 23169 - R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> 23159 + R: Lorenzo Stoakes <ljs@kernel.org> 23170 23160 R: Vlastimil Babka <vbabka@kernel.org> 23171 23161 R: Liam R. Howlett <Liam.Howlett@oracle.com> 23172 23162 R: Uladzislau Rezki <urezki@gmail.com> ··· 24343 24333 24344 24334 SLAB ALLOCATOR 24345 24335 M: Vlastimil Babka <vbabka@kernel.org> 24336 + M: Harry Yoo <harry.yoo@oracle.com> 24346 24337 M: Andrew Morton <akpm@linux-foundation.org> 24338 + R: Hao Li <hao.li@linux.dev> 24347 24339 R: Christoph Lameter <cl@gentwo.org> 24348 24340 R: David Rientjes <rientjes@google.com> 24349 24341 R: Roman Gushchin <roman.gushchin@linux.dev> 24350 - R: Harry Yoo <harry.yoo@oracle.com> 24351 24342 L: linux-mm@kvack.org 24352 24343 S: Maintained 24353 24344 T: git git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git ··· 24902 24891 F: drivers/pinctrl/spear/ 24903 24892 24904 24893 SPI NOR SUBSYSTEM 24905 - M: Tudor Ambarus <tudor.ambarus@linaro.org> 24906 24894 M: Pratyush Yadav <pratyush@kernel.org> 24907 24895 M: Michael Walle <mwalle@kernel.org> 24896 + R: Takahiro Kuwano <takahiro.kuwano@infineon.com> 24908 24897 L: linux-mtd@lists.infradead.org 24909 24898 S: Maintained 24910 24899 W: http://www.linux-mtd.infradead.org/ ··· 25759 25748 F: include/net/pkt_sched.h 25760 25749 F: include/net/sch_priv.h 25761 25750 F: include/net/tc_act/ 25751 + F: include/net/tc_wrapper.h 25762 25752 F: include/uapi/linux/pkt_cls.h 25763 25753 F: include/uapi/linux/pkt_sched.h 25764 25754 F: include/uapi/linux/tc_act/
+5 -1
Makefile
··· 2 2 VERSION = 7 3 3 PATCHLEVEL = 0 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc3 5 + EXTRAVERSION = -rc5 6 6 NAME = Baby Opossum Posse 7 7 8 8 # *DOCUMENTATION* ··· 476 476 export rust_common_flags := --edition=2021 \ 477 477 -Zbinary_dep_depinfo=y \ 478 478 -Astable_features \ 479 + -Aunused_features \ 479 480 -Dnon_ascii_idents \ 480 481 -Dunsafe_op_in_unsafe_fn \ 481 482 -Wmissing_docs \ ··· 1114 1113 # change __FILE__ to the relative path to the source directory 1115 1114 ifdef building_out_of_srctree 1116 1115 KBUILD_CPPFLAGS += -fmacro-prefix-map=$(srcroot)/= 1116 + ifeq ($(call rustc-option-yn, --remap-path-scope=macro),y) 1117 + KBUILD_RUSTFLAGS += --remap-path-prefix=$(srcroot)/= --remap-path-scope=macro 1118 + endif 1117 1119 endif 1118 1120 1119 1121 # include additional Makefiles when needed
-1
arch/arm/configs/multi_v7_defconfig
··· 279 279 CONFIG_TI_CPTS=y 280 280 CONFIG_TI_KEYSTONE_NETCP=y 281 281 CONFIG_TI_KEYSTONE_NETCP_ETHSS=y 282 - CONFIG_TI_PRUSS=m 283 282 CONFIG_TI_PRUETH=m 284 283 CONFIG_XILINX_EMACLITE=y 285 284 CONFIG_SFP=m
+8 -8
arch/arm64/boot/dts/renesas/r8a78000.dtsi
··· 698 698 compatible = "renesas,scif-r8a78000", 699 699 "renesas,rcar-gen5-scif", "renesas,scif"; 700 700 reg = <0 0xc0700000 0 0x40>; 701 - interrupts = <GIC_SPI 4074 IRQ_TYPE_LEVEL_HIGH>; 701 + interrupts = <GIC_ESPI 10 IRQ_TYPE_LEVEL_HIGH>; 702 702 clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>; 703 703 clock-names = "fck", "brg_int", "scif_clk"; 704 704 status = "disabled"; ··· 708 708 compatible = "renesas,scif-r8a78000", 709 709 "renesas,rcar-gen5-scif", "renesas,scif"; 710 710 reg = <0 0xc0704000 0 0x40>; 711 - interrupts = <GIC_SPI 4075 IRQ_TYPE_LEVEL_HIGH>; 711 + interrupts = <GIC_ESPI 11 IRQ_TYPE_LEVEL_HIGH>; 712 712 clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>; 713 713 clock-names = "fck", "brg_int", "scif_clk"; 714 714 status = "disabled"; ··· 718 718 compatible = "renesas,scif-r8a78000", 719 719 "renesas,rcar-gen5-scif", "renesas,scif"; 720 720 reg = <0 0xc0708000 0 0x40>; 721 - interrupts = <GIC_SPI 4076 IRQ_TYPE_LEVEL_HIGH>; 721 + interrupts = <GIC_ESPI 12 IRQ_TYPE_LEVEL_HIGH>; 722 722 clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>; 723 723 clock-names = "fck", "brg_int", "scif_clk"; 724 724 status = "disabled"; ··· 728 728 compatible = "renesas,scif-r8a78000", 729 729 "renesas,rcar-gen5-scif", "renesas,scif"; 730 730 reg = <0 0xc070c000 0 0x40>; 731 - interrupts = <GIC_SPI 4077 IRQ_TYPE_LEVEL_HIGH>; 731 + interrupts = <GIC_ESPI 13 IRQ_TYPE_LEVEL_HIGH>; 732 732 clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>; 733 733 clock-names = "fck", "brg_int", "scif_clk"; 734 734 status = "disabled"; ··· 738 738 compatible = "renesas,hscif-r8a78000", 739 739 "renesas,rcar-gen5-hscif", "renesas,hscif"; 740 740 reg = <0 0xc0710000 0 0x60>; 741 - interrupts = <GIC_SPI 4078 IRQ_TYPE_LEVEL_HIGH>; 741 + interrupts = <GIC_ESPI 14 IRQ_TYPE_LEVEL_HIGH>; 742 742 clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>; 743 743 clock-names = "fck", "brg_int", "scif_clk"; 744 744 status = "disabled"; ··· 748 748 compatible = "renesas,hscif-r8a78000", 749 749 "renesas,rcar-gen5-hscif", "renesas,hscif"; 750 750 reg = <0 0xc0714000 0 0x60>; 751 - interrupts = <GIC_SPI 4079 IRQ_TYPE_LEVEL_HIGH>; 751 + interrupts = <GIC_ESPI 15 IRQ_TYPE_LEVEL_HIGH>; 752 752 clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>; 753 753 clock-names = "fck", "brg_int", "scif_clk"; 754 754 status = "disabled"; ··· 758 758 compatible = "renesas,hscif-r8a78000", 759 759 "renesas,rcar-gen5-hscif", "renesas,hscif"; 760 760 reg = <0 0xc0718000 0 0x60>; 761 - interrupts = <GIC_SPI 4080 IRQ_TYPE_LEVEL_HIGH>; 761 + interrupts = <GIC_ESPI 16 IRQ_TYPE_LEVEL_HIGH>; 762 762 clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>; 763 763 clock-names = "fck", "brg_int", "scif_clk"; 764 764 status = "disabled"; ··· 768 768 compatible = "renesas,hscif-r8a78000", 769 769 "renesas,rcar-gen5-hscif", "renesas,hscif"; 770 770 reg = <0 0xc071c000 0 0x60>; 771 - interrupts = <GIC_SPI 4081 IRQ_TYPE_LEVEL_HIGH>; 771 + interrupts = <GIC_ESPI 17 IRQ_TYPE_LEVEL_HIGH>; 772 772 clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>; 773 773 clock-names = "fck", "brg_int", "scif_clk"; 774 774 status = "disabled";
-30
arch/arm64/boot/dts/renesas/r9a09g057.dtsi
··· 581 581 status = "disabled"; 582 582 }; 583 583 584 - wdt0: watchdog@11c00400 { 585 - compatible = "renesas,r9a09g057-wdt"; 586 - reg = <0 0x11c00400 0 0x400>; 587 - clocks = <&cpg CPG_MOD 0x4b>, <&cpg CPG_MOD 0x4c>; 588 - clock-names = "pclk", "oscclk"; 589 - resets = <&cpg 0x75>; 590 - power-domains = <&cpg>; 591 - status = "disabled"; 592 - }; 593 - 594 584 wdt1: watchdog@14400000 { 595 585 compatible = "renesas,r9a09g057-wdt"; 596 586 reg = <0 0x14400000 0 0x400>; 597 587 clocks = <&cpg CPG_MOD 0x4d>, <&cpg CPG_MOD 0x4e>; 598 588 clock-names = "pclk", "oscclk"; 599 589 resets = <&cpg 0x76>; 600 - power-domains = <&cpg>; 601 - status = "disabled"; 602 - }; 603 - 604 - wdt2: watchdog@13000000 { 605 - compatible = "renesas,r9a09g057-wdt"; 606 - reg = <0 0x13000000 0 0x400>; 607 - clocks = <&cpg CPG_MOD 0x4f>, <&cpg CPG_MOD 0x50>; 608 - clock-names = "pclk", "oscclk"; 609 - resets = <&cpg 0x77>; 610 - power-domains = <&cpg>; 611 - status = "disabled"; 612 - }; 613 - 614 - wdt3: watchdog@13000400 { 615 - compatible = "renesas,r9a09g057-wdt"; 616 - reg = <0 0x13000400 0 0x400>; 617 - clocks = <&cpg CPG_MOD 0x51>, <&cpg CPG_MOD 0x52>; 618 - clock-names = "pclk", "oscclk"; 619 - resets = <&cpg 0x78>; 620 590 power-domains = <&cpg>; 621 591 status = "disabled"; 622 592 };
+2 -2
arch/arm64/boot/dts/renesas/r9a09g077.dtsi
··· 974 974 975 975 cpg: clock-controller@80280000 { 976 976 compatible = "renesas,r9a09g077-cpg-mssr"; 977 - reg = <0 0x80280000 0 0x1000>, 978 - <0 0x81280000 0 0x9000>; 977 + reg = <0 0x80280000 0 0x10000>, 978 + <0 0x81280000 0 0x10000>; 979 979 clocks = <&extal_clk>; 980 980 clock-names = "extal"; 981 981 #clock-cells = <2>;
+2 -2
arch/arm64/boot/dts/renesas/r9a09g087.dtsi
··· 977 977 978 978 cpg: clock-controller@80280000 { 979 979 compatible = "renesas,r9a09g087-cpg-mssr"; 980 - reg = <0 0x80280000 0 0x1000>, 981 - <0 0x81280000 0 0x9000>; 980 + reg = <0 0x80280000 0 0x10000>, 981 + <0 0x81280000 0 0x10000>; 982 982 clocks = <&extal_clk>; 983 983 clock-names = "extal"; 984 984 #clock-cells = <2>;
+1 -1
arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
··· 162 162 <100000000>; 163 163 renesas,settings = [ 164 164 80 00 11 19 4c 42 dc 2f 06 7d 20 1a 5f 1e f2 27 165 - 00 40 00 00 00 00 00 00 06 0c 19 02 3f f0 90 86 165 + 00 40 00 00 00 00 00 00 06 0c 19 02 3b f0 90 86 166 166 a0 80 30 30 9c 167 167 ]; 168 168 };
+1
arch/arm64/boot/dts/renesas/rzt2h-n2h-evk-common.dtsi
··· 53 53 regulator-max-microvolt = <3300000>; 54 54 gpios-states = <0>; 55 55 states = <3300000 0>, <1800000 1>; 56 + regulator-ramp-delay = <60>; 56 57 }; 57 58 #endif 58 59
+1
arch/arm64/boot/dts/renesas/rzv2-evk-cn15-sd.dtso
··· 25 25 regulator-max-microvolt = <3300000>; 26 26 gpios-states = <0>; 27 27 states = <3300000 0>, <1800000 1>; 28 + regulator-ramp-delay = <60>; 28 29 }; 29 30 }; 30 31
+23 -14
arch/arm64/crypto/aes-neonbs-glue.c
··· 76 76 unsigned int key_len) 77 77 { 78 78 struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm); 79 - struct crypto_aes_ctx rk; 79 + struct crypto_aes_ctx *rk; 80 80 int err; 81 81 82 - err = aes_expandkey(&rk, in_key, key_len); 82 + rk = kmalloc(sizeof(*rk), GFP_KERNEL); 83 + if (!rk) 84 + return -ENOMEM; 85 + 86 + err = aes_expandkey(rk, in_key, key_len); 83 87 if (err) 84 - return err; 88 + goto out; 85 89 86 90 ctx->rounds = 6 + key_len / 4; 87 91 88 92 scoped_ksimd() 89 - aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds); 90 - 91 - return 0; 93 + aesbs_convert_key(ctx->rk, rk->key_enc, ctx->rounds); 94 + out: 95 + kfree_sensitive(rk); 96 + return err; 92 97 } 93 98 94 99 static int __ecb_crypt(struct skcipher_request *req, ··· 138 133 unsigned int key_len) 139 134 { 140 135 struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); 141 - struct crypto_aes_ctx rk; 136 + struct crypto_aes_ctx *rk; 142 137 int err; 143 138 144 - err = aes_expandkey(&rk, in_key, key_len); 139 + rk = kmalloc(sizeof(*rk), GFP_KERNEL); 140 + if (!rk) 141 + return -ENOMEM; 142 + 143 + err = aes_expandkey(rk, in_key, key_len); 145 144 if (err) 146 - return err; 145 + goto out; 147 146 148 147 ctx->key.rounds = 6 + key_len / 4; 149 148 150 - memcpy(ctx->enc, rk.key_enc, sizeof(ctx->enc)); 149 + memcpy(ctx->enc, rk->key_enc, sizeof(ctx->enc)); 151 150 152 151 scoped_ksimd() 153 - aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds); 154 - memzero_explicit(&rk, sizeof(rk)); 155 - 156 - return 0; 152 + aesbs_convert_key(ctx->key.rk, rk->key_enc, ctx->key.rounds); 153 + out: 154 + kfree_sensitive(rk); 155 + return err; 157 156 } 158 157 159 158 static int cbc_encrypt(struct skcipher_request *req)
+3
arch/arm64/include/asm/kvm_host.h
··· 784 784 /* Number of debug breakpoints/watchpoints for this CPU (minus 1) */ 785 785 unsigned int debug_brps; 786 786 unsigned int debug_wrps; 787 + 788 + /* Last vgic_irq part of the AP list recorded in an LR */ 789 + struct vgic_irq *last_lr_irq; 787 790 }; 788 791 789 792 struct kvm_host_psci_config {
+9
arch/arm64/kernel/cpufeature.c
··· 2345 2345 !is_midr_in_range_list(has_vgic_v3)) 2346 2346 return false; 2347 2347 2348 + /* 2349 + * pKVM prevents late onlining of CPUs. This means that whatever 2350 + * state the capability is in after deprivilege cannot be affected 2351 + * by a new CPU booting -- this is guaranteed to be a CPU we have 2352 + * already seen, and the cap is therefore unchanged. 2353 + */ 2354 + if (system_capabilities_finalized() && is_protected_kvm_enabled()) 2355 + return cpus_have_final_cap(ARM64_HAS_ICH_HCR_EL2_TDIR); 2356 + 2348 2357 if (is_kernel_in_hyp_mode()) 2349 2358 res.a1 = read_sysreg_s(SYS_ICH_VTR_EL2); 2350 2359 else
+8
arch/arm64/kernel/pi/patch-scs.c
··· 192 192 size -= 2; 193 193 break; 194 194 195 + case DW_CFA_advance_loc4: 196 + loc += *opcode++ * code_alignment_factor; 197 + loc += (*opcode++ << 8) * code_alignment_factor; 198 + loc += (*opcode++ << 16) * code_alignment_factor; 199 + loc += (*opcode++ << 24) * code_alignment_factor; 200 + size -= 4; 201 + break; 202 + 195 203 case DW_CFA_def_cfa: 196 204 case DW_CFA_offset_extended: 197 205 size = skip_xleb128(&opcode, size);
+2 -1
arch/arm64/kernel/rsi.c
··· 12 12 13 13 #include <asm/io.h> 14 14 #include <asm/mem_encrypt.h> 15 + #include <asm/pgtable.h> 15 16 #include <asm/rsi.h> 16 17 17 18 static struct realm_config config; ··· 147 146 return; 148 147 if (WARN_ON(rsi_get_realm_config(&config))) 149 148 return; 150 - prot_ns_shared = BIT(config.ipa_bits - 1); 149 + prot_ns_shared = __phys_to_pte_val(BIT(config.ipa_bits - 1)); 151 150 152 151 if (arm64_ioremap_prot_hook_register(realm_ioremap_hook)) 153 152 return;
-2
arch/arm64/kvm/at.c
··· 1504 1504 fail = true; 1505 1505 } 1506 1506 1507 - isb(); 1508 - 1509 1507 if (!fail) 1510 1508 par = read_sysreg_par(); 1511 1509
+2 -2
arch/arm64/kvm/guest.c
··· 29 29 30 30 #include "trace.h" 31 31 32 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 32 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 33 33 KVM_GENERIC_VM_STATS() 34 34 }; 35 35 ··· 42 42 sizeof(kvm_vm_stats_desc), 43 43 }; 44 44 45 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 45 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 46 46 KVM_GENERIC_VCPU_STATS(), 47 47 STATS_DESC_COUNTER(VCPU, hvc_exit_stat), 48 48 STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
+1 -1
arch/arm64/kvm/hyp/nvhe/mem_protect.c
··· 518 518 granule = kvm_granule_size(level); 519 519 cur.start = ALIGN_DOWN(addr, granule); 520 520 cur.end = cur.start + granule; 521 - if (!range_included(&cur, range)) 521 + if (!range_included(&cur, range) && level < KVM_PGTABLE_LAST_LEVEL) 522 522 continue; 523 523 *range = cur; 524 524 return 0;
+9 -5
arch/arm64/kvm/mmu.c
··· 1751 1751 1752 1752 force_pte = (max_map_size == PAGE_SIZE); 1753 1753 vma_pagesize = min_t(long, vma_pagesize, max_map_size); 1754 + vma_shift = __ffs(vma_pagesize); 1754 1755 } 1755 1756 1756 1757 /* ··· 1838 1837 if (exec_fault && s2_force_noncacheable) 1839 1838 ret = -ENOEXEC; 1840 1839 1841 - if (ret) { 1842 - kvm_release_page_unused(page); 1843 - return ret; 1844 - } 1840 + if (ret) 1841 + goto out_put_page; 1845 1842 1846 1843 /* 1847 1844 * Guest performs atomic/exclusive operations on memory with unsupported ··· 1849 1850 */ 1850 1851 if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(vcpu))) { 1851 1852 kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu)); 1852 - return 1; 1853 + ret = 1; 1854 + goto out_put_page; 1853 1855 } 1854 1856 1855 1857 if (nested) ··· 1936 1936 mark_page_dirty_in_slot(kvm, memslot, gfn); 1937 1937 1938 1938 return ret != -EAGAIN ? ret : 0; 1939 + 1940 + out_put_page: 1941 + kvm_release_page_unused(page); 1942 + return ret; 1939 1943 } 1940 1944 1941 1945 /* Resolve the access fault by making the page young again. */
+16 -11
arch/arm64/kvm/nested.c
··· 152 152 return 64 - wi->t0sz; 153 153 } 154 154 155 - static int check_base_s2_limits(struct s2_walk_info *wi, 155 + static int check_base_s2_limits(struct kvm_vcpu *vcpu, struct s2_walk_info *wi, 156 156 int level, int input_size, int stride) 157 157 { 158 - int start_size, ia_size; 158 + int start_size, pa_max; 159 159 160 - ia_size = get_ia_size(wi); 160 + pa_max = kvm_get_pa_bits(vcpu->kvm); 161 161 162 162 /* Check translation limits */ 163 163 switch (BIT(wi->pgshift)) { 164 164 case SZ_64K: 165 - if (level == 0 || (level == 1 && ia_size <= 42)) 165 + if (level == 0 || (level == 1 && pa_max <= 42)) 166 166 return -EFAULT; 167 167 break; 168 168 case SZ_16K: 169 - if (level == 0 || (level == 1 && ia_size <= 40)) 169 + if (level == 0 || (level == 1 && pa_max <= 40)) 170 170 return -EFAULT; 171 171 break; 172 172 case SZ_4K: 173 - if (level < 0 || (level == 0 && ia_size <= 42)) 173 + if (level < 0 || (level == 0 && pa_max <= 42)) 174 174 return -EFAULT; 175 175 break; 176 176 } 177 177 178 178 /* Check input size limits */ 179 - if (input_size > ia_size) 179 + if (input_size > pa_max) 180 180 return -EFAULT; 181 181 182 182 /* Check number of entries in starting level table */ ··· 269 269 if (input_size > 48 || input_size < 25) 270 270 return -EFAULT; 271 271 272 - ret = check_base_s2_limits(wi, level, input_size, stride); 273 - if (WARN_ON(ret)) 272 + ret = check_base_s2_limits(vcpu, wi, level, input_size, stride); 273 + if (WARN_ON(ret)) { 274 + out->esr = compute_fsc(0, ESR_ELx_FSC_FAULT); 274 275 return ret; 276 + } 275 277 276 278 base_lower_bound = 3 + input_size - ((3 - level) * stride + 277 279 wi->pgshift); 278 280 base_addr = wi->baddr & GENMASK_ULL(47, base_lower_bound); 279 281 280 282 if (check_output_size(wi, base_addr)) { 281 - out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ); 283 + /* R_BFHQH */ 284 + out->esr = compute_fsc(0, ESR_ELx_FSC_ADDRSZ); 282 285 return 1; 283 286 } 284 287 ··· 296 293 297 294 paddr = base_addr | index; 298 295 ret = read_guest_s2_desc(vcpu, paddr, &desc, wi); 299 - if (ret < 0) 296 + if (ret < 0) { 297 + out->esr = ESR_ELx_FSC_SEA_TTW(level); 300 298 return ret; 299 + } 301 300 302 301 new_desc = desc; 303 302
+17 -17
arch/arm64/kvm/vgic/vgic-init.c
··· 143 143 kvm->arch.vgic.in_kernel = true; 144 144 kvm->arch.vgic.vgic_model = type; 145 145 kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST; 146 - 147 - kvm_for_each_vcpu(i, vcpu, kvm) { 148 - ret = vgic_allocate_private_irqs_locked(vcpu, type); 149 - if (ret) 150 - break; 151 - } 152 - 153 - if (ret) { 154 - kvm_for_each_vcpu(i, vcpu, kvm) { 155 - struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 156 - kfree(vgic_cpu->private_irqs); 157 - vgic_cpu->private_irqs = NULL; 158 - } 159 - 160 - goto out_unlock; 161 - } 162 - 163 146 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; 164 147 165 148 aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC; ··· 158 175 159 176 kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0); 160 177 kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1); 178 + 179 + kvm_for_each_vcpu(i, vcpu, kvm) { 180 + ret = vgic_allocate_private_irqs_locked(vcpu, type); 181 + if (ret) 182 + break; 183 + } 184 + 185 + if (ret) { 186 + kvm_for_each_vcpu(i, vcpu, kvm) { 187 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 188 + kfree(vgic_cpu->private_irqs); 189 + vgic_cpu->private_irqs = NULL; 190 + } 191 + 192 + kvm->arch.vgic.vgic_model = 0; 193 + goto out_unlock; 194 + } 161 195 162 196 if (type == KVM_DEV_TYPE_ARM_VGIC_V3) 163 197 kvm->arch.vgic.nassgicap = system_supports_direct_sgis();
+2 -2
arch/arm64/kvm/vgic/vgic-v2.c
··· 115 115 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 116 116 struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2; 117 117 u32 eoicount = FIELD_GET(GICH_HCR_EOICOUNT, cpuif->vgic_hcr); 118 - struct vgic_irq *irq; 118 + struct vgic_irq *irq = *host_data_ptr(last_lr_irq); 119 119 120 120 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 121 121 ··· 123 123 vgic_v2_fold_lr(vcpu, cpuif->vgic_lr[lr]); 124 124 125 125 /* See the GICv3 equivalent for the EOIcount handling rationale */ 126 - list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 126 + list_for_each_entry_continue(irq, &vgic_cpu->ap_list_head, ap_list) { 127 127 u32 lr; 128 128 129 129 if (!eoicount) {
+6 -6
arch/arm64/kvm/vgic/vgic-v3.c
··· 148 148 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 149 149 struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3; 150 150 u32 eoicount = FIELD_GET(ICH_HCR_EL2_EOIcount, cpuif->vgic_hcr); 151 - struct vgic_irq *irq; 151 + struct vgic_irq *irq = *host_data_ptr(last_lr_irq); 152 152 153 153 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 154 154 ··· 158 158 /* 159 159 * EOIMode=0: use EOIcount to emulate deactivation. We are 160 160 * guaranteed to deactivate in reverse order of the activation, so 161 - * just pick one active interrupt after the other in the ap_list, 162 - * and replay the deactivation as if the CPU was doing it. We also 163 - * rely on priority drop to have taken place, and the list to be 164 - * sorted by priority. 161 + * just pick one active interrupt after the other in the tail part 162 + * of the ap_list, past the LRs, and replay the deactivation as if 163 + * the CPU was doing it. We also rely on priority drop to have taken 164 + * place, and the list to be sorted by priority. 165 165 */ 166 - list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 166 + list_for_each_entry_continue(irq, &vgic_cpu->ap_list_head, ap_list) { 167 167 u64 lr; 168 168 169 169 /*
+6
arch/arm64/kvm/vgic/vgic.c
··· 814 814 815 815 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) 816 816 { 817 + if (!*host_data_ptr(last_lr_irq)) 818 + return; 819 + 817 820 if (kvm_vgic_global_state.type == VGIC_V2) 818 821 vgic_v2_fold_lr_state(vcpu); 819 822 else ··· 963 960 if (irqs_outside_lrs(&als)) 964 961 vgic_sort_ap_list(vcpu); 965 962 963 + *host_data_ptr(last_lr_irq) = NULL; 964 + 966 965 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 967 966 scoped_guard(raw_spinlock, &irq->irq_lock) { 968 967 if (likely(vgic_target_oracle(irq) == vcpu)) { 969 968 vgic_populate_lr(vcpu, irq, count++); 969 + *host_data_ptr(last_lr_irq) = irq; 970 970 } 971 971 } 972 972
+3
arch/loongarch/Kconfig
··· 304 304 config AS_HAS_LVZ_EXTENSION 305 305 def_bool $(as-instr,hvcl 0) 306 306 307 + config AS_HAS_SCQ_EXTENSION 308 + def_bool $(as-instr,sc.q \$t0$(comma)\$t1$(comma)\$t2) 309 + 307 310 config CC_HAS_ANNOTATE_TABLEJUMP 308 311 def_bool $(cc-option,-mannotate-tablejump) 309 312
+5
arch/loongarch/include/asm/cmpxchg.h
··· 238 238 arch_cmpxchg((ptr), (o), (n)); \ 239 239 }) 240 240 241 + #ifdef CONFIG_AS_HAS_SCQ_EXTENSION 242 + 241 243 union __u128_halves { 242 244 u128 full; 243 245 struct { ··· 292 290 BUILD_BUG_ON(sizeof(*(ptr)) != 16); \ 293 291 __arch_cmpxchg128(ptr, o, n, ""); \ 294 292 }) 293 + 294 + #endif /* CONFIG_AS_HAS_SCQ_EXTENSION */ 295 + 295 296 #else 296 297 #include <asm-generic/cmpxchg-local.h> 297 298 #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
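The two LoongArch hunks above pair up: the Kconfig probe sets AS_HAS_SCQ_EXTENSION only when the assembler accepts sc.q, and cmpxchg.h compiles the 128-bit cmpxchg helpers only under that symbol, so trees built with older binutils still link. Portable callers should feature-test at runtime anyway; a hedged sketch of the usual pattern (system_has_cmpxchg128() is the conventional per-arch capability macro, assumed to be defined on this configuration):

    #include <linux/atomic.h>

    /* Sketch: atomically swap a 16-byte value if it still matches 'old'. */
    static bool try_update_pair(u128 *slot, u128 old, u128 new)
    {
            if (!system_has_cmpxchg128())
                    return false;   /* caller falls back to a lock (not shown) */

            /* cmpxchg128() returns the value found; success iff it was 'old'. */
            return cmpxchg128(slot, old, new) == old;
    }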
+12 -2
arch/loongarch/include/asm/uaccess.h
··· 253 253 \ 254 254 __get_kernel_common(*((type *)(dst)), sizeof(type), \ 255 255 (__force type *)(src)); \ 256 - if (unlikely(__gu_err)) \ 256 + if (unlikely(__gu_err)) { \ 257 + pr_info("%s: memory access failed, ecode 0x%x\n", \ 258 + __func__, read_csr_excode()); \ 259 + pr_info("%s: the caller is %pS\n", \ 260 + __func__, __builtin_return_address(0)); \ 257 261 goto err_label; \ 262 + } \ 258 263 } while (0) 259 264 260 265 #define __put_kernel_nofault(dst, src, type, err_label) \ ··· 269 264 \ 270 265 __pu_val = *(__force type *)(src); \ 271 266 __put_kernel_common(((type *)(dst)), sizeof(type)); \ 272 - if (unlikely(__pu_err)) \ 267 + if (unlikely(__pu_err)) { \ 268 + pr_info("%s: memory access failed, ecode 0x%x\n", \ 269 + __func__, read_csr_excode()); \ 270 + pr_info("%s: the caller is %pS\n", \ 271 + __func__, __builtin_return_address(0)); \ 273 272 goto err_label; \ 273 + } \ 274 274 } while (0) 275 275 276 276 extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n);
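The uaccess.h change above makes a failed nofault kernel access log the exception code and the caller before jumping to the error label, turning a silent goto into an actionable breadcrumb. For orientation, the err_label contract these macros implement looks like this from the caller's side (a minimal sketch):

    /* Sketch: read one long from a possibly-unmapped kernel address. */
    static long peek_kernel_long(const long *addr)
    {
            long val;

            /* Jumps to 'fault' on an exception; with the change above the
             * two pr_info() lines fire before the jump. */
            __get_kernel_nofault(&val, addr, long, fault);
            return val;

    fault:
            return -EFAULT;
    }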
+25 -6
arch/loongarch/kernel/inst.c
··· 246 246 247 247 if (smp_processor_id() == copy->cpu) { 248 248 ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len); 249 - if (ret) 249 + if (ret) { 250 250 pr_err("%s: operation failed\n", __func__); 251 + return ret; 252 + } 251 253 } 252 254 253 255 flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len); 254 256 255 - return ret; 257 + return 0; 256 258 } 257 259 258 260 int larch_insn_text_copy(void *dst, void *src, size_t len) 259 261 { 260 262 int ret = 0; 263 + int err = 0; 261 264 size_t start, end; 262 265 struct insn_copy copy = { 263 266 .dst = dst, 264 267 .src = src, 265 268 .len = len, 266 - .cpu = smp_processor_id(), 269 + .cpu = raw_smp_processor_id(), 267 270 }; 271 + 272 + /* 273 + * Ensure copy.cpu won't be hot removed before stop_machine. 274 + * If it is removed nobody will really update the text. 275 + */ 276 + lockdep_assert_cpus_held(); 268 277 269 278 start = round_down((size_t)dst, PAGE_SIZE); 270 279 end = round_up((size_t)dst + len, PAGE_SIZE); 271 280 272 - set_memory_rw(start, (end - start) / PAGE_SIZE); 273 - ret = stop_machine(text_copy_cb, &copy, cpu_online_mask); 274 - set_memory_rox(start, (end - start) / PAGE_SIZE); 281 + err = set_memory_rw(start, (end - start) / PAGE_SIZE); 282 + if (err) { 283 + pr_info("%s: set_memory_rw() failed\n", __func__); 284 + return err; 285 + } 286 + 287 + ret = stop_machine_cpuslocked(text_copy_cb, &copy, cpu_online_mask); 288 + 289 + err = set_memory_rox(start, (end - start) / PAGE_SIZE); 290 + if (err) { 291 + pr_info("%s: set_memory_rox() failed\n", __func__); 292 + return err; 293 + } 275 294 276 295 return ret; 277 296 }
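The inst.c rework above fixes three things at once: errors from copy_to_kernel_nofault() and from the set_memory_*() calls are propagated instead of dropped, the patching CPU is sampled with raw_smp_processor_id() since the caller may be preemptible, and stop_machine() becomes stop_machine_cpuslocked() because the function now requires its callers to hold the CPU hotplug lock (lockdep_assert_cpus_held). The resulting caller-side contract, sketched with a hypothetical wrapper; the bpf_jit.c hunk further down applies exactly this shape at each call site:

    #include <linux/cpu.h>
    #include <linux/memory.h>       /* text_mutex */

    static int patch_text(void *dst, void *src, size_t len)
    {
            int ret;

            cpus_read_lock();               /* satisfies lockdep_assert_cpus_held() */
            mutex_lock(&text_mutex);        /* serializes all kernel text patching  */
            ret = larch_insn_text_copy(dst, src, len);
            mutex_unlock(&text_mutex);
            cpus_read_unlock();

            return ret;
    }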
+1 -1
arch/loongarch/kvm/vcpu.c
··· 14 14 #define CREATE_TRACE_POINTS 15 15 #include "trace.h" 16 16 17 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 17 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 18 18 KVM_GENERIC_VCPU_STATS(), 19 19 STATS_DESC_COUNTER(VCPU, int_exits), 20 20 STATS_DESC_COUNTER(VCPU, idle_exits),
+3 -3
arch/loongarch/kvm/vm.c
··· 10 10 #include <asm/kvm_eiointc.h> 11 11 #include <asm/kvm_pch_pic.h> 12 12 13 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 13 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 14 14 KVM_GENERIC_VM_STATS(), 15 15 STATS_DESC_ICOUNTER(VM, pages), 16 16 STATS_DESC_ICOUNTER(VM, hugepages), ··· 49 49 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU); 50 50 51 51 /* Enable all PV features by default */ 52 - kvm->arch.pv_features = BIT(KVM_FEATURE_IPI); 53 - kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI); 52 + kvm->arch.pv_features |= BIT(KVM_FEATURE_IPI); 53 + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI); 54 54 if (kvm_pvtime_supported()) { 55 55 kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT); 56 56 kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
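The vm.c hunk above fixes a classic clobber: kvm_features already has the PMU bit OR-ed in a few lines earlier (hunk line 49), so the later plain assignment was silently discarding it; both stores now accumulate with |=. Reduced to its essence:

    #include <linux/bits.h>

    static void set_features(unsigned long *features)
    {
            *features |= BIT(0);            /* set earlier in the function   */
            /* *features = BIT(1);             the old bug: '=' drops BIT(0) */
            *features |= BIT(1);            /* the fix: accumulate with '|=' */
    }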
+11
arch/loongarch/net/bpf_jit.c
··· 1379 1379 { 1380 1380 int ret; 1381 1381 1382 + cpus_read_lock(); 1382 1383 mutex_lock(&text_mutex); 1383 1384 ret = larch_insn_text_copy(dst, src, len); 1384 1385 mutex_unlock(&text_mutex); 1386 + cpus_read_unlock(); 1385 1387 1386 1388 return ret ? ERR_PTR(-EINVAL) : dst; 1387 1389 } ··· 1431 1429 if (ret) 1432 1430 return ret; 1433 1431 1432 + cpus_read_lock(); 1434 1433 mutex_lock(&text_mutex); 1435 1434 if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES)) 1436 1435 ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES); 1437 1436 mutex_unlock(&text_mutex); 1437 + cpus_read_unlock(); 1438 1438 1439 1439 return ret; 1440 1440 } ··· 1454 1450 for (i = 0; i < (len / sizeof(u32)); i++) 1455 1451 inst[i] = INSN_BREAK; 1456 1452 1453 + cpus_read_lock(); 1457 1454 mutex_lock(&text_mutex); 1458 1455 if (larch_insn_text_copy(dst, inst, len)) 1459 1456 ret = -EINVAL; 1460 1457 mutex_unlock(&text_mutex); 1458 + cpus_read_unlock(); 1461 1459 1462 1460 kvfree(inst); 1463 1461 ··· 1572 1566 void arch_free_bpf_trampoline(void *image, unsigned int size) 1573 1567 { 1574 1568 bpf_prog_pack_free(image, size); 1569 + } 1570 + 1571 + int arch_protect_bpf_trampoline(void *image, unsigned int size) 1572 + { 1573 + return 0; 1575 1574 } 1576 1575 1577 1576 /*
+2 -2
arch/mips/kvm/mips.c
··· 38 38 #define VECTORSPACING 0x100 /* for EI/VI mode */ 39 39 #endif 40 40 41 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 41 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 42 42 KVM_GENERIC_VM_STATS() 43 43 }; 44 44 ··· 51 51 sizeof(kvm_vm_stats_desc), 52 52 }; 53 53 54 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 54 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 55 55 KVM_GENERIC_VCPU_STATS(), 56 56 STATS_DESC_COUNTER(VCPU, wait_exits), 57 57 STATS_DESC_COUNTER(VCPU, cache_exits),
+2 -2
arch/parisc/kernel/cache.c
··· 953 953 #else 954 954 "1: cmpb,<<,n %0,%2,1b\n" 955 955 #endif 956 - " fic,m %3(%4,%0)\n" 956 + " fdc,m %3(%4,%0)\n" 957 957 "2: sync\n" 958 958 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1") 959 959 : "+r" (start), "+r" (error) ··· 968 968 #else 969 969 "1: cmpb,<<,n %0,%2,1b\n" 970 970 #endif 971 - " fdc,m %3(%4,%0)\n" 971 + " fic,m %3(%4,%0)\n" 972 972 "2: sync\n" 973 973 ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1") 974 974 : "+r" (start), "+r" (error)
+2 -2
arch/powerpc/Kconfig
··· 573 573 depends on FUNCTION_TRACER && (PPC32 || PPC64_ELF_ABI_V2) 574 574 depends on $(cc-option,-fpatchable-function-entry=2) 575 575 def_bool y if PPC32 576 - def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-fpatchable-function-entry.sh $(CC) -mlittle-endian) if PPC64 && CPU_LITTLE_ENDIAN 577 - def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-fpatchable-function-entry.sh $(CC) -mbig-endian) if PPC64 && CPU_BIG_ENDIAN 576 + def_bool $(success,$(srctree)/arch/powerpc/tools/check-fpatchable-function-entry.sh $(CC) $(CLANG_FLAGS) -mlittle-endian) if PPC64 && CPU_LITTLE_ENDIAN 577 + def_bool $(success,$(srctree)/arch/powerpc/tools/check-fpatchable-function-entry.sh $(CC) -mbig-endian) if PPC64 && CPU_BIG_ENDIAN 578 578 579 579 config PPC_FTRACE_OUT_OF_LINE 580 580 def_bool PPC64 && ARCH_USING_PATCHABLE_FUNCTION_ENTRY
+1 -1
arch/powerpc/boot/dts/asp834x-redboot.dts
··· 37 37 }; 38 38 }; 39 39 40 - memory { 40 + memory@0 { 41 41 device_type = "memory"; 42 42 reg = <0x00000000 0x8000000>; // 128MB at 0 43 43 };
-156
arch/powerpc/boot/dts/fsl/interlaken-lac-portals.dtsi
··· 1 - /* T4240 Interlaken LAC Portal device tree stub with 24 portals. 2 - * 3 - * Copyright 2012 Freescale Semiconductor Inc. 4 - * 5 - * Redistribution and use in source and binary forms, with or without 6 - * modification, are permitted provided that the following conditions are met: 7 - * * Redistributions of source code must retain the above copyright 8 - * notice, this list of conditions and the following disclaimer. 9 - * * Redistributions in binary form must reproduce the above copyright 10 - * notice, this list of conditions and the following disclaimer in the 11 - * documentation and/or other materials provided with the distribution. 12 - * * Neither the name of Freescale Semiconductor nor the 13 - * names of its contributors may be used to endorse or promote products 14 - * derived from this software without specific prior written permission. 15 - * 16 - * 17 - * ALTERNATIVELY, this software may be distributed under the terms of the 18 - * GNU General Public License ("GPL") as published by the Free Software 19 - * Foundation, either version 2 of that License or (at your option) any 20 - * later version. 21 - * 22 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY 23 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 26 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 27 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 29 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 31 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 - */ 33 - 34 - #address-cells = <0x1>; 35 - #size-cells = <0x1>; 36 - compatible = "fsl,interlaken-lac-portals"; 37 - 38 - lportal0: lac-portal@0 { 39 - compatible = "fsl,interlaken-lac-portal-v1.0"; 40 - reg = <0x0 0x1000>; 41 - }; 42 - 43 - lportal1: lac-portal@1000 { 44 - compatible = "fsl,interlaken-lac-portal-v1.0"; 45 - reg = <0x1000 0x1000>; 46 - }; 47 - 48 - lportal2: lac-portal@2000 { 49 - compatible = "fsl,interlaken-lac-portal-v1.0"; 50 - reg = <0x2000 0x1000>; 51 - }; 52 - 53 - lportal3: lac-portal@3000 { 54 - compatible = "fsl,interlaken-lac-portal-v1.0"; 55 - reg = <0x3000 0x1000>; 56 - }; 57 - 58 - lportal4: lac-portal@4000 { 59 - compatible = "fsl,interlaken-lac-portal-v1.0"; 60 - reg = <0x4000 0x1000>; 61 - }; 62 - 63 - lportal5: lac-portal@5000 { 64 - compatible = "fsl,interlaken-lac-portal-v1.0"; 65 - reg = <0x5000 0x1000>; 66 - }; 67 - 68 - lportal6: lac-portal@6000 { 69 - compatible = "fsl,interlaken-lac-portal-v1.0"; 70 - reg = <0x6000 0x1000>; 71 - }; 72 - 73 - lportal7: lac-portal@7000 { 74 - compatible = "fsl,interlaken-lac-portal-v1.0"; 75 - reg = <0x7000 0x1000>; 76 - }; 77 - 78 - lportal8: lac-portal@8000 { 79 - compatible = "fsl,interlaken-lac-portal-v1.0"; 80 - reg = <0x8000 0x1000>; 81 - }; 82 - 83 - lportal9: lac-portal@9000 { 84 - compatible = "fsl,interlaken-lac-portal-v1.0"; 85 - reg = <0x9000 0x1000>; 86 - }; 87 - 88 - lportal10: lac-portal@A000 { 89 - compatible = "fsl,interlaken-lac-portal-v1.0"; 90 - reg = <0xA000 0x1000>; 91 - }; 92 - 93 - lportal11: lac-portal@B000 { 94 - compatible = "fsl,interlaken-lac-portal-v1.0"; 95 - reg = <0xB000 0x1000>; 96 - }; 97 - 98 - lportal12: lac-portal@C000 { 99 - compatible = "fsl,interlaken-lac-portal-v1.0"; 100 - reg = <0xC000 0x1000>; 101 - }; 102 - 103 - lportal13: lac-portal@D000 { 104 - compatible = "fsl,interlaken-lac-portal-v1.0"; 105 - reg = <0xD000 0x1000>; 106 - }; 107 - 108 - lportal14: lac-portal@E000 { 109 - compatible = "fsl,interlaken-lac-portal-v1.0"; 110 - reg = <0xE000 0x1000>; 111 - }; 112 - 113 - lportal15: lac-portal@F000 { 114 - compatible = "fsl,interlaken-lac-portal-v1.0"; 115 - reg = <0xF000 0x1000>; 116 - }; 117 - 118 - lportal16: lac-portal@10000 { 119 - compatible = "fsl,interlaken-lac-portal-v1.0"; 120 - reg = <0x10000 0x1000>; 121 - }; 122 - 123 - lportal17: lac-portal@11000 { 124 - compatible = "fsl,interlaken-lac-portal-v1.0"; 125 - reg = <0x11000 0x1000>; 126 - }; 127 - 128 - lportal18: lac-portal@1200 { 129 - compatible = "fsl,interlaken-lac-portal-v1.0"; 130 - reg = <0x12000 0x1000>; 131 - }; 132 - 133 - lportal19: lac-portal@13000 { 134 - compatible = "fsl,interlaken-lac-portal-v1.0"; 135 - reg = <0x13000 0x1000>; 136 - }; 137 - 138 - lportal20: lac-portal@14000 { 139 - compatible = "fsl,interlaken-lac-portal-v1.0"; 140 - reg = <0x14000 0x1000>; 141 - }; 142 - 143 - lportal21: lac-portal@15000 { 144 - compatible = "fsl,interlaken-lac-portal-v1.0"; 145 - reg = <0x15000 0x1000>; 146 - }; 147 - 148 - lportal22: lac-portal@16000 { 149 - compatible = "fsl,interlaken-lac-portal-v1.0"; 150 - reg = <0x16000 0x1000>; 151 - }; 152 - 153 - lportal23: lac-portal@17000 { 154 - compatible = "fsl,interlaken-lac-portal-v1.0"; 155 - reg = <0x17000 0x1000>; 156 - };
-45
arch/powerpc/boot/dts/fsl/interlaken-lac.dtsi
··· 1 - /* 2 - * T4 Interlaken Look-aside Controller (LAC) device tree stub 3 - * 4 - * Copyright 2012 Freescale Semiconductor Inc. 5 - * 6 - * Redistribution and use in source and binary forms, with or without 7 - * modification, are permitted provided that the following conditions are met: 8 - * * Redistributions of source code must retain the above copyright 9 - * notice, this list of conditions and the following disclaimer. 10 - * * Redistributions in binary form must reproduce the above copyright 11 - * notice, this list of conditions and the following disclaimer in the 12 - * documentation and/or other materials provided with the distribution. 13 - * * Neither the name of Freescale Semiconductor nor the 14 - * names of its contributors may be used to endorse or promote products 15 - * derived from this software without specific prior written permission. 16 - * 17 - * 18 - * ALTERNATIVELY, this software may be distributed under the terms of the 19 - * GNU General Public License ("GPL") as published by the Free Software 20 - * Foundation, either version 2 of that License or (at your option) any 21 - * later version. 22 - * 23 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY 24 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 27 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 - */ 34 - 35 - lac: lac@229000 { 36 - compatible = "fsl,interlaken-lac"; 37 - reg = <0x229000 0x1000>; 38 - interrupts = <16 2 1 18>; 39 - }; 40 - 41 - lac-hv@228000 { 42 - compatible = "fsl,interlaken-lac-hv"; 43 - reg = <0x228000 0x1000>; 44 - fsl,non-hv-node = <&lac>; 45 - };
-43
arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi
··· 1 - /* 2 - * PQ3 MPIC Message (Group B) device tree stub [ controller @ offset 0x42400 ] 3 - * 4 - * Copyright 2012 Freescale Semiconductor Inc. 5 - * 6 - * Redistribution and use in source and binary forms, with or without 7 - * modification, are permitted provided that the following conditions are met: 8 - * * Redistributions of source code must retain the above copyright 9 - * notice, this list of conditions and the following disclaimer. 10 - * * Redistributions in binary form must reproduce the above copyright 11 - * notice, this list of conditions and the following disclaimer in the 12 - * documentation and/or other materials provided with the distribution. 13 - * * Neither the name of Freescale Semiconductor nor the 14 - * names of its contributors may be used to endorse or promote products 15 - * derived from this software without specific prior written permission. 16 - * 17 - * 18 - * ALTERNATIVELY, this software may be distributed under the terms of the 19 - * GNU General Public License ("GPL") as published by the Free Software 20 - * Foundation, either version 2 of that License or (at your option) any 21 - * later version. 22 - * 23 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 24 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 27 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 - */ 34 - 35 - message@42400 { 36 - compatible = "fsl,mpic-v3.1-msgr"; 37 - reg = <0x42400 0x200>; 38 - interrupts = < 39 - 0xb4 2 0 0 40 - 0xb5 2 0 0 41 - 0xb6 2 0 0 42 - 0xb7 2 0 0>; 43 - };
-80
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi
··· 1 - /* 2 - * QorIQ FMan v3 1g port #1 device tree stub [ controller @ offset 0x400000 ] 3 - * 4 - * Copyright 2012 - 2015 Freescale Semiconductor Inc. 5 - * 6 - * Redistribution and use in source and binary forms, with or without 7 - * modification, are permitted provided that the following conditions are met: 8 - * * Redistributions of source code must retain the above copyright 9 - * notice, this list of conditions and the following disclaimer. 10 - * * Redistributions in binary form must reproduce the above copyright 11 - * notice, this list of conditions and the following disclaimer in the 12 - * documentation and/or other materials provided with the distribution. 13 - * * Neither the name of Freescale Semiconductor nor the 14 - * names of its contributors may be used to endorse or promote products 15 - * derived from this software without specific prior written permission. 16 - * 17 - * 18 - * ALTERNATIVELY, this software may be distributed under the terms of the 19 - * GNU General Public License ("GPL") as published by the Free Software 20 - * Foundation, either version 2 of that License or (at your option) any 21 - * later version. 22 - * 23 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY 24 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY 27 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 29 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 30 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 32 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 - */ 34 - 35 - fman@400000 { 36 - fman0_rx_0x09: port@89000 { 37 - cell-index = <0x9>; 38 - compatible = "fsl,fman-v3-port-rx"; 39 - reg = <0x89000 0x1000>; 40 - fsl,fman-10g-port; 41 - fsl,fman-best-effort-port; 42 - }; 43 - 44 - fman0_tx_0x29: port@a9000 { 45 - cell-index = <0x29>; 46 - compatible = "fsl,fman-v3-port-tx"; 47 - reg = <0xa9000 0x1000>; 48 - fsl,fman-10g-port; 49 - fsl,fman-best-effort-port; 50 - }; 51 - 52 - ethernet@e2000 { 53 - cell-index = <1>; 54 - compatible = "fsl,fman-memac"; 55 - reg = <0xe2000 0x1000>; 56 - fsl,fman-ports = <&fman0_rx_0x09 &fman0_tx_0x29>; 57 - ptp-timer = <&ptp_timer0>; 58 - pcsphy-handle = <&pcsphy1>, <&qsgmiia_pcs1>; 59 - pcs-handle-names = "sgmii", "qsgmii"; 60 - }; 61 - 62 - mdio@e1000 { 63 - qsgmiia_pcs1: ethernet-pcs@1 { 64 - compatible = "fsl,lynx-pcs"; 65 - reg = <1>; 66 - }; 67 - }; 68 - 69 - mdio@e3000 { 70 - #address-cells = <1>; 71 - #size-cells = <0>; 72 - compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; 73 - reg = <0xe3000 0x1000>; 74 - fsl,erratum-a011043; /* must ignore read errors */ 75 - 76 - pcsphy1: ethernet-phy@0 { 77 - reg = <0x0>; 78 - }; 79 - }; 80 - };
+1 -1
arch/powerpc/boot/dts/mpc8308_p1m.dts
··· 37 37 }; 38 38 }; 39 39 40 - memory { 40 + memory@0 { 41 41 device_type = "memory"; 42 42 reg = <0x00000000 0x08000000>; // 128MB at 0 43 43 };
+1 -1
arch/powerpc/boot/dts/mpc8308rdb.dts
··· 38 38 }; 39 39 }; 40 40 41 - memory { 41 + memory@0 { 42 42 device_type = "memory"; 43 43 reg = <0x00000000 0x08000000>; // 128MB at 0 44 44 };
+35 -26
arch/powerpc/boot/dts/mpc8313erdb.dts
··· 6 6 */ 7 7 8 8 /dts-v1/; 9 + #include <dt-bindings/interrupt-controller/irq.h> 9 10 10 11 / { 11 12 model = "MPC8313ERDB"; ··· 39 38 }; 40 39 }; 41 40 42 - memory { 41 + memory@0 { 43 42 device_type = "memory"; 44 43 reg = <0x00000000 0x08000000>; // 128MB at 0 45 44 }; ··· 49 48 #size-cells = <1>; 50 49 compatible = "fsl,mpc8313-elbc", "fsl,elbc", "simple-bus"; 51 50 reg = <0xe0005000 0x1000>; 52 - interrupts = <77 0x8>; 51 + interrupts = <77 IRQ_TYPE_LEVEL_LOW>; 53 52 interrupt-parent = <&ipic>; 54 53 55 54 // CS0 and CS1 are swapped when ··· 119 118 cell-index = <0>; 120 119 compatible = "fsl-i2c"; 121 120 reg = <0x3000 0x100>; 122 - interrupts = <14 0x8>; 121 + interrupts = <14 IRQ_TYPE_LEVEL_LOW>; 123 122 interrupt-parent = <&ipic>; 124 123 dfsrr; 125 124 rtc@68 { ··· 132 131 compatible = "fsl,sec2.2", "fsl,sec2.1", 133 132 "fsl,sec2.0"; 134 133 reg = <0x30000 0x10000>; 135 - interrupts = <11 0x8>; 134 + interrupts = <11 IRQ_TYPE_LEVEL_LOW>; 136 135 interrupt-parent = <&ipic>; 137 136 fsl,num-channels = <1>; 138 137 fsl,channel-fifo-len = <24>; ··· 147 146 cell-index = <1>; 148 147 compatible = "fsl-i2c"; 149 148 reg = <0x3100 0x100>; 150 - interrupts = <15 0x8>; 149 + interrupts = <15 IRQ_TYPE_LEVEL_LOW>; 151 150 interrupt-parent = <&ipic>; 152 151 dfsrr; 153 152 }; ··· 156 155 cell-index = <0>; 157 156 compatible = "fsl,spi"; 158 157 reg = <0x7000 0x1000>; 159 - interrupts = <16 0x8>; 158 + interrupts = <16 IRQ_TYPE_LEVEL_LOW>; 160 159 interrupt-parent = <&ipic>; 161 160 mode = "cpu"; 162 161 }; ··· 168 167 #address-cells = <1>; 169 168 #size-cells = <0>; 170 169 interrupt-parent = <&ipic>; 171 - interrupts = <38 0x8>; 170 + interrupts = <38 IRQ_TYPE_LEVEL_LOW>; 172 171 phy_type = "utmi_wide"; 173 172 sleep = <&pmc 0x00300000>; 174 173 }; ··· 176 175 ptp_clock@24E00 { 177 176 compatible = "fsl,etsec-ptp"; 178 177 reg = <0x24E00 0xB0>; 179 - interrupts = <12 0x8 13 0x8>; 178 + interrupts = <12 IRQ_TYPE_LEVEL_LOW>, 179 + <13 IRQ_TYPE_LEVEL_LOW>; 180 180 interrupt-parent = < &ipic >; 181 181 fsl,tclk-period = <10>; 182 182 fsl,tmr-prsc = <100>; ··· 199 197 compatible = "gianfar"; 200 198 reg = <0x24000 0x1000>; 201 199 local-mac-address = [ 00 00 00 00 00 00 ]; 202 - interrupts = <37 0x8 36 0x8 35 0x8>; 200 + interrupts = <37 IRQ_TYPE_LEVEL_LOW>, 201 + <36 IRQ_TYPE_LEVEL_LOW>, 202 + <35 IRQ_TYPE_LEVEL_LOW>; 203 203 interrupt-parent = <&ipic>; 204 204 tbi-handle = < &tbi0 >; 205 205 /* Vitesse 7385 isn't on the MDIO bus */ ··· 215 211 reg = <0x520 0x20>; 216 212 phy4: ethernet-phy@4 { 217 213 interrupt-parent = <&ipic>; 218 - interrupts = <20 0x8>; 214 + interrupts = <20 IRQ_TYPE_LEVEL_LOW>; 219 215 reg = <0x4>; 220 216 }; 221 217 tbi0: tbi-phy@11 { ··· 235 231 reg = <0x25000 0x1000>; 236 232 ranges = <0x0 0x25000 0x1000>; 237 233 local-mac-address = [ 00 00 00 00 00 00 ]; 238 - interrupts = <34 0x8 33 0x8 32 0x8>; 234 + interrupts = <34 IRQ_TYPE_LEVEL_LOW>, 235 + <33 IRQ_TYPE_LEVEL_LOW>, 236 + <32 IRQ_TYPE_LEVEL_LOW>; 239 237 interrupt-parent = <&ipic>; 240 238 tbi-handle = < &tbi1 >; 241 239 phy-handle = < &phy4 >; ··· 265 259 compatible = "fsl,ns16550", "ns16550"; 266 260 reg = <0x4500 0x100>; 267 261 clock-frequency = <0>; 268 - interrupts = <9 0x8>; 262 + interrupts = <9 IRQ_TYPE_LEVEL_LOW>; 269 263 interrupt-parent = <&ipic>; 270 264 }; 271 265 ··· 275 269 compatible = "fsl,ns16550", "ns16550"; 276 270 reg = <0x4600 0x100>; 277 271 clock-frequency = <0>; 278 - interrupts = <10 0x8>; 272 + interrupts = <10 IRQ_TYPE_LEVEL_LOW>; 279 273 interrupt-parent = <&ipic>; 280 274 }; 281 275 282 276 /* IPIC 283 - * interrupts cell = <intr #, sense> 284 - * sense values match linux IORESOURCE_IRQ_* defines: 285 - * sense == 8: Level, low assertion 286 - * sense == 2: Edge, high-to-low change 277 + * interrupts cell = <intr #, type> 288 278 */ 289 279 ipic: pic@700 { 290 280 interrupt-controller; ··· 293 290 pmc: power@b00 { 294 291 compatible = "fsl,mpc8313-pmc", "fsl,mpc8349-pmc"; 295 292 reg = <0xb00 0x100 0xa00 0x100>; 296 - interrupts = <80 8>; 293 + interrupts = <80 IRQ_TYPE_LEVEL_LOW>; 297 294 interrupt-parent = <&ipic>; 298 295 fsl,mpc8313-wakeup-timer = <&gtm1>; 299 296 ··· 309 306 gtm1: timer@500 { 310 307 compatible = "fsl,mpc8313-gtm", "fsl,gtm"; 311 308 reg = <0x500 0x100>; 312 - interrupts = <90 8 78 8 84 8 72 8>; 309 + interrupts = <90 IRQ_TYPE_LEVEL_LOW>, 310 + <78 IRQ_TYPE_LEVEL_LOW>, 311 + <84 IRQ_TYPE_LEVEL_LOW>, 312 + <72 IRQ_TYPE_LEVEL_LOW>; 313 313 interrupt-parent = <&ipic>; 314 314 }; 315 315 316 316 timer@600 { 317 317 compatible = "fsl,mpc8313-gtm", "fsl,gtm"; 318 318 reg = <0x600 0x100>; 319 - interrupts = <91 8 79 8 85 8 73 8>; 319 + interrupts = <91 IRQ_TYPE_LEVEL_LOW>, 320 + <79 IRQ_TYPE_LEVEL_LOW>, 321 + <85 IRQ_TYPE_LEVEL_LOW>, 322 + <73 IRQ_TYPE_LEVEL_LOW>; 320 323 interrupt-parent = <&ipic>; 321 324 }; 322 325 }; ··· 350 341 0x7800 0x0 0x0 0x3 &ipic 17 0x8 351 342 0x7800 0x0 0x0 0x4 &ipic 18 0x8>; 352 343 interrupt-parent = <&ipic>; 353 - interrupts = <66 0x8>; 344 + interrupts = <66 IRQ_TYPE_LEVEL_LOW>; 354 345 bus-range = <0x0 0x0>; 355 346 ranges = <0x02000000 0x0 0x90000000 0x90000000 0x0 0x10000000 356 347 0x42000000 0x0 0x80000000 0x80000000 0x0 0x10000000 ··· 372 363 reg = <0xe00082a8 4>; 373 364 ranges = <0 0xe0008100 0x1a8>; 374 365 interrupt-parent = <&ipic>; 375 - interrupts = <71 8>; 366 + interrupts = <71 IRQ_TYPE_LEVEL_LOW>; 376 367 377 368 dma-channel@0 { 378 369 compatible = "fsl,mpc8313-dma-channel", 379 370 "fsl,elo-dma-channel"; 380 371 reg = <0 0x28>; 381 372 interrupt-parent = <&ipic>; 382 - interrupts = <71 8>; 373 + interrupts = <71 IRQ_TYPE_LEVEL_LOW>; 383 374 cell-index = <0>; 384 375 }; 385 376 ··· 388 379 "fsl,elo-dma-channel"; 389 380 reg = <0x80 0x28>; 390 381 interrupt-parent = <&ipic>; 391 - interrupts = <71 8>; 382 + interrupts = <71 IRQ_TYPE_LEVEL_LOW>; 392 383 cell-index = <1>; 393 384 }; 394 385 ··· 397 388 "fsl,elo-dma-channel"; 398 389 reg = <0x100 0x28>; 399 390 interrupt-parent = <&ipic>; 400 - interrupts = <71 8>; 391 + interrupts = <71 IRQ_TYPE_LEVEL_LOW>; 401 392 cell-index = <2>; 402 393 }; 403 394 ··· 406 397 "fsl,elo-dma-channel"; 407 398 reg = <0x180 0x28>; 408 399 interrupt-parent = <&ipic>; 409 - interrupts = <71 8>; 400 + interrupts = <71 IRQ_TYPE_LEVEL_LOW>; 410 401 cell-index = <3>; 411 402 }; 412 403 };
+64 -55
arch/powerpc/boot/dts/mpc8315erdb.dts
··· 40 40 }; 41 41 }; 42 42 43 - memory { 43 + memory@0 { 44 44 device_type = "memory"; 45 45 reg = <0x00000000 0x08000000>; // 128MB at 0 46 46 }; ··· 50 50 #size-cells = <1>; 51 51 compatible = "fsl,mpc8315-elbc", "fsl,elbc", "simple-bus"; 52 52 reg = <0xe0005000 0x1000>; 53 - interrupts = <77 0x8>; 53 + interrupts = <77 IRQ_TYPE_LEVEL_LOW>; 54 54 interrupt-parent = <&ipic>; 55 55 56 56 // CS0 and CS1 are swapped when ··· 112 112 cell-index = <0>; 113 113 compatible = "fsl-i2c"; 114 114 reg = <0x3000 0x100>; 115 - interrupts = <14 0x8>; 115 + interrupts = <14 IRQ_TYPE_LEVEL_LOW>; 116 116 interrupt-parent = <&ipic>; 117 117 dfsrr; 118 118 rtc@68 { ··· 133 133 cell-index = <0>; 134 134 compatible = "fsl,spi"; 135 135 reg = <0x7000 0x1000>; 136 - interrupts = <16 0x8>; 136 + interrupts = <16 IRQ_TYPE_LEVEL_LOW>; 137 137 interrupt-parent = <&ipic>; 138 + #address-cells = <1>; 139 + #size-cells = <0>; 138 140 mode = "cpu"; 139 141 }; ··· 147 145 reg = <0x82a8 4>; 148 146 ranges = <0 0x8100 0x1a8>; 149 147 interrupt-parent = <&ipic>; 150 - interrupts = <71 8>; 148 + interrupts = <71 IRQ_TYPE_LEVEL_LOW>; 151 149 cell-index = <0>; 152 150 dma-channel@0 { 153 151 compatible = "fsl,mpc8315-dma-channel", "fsl,elo-dma-channel"; 154 152 reg = <0 0x80>; 155 153 cell-index = <0>; 156 154 interrupt-parent = <&ipic>; 157 - interrupts = <71 8>; 155 + interrupts = <71 IRQ_TYPE_LEVEL_LOW>; 158 156 }; 159 157 dma-channel@80 { 160 158 compatible = "fsl,mpc8315-dma-channel", "fsl,elo-dma-channel"; 161 159 reg = <0x80 0x80>; 162 160 cell-index = <1>; 163 161 interrupt-parent = <&ipic>; 164 - interrupts = <71 8>; 162 + interrupts = <71 IRQ_TYPE_LEVEL_LOW>; 165 163 }; 166 164 dma-channel@100 { 167 165 compatible = "fsl,mpc8315-dma-channel", "fsl,elo-dma-channel"; 168 166 reg = <0x100 0x80>; 169 167 cell-index = <2>; 170 168 interrupt-parent = <&ipic>; 171 - interrupts = <71 8>; 169 + interrupts = <71 IRQ_TYPE_LEVEL_LOW>; 172 170 }; 173 171 dma-channel@180 { 174 172 compatible = "fsl,mpc8315-dma-channel", "fsl,elo-dma-channel"; 175 173 reg = <0x180 0x28>; 176 174 cell-index = <3>; 177 175 interrupt-parent = <&ipic>; 178 - interrupts = <71 8>; 176 + interrupts = <71 IRQ_TYPE_LEVEL_LOW>; 179 177 }; 180 178 }; ··· 185 183 #address-cells = <1>; 186 184 #size-cells = <0>; 187 185 interrupt-parent = <&ipic>; 188 - interrupts = <38 0x8>; 186 + interrupts = <38 IRQ_TYPE_LEVEL_LOW>; 189 187 phy_type = "utmi"; 190 188 }; ··· 199 197 reg = <0x24000 0x1000>; 200 198 ranges = <0x0 0x24000 0x1000>; 201 199 local-mac-address = [ 00 00 00 00 00 00 ]; 202 - interrupts = <32 0x8 33 0x8 34 0x8>; 200 + interrupts = <32 IRQ_TYPE_LEVEL_LOW>, 201 + <33 IRQ_TYPE_LEVEL_LOW>, 202 + <34 IRQ_TYPE_LEVEL_LOW>; 203 203 interrupt-parent = <&ipic>; 204 204 tbi-handle = <&tbi0>; 205 205 phy-handle = < &phy0 >; ··· 242 238 reg = <0x25000 0x1000>; 243 239 ranges = <0x0 0x25000 0x1000>; 244 240 local-mac-address = [ 00 00 00 00 00 00 ]; 245 - interrupts = <35 0x8 36 0x8 37 0x8>; 241 + interrupts = <35 IRQ_TYPE_LEVEL_LOW>, 242 + <36 IRQ_TYPE_LEVEL_LOW>, 243 + <37 IRQ_TYPE_LEVEL_LOW>; 246 244 interrupt-parent = <&ipic>; 247 245 tbi-handle = <&tbi1>; 248 246 phy-handle = < &phy1 >; ··· 269 263 compatible = "fsl,ns16550", "ns16550"; 270 264 reg = <0x4500 0x100>; 271 265 clock-frequency = <133333333>; 272 - interrupts = <9 0x8>; 266 + interrupts = <9 IRQ_TYPE_LEVEL_LOW>; 273 267 interrupt-parent = <&ipic>; 274 268 }; ··· 279 273 compatible = "fsl,ns16550", "ns16550"; 280 274 reg = <0x4600 0x100>; 281 275 clock-frequency = <133333333>; 282 - interrupts = <10 0x8>; 276 + interrupts = <10 IRQ_TYPE_LEVEL_LOW>; 283 277 interrupt-parent = <&ipic>; 284 278 }; ··· 288 282 "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1", 289 283 "fsl,sec2.0"; 290 284 reg = <0x30000 0x10000>; 291 - interrupts = <11 0x8>; 285 + interrupts = <11 IRQ_TYPE_LEVEL_LOW>; 292 286 interrupt-parent = <&ipic>; 293 287 fsl,num-channels = <4>; 294 288 fsl,channel-fifo-len = <24>; ··· 300 294 compatible = "fsl,mpc8315-sata", "fsl,pq-sata"; 301 295 reg = <0x18000 0x1000>; 302 296 cell-index = <1>; 303 - interrupts = <44 0x8>; 297 + interrupts = <44 IRQ_TYPE_LEVEL_LOW>; 304 298 interrupt-parent = <&ipic>; 305 299 }; ··· 308 302 compatible = "fsl,mpc8315-sata", "fsl,pq-sata"; 309 303 reg = <0x19000 0x1000>; 310 304 cell-index = <2>; 311 - interrupts = <45 0x8>; 305 + interrupts = <45 IRQ_TYPE_LEVEL_LOW>; 312 306 interrupt-parent = <&ipic>; 313 307 }; 314 308 315 309 gtm1: timer@500 { 316 310 compatible = "fsl,mpc8315-gtm", "fsl,gtm"; 317 311 reg = <0x500 0x100>; 318 - interrupts = <90 8 78 8 84 8 72 8>; 312 + interrupts = <90 IRQ_TYPE_LEVEL_LOW>, 313 + <78 IRQ_TYPE_LEVEL_LOW>, 314 + <84 IRQ_TYPE_LEVEL_LOW>, 315 + <72 IRQ_TYPE_LEVEL_LOW>; 319 316 interrupt-parent = <&ipic>; 320 317 clock-frequency = <133333333>; 321 318 }; ··· 326 317 timer@600 { 327 318 compatible = "fsl,mpc8315-gtm", "fsl,gtm"; 328 319 reg = <0x600 0x100>; 329 - interrupts = <91 8 79 8 85 8 73 8>; 320 + interrupts = <91 IRQ_TYPE_LEVEL_LOW>, 321 + <79 IRQ_TYPE_LEVEL_LOW>, 322 + <85 IRQ_TYPE_LEVEL_LOW>, 323 + <73 IRQ_TYPE_LEVEL_LOW>; 330 324 interrupt-parent = <&ipic>; 331 325 clock-frequency = <133333333>; 332 326 }; 333 327 334 328 /* IPIC 335 - * interrupts cell = <intr #, sense> 336 - * sense values match linux IORESOURCE_IRQ_* defines: 337 - * sense == 8: Level, low assertion 338 - * sense == 2: Edge, high-to-low change 329 + * interrupts cell = <intr #, type> 339 330 */ 340 331 ipic: interrupt-controller@700 { 341 332 interrupt-controller; ··· 349 340 compatible = "fsl,ipic-msi"; 350 341 reg = <0x7c0 0x40>; 351 342 msi-available-ranges = <0 0x100>; 352 - interrupts = <0x43 0x8 353 - 0x4 0x8 354 - 0x51 0x8 355 - 0x52 0x8 356 - 0x56 0x8 357 - 0x57 0x8 358 - 0x58 0x8 359 - 0x59 0x8>; 343 + interrupts = <0x43 IRQ_TYPE_LEVEL_LOW 344 + 0x4 IRQ_TYPE_LEVEL_LOW 345 + 0x51 IRQ_TYPE_LEVEL_LOW 346 + 0x52 IRQ_TYPE_LEVEL_LOW 347 + 0x56 IRQ_TYPE_LEVEL_LOW 348 + 0x57 IRQ_TYPE_LEVEL_LOW 349 + 0x58 IRQ_TYPE_LEVEL_LOW 350 + 0x59 IRQ_TYPE_LEVEL_LOW>; 360 351 interrupt-parent = < &ipic >; 361 352 }; ··· 364 355 compatible = "fsl,mpc8315-pmc", "fsl,mpc8313-pmc", 365 356 "fsl,mpc8349-pmc"; 366 357 reg = <0xb00 0x100 0xa00 0x100>; 367 - interrupts = <80 8>; 358 + interrupts = <80 IRQ_TYPE_LEVEL_LOW>; 368 359 interrupt-parent = <&ipic>; 369 360 fsl,mpc8313-wakeup-timer = <&gtm1>; 370 361 }; ··· 383 374 interrupt-map-mask = <0xf800 0x0 0x0 0x7>; 384 375 interrupt-map = < 385 376 /* IDSEL 0x0E -mini PCI */ 386 - 0x7000 0x0 0x0 0x1 &ipic 18 0x8 387 - 0x7000 0x0 0x0 0x2 &ipic 18 0x8 388 - 0x7000 0x0 0x0 0x3 &ipic 18 0x8 389 - 0x7000 0x0 0x0 0x4 &ipic 18 0x8 377 + 0x7000 0x0 0x0 0x1 &ipic 18 IRQ_TYPE_LEVEL_LOW 378 + 0x7000 0x0 0x0 0x2 &ipic 18 IRQ_TYPE_LEVEL_LOW 379 + 0x7000 0x0 0x0 0x3 &ipic 18 IRQ_TYPE_LEVEL_LOW 380 + 0x7000 0x0 0x0 0x4 &ipic 18 IRQ_TYPE_LEVEL_LOW 381 382 /* IDSEL 0x0F -mini PCI */ 392 - 0x7800 0x0 0x0 0x1 &ipic 17 0x8 393 - 0x7800 0x0 0x0 0x2 &ipic 17 0x8 394 - 0x7800 0x0 0x0 0x3 &ipic 17 0x8 395 - 0x7800 0x0 0x0 0x4 &ipic 17 0x8 383 + 0x7800 0x0 0x0 0x1 &ipic 17 IRQ_TYPE_LEVEL_LOW 384 + 0x7800 0x0 0x0 0x2 &ipic 17 IRQ_TYPE_LEVEL_LOW 385 + 0x7800 0x0 0x0 0x3 &ipic 17 IRQ_TYPE_LEVEL_LOW 386 + 0x7800 0x0 0x0 0x4 &ipic 17 IRQ_TYPE_LEVEL_LOW 396 387 397 388 /* IDSEL 0x10 - PCI slot */ 398 - 0x8000 0x0 0x0 0x1 &ipic 48 0x8 399 - 0x8000 0x0 0x0 0x2 &ipic 17 0x8 400 - 0x8000 0x0 0x0 0x3 &ipic 48 0x8 401 - 0x8000 0x0 0x0 0x4 &ipic 17 0x8>; 389 + 0x8000 0x0 0x0 0x1 &ipic 48 IRQ_TYPE_LEVEL_LOW 390 + 0x8000 0x0 0x0 0x2 &ipic 17 IRQ_TYPE_LEVEL_LOW 391 + 0x8000 0x0 0x0 0x3 &ipic 48 IRQ_TYPE_LEVEL_LOW 392 + 0x8000 0x0 0x0 0x4 &ipic 17 IRQ_TYPE_LEVEL_LOW>; 402 393 interrupt-parent = <&ipic>; 403 - interrupts = <66 0x8>; 394 + interrupts = <66 IRQ_TYPE_LEVEL_LOW>; 404 395 bus-range = <0x0 0x0>; 405 396 ranges = <0x02000000 0 0x90000000 0x90000000 0 0x10000000 406 397 0x42000000 0 0x80000000 0x80000000 0 0x10000000 ··· 426 417 0x01000000 0 0x00000000 0xb1000000 0 0x00800000>; 427 418 bus-range = <0 255>; 428 419 interrupt-map-mask = <0xf800 0 0 7>; 429 - interrupt-map = <0 0 0 1 &ipic 1 8 430 - 0 0 0 2 &ipic 1 8 431 - 0 0 0 3 &ipic 1 8 432 - 0 0 0 4 &ipic 1 8>; 420 + interrupt-map = <0 0 0 1 &ipic 1 IRQ_TYPE_LEVEL_LOW 421 + 0 0 0 2 &ipic 1 IRQ_TYPE_LEVEL_LOW 422 + 0 0 0 3 &ipic 1 IRQ_TYPE_LEVEL_LOW 423 + 0 0 0 4 &ipic 1 IRQ_TYPE_LEVEL_LOW>; 433 424 clock-frequency = <0>; 434 425 435 426 pcie@0 { ··· 457 448 0x01000000 0 0x00000000 0xd1000000 0 0x00800000>; 458 449 bus-range = <0 255>; 459 450 interrupt-map-mask = <0xf800 0 0 7>; 460 - interrupt-map = <0 0 0 1 &ipic 2 8 461 - 0 0 0 2 &ipic 2 8 462 - 0 0 0 3 &ipic 2 8 463 - 0 0 0 4 &ipic 2 8>; 451 + interrupt-map = <0 0 0 1 &ipic 2 IRQ_TYPE_LEVEL_LOW 452 + 0 0 0 2 &ipic 2 IRQ_TYPE_LEVEL_LOW 453 + 0 0 0 3 &ipic 2 IRQ_TYPE_LEVEL_LOW 454 + 0 0 0 4 &ipic 2 IRQ_TYPE_LEVEL_LOW>; 464 455 clock-frequency = <0>; 465 456 466 457 pcie@0 { ··· 480 471 leds { 481 472 compatible = "gpio-leds"; 482 473 483 - pwr { 474 + led-pwr { 484 475 gpios = <&mcu_pio 0 0>; 485 476 default-state = "on"; 486 477 }; 487 478 488 - hdd { 479 + led-hdd { 489 480 gpios = <&mcu_pio 1 0>; 490 481 linux,default-trigger = "disk-activity"; 491 482 };
+1 -1
arch/powerpc/boot/dts/mpc832x_rdb.dts
··· 38 38 }; 39 39 }; 40 40 41 - memory { 41 + memory@0 { 42 42 device_type = "memory"; 43 43 reg = <0x00000000 0x04000000>; 44 44 };
+1 -1
arch/powerpc/boot/dts/mpc8349emitx.dts
··· 39 39 }; 40 40 }; 41 41 42 - memory { 42 + memory@0 { 43 43 device_type = "memory"; 44 44 reg = <0x00000000 0x10000000>; 45 45 };
+1 -1
arch/powerpc/boot/dts/mpc8349emitxgp.dts
··· 37 37 }; 38 38 }; 39 39 40 - memory { 40 + memory@0 { 41 41 device_type = "memory"; 42 42 reg = <0x00000000 0x10000000>; 43 43 };
+1 -1
arch/powerpc/boot/dts/mpc8377_rdb.dts
··· 39 39 }; 40 40 }; 41 41 42 - memory { 42 + memory@0 { 43 43 device_type = "memory"; 44 44 reg = <0x00000000 0x10000000>; // 256MB at 0 45 45 };
+1 -1
arch/powerpc/boot/dts/mpc8377_wlan.dts
··· 40 40 }; 41 41 }; 42 42 43 - memory { 43 + memory@0 { 44 44 device_type = "memory"; 45 45 reg = <0x00000000 0x20000000>; // 512MB at 0 46 46 };
+1 -1
arch/powerpc/boot/dts/mpc8378_rdb.dts
··· 39 39 }; 40 40 }; 41 41 42 - memory { 42 + memory@0 { 43 43 device_type = "memory"; 44 44 reg = <0x00000000 0x10000000>; // 256MB at 0 45 45 };
+1 -1
arch/powerpc/boot/dts/mpc8379_rdb.dts
··· 37 37 }; 38 38 }; 39 39 40 - memory { 40 + memory@0 { 41 41 device_type = "memory"; 42 42 reg = <0x00000000 0x10000000>; // 256MB at 0 43 43 };
+1 -3
arch/powerpc/include/asm/nohash/32/pgtable.h
··· 120 120 121 121 #if defined(CONFIG_44x) 122 122 #include <asm/nohash/32/pte-44x.h> 123 - #elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT) 124 - #include <asm/nohash/pte-e500.h> 125 123 #elif defined(CONFIG_PPC_85xx) 126 - #include <asm/nohash/32/pte-85xx.h> 124 + #include <asm/nohash/pte-e500.h> 127 125 #elif defined(CONFIG_PPC_8xx) 128 126 #include <asm/nohash/32/pte-8xx.h> 129 127 #endif
-59
arch/powerpc/include/asm/nohash/32/pte-85xx.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_POWERPC_NOHASH_32_PTE_85xx_H 3 - #define _ASM_POWERPC_NOHASH_32_PTE_85xx_H 4 - #ifdef __KERNEL__ 5 - 6 - /* PTE bit definitions for Freescale BookE SW loaded TLB MMU based 7 - * processors 8 - * 9 - MMU Assist Register 3: 10 - 11 - 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63 12 - RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR 13 - 14 - - PRESENT *must* be in the bottom two bits because swap PTEs use 15 - the top 30 bits. 16 - 17 - */ 18 - 19 - /* Definitions for FSL Book-E Cores */ 20 - #define _PAGE_READ 0x00001 /* H: Read permission (SR) */ 21 - #define _PAGE_PRESENT 0x00002 /* S: PTE contains a translation */ 22 - #define _PAGE_WRITE 0x00004 /* S: Write permission (SW) */ 23 - #define _PAGE_DIRTY 0x00008 /* S: Page dirty */ 24 - #define _PAGE_EXEC 0x00010 /* H: SX permission */ 25 - #define _PAGE_ACCESSED 0x00020 /* S: Page referenced */ 26 - 27 - #define _PAGE_ENDIAN 0x00040 /* H: E bit */ 28 - #define _PAGE_GUARDED 0x00080 /* H: G bit */ 29 - #define _PAGE_COHERENT 0x00100 /* H: M bit */ 30 - #define _PAGE_NO_CACHE 0x00200 /* H: I bit */ 31 - #define _PAGE_WRITETHRU 0x00400 /* H: W bit */ 32 - #define _PAGE_SPECIAL 0x00800 /* S: Special page */ 33 - 34 - #define _PMD_PRESENT 0 35 - #define _PMD_PRESENT_MASK (PAGE_MASK) 36 - #define _PMD_BAD (~PAGE_MASK) 37 - #define _PMD_USER 0 38 - 39 - #define _PTE_NONE_MASK 0 40 - 41 - #define PTE_WIMGE_SHIFT (6) 42 - 43 - /* 44 - * We define 2 sets of base prot bits, one for basic pages (ie, 45 - * cacheable kernel and user pages) and one for non cacheable 46 - * pages. We always set _PAGE_COHERENT when SMP is enabled or 47 - * the processor might need it for DMA coherency. 48 - */ 49 - #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) 50 - #if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) 51 - #define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) 52 - #else 53 - #define _PAGE_BASE (_PAGE_BASE_NC) 54 - #endif 55 - 56 - #include <asm/pgtable-masks.h> 57 - 58 - #endif /* __KERNEL__ */ 59 - #endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_85xx_H */
+1 -1
arch/powerpc/include/asm/pgtable-types.h
··· 49 49 #endif /* CONFIG_PPC64 */ 50 50 51 51 /* PGD level */ 52 - #if defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT) 52 + #if defined(CONFIG_PPC_85xx) 53 53 typedef struct { unsigned long long pgd; } pgd_t; 54 54 55 55 static inline unsigned long long pgd_val(pgd_t x)
+48 -23
arch/powerpc/include/asm/uaccess.h
··· 15 15 #define TASK_SIZE_MAX TASK_SIZE_USER64 16 16 #endif 17 17 18 + /* Threshold above which VMX copy path is used */ 19 + #define VMX_COPY_THRESHOLD 3328 20 + 18 21 #include <asm-generic/access_ok.h> 19 22 20 23 /* ··· 258 255 ".section .fixup,\"ax\"\n" \ 259 256 "4: li %0,%3\n" \ 260 257 " li %1,0\n" \ 261 - " li %1+1,0\n" \ 258 + " li %L1,0\n" \ 262 259 " b 3b\n" \ 263 260 ".previous\n" \ 264 261 EX_TABLE(1b, 4b) \ ··· 329 326 extern unsigned long __copy_tofrom_user(void __user *to, 330 327 const void __user *from, unsigned long size); 331 328 332 - #ifdef __powerpc64__ 329 + unsigned long __copy_tofrom_user_base(void __user *to, 330 + const void __user *from, unsigned long size); 331 + 332 + unsigned long __copy_tofrom_user_power7_vmx(void __user *to, 333 + const void __user *from, unsigned long size); 334 + 335 + static __always_inline bool will_use_vmx(unsigned long n) 336 + { 337 + return IS_ENABLED(CONFIG_ALTIVEC) && cpu_has_feature(CPU_FTR_VMX_COPY) && 338 + n > VMX_COPY_THRESHOLD; 339 + } 340 + 341 + static __always_inline unsigned long 342 + raw_copy_tofrom_user(void __user *to, const void __user *from, 343 + unsigned long n, unsigned long dir) 344 + { 345 + unsigned long ret; 346 + 347 + if (will_use_vmx(n) && enter_vmx_usercopy()) { 348 + allow_user_access(to, dir); 349 + ret = __copy_tofrom_user_power7_vmx(to, from, n); 350 + prevent_user_access(dir); 351 + exit_vmx_usercopy(); 352 + 353 + if (unlikely(ret)) { 354 + allow_user_access(to, dir); 355 + ret = __copy_tofrom_user_base(to, from, n); 356 + prevent_user_access(dir); 357 + } 358 + return ret; 359 + } 360 + 361 + allow_user_access(to, dir); 362 + ret = __copy_tofrom_user(to, from, n); 363 + prevent_user_access(dir); 364 + return ret; 365 + } 366 + 367 + #ifdef CONFIG_PPC64 333 368 static inline unsigned long 334 369 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) 335 370 { 336 - unsigned long ret; 337 - 338 371 barrier_nospec(); 339 - allow_user_access(to, KUAP_READ_WRITE); 340 - ret = __copy_tofrom_user(to, from, n); 341 - prevent_user_access(KUAP_READ_WRITE); 342 - return ret; 372 + return raw_copy_tofrom_user(to, from, n, KUAP_READ_WRITE); 343 373 } 344 - #endif /* __powerpc64__ */ 374 + #endif /* CONFIG_PPC64 */ 345 375 346 - static inline unsigned long raw_copy_from_user(void *to, 347 - const void __user *from, unsigned long n) 376 + static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) 348 377 { 349 - unsigned long ret; 350 - 351 - allow_user_access(NULL, KUAP_READ); 352 - ret = __copy_tofrom_user((__force void __user *)to, from, n); 353 - prevent_user_access(KUAP_READ); 354 - return ret; 378 + return raw_copy_tofrom_user((__force void __user *)to, from, n, KUAP_READ); 355 379 } 356 380 357 381 static inline unsigned long 358 382 raw_copy_to_user(void __user *to, const void *from, unsigned long n) 359 383 { 360 - unsigned long ret; 361 - 362 - allow_user_access(to, KUAP_WRITE); 363 - ret = __copy_tofrom_user(to, (__force const void __user *)from, n); 364 - prevent_user_access(KUAP_WRITE); 365 - return ret; 384 + return raw_copy_tofrom_user(to, (__force const void __user *)from, n, KUAP_WRITE); 366 385 } 367 386 368 387 unsigned long __arch_clear_user(void __user *addr, unsigned long size);
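The uaccess.h rework above funnels every user copy through one raw_copy_tofrom_user() helper: copies larger than VMX_COPY_THRESHOLD (3328 bytes) on CPUs with CPU_FTR_VMX_COPY try the Power7 VMX routine first, and only if that leaves a nonzero residue (or VMX context cannot be entered) does the base integer copy run; note that each attempt opens and closes its own KUAP window, so user access is never left enabled across the fallback. Ordinary code never calls these helpers directly; a hedged caller-side sketch:

    #include <linux/uaccess.h>

    /* Sketch: a copy this large would take the VMX path on capable CPUs. */
    static long read_big_blob(void *dst, const void __user *usrc, size_t len)
    {
            if (copy_from_user(dst, usrc, len))
                    return -EFAULT; /* nonzero return = bytes left uncopied */
            return 0;
    }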
+1 -45
arch/powerpc/kernel/head_85xx.S
··· 305 305 * r12 is pointer to the pte 306 306 * r10 is the pshift from the PGD, if we're a hugepage 307 307 */ 308 - #ifdef CONFIG_PTE_64BIT 309 308 #ifdef CONFIG_HUGETLB_PAGE 310 309 #define FIND_PTE \ 311 310 rlwinm r12, r13, 14, 18, 28; /* Compute pgdir/pmd offset */ \ ··· 328 329 rlwimi r12, r13, 23, 20, 28; /* Compute pte address */ \ 329 330 lwz r11, 4(r12); /* Get pte entry */ 330 331 #endif /* HUGEPAGE */ 331 - #else /* !PTE_64BIT */ 332 - #define FIND_PTE \ 333 - rlwimi r11, r13, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ 334 - lwz r11, 0(r11); /* Get L1 entry */ \ 335 - rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \ 336 - beq 2f; /* Bail if no table */ \ 337 - rlwimi r12, r13, 22, 20, 29; /* Compute PTE address */ \ 338 - lwz r11, 0(r12); /* Get Linux PTE */ 339 - #endif 340 332 341 333 /* 342 334 * Interrupt vector entry code ··· 463 473 4: 464 474 FIND_PTE 465 475 466 - #ifdef CONFIG_PTE_64BIT 467 476 li r13,_PAGE_PRESENT|_PAGE_BAP_SR 468 477 oris r13,r13,_PAGE_ACCESSED@h 469 - #else 470 - li r13,_PAGE_PRESENT|_PAGE_READ|_PAGE_ACCESSED 471 - #endif 472 478 andc. r13,r13,r11 /* Check permission */ 473 479 474 - #ifdef CONFIG_PTE_64BIT 475 480 #ifdef CONFIG_SMP 476 481 subf r13,r11,r12 /* create false data dep */ 477 482 lwzx r13,r11,r13 /* Get upper pte bits */ 478 483 #else 479 484 lwz r13,0(r12) /* Get upper pte bits */ 480 - #endif 481 485 #endif 482 486 483 487 bne 2f /* Bail if permission/valid mismatch */ ··· 536 552 537 553 FIND_PTE 538 554 /* Make up the required permissions for kernel code */ 539 - #ifdef CONFIG_PTE_64BIT 540 555 li r13,_PAGE_PRESENT | _PAGE_BAP_SX 541 556 oris r13,r13,_PAGE_ACCESSED@h 542 - #else 543 - li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC 544 - #endif 545 557 b 4f 546 558 547 559 /* Get the PGD for the current thread */ ··· 553 573 554 574 FIND_PTE 555 575 /* Make up the required permissions for user code */ 556 - #ifdef CONFIG_PTE_64BIT 557 576 li r13,_PAGE_PRESENT | _PAGE_BAP_UX 558 577 oris r13,r13,_PAGE_ACCESSED@h 559 - #else 560 - li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC 561 - #endif 562 578 563 579 4: 564 580 andc. r13,r13,r11 /* Check permission */ 565 581 566 - #ifdef CONFIG_PTE_64BIT 567 582 #ifdef CONFIG_SMP 568 583 subf r13,r11,r12 /* create false data dep */ 569 584 lwzx r13,r11,r13 /* Get upper pte bits */ 570 585 #else 571 586 lwz r13,0(r12) /* Get upper pte bits */ 572 - #endif 573 587 #endif 574 588 575 589 bne 2f /* Bail if permission mismatch */ ··· 657 683 * r10 - tsize encoding (if HUGETLB_PAGE) or available to use 658 684 * r11 - TLB (info from Linux PTE) 659 685 * r12 - available to use 660 - * r13 - upper bits of PTE (if PTE_64BIT) or available to use 686 + * r13 - upper bits of PTE 661 687 * CR5 - results of addr >= PAGE_OFFSET 662 688 * MAS0, MAS1 - loaded with proper value when we get here 663 689 * MAS2, MAS3 - will need additional info from Linux PTE ··· 725 751 * here we (properly should) assume have the appropriate value. 726 752 */ 727 753 finish_tlb_load_cont: 728 - #ifdef CONFIG_PTE_64BIT 729 754 rlwinm r12, r11, 32-2, 26, 31 /* Move in perm bits */ 730 755 andi. r10, r11, _PAGE_DIRTY 731 756 bne 1f ··· 737 764 srwi r10, r13, 12 /* grab RPN[12:31] */ 738 765 mtspr SPRN_MAS7, r10 739 766 END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) 740 - #else 741 - li r10, (_PAGE_EXEC | _PAGE_READ) 742 - mr r13, r11 743 - rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */ 744 - and r12, r11, r10 745 - mcrf cr0, cr5 /* Test for user page */ 746 - slwi r10, r12, 1 747 - or r10, r10, r12 748 - rlwinm r10, r10, 0, ~_PAGE_EXEC /* Clear SX on user pages */ 749 - isellt r12, r10, r12 750 - rlwimi r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */ 751 - mtspr SPRN_MAS3, r13 752 - #endif 753 767 754 768 mfspr r12, SPRN_MAS2 755 - #ifdef CONFIG_PTE_64BIT 756 769 rlwimi r12, r11, 32-19, 27, 31 /* extract WIMGE from pte */ 757 - #else 758 - rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ 759 - #endif 760 770 #ifdef CONFIG_HUGETLB_PAGE 761 771 beq 6, 3f /* don't mask if page isn't huge */ 762 772 li r13, 1
+1 -1
arch/powerpc/kernel/iommu.c
··· 1159 1159 struct device *dev, 1160 1160 struct iommu_domain *old) 1161 1161 { 1162 - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); 1162 + struct iommu_domain *domain = iommu_driver_get_domain_for_dev(dev); 1163 1163 struct iommu_table_group *table_group; 1164 1164 struct iommu_group *grp; 1165 1165
+2 -1
arch/powerpc/kernel/prom_init.c
··· 2893 2893 for (node = 0; prom_next_node(&node); ) { 2894 2894 type[0] = '\0'; 2895 2895 prom_getprop(node, "device_type", type, sizeof(type)); 2896 - if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s")) 2896 + if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s") && 2897 + prom_strcmp(type, "media-bay")) 2897 2898 continue; 2898 2899 2899 2900 if (prom_getproplen(node, "#size-cells") != PROM_ERROR)
-10
arch/powerpc/kernel/setup-common.c
··· 35 35 #include <linux/of_irq.h> 36 36 #include <linux/hugetlb.h> 37 37 #include <linux/pgtable.h> 38 - #include <asm/kexec.h> 39 38 #include <asm/io.h> 40 39 #include <asm/paca.h> 41 40 #include <asm/processor.h> ··· 993 994 smp_release_cpus(); 994 995 995 996 initmem_init(); 996 - 997 - /* 998 - * Reserve large chunks of memory for use by CMA for kdump, fadump, KVM and 999 - * hugetlb. These must be called after initmem_init(), so that 1000 - * pageblock_order is initialised. 1001 - */ 1002 - fadump_cma_init(); 1003 - kdump_cma_reserve(); 1004 - kvm_cma_reserve(); 1005 997 1006 998 early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT); 1007 999
+22 -4
arch/powerpc/kernel/trace/ftrace.c
··· 37 37 if (addr >= (unsigned long)__exittext_begin && addr < (unsigned long)__exittext_end) 38 38 return 0; 39 39 40 - if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY) && 41 - !IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) { 42 - addr += MCOUNT_INSN_SIZE; 43 - if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) 40 + if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) { 41 + if (!IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) { 44 42 addr += MCOUNT_INSN_SIZE; 43 + if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS)) 44 + addr += MCOUNT_INSN_SIZE; 45 + } else if (IS_ENABLED(CONFIG_CC_IS_CLANG) && IS_ENABLED(CONFIG_PPC64)) { 46 + /* 47 + * addr points to global entry point though the NOP was emitted at local 48 + * entry point due to https://github.com/llvm/llvm-project/issues/163706 49 + * Handle that here with ppc_function_entry() for kernel symbols while 50 + * adjusting module addresses in the else case, by looking for the below 51 + * module global entry point sequence: 52 + * ld r2, -8(r12) 53 + * add r2, r2, r12 54 + */ 55 + if (is_kernel_text(addr) || is_kernel_inittext(addr)) 56 + addr = ppc_function_entry((void *)addr); 57 + else if ((ppc_inst_val(ppc_inst_read((u32 *)addr)) == 58 + PPC_RAW_LD(_R2, _R12, -8)) && 59 + (ppc_inst_val(ppc_inst_read((u32 *)(addr+4))) == 60 + PPC_RAW_ADD(_R2, _R2, _R12))) 61 + addr += 8; 62 + } 45 63 } 46 64 47 65 return addr;
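The ftrace.c hunk above works around an LLVM issue (llvm-project #163706) where, with out-of-line ftrace on clang/ppc64, the recorded address is a function's global entry point even though the patchable NOP sits at its local entry point: kernel text is redirected via ppc_function_entry(), while module addresses are advanced past the two-instruction global-entry TOC setup when it is recognized. A hedged refactor of the check the hunk open-codes (the helper name is illustrative):

    /* Does 'addr' point at the module global-entry TOC setup?
     *   ld  r2, -8(r12)
     *   add r2, r2, r12
     * If so, the local entry point is 8 bytes further on. */
    static bool at_module_global_entry(unsigned long addr)
    {
            u32 *p = (u32 *)addr;

            return ppc_inst_val(ppc_inst_read(p)) == PPC_RAW_LD(_R2, _R12, -8) &&
                   ppc_inst_val(ppc_inst_read(p + 1)) == PPC_RAW_ADD(_R2, _R2, _R12);
    }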
+9 -8
arch/powerpc/kexec/core.c
··· 23 23 #include <asm/firmware.h> 24 24 25 25 #define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG) 26 + #define __be_word __PASTE(__be, BITS_PER_LONG) 26 27 27 28 #ifdef CONFIG_CRASH_DUMP 28 29 void machine_crash_shutdown(struct pt_regs *regs) ··· 147 146 } 148 147 149 148 /* Values we need to export to the second kernel via the device tree. */ 150 - static phys_addr_t crashk_base; 151 - static phys_addr_t crashk_size; 152 - static unsigned long long mem_limit; 149 + static __be_word crashk_base; 150 + static __be_word crashk_size; 151 + static __be_word mem_limit; 153 152 154 153 static struct property crashk_base_prop = { 155 154 .name = "linux,crashkernel-base", 156 - .length = sizeof(phys_addr_t), 155 + .length = sizeof(__be_word), 157 156 .value = &crashk_base 158 157 }; 159 158 160 159 static struct property crashk_size_prop = { 161 160 .name = "linux,crashkernel-size", 162 - .length = sizeof(phys_addr_t), 161 + .length = sizeof(__be_word), 163 162 .value = &crashk_size, 164 163 }; 165 164 166 165 static struct property memory_limit_prop = { 167 166 .name = "linux,memory-limit", 168 - .length = sizeof(unsigned long long), 167 + .length = sizeof(__be_word), 169 168 .value = &mem_limit, 170 169 }; 171 170 ··· 194 193 } 195 194 #endif /* CONFIG_CRASH_RESERVE */ 196 195 197 - static phys_addr_t kernel_end; 196 + static __be_word kernel_end; 198 197 199 198 static struct property kernel_end_prop = { 200 199 .name = "linux,kernel-end", 201 - .length = sizeof(phys_addr_t), 200 + .length = sizeof(__be_word), 202 201 .value = &kernel_end, 203 202 }; 204 203
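The kexec/core.c change above fixes the type of the exported device-tree properties: FDT property values are big-endian regardless of the running kernel's endianness, so the backing variables are now declared through the __be_word paste (__be32 or __be64 to match BITS_PER_LONG) and the property lengths track that size. Whatever stores into them must byte-swap explicitly; a hedged sketch of the expected store (the actual assignment site is not part of this hunk):

    /* Sketch: publishing CPU-endian values through the __be_word props. */
    static void export_crashkernel(phys_addr_t base, phys_addr_t size)
    {
            crashk_base = cpu_to_be_ulong(base); /* be32 or be64 per word size */
            crashk_size = cpu_to_be_ulong(size);
    }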
+13 -1
arch/powerpc/kexec/file_load_64.c
··· 450 450 kbuf->buffer = headers; 451 451 kbuf->mem = KEXEC_BUF_MEM_UNKNOWN; 452 452 kbuf->bufsz = headers_sz; 453 + 454 + /* 455 + * Account for extra space required to accommodate additional memory 456 + * ranges in elfcorehdr due to memory hotplug events. 457 + */ 453 458 kbuf->memsz = headers_sz + kdump_extra_elfcorehdr_size(cmem); 454 459 kbuf->top_down = false; 455 460 ··· 465 460 } 466 461 467 462 image->elf_load_addr = kbuf->mem; 468 - image->elf_headers_sz = headers_sz; 463 + 464 + /* 465 + * If CONFIG_CRASH_HOTPLUG is enabled, the elfcorehdr kexec segment 466 + * memsz can be larger than bufsz. Always initialize elf_headers_sz 467 + * with memsz. This ensures the correct size is reserved for elfcorehdr 468 + * memory in the FDT prepared for kdump. 469 + */ 470 + image->elf_headers_sz = kbuf->memsz; 469 471 image->elf_headers = headers; 470 472 out: 471 473 kfree(cmem);
+2 -2
arch/powerpc/kvm/book3s.c
··· 38 38 39 39 /* #define EXIT_DEBUG */ 40 40 41 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 41 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 42 42 KVM_GENERIC_VM_STATS(), 43 43 STATS_DESC_ICOUNTER(VM, num_2M_pages), 44 44 STATS_DESC_ICOUNTER(VM, num_1G_pages) ··· 53 53 sizeof(kvm_vm_stats_desc), 54 54 }; 55 55 56 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 56 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 57 57 KVM_GENERIC_VCPU_STATS(), 58 58 STATS_DESC_COUNTER(VCPU, sum_exits), 59 59 STATS_DESC_COUNTER(VCPU, mmio_exits),
+2 -2
arch/powerpc/kvm/booke.c
··· 36 36 37 37 unsigned long kvmppc_booke_handlers; 38 38 39 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 39 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 40 40 KVM_GENERIC_VM_STATS(), 41 41 STATS_DESC_ICOUNTER(VM, num_2M_pages), 42 42 STATS_DESC_ICOUNTER(VM, num_1G_pages) ··· 51 51 sizeof(kvm_vm_stats_desc), 52 52 }; 53 53 54 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 54 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 55 55 KVM_GENERIC_VCPU_STATS(), 56 56 STATS_DESC_COUNTER(VCPU, sum_exits), 57 57 STATS_DESC_COUNTER(VCPU, mmio_exits),
+1 -5
arch/powerpc/kvm/e500.h
··· 39 39 /* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */ 40 40 #define E500_TLB_MAS2_ATTR (0x7f) 41 41 42 - struct tlbe_ref { 42 + struct tlbe_priv { 43 43 kvm_pfn_t pfn; /* valid only for TLB0, except briefly */ 44 44 unsigned int flags; /* E500_TLB_* */ 45 - }; 46 - 47 - struct tlbe_priv { 48 - struct tlbe_ref ref; 49 45 }; 50 46 51 47 #ifdef CONFIG_KVM_E500V2
+2 -2
arch/powerpc/kvm/e500_mmu.c
··· 920 920 vcpu_e500->gtlb_offset[0] = 0; 921 921 vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE; 922 922 923 - vcpu_e500->gtlb_priv[0] = kzalloc_objs(struct tlbe_ref, 923 + vcpu_e500->gtlb_priv[0] = kzalloc_objs(struct tlbe_priv, 924 924 vcpu_e500->gtlb_params[0].entries); 925 925 if (!vcpu_e500->gtlb_priv[0]) 926 926 goto free_vcpu; 927 927 928 - vcpu_e500->gtlb_priv[1] = kzalloc_objs(struct tlbe_ref, 928 + vcpu_e500->gtlb_priv[1] = kzalloc_objs(struct tlbe_priv, 929 929 vcpu_e500->gtlb_params[1].entries); 930 930 if (!vcpu_e500->gtlb_priv[1]) 931 931 goto free_vcpu;
+44 -47
arch/powerpc/kvm/e500_mmu_host.c
··· 189 189 { 190 190 struct kvm_book3e_206_tlb_entry *gtlbe = 191 191 get_entry(vcpu_e500, tlbsel, esel); 192 - struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; 192 + struct tlbe_priv *tlbe = &vcpu_e500->gtlb_priv[tlbsel][esel]; 193 193 194 194 /* Don't bother with unmapped entries */ 195 - if (!(ref->flags & E500_TLB_VALID)) { 196 - WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), 197 - "%s: flags %x\n", __func__, ref->flags); 195 + if (!(tlbe->flags & E500_TLB_VALID)) { 196 + WARN(tlbe->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), 197 + "%s: flags %x\n", __func__, tlbe->flags); 198 198 WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); 199 199 } 200 200 201 - if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { 201 + if (tlbsel == 1 && tlbe->flags & E500_TLB_BITMAP) { 202 202 u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; 203 203 int hw_tlb_indx; 204 204 unsigned long flags; ··· 216 216 } 217 217 mb(); 218 218 vcpu_e500->g2h_tlb1_map[esel] = 0; 219 - ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); 219 + tlbe->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID); 220 220 local_irq_restore(flags); 221 221 } 222 222 223 - if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) { 223 + if (tlbsel == 1 && tlbe->flags & E500_TLB_TLB0) { 224 224 /* 225 225 * TLB1 entry is backed by 4k pages. This should happen 226 226 * rarely and is not worth optimizing. Invalidate everything. 227 227 */ 228 228 kvmppc_e500_tlbil_all(vcpu_e500); 229 - ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); 229 + tlbe->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); 230 230 } 231 231 232 232 /* 233 233 * If TLB entry is still valid then it's a TLB0 entry, and thus 234 234 * backed by at most one host tlbe per shadow pid 235 235 */ 236 - if (ref->flags & E500_TLB_VALID) 236 + if (tlbe->flags & E500_TLB_VALID) 237 237 kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); 238 238 239 239 /* Mark the TLB as not backed by the host anymore */ 240 - ref->flags = 0; 240 + tlbe->flags = 0; 241 241 } 242 242 243 243 static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) ··· 245 245 return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); 246 246 } 247 247 248 - static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, 249 - struct kvm_book3e_206_tlb_entry *gtlbe, 250 - kvm_pfn_t pfn, unsigned int wimg, 251 - bool writable) 248 + static inline void kvmppc_e500_tlbe_setup(struct tlbe_priv *tlbe, 249 + struct kvm_book3e_206_tlb_entry *gtlbe, 250 + kvm_pfn_t pfn, unsigned int wimg, 251 + bool writable) 252 252 { 253 - ref->pfn = pfn; 254 - ref->flags = E500_TLB_VALID; 253 + tlbe->pfn = pfn; 254 + tlbe->flags = E500_TLB_VALID; 255 255 if (writable) 256 - ref->flags |= E500_TLB_WRITABLE; 256 + tlbe->flags |= E500_TLB_WRITABLE; 257 257 258 258 /* Use guest supplied MAS2_G and MAS2_E */ 259 - ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; 259 + tlbe->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; 260 260 } 261 261 262 - static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) 262 + static inline void kvmppc_e500_tlbe_release(struct tlbe_priv *tlbe) 263 263 { 264 - if (ref->flags & E500_TLB_VALID) { 264 + if (tlbe->flags & E500_TLB_VALID) { 265 265 /* FIXME: don't log bogus pfn for TLB1 */ 266 - trace_kvm_booke206_ref_release(ref->pfn, ref->flags); 267 - ref->flags = 0; 266 + trace_kvm_booke206_ref_release(tlbe->pfn, tlbe->flags); 267 + tlbe->flags = 0; 268 268 } 269 269 } 270 270 ··· 284 284 int i; 285 285 286 286 for (tlbsel = 0; tlbsel <= 1; tlbsel++) { 288 - struct tlbe_ref *ref = 289 - &vcpu_e500->gtlb_priv[tlbsel][i].ref; 290 - kvmppc_e500_ref_release(ref); 291 - } 287 + for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) 288 + kvmppc_e500_tlbe_release(&vcpu_e500->gtlb_priv[tlbsel][i]); 292 289 } 293 290 } ··· 301 304 static void kvmppc_e500_setup_stlbe( 302 305 struct kvm_vcpu *vcpu, 303 306 struct kvm_book3e_206_tlb_entry *gtlbe, 304 - int tsize, struct tlbe_ref *ref, u64 gvaddr, 307 + int tsize, struct tlbe_priv *tlbe, u64 gvaddr, 305 308 struct kvm_book3e_206_tlb_entry *stlbe) 306 309 { 307 - kvm_pfn_t pfn = ref->pfn; 310 + kvm_pfn_t pfn = tlbe->pfn; 308 311 u32 pr = vcpu->arch.shared->msr & MSR_PR; 309 - bool writable = !!(ref->flags & E500_TLB_WRITABLE); 312 + bool writable = !!(tlbe->flags & E500_TLB_WRITABLE); 310 313 311 - BUG_ON(!(ref->flags & E500_TLB_VALID)); 314 + BUG_ON(!(tlbe->flags & E500_TLB_VALID)); 312 315 313 316 /* Force IPROT=0 for all guest mappings. */ 314 317 stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; 315 - stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); 318 + stlbe->mas2 = (gvaddr & MAS2_EPN) | (tlbe->flags & E500_TLB_MAS2_ATTR); 316 319 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | 317 320 e500_shadow_mas3_attrib(gtlbe->mas7_3, writable, pr); 318 321 } ··· 320 323 static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, 321 324 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, 322 325 int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, 323 - struct tlbe_ref *ref) 326 + struct tlbe_priv *tlbe) 324 327 { 325 328 struct kvm_memory_slot *slot; 326 329 unsigned int psize; ··· 452 455 } 453 456 } 454 457 455 - kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg, writable); 458 + kvmppc_e500_tlbe_setup(tlbe, gtlbe, pfn, wimg, writable); 456 459 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, 457 - ref, gvaddr, stlbe); 460 + tlbe, gvaddr, stlbe); 458 461 writable = tlbe_is_writable(stlbe); 459 462 460 463 /* Clear i-cache for new pages */ ··· 471 474 struct kvm_book3e_206_tlb_entry *stlbe) 472 475 { 473 476 struct kvm_book3e_206_tlb_entry *gtlbe; 474 - struct tlbe_ref *ref; 477 + struct tlbe_priv *tlbe; 475 478 int stlbsel = 0; 476 479 int sesel = 0; 477 480 int r; 478 481 479 482 gtlbe = get_entry(vcpu_e500, 0, esel); 480 - ref = &vcpu_e500->gtlb_priv[0][esel].ref; 483 + tlbe = &vcpu_e500->gtlb_priv[0][esel]; 481 484 482 485 r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), 483 486 get_tlb_raddr(gtlbe) >> PAGE_SHIFT, 484 - gtlbe, 0, stlbe, ref); 487 + gtlbe, 0, stlbe, tlbe); 485 488 if (r) 486 489 return r; 487 490 ··· 491 494 } 492 495 493 496 static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, 494 - struct tlbe_ref *ref, 497 + struct tlbe_priv *tlbe, 495 498 int esel) 496 499 { 497 500 unsigned int sesel = vcpu_e500->host_tlb1_nv++; ··· 504 507 vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); 505 508 } 506 509 507 - vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; 510 + vcpu_e500->gtlb_priv[1][esel].flags |= E500_TLB_BITMAP; 508 511 vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; 509 512 vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; 510 - WARN_ON(!(ref->flags & E500_TLB_VALID)); 513 + WARN_ON(!(tlbe->flags & E500_TLB_VALID)); 511 514 512 515 return sesel; 513 516 } ··· 519 522 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, 520 523 struct kvm_book3e_206_tlb_entry *stlbe, int esel) 521 524 { 522 - struct tlbe_priv *tlbe = &vcpu_e500->gtlb_priv[1][esel]; 523 526 int sesel; 524 527 int r; 525 528 526 529 r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, 527 530 tlbe); 528 531 if (r) 529 532 return r; 530 533 531 534 /* Use TLB0 when we can only map a page with 4k */ 532 535 if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) { 533 536 vcpu_e500->gtlb_priv[1][esel].flags |= E500_TLB_TLB0; 534 537 write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0); 535 538 return 0; 536 539 } 537 540 538 541 /* Otherwise map into TLB1 */ 539 - sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); 542 + sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, tlbe, esel); 540 543 write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); 541 544 542 545 return 0; ··· 558 561 priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; 559 562 560 563 /* Triggers after clear_tlb_privs or on initial mapping */ 561 - if (!(priv->ref.flags & E500_TLB_VALID)) { 564 + if (!(priv->flags & E500_TLB_VALID)) { 562 565 kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); 563 566 } else { 564 567 kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, 565 - &priv->ref, eaddr, &stlbe); 568 + priv, eaddr, &stlbe); 566 569 write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0); 567 570 } 568 571 break;
+1
arch/powerpc/lib/copyuser_64.S
··· 562 562 li r5,4096 563 563 b .Ldst_aligned 564 564 EXPORT_SYMBOL(__copy_tofrom_user) 565 + EXPORT_SYMBOL(__copy_tofrom_user_base)
+15 -30
arch/powerpc/lib/copyuser_power7.S
··· 5 5 * 6 6 * Author: Anton Blanchard <anton@au.ibm.com> 7 7 */ 8 + #include <linux/export.h> 8 9 #include <asm/ppc_asm.h> 9 - 10 - #ifndef SELFTEST_CASE 11 - /* 0 == don't use VMX, 1 == use VMX */ 12 - #define SELFTEST_CASE 0 13 - #endif 14 10 15 11 #ifdef __BIG_ENDIAN__ 16 12 #define LVS(VRT,RA,RB) lvsl VRT,RA,RB ··· 43 47 ld r15,STK_REG(R15)(r1) 44 48 ld r14,STK_REG(R14)(r1) 45 49 .Ldo_err3: 46 - bl CFUNC(exit_vmx_usercopy) 50 + ld r6,STK_REG(R31)(r1) /* original destination pointer */ 51 + ld r5,STK_REG(R29)(r1) /* original number of bytes */ 52 + subf r7,r6,r3 /* #bytes copied */ 53 + subf r3,r7,r5 /* #bytes not copied in r3 */ 47 54 ld r0,STACKFRAMESIZE+16(r1) 48 55 mtlr r0 49 - b .Lexit 56 + addi r1,r1,STACKFRAMESIZE 57 + blr 50 58 #endif /* CONFIG_ALTIVEC */ 51 59 52 60 .Ldo_err2: ··· 74 74 75 75 _GLOBAL(__copy_tofrom_user_power7) 76 76 cmpldi r5,16 77 - cmpldi cr1,r5,3328 78 77 79 78 std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 80 79 std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) ··· 81 82 82 83 blt .Lshort_copy 83 84 84 - #ifdef CONFIG_ALTIVEC 85 - test_feature = SELFTEST_CASE 86 - BEGIN_FTR_SECTION 87 - bgt cr1,.Lvmx_copy 88 - END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 89 - #endif 90 85 91 86 .Lnonvmx_copy: 92 87 /* Get the source 8B aligned */ ··· 256 263 15: li r3,0 257 264 blr 258 265 259 - .Lunwind_stack_nonvmx_copy: 260 - addi r1,r1,STACKFRAMESIZE 261 - b .Lnonvmx_copy 262 - 263 - .Lvmx_copy: 264 266 #ifdef CONFIG_ALTIVEC 267 + _GLOBAL(__copy_tofrom_user_power7_vmx) 265 268 mflr r0 266 269 std r0,16(r1) 267 270 stdu r1,-STACKFRAMESIZE(r1) 268 - bl CFUNC(enter_vmx_usercopy) 269 - cmpwi cr1,r3,0 270 - ld r0,STACKFRAMESIZE+16(r1) 271 - ld r3,STK_REG(R31)(r1) 272 - ld r4,STK_REG(R30)(r1) 273 - ld r5,STK_REG(R29)(r1) 274 - mtlr r0 275 271 272 + std r3,STK_REG(R31)(r1) 273 + std r5,STK_REG(R29)(r1) 276 274 /* 277 275 * We prefetch both the source and destination using enhanced touch 278 276 * instructions. We use a stream ID of 0 for the load side and ··· 283 299 ori r10,r7,1 /* stream=1 */ 284 300 285 301 DCBT_SETUP_STREAMS(r6, r7, r9, r10, r8) 286 - 287 - beq cr1,.Lunwind_stack_nonvmx_copy 288 302 289 303 /* 290 304 * If source and destination are not relatively aligned we use a ··· 460 478 err3; stb r0,0(r3) 461 479 462 480 15: addi r1,r1,STACKFRAMESIZE 463 - b CFUNC(exit_vmx_usercopy) /* tail call optimise */ 481 + li r3,0 482 + blr 464 483 465 484 .Lvmx_unaligned_copy: 466 485 /* Get the destination 16B aligned */ ··· 664 681 err3; stb r0,0(r3) 665 682 666 683 15: addi r1,r1,STACKFRAMESIZE 667 - b CFUNC(exit_vmx_usercopy) /* tail call optimise */ 684 + li r3,0 685 + blr 686 + EXPORT_SYMBOL(__copy_tofrom_user_power7_vmx) 668 687 #endif /* CONFIG_ALTIVEC */
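Note on the reworked .Ldo_err3 path above: instead of tail-calling exit_vmx_usercopy(), it now derives the return value in place, using the saved destination pointer and byte count to leave the number of bytes *not* copied in r3. That is the contract the generic uaccess helpers expose, where zero means full success. A minimal caller-side sketch of that contract (illustrative only, not part of this patch):

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* Sketch: copy_from_user() returns the number of bytes NOT copied;
     * zero means the whole range was transferred. */
    static ssize_t read_from_user(void *dst, const void __user *src, size_t n)
    {
            size_t uncopied = copy_from_user(dst, src, n);

            return uncopied ? -EFAULT : (ssize_t)n;
    }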
+2
arch/powerpc/lib/vmx-helper.c
··· 27 27 28 28 return 1; 29 29 } 30 + EXPORT_SYMBOL(enter_vmx_usercopy); 30 31 31 32 /* 32 33 * This function must return 0 because we tail call optimise when calling ··· 50 49 set_dec(1); 51 50 return 0; 52 51 } 52 + EXPORT_SYMBOL(exit_vmx_usercopy); 53 53 54 54 int enter_vmx_ops(void) 55 55 {
+14
arch/powerpc/mm/mem.c
··· 30 30 #include <asm/setup.h> 31 31 #include <asm/fixmap.h> 32 32 33 + #include <asm/fadump.h> 34 + #include <asm/kexec.h> 35 + #include <asm/kvm_ppc.h> 36 + 33 37 #include <mm/mmu_decl.h> 34 38 35 39 unsigned long long memory_limit __initdata; ··· 272 268 273 269 void __init arch_mm_preinit(void) 274 270 { 271 + 272 + /* 273 + * Reserve large chunks of memory for use by CMA for kdump, fadump, KVM 274 + * and hugetlb. These must be called after pageblock_order is 275 + * initialised. 276 + */ 277 + fadump_cma_init(); 278 + kdump_cma_reserve(); 279 + kvm_cma_reserve(); 280 + 275 281 /* 276 282 * book3s is limited to 16 page sizes due to encoding this in 277 283 * a 4-bit field for slices.
-5
arch/powerpc/net/bpf_jit.h
··· 81 81 82 82 #ifdef CONFIG_PPC64 83 83 84 - /* for gpr non volatile registers BPG_REG_6 to 10 */ 85 - #define BPF_PPC_STACK_SAVE (6 * 8) 86 - 87 84 /* If dummy pass (!image), account for maximum possible instructions */ 88 85 #define PPC_LI64(d, i) do { \ 89 86 if (!image) \ ··· 216 219 int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass, 217 220 struct codegen_context *ctx, int insn_idx, 218 221 int jmp_off, int dst_reg, u32 code); 219 - 220 - int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx); 221 222 #endif 222 223 223 224 #endif
+56 -71
arch/powerpc/net/bpf_jit_comp.c
··· 450 450 451 451 bool bpf_jit_supports_kfunc_call(void) 452 452 { 453 - return true; 453 + return IS_ENABLED(CONFIG_PPC64); 454 454 } 455 455 456 456 bool bpf_jit_supports_arena(void) ··· 638 638 * for the traced function (BPF subprog/callee) to fetch it. 639 639 */ 640 640 static void bpf_trampoline_setup_tail_call_info(u32 *image, struct codegen_context *ctx, 641 - int func_frame_offset, 642 - int bpf_dummy_frame_size, int r4_off) 641 + int bpf_frame_size, int r4_off) 643 642 { 644 643 if (IS_ENABLED(CONFIG_PPC64)) { 645 - /* See Generated stack layout */ 646 - int tailcallinfo_offset = BPF_PPC_TAILCALL; 647 - 648 - /* 649 - * func_frame_offset = ...(1) 650 - * bpf_dummy_frame_size + trampoline_frame_size 651 - */ 652 - EMIT(PPC_RAW_LD(_R4, _R1, func_frame_offset)); 653 - EMIT(PPC_RAW_LD(_R3, _R4, -tailcallinfo_offset)); 644 + EMIT(PPC_RAW_LD(_R4, _R1, bpf_frame_size)); 645 + /* Refer to trampoline's Generated stack layout */ 646 + EMIT(PPC_RAW_LD(_R3, _R4, -BPF_PPC_TAILCALL)); 654 647 655 648 /* 656 649 * Setting the tail_call_info in trampoline's frame ··· 651 658 */ 652 659 EMIT(PPC_RAW_CMPLWI(_R3, MAX_TAIL_CALL_CNT)); 653 660 PPC_BCC_CONST_SHORT(COND_GT, 8); 654 - EMIT(PPC_RAW_ADDI(_R3, _R4, bpf_jit_stack_tailcallinfo_offset(ctx))); 661 + EMIT(PPC_RAW_ADDI(_R3, _R4, -BPF_PPC_TAILCALL)); 662 + 655 663 /* 656 - * From ...(1) above: 657 - * trampoline_frame_bottom = ...(2) 658 - * func_frame_offset - bpf_dummy_frame_size 659 - * 660 - * Using ...(2) derived above: 661 - * trampoline_tail_call_info_offset = ...(3) 662 - * trampoline_frame_bottom - tailcallinfo_offset 663 - * 664 - * From ...(3): 665 - * Use trampoline_tail_call_info_offset to write reference of main's 666 - * tail_call_info in trampoline frame. 664 + * Trampoline's tail_call_info is at the same offset, as that of 665 + * any bpf program, with reference to previous frame. Update the 666 + * address of main's tail_call_info in trampoline frame. 667 667 */ 668 - EMIT(PPC_RAW_STL(_R3, _R1, (func_frame_offset - bpf_dummy_frame_size) 669 - - tailcallinfo_offset)); 668 + EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size - BPF_PPC_TAILCALL)); 670 669 } else { 671 670 /* See bpf_jit_stack_offsetof() and BPF_PPC_TC */ 672 671 EMIT(PPC_RAW_LL(_R4, _R1, r4_off)); ··· 666 681 } 667 682 668 683 static void bpf_trampoline_restore_tail_call_cnt(u32 *image, struct codegen_context *ctx, 669 - int func_frame_offset, int r4_off) 684 + int bpf_frame_size, int r4_off) 670 685 { 671 686 if (IS_ENABLED(CONFIG_PPC32)) { 672 687 /* ··· 677 692 } 678 693 } 679 694 680 - static void bpf_trampoline_save_args(u32 *image, struct codegen_context *ctx, int func_frame_offset, 681 - int nr_regs, int regs_off) 695 + static void bpf_trampoline_save_args(u32 *image, struct codegen_context *ctx, 696 + int bpf_frame_size, int nr_regs, int regs_off) 682 697 { 683 698 int param_save_area_offset; 684 699 685 - param_save_area_offset = func_frame_offset; /* the two frames we alloted */ 700 + param_save_area_offset = bpf_frame_size; 686 701 param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */ 687 702 688 703 for (int i = 0; i < nr_regs; i++) { ··· 705 720 706 721 /* Used when we call into the traced function. 
Replicate parameter save area */ 707 722 static void bpf_trampoline_restore_args_stack(u32 *image, struct codegen_context *ctx, 708 - int func_frame_offset, int nr_regs, int regs_off) 723 + int bpf_frame_size, int nr_regs, int regs_off) 709 724 { 710 725 int param_save_area_offset; 711 726 712 - param_save_area_offset = func_frame_offset; /* the two frames we alloted */ 727 + param_save_area_offset = bpf_frame_size; 713 728 param_save_area_offset += STACK_FRAME_MIN_SIZE; /* param save area is past frame header */ 714 729 715 730 for (int i = 8; i < nr_regs; i++) { ··· 726 741 void *func_addr) 727 742 { 728 743 int regs_off, nregs_off, ip_off, run_ctx_off, retval_off, nvr_off, alt_lr_off, r4_off = 0; 729 - int i, ret, nr_regs, bpf_frame_size = 0, bpf_dummy_frame_size = 0, func_frame_offset; 730 744 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; 731 745 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; 732 746 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; 747 + int i, ret, nr_regs, retaddr_off, bpf_frame_size = 0; 733 748 struct codegen_context codegen_ctx, *ctx; 734 749 u32 *image = (u32 *)rw_image; 735 750 ppc_inst_t branch_insn; ··· 755 770 * Generated stack layout: 756 771 * 757 772 * func prev back chain [ back chain ] 758 - * [ ] 759 - * bpf prog redzone/tailcallcnt [ ... ] 64 bytes (64-bit powerpc) 760 - * [ ] -- 761 - * LR save area [ r0 save (64-bit) ] | header 762 - * [ r0 save (32-bit) ] | 763 - * dummy frame for unwind [ back chain 1 ] -- 764 773 * [ tail_call_info ] optional - 64-bit powerpc 765 774 * [ padding ] align stack frame 766 775 * r4_off [ r4 (tailcallcnt) ] optional - 32-bit powerpc 767 776 * alt_lr_off [ real lr (ool stub)] optional - actual lr 777 + * retaddr_off [ return address ] 768 778 * [ r26 ] 769 779 * nvr_off [ r25 ] nvr save area 770 780 * retval_off [ return value ] 771 781 * [ reg argN ] 772 782 * [ ... ] 773 - * regs_off [ reg_arg1 ] prog ctx context 774 - * nregs_off [ args count ] 775 - * ip_off [ traced function ] 783 + * regs_off [ reg_arg1 ] prog_ctx 784 + * nregs_off [ args count ] ((u64 *)prog_ctx)[-1] 785 + * ip_off [ traced function ] ((u64 *)prog_ctx)[-2] 776 786 * [ ... 
] 777 787 * run_ctx_off [ bpf_tramp_run_ctx ] 778 788 * [ reg argN ] ··· 823 843 nvr_off = bpf_frame_size; 824 844 bpf_frame_size += 2 * SZL; 825 845 846 + /* Save area for return address */ 847 + retaddr_off = bpf_frame_size; 848 + bpf_frame_size += SZL; 849 + 826 850 /* Optional save area for actual LR in case of ool ftrace */ 827 851 if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) { 828 852 alt_lr_off = bpf_frame_size; ··· 853 869 /* Padding to align stack frame, if any */ 854 870 bpf_frame_size = round_up(bpf_frame_size, SZL * 2); 855 871 856 - /* Dummy frame size for proper unwind - includes 64-bytes red zone for 64-bit powerpc */ 857 - bpf_dummy_frame_size = STACK_FRAME_MIN_SIZE + 64; 858 - 859 - /* Offset to the traced function's stack frame */ 860 - func_frame_offset = bpf_dummy_frame_size + bpf_frame_size; 861 - 862 - /* Create dummy frame for unwind, store original return value */ 872 + /* Store original return value */ 863 873 EMIT(PPC_RAW_STL(_R0, _R1, PPC_LR_STKOFF)); 864 - /* Protect red zone where tail call count goes */ 865 - EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_dummy_frame_size)); 866 874 867 875 /* Create our stack frame */ 868 876 EMIT(PPC_RAW_STLU(_R1, _R1, -bpf_frame_size)); ··· 869 893 if (IS_ENABLED(CONFIG_PPC32) && nr_regs < 2) 870 894 EMIT(PPC_RAW_STL(_R4, _R1, r4_off)); 871 895 872 - bpf_trampoline_save_args(image, ctx, func_frame_offset, nr_regs, regs_off); 896 + bpf_trampoline_save_args(image, ctx, bpf_frame_size, nr_regs, regs_off); 873 897 874 - /* Save our return address */ 898 + /* Save our LR/return address */ 875 899 EMIT(PPC_RAW_MFLR(_R3)); 876 900 if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) 877 901 EMIT(PPC_RAW_STL(_R3, _R1, alt_lr_off)); 878 902 else 879 - EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF)); 903 + EMIT(PPC_RAW_STL(_R3, _R1, retaddr_off)); 880 904 881 905 /* 882 - * Save ip address of the traced function. 883 - * We could recover this from LR, but we will need to address for OOL trampoline, 884 - * and optional GEP area. 906 + * Derive IP address of the traced function. 907 + * In case of CONFIG_PPC_FTRACE_OUT_OF_LINE or BPF program, LR points to the instruction 908 + * after the 'bl' instruction in the OOL stub. Refer to ftrace_init_ool_stub() and 909 + * bpf_arch_text_poke() for OOL stub of kernel functions and bpf programs respectively. 910 + * Relevant stub sequence: 911 + * 912 + * bl <tramp> 913 + * LR (R3) => mtlr r0 914 + * b <func_addr+4> 915 + * 916 + * Recover kernel function/bpf program address from the unconditional 917 + * branch instruction at the end of OOL stub. 
885 918 */ 886 919 if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE) || flags & BPF_TRAMP_F_IP_ARG) { 887 920 EMIT(PPC_RAW_LWZ(_R4, _R3, 4)); 888 921 EMIT(PPC_RAW_SLWI(_R4, _R4, 6)); 889 922 EMIT(PPC_RAW_SRAWI(_R4, _R4, 6)); 890 923 EMIT(PPC_RAW_ADD(_R3, _R3, _R4)); 891 - EMIT(PPC_RAW_ADDI(_R3, _R3, 4)); 892 924 } 893 925 894 926 if (flags & BPF_TRAMP_F_IP_ARG) 895 927 EMIT(PPC_RAW_STL(_R3, _R1, ip_off)); 896 928 897 - if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) 898 - /* Fake our LR for unwind */ 899 - EMIT(PPC_RAW_STL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF)); 929 + if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) { 930 + /* Fake our LR for BPF_TRAMP_F_CALL_ORIG case */ 931 + EMIT(PPC_RAW_ADDI(_R3, _R3, 4)); 932 + EMIT(PPC_RAW_STL(_R3, _R1, retaddr_off)); 933 + } 900 934 901 935 /* Save function arg count -- see bpf_get_func_arg_cnt() */ 902 936 EMIT(PPC_RAW_LI(_R3, nr_regs)); ··· 944 958 /* Call the traced function */ 945 959 if (flags & BPF_TRAMP_F_CALL_ORIG) { 946 960 /* 947 - * The address in LR save area points to the correct point in the original function 961 + * retaddr on trampoline stack points to the correct point in the original function 948 962 * with both PPC_FTRACE_OUT_OF_LINE as well as with traditional ftrace instruction 949 963 * sequence 950 964 */ 951 - EMIT(PPC_RAW_LL(_R3, _R1, bpf_frame_size + PPC_LR_STKOFF)); 965 + EMIT(PPC_RAW_LL(_R3, _R1, retaddr_off)); 952 966 EMIT(PPC_RAW_MTCTR(_R3)); 953 967 954 968 /* Replicate tail_call_cnt before calling the original BPF prog */ 955 969 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) 956 - bpf_trampoline_setup_tail_call_info(image, ctx, func_frame_offset, 957 - bpf_dummy_frame_size, r4_off); 970 + bpf_trampoline_setup_tail_call_info(image, ctx, bpf_frame_size, r4_off); 958 971 959 972 /* Restore args */ 960 - bpf_trampoline_restore_args_stack(image, ctx, func_frame_offset, nr_regs, regs_off); 973 + bpf_trampoline_restore_args_stack(image, ctx, bpf_frame_size, nr_regs, regs_off); 961 974 962 975 /* Restore TOC for 64-bit */ 963 976 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2) && !IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) ··· 970 985 971 986 /* Restore updated tail_call_cnt */ 972 987 if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) 973 - bpf_trampoline_restore_tail_call_cnt(image, ctx, func_frame_offset, r4_off); 988 + bpf_trampoline_restore_tail_call_cnt(image, ctx, bpf_frame_size, r4_off); 974 989 975 990 /* Reserve space to patch branch instruction to skip fexit progs */ 976 991 if (ro_image) /* image is NULL for dummy pass */ ··· 1022 1037 EMIT(PPC_RAW_LD(_R2, _R1, 24)); 1023 1038 if (flags & BPF_TRAMP_F_SKIP_FRAME) { 1024 1039 /* Skip the traced function and return to parent */ 1025 - EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset)); 1040 + EMIT(PPC_RAW_ADDI(_R1, _R1, bpf_frame_size)); 1026 1041 EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF)); 1027 1042 EMIT(PPC_RAW_MTLR(_R0)); 1028 1043 EMIT(PPC_RAW_BLR()); ··· 1030 1045 if (IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)) { 1031 1046 EMIT(PPC_RAW_LL(_R0, _R1, alt_lr_off)); 1032 1047 EMIT(PPC_RAW_MTLR(_R0)); 1033 - EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset)); 1048 + EMIT(PPC_RAW_ADDI(_R1, _R1, bpf_frame_size)); 1034 1049 EMIT(PPC_RAW_LL(_R0, _R1, PPC_LR_STKOFF)); 1035 1050 EMIT(PPC_RAW_BLR()); 1036 1051 } else { 1037 - EMIT(PPC_RAW_LL(_R0, _R1, bpf_frame_size + PPC_LR_STKOFF)); 1052 + EMIT(PPC_RAW_LL(_R0, _R1, retaddr_off)); 1038 1053 EMIT(PPC_RAW_MTCTR(_R0)); 1039 - EMIT(PPC_RAW_ADDI(_R1, _R1, func_frame_offset)); 1054 + EMIT(PPC_RAW_ADDI(_R1, _R1, bpf_frame_size)); 1040 1055 EMIT(PPC_RAW_LL(_R0, _R1, 
PPC_LR_STKOFF)); 1041 1056 EMIT(PPC_RAW_MTLR(_R0)); 1042 1057 EMIT(PPC_RAW_BCTR());
+143 -38
arch/powerpc/net/bpf_jit_comp64.c
··· 32 32 * 33 33 * [ prev sp ] <------------- 34 34 * [ tail_call_info ] 8 | 35 - * [ nv gpr save area ] 6*8 + (12*8) | 35 + * [ nv gpr save area ] (6 * 8) | 36 + * [ addl. nv gpr save area] (12 * 8) | <--- exception boundary/callback program 36 37 * [ local_tmp_var ] 24 | 37 38 * fp (r31) --> [ ebpf stack space ] upto 512 | 38 39 * [ frame header ] 32/112 | 39 40 * sp (r1) ---> [ stack pointer ] -------------- 40 41 * 41 - * Additional (12*8) in 'nv gpr save area' only in case of 42 - * exception boundary. 42 + * Additional (12 * 8) in 'nv gpr save area' only in case of 43 + * exception boundary/callback. 43 44 */ 45 + 46 + /* BPF non-volatile registers save area size */ 47 + #define BPF_PPC_STACK_SAVE (6 * 8) 44 48 45 49 /* for bpf JIT code internal usage */ 46 50 #define BPF_PPC_STACK_LOCALS 24 ··· 52 48 * for additional non volatile registers(r14-r25) to be saved 53 49 * at exception boundary 54 50 */ 55 - #define BPF_PPC_EXC_STACK_SAVE (12*8) 51 + #define BPF_PPC_EXC_STACK_SAVE (12 * 8) 56 52 57 53 /* stack frame excluding BPF stack, ensure this is quadword aligned */ 58 54 #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \ ··· 129 125 * [ ... ] | 130 126 * sp (r1) ---> [ stack pointer ] -------------- 131 127 * [ tail_call_info ] 8 132 - * [ nv gpr save area ] 6*8 + (12*8) 128 + * [ nv gpr save area ] (6 * 8) 129 + * [ addl. nv gpr save area] (12 * 8) <--- exception boundary/callback program 133 130 * [ local_tmp_var ] 24 134 131 * [ unused red zone ] 224 135 132 * 136 - * Additional (12*8) in 'nv gpr save area' only in case of 137 - * exception boundary. 133 + * Additional (12 * 8) in 'nv gpr save area' only in case of 134 + * exception boundary/callback. 138 135 */ 139 136 static int bpf_jit_stack_local(struct codegen_context *ctx) 140 137 { ··· 153 148 } 154 149 } 155 150 156 - int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx) 151 + static int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx) 157 152 { 158 153 return bpf_jit_stack_local(ctx) + BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE; 159 154 } ··· 242 237 243 238 if (bpf_has_stack_frame(ctx) && !ctx->exception_cb) { 244 239 /* 245 - * exception_cb uses boundary frame after stack walk. 246 - * It can simply use redzone, this optimization reduces 247 - * stack walk loop by one level. 248 - * 249 240 * We need a stack frame, but we don't necessarily need to 250 241 * save/restore LR unless we call other functions 251 242 */ ··· 285 284 * program(main prog) as third arg 286 285 */ 287 286 EMIT(PPC_RAW_MR(_R1, _R5)); 287 + /* 288 + * Exception callback reuses the stack frame of exception boundary. 289 + * But BPF stack depth of exception callback and exception boundary 290 + * don't have to be same. If BPF stack depth is different, adjust the 291 + * stack frame size considering BPF stack depth of exception callback. 292 + * The non-volatile register save area remains unchanged. These non- 293 + * volatile registers are restored in exception callback's epilogue. 
294 + */ 295 + EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R5, 0)); 296 + EMIT(PPC_RAW_SUB(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_1), _R1)); 297 + EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), 298 + -BPF_PPC_EXC_STACKFRAME)); 299 + EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_2), ctx->stack_size)); 300 + PPC_BCC_CONST_SHORT(COND_EQ, 12); 301 + EMIT(PPC_RAW_MR(_R1, bpf_to_ppc(TMP_REG_1))); 302 + EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_EXC_STACKFRAME + ctx->stack_size))); 288 303 } 289 304 290 305 /* ··· 499 482 return 0; 500 483 } 501 484 485 + static int zero_extend(u32 *image, struct codegen_context *ctx, u32 src_reg, u32 dst_reg, u32 size) 486 + { 487 + switch (size) { 488 + case 1: 489 + /* zero-extend 8 bits into 64 bits */ 490 + EMIT(PPC_RAW_RLDICL(dst_reg, src_reg, 0, 56)); 491 + return 0; 492 + case 2: 493 + /* zero-extend 16 bits into 64 bits */ 494 + EMIT(PPC_RAW_RLDICL(dst_reg, src_reg, 0, 48)); 495 + return 0; 496 + case 4: 497 + /* zero-extend 32 bits into 64 bits */ 498 + EMIT(PPC_RAW_RLDICL(dst_reg, src_reg, 0, 32)); 499 + fallthrough; 500 + case 8: 501 + /* Nothing to do */ 502 + return 0; 503 + default: 504 + return -1; 505 + } 506 + } 507 + 508 + static int sign_extend(u32 *image, struct codegen_context *ctx, u32 src_reg, u32 dst_reg, u32 size) 509 + { 510 + switch (size) { 511 + case 1: 512 + /* sign-extend 8 bits into 64 bits */ 513 + EMIT(PPC_RAW_EXTSB(dst_reg, src_reg)); 514 + return 0; 515 + case 2: 516 + /* sign-extend 16 bits into 64 bits */ 517 + EMIT(PPC_RAW_EXTSH(dst_reg, src_reg)); 518 + return 0; 519 + case 4: 520 + /* sign-extend 32 bits into 64 bits */ 521 + EMIT(PPC_RAW_EXTSW(dst_reg, src_reg)); 522 + fallthrough; 523 + case 8: 524 + /* Nothing to do */ 525 + return 0; 526 + default: 527 + return -1; 528 + } 529 + } 530 + 531 + /* 532 + * Handle powerpc ABI expectations from caller: 533 + * - Unsigned arguments are zero-extended. 534 + * - Signed arguments are sign-extended. 535 + */ 536 + static int prepare_for_kfunc_call(const struct bpf_prog *fp, u32 *image, 537 + struct codegen_context *ctx, 538 + const struct bpf_insn *insn) 539 + { 540 + const struct btf_func_model *m = bpf_jit_find_kfunc_model(fp, insn); 541 + int i; 542 + 543 + if (!m) 544 + return -1; 545 + 546 + for (i = 0; i < m->nr_args; i++) { 547 + /* Note that BPF ABI only allows up to 5 args for kfuncs */ 548 + u32 reg = bpf_to_ppc(BPF_REG_1 + i), size = m->arg_size[i]; 549 + 550 + if (!(m->arg_flags[i] & BTF_FMODEL_SIGNED_ARG)) { 551 + if (zero_extend(image, ctx, reg, reg, size)) 552 + return -1; 553 + } else { 554 + if (sign_extend(image, ctx, reg, reg, size)) 555 + return -1; 556 + } 557 + } 558 + 559 + return 0; 560 + } 561 + 502 562 static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out) 503 563 { 504 564 /* ··· 616 522 617 523 /* 618 524 * tail_call_info++; <- Actual value of tcc here 525 + * Writeback this updated value only if tailcall succeeds. 
619 526 */ 620 527 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1)); 528 + 529 + /* prog = array->ptrs[index]; */ 530 + EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_2), b2p_index, 8)); 531 + EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), b2p_bpf_array)); 532 + EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), 533 + offsetof(struct bpf_array, ptrs))); 534 + 535 + /* 536 + * if (prog == NULL) 537 + * goto out; 538 + */ 539 + EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_2), 0)); 540 + PPC_BCC_SHORT(COND_EQ, out); 541 + 542 + /* goto *(prog->bpf_func + prologue_size); */ 543 + EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), 544 + offsetof(struct bpf_prog, bpf_func))); 545 + EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), bpf_to_ppc(TMP_REG_2), 546 + FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size)); 547 + EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_2))); 621 548 622 549 /* 623 550 * Before writing updated tail_call_info, distinguish if current frame ··· 653 538 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_2), _R1, bpf_jit_stack_tailcallinfo_offset(ctx))); 654 539 /* Writeback updated value to tail_call_info */ 655 540 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0)); 656 - 657 - /* prog = array->ptrs[index]; */ 658 - EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8)); 659 - EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array)); 660 - EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs))); 661 - 662 - /* 663 - * if (prog == NULL) 664 - * goto out; 665 - */ 666 - EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0)); 667 - PPC_BCC_SHORT(COND_EQ, out); 668 - 669 - /* goto *(prog->bpf_func + prologue_size); */ 670 - EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func))); 671 - EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 672 - FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size)); 673 - EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1))); 674 541 675 542 /* tear down stack, restore NVRs, ... */ 676 543 bpf_jit_emit_common_epilogue(image, ctx); ··· 1220 1123 /* special mov32 for zext */ 1221 1124 EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31)); 1222 1125 break; 1223 - } else if (off == 8) { 1224 - EMIT(PPC_RAW_EXTSB(dst_reg, src_reg)); 1225 - } else if (off == 16) { 1226 - EMIT(PPC_RAW_EXTSH(dst_reg, src_reg)); 1227 - } else if (off == 32) { 1228 - EMIT(PPC_RAW_EXTSW(dst_reg, src_reg)); 1229 - } else if (dst_reg != src_reg) 1230 - EMIT(PPC_RAW_MR(dst_reg, src_reg)); 1126 + } 1127 + if (off == 0) { 1128 + /* MOV */ 1129 + if (dst_reg != src_reg) 1130 + EMIT(PPC_RAW_MR(dst_reg, src_reg)); 1131 + } else { 1132 + /* MOVSX: dst = (s8,s16,s32)src (off = 8,16,32) */ 1133 + if (sign_extend(image, ctx, src_reg, dst_reg, off / 8)) 1134 + return -1; 1135 + } 1231 1136 goto bpf_alu32_trunc; 1232 1137 case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */ 1233 1138 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */ ··· 1696 1597 &func_addr, &func_addr_fixed); 1697 1598 if (ret < 0) 1698 1599 return ret; 1600 + 1601 + /* Take care of powerpc ABI requirements before kfunc call */ 1602 + if (insn[i].src_reg == BPF_PSEUDO_KFUNC_CALL) { 1603 + if (prepare_for_kfunc_call(fp, image, ctx, &insn[i])) 1604 + return -1; 1605 + } 1699 1606 1700 1607 ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr); 1701 1608 if (ret)
+5
arch/powerpc/perf/callchain.c
··· 103 103 void 104 104 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 105 105 { 106 + perf_callchain_store(entry, perf_arch_instruction_pointer(regs)); 107 + 108 + if (!current->mm) 109 + return; 110 + 106 111 if (!is_32bit_task()) 107 112 perf_callchain_user_64(entry, regs); 108 113 else
-1
arch/powerpc/perf/callchain_32.c
··· 142 142 next_ip = perf_arch_instruction_pointer(regs); 143 143 lr = regs->link; 144 144 sp = regs->gpr[1]; 145 - perf_callchain_store(entry, next_ip); 146 145 147 146 while (entry->nr < entry->max_stack) { 148 147 fp = (unsigned int __user *) (unsigned long) sp;
-1
arch/powerpc/perf/callchain_64.c
··· 77 77 next_ip = perf_arch_instruction_pointer(regs); 78 78 lr = regs->link; 79 79 sp = regs->gpr[1]; 80 - perf_callchain_store(entry, next_ip); 81 80 82 81 while (entry->nr < entry->max_stack) { 83 82 fp = (unsigned long __user *) sp;
+2 -2
arch/powerpc/platforms/83xx/km83xx.c
··· 155 155 156 156 /* list of the supported boards */ 157 157 static char *board[] __initdata = { 158 - "Keymile,KMETER1", 159 - "Keymile,kmpbec8321", 158 + "keymile,KMETER1", 159 + "keymile,kmpbec8321", 160 160 NULL 161 161 }; 162 162
+2 -2
arch/powerpc/platforms/Kconfig.cputype
··· 276 276 config PPC_E500 277 277 select FSL_EMB_PERFMON 278 278 bool 279 - select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 279 + select ARCH_SUPPORTS_HUGETLBFS 280 280 select PPC_SMP_MUXED_IPI 281 281 select PPC_DOORBELL 282 282 select PPC_KUEP ··· 337 337 config PTE_64BIT 338 338 bool 339 339 depends on 44x || PPC_E500 || PPC_86xx 340 - default y if PHYS_64BIT 340 + default y if PPC_E500 || PHYS_64BIT 341 341 342 342 config PHYS_64BIT 343 343 bool 'Large physical address support' if PPC_E500 || PPC_86xx
+1 -1
arch/powerpc/platforms/pseries/msi.c
··· 605 605 &pseries_msi_irq_chip, pseries_dev); 606 606 } 607 607 608 - pseries_dev->msi_used++; 608 + pseries_dev->msi_used += nr_irqs; 609 609 return 0; 610 610 611 611 out:
+2 -2
arch/powerpc/tools/ftrace-gen-ool-stubs.sh
··· 15 15 RELOCATION=R_PPC_ADDR32 16 16 fi 17 17 18 - num_ool_stubs_total=$($objdump -r -j __patchable_function_entries "$vmlinux_o" | 18 + num_ool_stubs_total=$($objdump -r -j __patchable_function_entries -d "$vmlinux_o" | 19 19 grep -c "$RELOCATION") 20 - num_ool_stubs_inittext=$($objdump -r -j __patchable_function_entries "$vmlinux_o" | 20 + num_ool_stubs_inittext=$($objdump -r -j __patchable_function_entries -d "$vmlinux_o" | 21 21 grep -e ".init.text" -e ".text.startup" | grep -c "$RELOCATION") 22 22 num_ool_stubs_text=$((num_ool_stubs_total - num_ool_stubs_inittext)) 23 23
arch/powerpc/tools/gcc-check-fpatchable-function-entry.sh → arch/powerpc/tools/check-fpatchable-function-entry.sh
+2
arch/riscv/boot/dts/microchip/mpfs.dtsi
··· 428 428 clocks = <&clkcfg CLK_CAN0>, <&clkcfg CLK_MSSPLL3>; 429 429 interrupt-parent = <&plic>; 430 430 interrupts = <56>; 431 + resets = <&mss_top_sysreg CLK_CAN0>; 431 432 status = "disabled"; 432 433 }; 433 434 ··· 438 437 clocks = <&clkcfg CLK_CAN1>, <&clkcfg CLK_MSSPLL3>; 439 438 interrupt-parent = <&plic>; 440 439 interrupts = <57>; 440 + resets = <&mss_top_sysreg CLK_CAN1>; 441 441 status = "disabled"; 442 442 }; 443 443
+13 -2
arch/riscv/kvm/aia.c
··· 13 13 #include <linux/irqchip/riscv-imsic.h> 14 14 #include <linux/irqdomain.h> 15 15 #include <linux/kvm_host.h> 16 + #include <linux/nospec.h> 16 17 #include <linux/percpu.h> 17 18 #include <linux/spinlock.h> 18 19 #include <asm/cpufeature.h> ··· 183 182 unsigned long *out_val) 184 183 { 185 184 struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; 185 + unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long); 186 186 187 - if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long)) 187 + if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) 188 188 return -ENOENT; 189 + if (reg_num >= regs_max) 190 + return -ENOENT; 191 + 192 + reg_num = array_index_nospec(reg_num, regs_max); 189 193 190 194 *out_val = 0; 191 195 if (kvm_riscv_aia_available()) ··· 204 198 unsigned long val) 205 199 { 206 200 struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr; 201 + unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long); 207 202 208 - if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long)) 203 + if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) 209 204 return -ENOENT; 205 + if (reg_num >= regs_max) 206 + return -ENOENT; 207 + 208 + reg_num = array_index_nospec(reg_num, regs_max); 210 209 211 210 if (kvm_riscv_aia_available()) { 212 211 ((unsigned long *)csr)[reg_num] = val;
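The hardening pattern applied above recurs throughout the riscv/kvm hunks that follow (aia_aplic.c, vcpu_fp.c, vcpu_onereg.c, vcpu_pmu.c): bounds-check a guest-controlled index first, then clamp it with array_index_nospec() so the CPU cannot consume an out-of-bounds value speculatively between the check and the array access. A minimal sketch, assuming a fixed-size register array:

    #include <linux/errno.h>
    #include <linux/nospec.h>

    static int get_reg(const unsigned long *regs, unsigned long nregs,
                       unsigned long idx, unsigned long *out)
    {
            if (idx >= nregs)
                    return -ENOENT;
            /* After the architectural bounds check, sanitize idx so a
             * mispredicted branch cannot index past the array. */
            idx = array_index_nospec(idx, nregs);
            *out = regs[idx];
            return 0;
    }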
+12 -11
arch/riscv/kvm/aia_aplic.c
··· 10 10 #include <linux/irqchip/riscv-aplic.h> 11 11 #include <linux/kvm_host.h> 12 12 #include <linux/math.h> 13 + #include <linux/nospec.h> 13 14 #include <linux/spinlock.h> 14 15 #include <linux/swab.h> 15 16 #include <kvm/iodev.h> ··· 46 45 47 46 if (!irq || aplic->nr_irqs <= irq) 48 47 return 0; 49 - irqd = &aplic->irqs[irq]; 48 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 50 49 51 50 raw_spin_lock_irqsave(&irqd->lock, flags); 52 51 ret = irqd->sourcecfg; ··· 62 61 63 62 if (!irq || aplic->nr_irqs <= irq) 64 63 return; 65 - irqd = &aplic->irqs[irq]; 64 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 66 65 67 66 if (val & APLIC_SOURCECFG_D) 68 67 val = 0; ··· 82 81 83 82 if (!irq || aplic->nr_irqs <= irq) 84 83 return 0; 85 - irqd = &aplic->irqs[irq]; 84 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 86 85 87 86 raw_spin_lock_irqsave(&irqd->lock, flags); 88 87 ret = irqd->target; ··· 98 97 99 98 if (!irq || aplic->nr_irqs <= irq) 100 99 return; 101 - irqd = &aplic->irqs[irq]; 100 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 102 101 103 102 val &= APLIC_TARGET_EIID_MASK | 104 103 (APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) | ··· 117 116 118 117 if (!irq || aplic->nr_irqs <= irq) 119 118 return false; 120 - irqd = &aplic->irqs[irq]; 119 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 121 120 122 121 raw_spin_lock_irqsave(&irqd->lock, flags); 123 122 ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false; ··· 133 132 134 133 if (!irq || aplic->nr_irqs <= irq) 135 134 return; 136 - irqd = &aplic->irqs[irq]; 135 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 137 136 138 137 raw_spin_lock_irqsave(&irqd->lock, flags); 139 138 ··· 171 170 172 171 if (!irq || aplic->nr_irqs <= irq) 173 172 return false; 174 - irqd = &aplic->irqs[irq]; 173 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 175 174 176 175 raw_spin_lock_irqsave(&irqd->lock, flags); 177 176 ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false; ··· 187 186 188 187 if (!irq || aplic->nr_irqs <= irq) 189 188 return; 190 - irqd = &aplic->irqs[irq]; 189 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 191 190 192 191 raw_spin_lock_irqsave(&irqd->lock, flags); 193 192 if (enabled) ··· 206 205 207 206 if (!irq || aplic->nr_irqs <= irq) 208 207 return false; 209 - irqd = &aplic->irqs[irq]; 208 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 210 209 211 210 raw_spin_lock_irqsave(&irqd->lock, flags); 212 211 ··· 255 254 for (irq = first; irq <= last; irq++) { 256 255 if (!irq || aplic->nr_irqs <= irq) 257 256 continue; 258 - irqd = &aplic->irqs[irq]; 257 + irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)]; 259 258 260 259 raw_spin_lock_irqsave(&irqd->lock, flags); 261 260 ··· 284 283 285 284 if (!aplic || !source || (aplic->nr_irqs <= source)) 286 285 return -ENODEV; 287 - irqd = &aplic->irqs[source]; 286 + irqd = &aplic->irqs[array_index_nospec(source, aplic->nr_irqs)]; 288 287 ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false; 289 288 290 289 raw_spin_lock_irqsave(&irqd->lock, flags);
+14 -4
arch/riscv/kvm/aia_device.c
··· 11 11 #include <linux/irqchip/riscv-imsic.h> 12 12 #include <linux/kvm_host.h> 13 13 #include <linux/uaccess.h> 14 + #include <linux/cpufeature.h> 14 15 15 16 static int aia_create(struct kvm_device *dev, u32 type) 16 17 { ··· 22 21 23 22 if (irqchip_in_kernel(kvm)) 24 23 return -EEXIST; 24 + 25 + if (!riscv_isa_extension_available(NULL, SSAIA)) 26 + return -ENODEV; 25 27 26 28 ret = -EBUSY; 27 29 if (kvm_trylock_all_vcpus(kvm)) ··· 441 437 442 438 static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 443 439 { 444 - int nr_vcpus; 440 + int nr_vcpus, r = -ENXIO; 445 441 446 442 switch (attr->group) { 447 443 case KVM_DEV_RISCV_AIA_GRP_CONFIG: ··· 470 466 } 471 467 break; 472 468 case KVM_DEV_RISCV_AIA_GRP_APLIC: 473 - return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr); 469 + mutex_lock(&dev->kvm->lock); 470 + r = kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr); 471 + mutex_unlock(&dev->kvm->lock); 472 + break; 474 473 case KVM_DEV_RISCV_AIA_GRP_IMSIC: 475 - return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr); 474 + mutex_lock(&dev->kvm->lock); 475 + r = kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr); 476 + mutex_unlock(&dev->kvm->lock); 477 + break; 476 478 } 477 479 478 - return -ENXIO; 480 + return r; 479 481 } 480 482 481 483 struct kvm_device_ops kvm_riscv_aia_device_ops = {
+4
arch/riscv/kvm/aia_imsic.c
··· 908 908 int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC; 909 909 struct imsic *imsic = vcpu->arch.aia_context.imsic_state; 910 910 911 + /* If IMSIC vCPU state not initialized then forward to user space */ 912 + if (!imsic) 913 + return KVM_INSN_EXIT_TO_USER_SPACE; 914 + 911 915 if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) { 912 916 /* Read pending and enabled interrupt with highest priority */ 913 917 topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
+5 -1
arch/riscv/kvm/mmu.c
··· 245 245 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) 246 246 { 247 247 struct kvm_gstage gstage; 248 + bool mmu_locked; 248 249 249 250 if (!kvm->arch.pgd) 250 251 return false; ··· 254 253 gstage.flags = 0; 255 254 gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid); 256 255 gstage.pgd = kvm->arch.pgd; 256 + mmu_locked = spin_trylock(&kvm->mmu_lock); 257 257 kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT, 258 258 (range->end - range->start) << PAGE_SHIFT, 259 259 range->may_block); 260 + if (mmu_locked) 261 + spin_unlock(&kvm->mmu_lock); 260 262 return false; 261 263 } 262 264 ··· 539 535 goto out_unlock; 540 536 541 537 /* Check if we are backed by a THP and thus use block mapping if possible */ 542 - if (vma_pagesize == PAGE_SIZE) 538 + if (!logging && (vma_pagesize == PAGE_SIZE)) 543 539 vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva, &hfn, &gpa); 544 540 545 541 if (writable) {
+1 -1
arch/riscv/kvm/vcpu.c
··· 24 24 #define CREATE_TRACE_POINTS 25 25 #include "trace.h" 26 26 27 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 27 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 28 28 KVM_GENERIC_VCPU_STATS(), 29 29 STATS_DESC_COUNTER(VCPU, ecall_exit_stat), 30 30 STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
+13 -4
arch/riscv/kvm/vcpu_fp.c
··· 10 10 #include <linux/errno.h> 11 11 #include <linux/err.h> 12 12 #include <linux/kvm_host.h> 13 + #include <linux/nospec.h> 13 14 #include <linux/uaccess.h> 14 15 #include <asm/cpufeature.h> 15 16 ··· 94 93 if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr)) 95 94 reg_val = &cntx->fp.f.fcsr; 96 95 else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) && 97 - reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) 96 + reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) { 97 + reg_num = array_index_nospec(reg_num, 98 + ARRAY_SIZE(cntx->fp.f.f)); 98 99 reg_val = &cntx->fp.f.f[reg_num]; 99 - else 100 + } else 100 101 return -ENOENT; 101 102 } else if ((rtype == KVM_REG_RISCV_FP_D) && 102 103 riscv_isa_extension_available(vcpu->arch.isa, d)) { ··· 110 107 reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) { 111 108 if (KVM_REG_SIZE(reg->id) != sizeof(u64)) 112 109 return -EINVAL; 110 + reg_num = array_index_nospec(reg_num, 111 + ARRAY_SIZE(cntx->fp.d.f)); 113 112 reg_val = &cntx->fp.d.f[reg_num]; 114 113 } else 115 114 return -ENOENT; ··· 143 138 if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr)) 144 139 reg_val = &cntx->fp.f.fcsr; 145 140 else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) && 146 - reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) 141 + reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) { 142 + reg_num = array_index_nospec(reg_num, 143 + ARRAY_SIZE(cntx->fp.f.f)); 147 144 reg_val = &cntx->fp.f.f[reg_num]; 148 - else 145 + } else 149 146 return -ENOENT; 150 147 } else if ((rtype == KVM_REG_RISCV_FP_D) && 151 148 riscv_isa_extension_available(vcpu->arch.isa, d)) { ··· 159 152 reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) { 160 153 if (KVM_REG_SIZE(reg->id) != sizeof(u64)) 161 154 return -EINVAL; 155 + reg_num = array_index_nospec(reg_num, 156 + ARRAY_SIZE(cntx->fp.d.f)); 162 157 reg_val = &cntx->fp.d.f[reg_num]; 163 158 } else 164 159 return -ENOENT;
+36 -18
arch/riscv/kvm/vcpu_onereg.c
··· 10 10 #include <linux/bitops.h> 11 11 #include <linux/errno.h> 12 12 #include <linux/err.h> 13 + #include <linux/nospec.h> 13 14 #include <linux/uaccess.h> 14 15 #include <linux/kvm_host.h> 15 16 #include <asm/cacheflush.h> ··· 128 127 kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr)) 129 128 return -ENOENT; 130 129 130 + kvm_ext = array_index_nospec(kvm_ext, ARRAY_SIZE(kvm_isa_ext_arr)); 131 131 *guest_ext = kvm_isa_ext_arr[kvm_ext]; 132 132 switch (*guest_ext) { 133 133 case RISCV_ISA_EXT_SMNPM: ··· 445 443 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | 446 444 KVM_REG_SIZE_MASK | 447 445 KVM_REG_RISCV_CORE); 446 + unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long); 448 447 unsigned long reg_val; 449 448 450 449 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) 451 450 return -EINVAL; 452 - if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) 451 + if (reg_num >= regs_max) 453 452 return -ENOENT; 453 + 454 + reg_num = array_index_nospec(reg_num, regs_max); 454 455 455 456 if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc)) 456 457 reg_val = cntx->sepc; ··· 481 476 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | 482 477 KVM_REG_SIZE_MASK | 483 478 KVM_REG_RISCV_CORE); 479 + unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long); 484 480 unsigned long reg_val; 485 481 486 482 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) 487 483 return -EINVAL; 488 - if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long)) 484 + if (reg_num >= regs_max) 489 485 return -ENOENT; 486 + 487 + reg_num = array_index_nospec(reg_num, regs_max); 490 488 491 489 if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id))) 492 490 return -EFAULT; ··· 515 507 unsigned long *out_val) 516 508 { 517 509 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; 510 + unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long); 518 511 519 - if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long)) 512 + if (reg_num >= regs_max) 520 513 return -ENOENT; 514 + 515 + reg_num = array_index_nospec(reg_num, regs_max); 521 516 522 517 if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { 523 518 kvm_riscv_vcpu_flush_interrupts(vcpu); ··· 537 526 unsigned long reg_val) 538 527 { 539 528 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; 529 + unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long); 540 530 541 - if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long)) 531 + if (reg_num >= regs_max) 542 532 return -ENOENT; 533 + 534 + reg_num = array_index_nospec(reg_num, regs_max); 543 535 544 536 if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) { 545 537 reg_val &= VSIP_VALID_MASK; ··· 562 548 unsigned long reg_val) 563 549 { 564 550 struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr; 551 + unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) / 552 + sizeof(unsigned long); 565 553 566 - if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) / 567 - sizeof(unsigned long)) 568 - return -EINVAL; 554 + if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) 555 + return -ENOENT; 556 + if (reg_num >= regs_max) 557 + return -ENOENT; 558 + 559 + reg_num = array_index_nospec(reg_num, regs_max); 569 560 570 561 ((unsigned long *)csr)[reg_num] = reg_val; 571 562 return 0; ··· 581 562 unsigned long *out_val) 582 563 { 583 564 struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr; 565 + unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) / 566 + 
sizeof(unsigned long); 584 567 585 - if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) / 586 - sizeof(unsigned long)) 587 - return -EINVAL; 568 + if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) 569 + return -ENOENT; 570 + if (reg_num >= regs_max) 571 + return -ENOENT; 572 + 573 + reg_num = array_index_nospec(reg_num, regs_max); 588 574 589 575 *out_val = ((unsigned long *)csr)[reg_num]; 590 576 return 0; ··· 619 595 rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val); 620 596 break; 621 597 case KVM_REG_RISCV_CSR_SMSTATEEN: 622 - rc = -EINVAL; 623 - if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) 624 - rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, 625 - &reg_val); 598 + rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, &reg_val); 626 599 break; 627 600 default: 628 601 rc = -ENOENT; ··· 661 640 rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val); 662 641 break; 663 642 case KVM_REG_RISCV_CSR_SMSTATEEN: 664 - rc = -EINVAL; 665 - if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) 666 - rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, 667 - reg_val); 643 + rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, reg_val); 668 644 break; 669 645 default: 670 646 rc = -ENOENT;
+12 -4
arch/riscv/kvm/vcpu_pmu.c
··· 10 10 #include <linux/errno.h> 11 11 #include <linux/err.h> 12 12 #include <linux/kvm_host.h> 13 + #include <linux/nospec.h> 13 14 #include <linux/perf/riscv_pmu.h> 14 15 #include <asm/csr.h> 15 16 #include <asm/kvm_vcpu_sbi.h> ··· 88 87 89 88 static u64 kvm_pmu_get_perf_event_hw_config(u32 sbi_event_code) 90 89 { 91 - return hw_event_perf_map[sbi_event_code]; 90 + return hw_event_perf_map[array_index_nospec(sbi_event_code, 91 + SBI_PMU_HW_GENERAL_MAX)]; 92 92 } 93 93 94 94 static u64 kvm_pmu_get_perf_event_cache_config(u32 sbi_event_code) ··· 220 218 return -EINVAL; 221 219 } 222 220 221 + cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS); 223 222 pmc = &kvpmu->pmc[cidx]; 224 223 225 224 if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW) ··· 247 244 return -EINVAL; 248 245 } 249 246 247 + cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS); 250 248 pmc = &kvpmu->pmc[cidx]; 251 249 252 250 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { ··· 524 520 { 525 521 struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu); 526 522 527 - if (cidx > RISCV_KVM_MAX_COUNTERS || cidx == 1) { 523 + if (cidx >= RISCV_KVM_MAX_COUNTERS || cidx == 1) { 528 524 retdata->err_val = SBI_ERR_INVALID_PARAM; 529 525 return 0; 530 526 } 531 527 528 + cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS); 532 529 retdata->out_val = kvpmu->pmc[cidx].cinfo.value; 533 530 534 531 return 0; ··· 564 559 } 565 560 /* Start the counters that have been configured and requested by the guest */ 566 561 for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) { 567 - pmc_index = i + ctr_base; 562 + pmc_index = array_index_nospec(i + ctr_base, 563 + RISCV_KVM_MAX_COUNTERS); 568 564 if (!test_bit(pmc_index, kvpmu->pmc_in_use)) 569 565 continue; 570 566 /* The guest started the counter again. Reset the overflow status */ ··· 636 630 637 631 /* Stop the counters that have been configured and requested by the guest */ 638 632 for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) { 639 - pmc_index = i + ctr_base; 633 + pmc_index = array_index_nospec(i + ctr_base, 634 + RISCV_KVM_MAX_COUNTERS); 640 635 if (!test_bit(pmc_index, kvpmu->pmc_in_use)) 641 636 continue; 642 637 pmc = &kvpmu->pmc[pmc_index]; ··· 768 761 } 769 762 } 770 763 764 + ctr_idx = array_index_nospec(ctr_idx, RISCV_KVM_MAX_COUNTERS); 771 765 pmc = &kvpmu->pmc[ctr_idx]; 772 766 pmc->idx = ctr_idx; 773 767
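Besides the nospec clamps, the hunk above fixes an off-by-one: with `cidx > RISCV_KVM_MAX_COUNTERS`, an index equal to RISCV_KVM_MAX_COUNTERS passed validation and read one element past the end of kvpmu->pmc[]. Valid indices for an N-element array are 0..N-1, so the guard must be `>=`. Sketch of the corrected check (illustrative bound):

    #include <linux/errno.h>

    #define NR_CTRS 64      /* stands in for RISCV_KVM_MAX_COUNTERS */

    static int check_ctr_idx(unsigned long cidx)
    {
            /* '>' would wrongly accept cidx == NR_CTRS,
             * i.e. the out-of-bounds element pmc[NR_CTRS]. */
            if (cidx >= NR_CTRS)
                    return -EINVAL;
            return 0;
    }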
+1 -1
arch/riscv/kvm/vm.c
··· 13 13 #include <linux/kvm_host.h> 14 14 #include <asm/kvm_mmu.h> 15 15 16 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 16 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 17 17 KVM_GENERIC_VM_STATS() 18 18 }; 19 19 static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+8 -6
arch/s390/kernel/irq.c
··· 147 147 bool from_idle; 148 148 149 149 from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT); 150 - if (from_idle) { 150 + if (from_idle) 151 151 update_timer_idle(); 152 - regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); 153 - } 154 152 155 153 irq_enter_rcu(); 156 154 ··· 174 176 175 177 set_irq_regs(old_regs); 176 178 irqentry_exit(regs, state); 179 + 180 + if (from_idle) 181 + regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); 177 182 } 178 183 179 184 void noinstr do_ext_irq(struct pt_regs *regs) ··· 186 185 bool from_idle; 187 186 188 187 from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT); 189 - if (from_idle) { 188 + if (from_idle) 190 189 update_timer_idle(); 191 - regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); 192 - } 193 190 194 191 irq_enter_rcu(); 195 192 ··· 209 210 irq_exit_rcu(); 210 211 set_irq_regs(old_regs); 211 212 irqentry_exit(regs, state); 213 + 214 + if (from_idle) 215 + regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); 212 216 } 213 217 214 218 static void show_msi_interrupt(struct seq_file *p, int irq)
+2 -2
arch/s390/kvm/kvm-s390.c
··· 65 65 #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \ 66 66 (KVM_MAX_VCPUS + LOCAL_IRQS)) 67 67 68 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 68 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 69 69 KVM_GENERIC_VM_STATS(), 70 70 STATS_DESC_COUNTER(VM, inject_io), 71 71 STATS_DESC_COUNTER(VM, inject_float_mchk), ··· 91 91 sizeof(kvm_vm_stats_desc), 92 92 }; 93 93 94 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 94 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 95 95 KVM_GENERIC_VCPU_STATS(), 96 96 STATS_DESC_COUNTER(VCPU, exit_userspace), 97 97 STATS_DESC_COUNTER(VCPU, exit_null),
-4
arch/sh/drivers/platform_early.c
··· 26 26 struct platform_device *pdev = to_platform_device(dev); 27 27 struct platform_driver *pdrv = to_platform_driver(drv); 28 28 29 - /* When driver_override is set, only bind to the matching driver */ 30 - if (pdev->driver_override) 31 - return !strcmp(pdev->driver_override, drv->name); 32 - 33 29 /* Then try to match against the id table */ 34 30 if (pdrv->id_table) 35 31 return platform_match_id(pdrv->id_table, pdev) != NULL;
+1 -1
arch/x86/entry/vdso/common/vclock_gettime.c
··· 13 13 #include <linux/types.h> 14 14 #include <vdso/gettime.h> 15 15 16 - #include "../../../../lib/vdso/gettimeofday.c" 16 + #include "lib/vdso/gettimeofday.c" 17 17 18 18 int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) 19 19 {
+5 -2
arch/x86/events/core.c
··· 1372 1372 else if (i < n_running) 1373 1373 continue; 1374 1374 1375 - if (hwc->state & PERF_HES_ARCH) 1375 + cpuc->events[hwc->idx] = event; 1376 + 1377 + if (hwc->state & PERF_HES_ARCH) { 1378 + static_call(x86_pmu_set_period)(event); 1376 1379 continue; 1380 + } 1377 1381 1378 1382 /* 1379 1383 * if cpuc->enabled = 0, then no wrmsr as 1380 1384 * per x86_pmu_enable_event() 1381 1385 */ 1382 - cpuc->events[hwc->idx] = event; 1383 1386 x86_pmu_start(event, PERF_EF_RELOAD); 1384 1387 } 1385 1388 cpuc->n_added = 0;
+21 -10
arch/x86/events/intel/core.c
··· 4628 4628 event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64); 4629 4629 } 4630 4630 4631 + static inline int intel_set_branch_counter_constr(struct perf_event *event, 4632 + int *num) 4633 + { 4634 + if (branch_sample_call_stack(event)) 4635 + return -EINVAL; 4636 + if (branch_sample_counters(event)) { 4637 + (*num)++; 4638 + event->hw.dyn_constraint &= x86_pmu.lbr_counters; 4639 + } 4640 + 4641 + return 0; 4642 + } 4643 + 4631 4644 static int intel_pmu_hw_config(struct perf_event *event) 4632 4645 { 4633 4646 int ret = x86_pmu_hw_config(event); ··· 4711 4698 * group, which requires the extra space to store the counters. 4712 4699 */ 4713 4700 leader = event->group_leader; 4714 - if (branch_sample_call_stack(leader)) 4701 + if (intel_set_branch_counter_constr(leader, &num)) 4715 4702 return -EINVAL; 4716 - if (branch_sample_counters(leader)) { 4717 - num++; 4718 - leader->hw.dyn_constraint &= x86_pmu.lbr_counters; 4719 - } 4720 4703 leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS; 4721 4704 4722 4705 for_each_sibling_event(sibling, leader) { 4723 - if (branch_sample_call_stack(sibling)) 4706 + if (intel_set_branch_counter_constr(sibling, &num)) 4724 4707 return -EINVAL; 4725 - if (branch_sample_counters(sibling)) { 4726 - num++; 4727 - sibling->hw.dyn_constraint &= x86_pmu.lbr_counters; 4728 - } 4708 + } 4709 + 4710 + /* event isn't installed as a sibling yet. */ 4711 + if (event != leader) { 4712 + if (intel_set_branch_counter_constr(event, &num)) 4713 + return -EINVAL; 4729 4714 } 4730 4715 4731 4716 if (num > fls(x86_pmu.lbr_counters))
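The refactor above also closes a gap called out by the new comment: during group validation the event being scheduled is not yet linked into the leader's sibling list, so iterating siblings alone misses it and it must be checked explicitly. A hedged sketch of that shape (validate_one() is a hypothetical stand-in for intel_set_branch_counter_constr()):

    #include <linux/errno.h>
    #include <linux/perf_event.h>

    /* Hypothetical per-event check. */
    static int validate_one(struct perf_event *e, int *num);

    static int validate_group(struct perf_event *event,
                              struct perf_event *leader)
    {
            struct perf_event *sibling;
            int num = 0;

            if (validate_one(leader, &num))
                    return -EINVAL;
            for_each_sibling_event(sibling, leader) {
                    if (validate_one(sibling, &num))
                            return -EINVAL;
            }
            /* The new event is not yet on the sibling list. */
            if (event != leader && validate_one(event, &num))
                    return -EINVAL;
            return 0;
    }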
+7 -4
arch/x86/events/intel/ds.c
··· 345 345 if (omr.omr_remote) 346 346 val |= REM; 347 347 348 - val |= omr.omr_hitm ? P(SNOOP, HITM) : P(SNOOP, HIT); 349 - 350 348 if (omr.omr_source == 0x2) { 351 - u8 snoop = omr.omr_snoop | omr.omr_promoted; 349 + u8 snoop = omr.omr_snoop | (omr.omr_promoted << 1); 352 350 353 - if (snoop == 0x0) 351 + if (omr.omr_hitm) 352 + val |= P(SNOOP, HITM); 353 + else if (snoop == 0x0) 354 354 val |= P(SNOOP, NA); 355 355 else if (snoop == 0x1) 356 356 val |= P(SNOOP, MISS); ··· 359 359 else if (snoop == 0x3) 360 360 val |= P(SNOOP, NONE); 361 361 } else if (omr.omr_source > 0x2 && omr.omr_source < 0x7) { 362 + val |= omr.omr_hitm ? P(SNOOP, HITM) : P(SNOOP, HIT); 362 363 val |= omr.omr_snoop ? P(SNOOPX, FWD) : 0; 364 + } else { 365 + val |= P(SNOOP, NONE); 363 366 } 364 367 365 368 return val;
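Two fixes are folded into the hunk above: the snoop status is now assembled as a proper two-bit field, `omr_snoop | (omr_promoted << 1)`, rather than OR-ing both flags into bit 0, and the HITM/HIT encoding is applied only for source values where it is meaningful. A small sketch of the field-assembly bug (hypothetical bit layout):

    #include <stdint.h>

    /* Two one-bit flags packed into a 2-bit selector. */
    static uint8_t pack_snoop(uint8_t snoop_bit, uint8_t promoted_bit)
    {
            /* The buggy form, snoop_bit | promoted_bit, can only yield
             * 0 or 1, collapsing selector values 0x2 and 0x3 into 0x1. */
            return (uint8_t)(snoop_bit | (promoted_bit << 1));
    }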
+61 -57
arch/x86/hyperv/hv_crash.c
··· 107 107 cpu_relax(); 108 108 } 109 109 110 - /* This cannot be inlined as it needs stack */ 111 - static noinline __noclone void hv_crash_restore_tss(void) 110 + static void hv_crash_restore_tss(void) 112 111 { 113 112 load_TR_desc(); 114 113 } 115 114 116 - /* This cannot be inlined as it needs stack */ 117 - static noinline void hv_crash_clear_kernpt(void) 115 + static void hv_crash_clear_kernpt(void) 118 116 { 119 117 pgd_t *pgd; 120 118 p4d_t *p4d; ··· 123 125 native_p4d_clear(p4d); 124 126 } 125 127 126 - /* 127 - * This is the C entry point from the asm glue code after the disable hypercall. 128 - * We enter here in IA32-e long mode, ie, full 64bit mode running on kernel 129 - * page tables with our below 4G page identity mapped, but using a temporary 130 - * GDT. ds/fs/gs/es are null. ss is not usable. bp is null. stack is not 131 - * available. We restore kernel GDT, and rest of the context, and continue 132 - * to kexec. 133 - */ 134 - static asmlinkage void __noreturn hv_crash_c_entry(void) 128 + 129 + static void __noreturn hv_crash_handle(void) 135 130 { 136 - struct hv_crash_ctxt *ctxt = &hv_crash_ctxt; 137 - 138 - /* first thing, restore kernel gdt */ 139 - native_load_gdt(&ctxt->gdtr); 140 - 141 - asm volatile("movw %%ax, %%ss" : : "a"(ctxt->ss)); 142 - asm volatile("movq %0, %%rsp" : : "m"(ctxt->rsp)); 143 - 144 - asm volatile("movw %%ax, %%ds" : : "a"(ctxt->ds)); 145 - asm volatile("movw %%ax, %%es" : : "a"(ctxt->es)); 146 - asm volatile("movw %%ax, %%fs" : : "a"(ctxt->fs)); 147 - asm volatile("movw %%ax, %%gs" : : "a"(ctxt->gs)); 148 - 149 - native_wrmsrq(MSR_IA32_CR_PAT, ctxt->pat); 150 - asm volatile("movq %0, %%cr0" : : "r"(ctxt->cr0)); 151 - 152 - asm volatile("movq %0, %%cr8" : : "r"(ctxt->cr8)); 153 - asm volatile("movq %0, %%cr4" : : "r"(ctxt->cr4)); 154 - asm volatile("movq %0, %%cr2" : : "r"(ctxt->cr4)); 155 - 156 - native_load_idt(&ctxt->idtr); 157 - native_wrmsrq(MSR_GS_BASE, ctxt->gsbase); 158 - native_wrmsrq(MSR_EFER, ctxt->efer); 159 - 160 - /* restore the original kernel CS now via far return */ 161 - asm volatile("movzwq %0, %%rax\n\t" 162 - "pushq %%rax\n\t" 163 - "pushq $1f\n\t" 164 - "lretq\n\t" 165 - "1:nop\n\t" : : "m"(ctxt->cs) : "rax"); 166 - 167 - /* We are in asmlinkage without stack frame, hence make C function 168 - * calls which will buy stack frames. 169 - */ 170 131 hv_crash_restore_tss(); 171 132 hv_crash_clear_kernpt(); 172 133 ··· 134 177 135 178 hv_panic_timeout_reboot(); 136 179 } 137 - /* Tell gcc we are using lretq long jump in the above function intentionally */ 180 + 181 + /* 182 + * __naked functions do not permit function calls, not even to __always_inline 183 + * functions that only contain asm() blocks themselves. So use a macro instead. 184 + */ 185 + #define hv_wrmsr(msr, val) \ 186 + asm volatile("wrmsr" :: "c"(msr), "a"((u32)val), "d"((u32)(val >> 32)) : "memory") 187 + 188 + /* 189 + * This is the C entry point from the asm glue code after the disable hypercall. 190 + * We enter here in IA32-e long mode, ie, full 64bit mode running on kernel 191 + * page tables with our below 4G page identity mapped, but using a temporary 192 + * GDT. ds/fs/gs/es are null. ss is not usable. bp is null. stack is not 193 + * available. We restore kernel GDT, and rest of the context, and continue 194 + * to kexec. 195 + */ 196 + static void __naked hv_crash_c_entry(void) 197 + { 198 + /* first thing, restore kernel gdt */ 199 + asm volatile("lgdt %0" : : "m" (hv_crash_ctxt.gdtr)); 200 + 201 + asm volatile("movw %0, %%ss\n\t" 202 + "movq %1, %%rsp" 203 + :: "m"(hv_crash_ctxt.ss), "m"(hv_crash_ctxt.rsp)); 204 + 205 + asm volatile("movw %0, %%ds" : : "m"(hv_crash_ctxt.ds)); 206 + asm volatile("movw %0, %%es" : : "m"(hv_crash_ctxt.es)); 207 + asm volatile("movw %0, %%fs" : : "m"(hv_crash_ctxt.fs)); 208 + asm volatile("movw %0, %%gs" : : "m"(hv_crash_ctxt.gs)); 209 + 210 + hv_wrmsr(MSR_IA32_CR_PAT, hv_crash_ctxt.pat); 211 + asm volatile("movq %0, %%cr0" : : "r"(hv_crash_ctxt.cr0)); 212 + 213 + asm volatile("movq %0, %%cr8" : : "r"(hv_crash_ctxt.cr8)); 214 + asm volatile("movq %0, %%cr4" : : "r"(hv_crash_ctxt.cr4)); 215 + asm volatile("movq %0, %%cr2" : : "r"(hv_crash_ctxt.cr2)); 216 + 217 + asm volatile("lidt %0" : : "m" (hv_crash_ctxt.idtr)); 218 + hv_wrmsr(MSR_GS_BASE, hv_crash_ctxt.gsbase); 219 + hv_wrmsr(MSR_EFER, hv_crash_ctxt.efer); 220 + 221 + /* restore the original kernel CS now via far return */ 222 + asm volatile("pushq %q0\n\t" 223 + "pushq %q1\n\t" 224 + "lretq" 225 + :: "r"(hv_crash_ctxt.cs), "r"(hv_crash_handle)); 226 + } 227 + /* Tell objtool we are using lretq long jump in the above function intentionally */ 138 228 STACK_FRAME_NON_STANDARD(hv_crash_c_entry); 139 229 140 230 static void hv_mark_tss_not_busy(void) ··· 199 195 { 200 196 struct hv_crash_ctxt *ctxt = &hv_crash_ctxt; 201 197 202 - asm volatile("movq %%rsp,%0" : "=m"(ctxt->rsp)); 198 + ctxt->rsp = current_stack_pointer; 203 199 204 200 ctxt->cr0 = native_read_cr0(); 205 201 ctxt->cr4 = native_read_cr4(); 206 202 207 - asm volatile("movq %%cr2, %0" : "=a"(ctxt->cr2)); 208 - asm volatile("movq %%cr8, %0" : "=a"(ctxt->cr8)); 203 + asm volatile("movq %%cr2, %0" : "=r"(ctxt->cr2)); 204 + asm volatile("movq %%cr8, %0" : "=r"(ctxt->cr8)); 209 205 210 - asm volatile("movl %%cs, %%eax" : "=a"(ctxt->cs)); 211 - asm volatile("movl %%ss, %%eax" : "=a"(ctxt->ss)); 212 - asm volatile("movl %%ds, %%eax" : "=a"(ctxt->ds)); 213 - asm volatile("movl %%es, %%eax" : "=a"(ctxt->es)); 214 - asm volatile("movl %%fs, %%eax" : "=a"(ctxt->fs)); 215 - asm volatile("movl %%gs, %%eax" : "=a"(ctxt->gs)); 206 + asm volatile("movw %%cs, %0" : "=m"(ctxt->cs)); 207 + asm volatile("movw %%ss, %0" : "=m"(ctxt->ss)); 208 + asm volatile("movw %%ds, %0" : "=m"(ctxt->ds)); 209 + asm volatile("movw %%es, %0" : "=m"(ctxt->es)); 210 + asm volatile("movw %%fs, %0" : "=m"(ctxt->fs)); 211 + asm volatile("movw %%gs, %0" : "=m"(ctxt->gs)); 216 212 217 213 native_store_gdt(&ctxt->gdtr); 218 214 store_idt(&ctxt->idtr);
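As the comment above notes, __naked functions cannot call anything, so hv_wrmsr() expands the WRMSR instruction inline: MSR index in ECX, 64-bit value split across EDX:EAX. A runnable sketch of just that operand split; 0x277 is the architectural IA32_PAT index, the value written is an arbitrary example.

#include <stdio.h>
#include <stdint.h>

/* Print the EDX:EAX halves that hv_wrmsr() above feeds to WRMSR. */
static void show_wrmsr_operands(uint32_t msr, uint64_t val)
{
	uint32_t eax = (uint32_t)val;		/* low 32 bits */
	uint32_t edx = (uint32_t)(val >> 32);	/* high 32 bits */

	printf("msr=0x%08x eax=0x%08x edx=0x%08x\n", msr, eax, edx);
}

int main(void)
{
	/* 0x277 is IA32_PAT; the value is a made-up example. */
	show_wrmsr_operands(0x277, 0x0007040600070406ULL);
	return 0;
}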
+2 -1
arch/x86/include/asm/kvm_host.h
··· 2485 2485 KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS | \ 2486 2486 KVM_X86_QUIRK_SLOT_ZAP_ALL | \ 2487 2487 KVM_X86_QUIRK_STUFF_FEATURE_MSRS | \ 2488 - KVM_X86_QUIRK_IGNORE_GUEST_PAT) 2488 + KVM_X86_QUIRK_IGNORE_GUEST_PAT | \ 2489 + KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM) 2489 2490 2490 2491 #define KVM_X86_CONDITIONAL_QUIRKS \ 2491 2492 (KVM_X86_QUIRK_CD_NW_CLEARED | \
+1
arch/x86/include/uapi/asm/kvm.h
··· 476 476 #define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7) 477 477 #define KVM_X86_QUIRK_STUFF_FEATURE_MSRS (1 << 8) 478 478 #define KVM_X86_QUIRK_IGNORE_GUEST_PAT (1 << 9) 479 + #define KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM (1 << 10) 479 480 480 481 #define KVM_STATE_NESTED_FORMAT_VMX 0 481 482 #define KVM_STATE_NESTED_FORMAT_SVM 1
+6
arch/x86/kernel/apic/apic.c
··· 1894 1894 1895 1895 static inline void try_to_enable_x2apic(int remap_mode) { } 1896 1896 static inline void __x2apic_enable(void) { } 1897 + static inline void __x2apic_disable(void) { } 1897 1898 #endif /* !CONFIG_X86_X2APIC */ 1898 1899 1899 1900 void __init enable_IR_x2apic(void) ··· 2457 2456 if (x2apic_mode) { 2458 2457 __x2apic_enable(); 2459 2458 } else { 2459 + if (x2apic_enabled()) { 2460 + pr_warn_once("x2apic: re-enabled by firmware during resume. Disabling\n"); 2461 + __x2apic_disable(); 2462 + } 2463 + 2460 2464 /* 2461 2465 * Make sure the APICBASE points to the right address 2462 2466 *
+16 -2
arch/x86/kernel/apic/x2apic_uv_x.c
··· 1708 1708 struct uv_hub_info_s *new_hub; 1709 1709 1710 1710 /* Allocate & fill new per hub info list */ 1711 - new_hub = (bid == 0) ? &uv_hub_info_node0 1712 - : kzalloc_node(bytes, GFP_KERNEL, uv_blade_to_node(bid)); 1711 + if (bid == 0) { 1712 + new_hub = &uv_hub_info_node0; 1713 + } else { 1714 + int nid; 1715 + 1716 + /* 1717 + * Deconfigured sockets are mapped to SOCK_EMPTY. Use 1718 + * NUMA_NO_NODE to allocate on a valid node. 1719 + */ 1720 + nid = uv_blade_to_node(bid); 1721 + if (nid == SOCK_EMPTY) 1722 + nid = NUMA_NO_NODE; 1723 + 1724 + new_hub = kzalloc_node(bytes, GFP_KERNEL, nid); 1725 + } 1726 + 1713 1727 if (WARN_ON_ONCE(!new_hub)) { 1714 1728 /* do not kfree() bid 0, which is statically allocated */ 1715 1729 while (--bid > 0)
+11 -6
arch/x86/kernel/cpu/mce/amd.c
··· 875 875 { 876 876 amd_reset_thr_limit(m->bank); 877 877 878 - /* Clear MCA_DESTAT for all deferred errors even those logged in MCA_STATUS. */ 879 - if (m->status & MCI_STATUS_DEFERRED) 880 - mce_wrmsrq(MSR_AMD64_SMCA_MCx_DESTAT(m->bank), 0); 878 + if (mce_flags.smca) { 879 + /* 880 + * Clear MCA_DESTAT for all deferred errors even those 881 + * logged in MCA_STATUS. 882 + */ 883 + if (m->status & MCI_STATUS_DEFERRED) 884 + mce_wrmsrq(MSR_AMD64_SMCA_MCx_DESTAT(m->bank), 0); 881 885 882 - /* Don't clear MCA_STATUS if MCA_DESTAT was used exclusively. */ 883 - if (m->kflags & MCE_CHECK_DFR_REGS) 884 - return; 886 + /* Don't clear MCA_STATUS if MCA_DESTAT was used exclusively. */ 887 + if (m->kflags & MCE_CHECK_DFR_REGS) 888 + return; 889 + } 885 890 886 891 mce_wrmsrq(mca_msr_reg(m->bank, MCA_STATUS), 0); 887 892 }
+3 -2
arch/x86/kernel/cpu/mshyperv.c
··· 496 496 test_and_set_bit(HYPERV_DBG_FASTFAIL_VECTOR, system_vectors)) 497 497 BUG(); 498 498 499 - pr_info("Hyper-V: reserve vectors: %d %d %d\n", HYPERV_DBG_ASSERT_VECTOR, 500 - HYPERV_DBG_SERVICE_VECTOR, HYPERV_DBG_FASTFAIL_VECTOR); 499 + pr_info("Hyper-V: reserve vectors: 0x%x 0x%x 0x%x\n", 500 + HYPERV_DBG_ASSERT_VECTOR, HYPERV_DBG_SERVICE_VECTOR, 501 + HYPERV_DBG_FASTFAIL_VECTOR); 501 502 } 502 503 503 504 static void __init ms_hyperv_init_platform(void)
+4 -1
arch/x86/kvm/cpuid.c
··· 776 776 #define SYNTHESIZED_F(name) \ 777 777 ({ \ 778 778 kvm_cpu_cap_synthesized |= feature_bit(name); \ 779 - F(name); \ 779 + \ 780 + BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \ 781 + if (boot_cpu_has(X86_FEATURE_##name)) \ 782 + F(name); \ 780 783 }) 781 784 782 785 /*
+5 -4
arch/x86/kvm/hyperv.c
··· 1981 1981 if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY) 1982 1982 goto out_flush_all; 1983 1983 1984 - if (is_noncanonical_invlpg_address(entries[i], vcpu)) 1985 - continue; 1986 - 1987 1984 /* 1988 1985 * Lower 12 bits of 'address' encode the number of additional 1989 1986 * pages to flush. 1990 1987 */ 1991 1988 gva = entries[i] & PAGE_MASK; 1992 - for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++) 1989 + for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++) { 1990 + if (is_noncanonical_invlpg_address(gva + j * PAGE_SIZE, vcpu)) 1991 + continue; 1992 + 1993 1993 kvm_x86_call(flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE); 1994 + } 1994 1995 1995 1996 ++vcpu->stat.tlb_flush; 1996 1997 }
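The fix above validates every page in the range rather than only the base address, since the low 12 bits of each entry extend the flush and can carry it past the end of the canonical range. A runnable model, assuming 48-bit virtual addresses and approximating is_noncanonical_invlpg_address() with a plain sign-extension check:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* 48-bit canonical check: bits 63:47 must all equal bit 47. */
static int noncanonical(uint64_t va)
{
	return (uint64_t)((int64_t)(va << 16) >> 16) != va;
}

int main(void)
{
	/* Base is the last canonical low-half page; the low bits ask for
	 * two additional pages, which cross into noncanonical space. */
	uint64_t entry = 0x00007ffffffff000ULL | 2;
	uint64_t gva = entry & PAGE_MASK;

	for (uint64_t j = 0; j < (entry & ~PAGE_MASK) + 1; j++) {
		uint64_t va = gva + j * PAGE_SIZE;

		if (noncanonical(va)) {
			printf("skip  0x%016llx\n", (unsigned long long)va);
			continue;
		}
		printf("flush 0x%016llx\n", (unsigned long long)va);
	}
	return 0;
}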
+2 -1
arch/x86/kvm/ioapic.c
··· 321 321 idx = srcu_read_lock(&kvm->irq_srcu); 322 322 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); 323 323 if (gsi != -1) 324 - hlist_for_each_entry_rcu(kimn, &ioapic->mask_notifier_list, link) 324 + hlist_for_each_entry_srcu(kimn, &ioapic->mask_notifier_list, link, 325 + srcu_read_lock_held(&kvm->irq_srcu)) 325 326 if (kimn->irq == gsi) 326 327 kimn->func(kimn, mask); 327 328 srcu_read_unlock(&kvm->irq_srcu, idx);
+6 -3
arch/x86/kvm/svm/avic.c
··· 189 189 struct kvm_vcpu *vcpu = &svm->vcpu; 190 190 191 191 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); 192 - 193 192 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; 194 193 vmcb->control.avic_physical_id |= avic_get_max_physical_id(vcpu); 195 - 196 194 vmcb->control.int_ctl |= AVIC_ENABLE_MASK; 195 + 196 + svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); 197 197 198 198 /* 199 199 * Note: KVM supports hybrid-AVIC mode, where KVM emulates x2APIC MSR ··· 225 225 226 226 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); 227 227 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; 228 + 229 + if (!sev_es_guest(svm->vcpu.kvm)) 230 + svm_set_intercept(svm, INTERCEPT_CR8_WRITE); 228 231 229 232 /* 230 233 * If running nested and the guest uses its own MSR bitmap, there ··· 371 368 vmcb->control.avic_physical_id = __sme_set(__pa(kvm_svm->avic_physical_id_table)); 372 369 vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE; 373 370 374 - if (kvm_apicv_activated(svm->vcpu.kvm)) 371 + if (kvm_vcpu_apicv_active(&svm->vcpu)) 375 372 avic_activate_vmcb(svm); 376 373 else 377 374 avic_deactivate_vmcb(svm);
+10 -2
arch/x86/kvm/svm/nested.c
··· 418 418 return __nested_vmcb_check_controls(vcpu, ctl); 419 419 } 420 420 421 + int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu) 422 + { 423 + if (!nested_vmcb_check_save(vcpu) || 424 + !nested_vmcb_check_controls(vcpu)) 425 + return -EINVAL; 426 + 427 + return 0; 428 + } 429 + 421 430 /* 422 431 * If a feature is not advertised to L1, clear the corresponding vmcb12 423 432 * intercept. ··· 1037 1028 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); 1038 1029 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); 1039 1030 1040 - if (!nested_vmcb_check_save(vcpu) || 1041 - !nested_vmcb_check_controls(vcpu)) { 1031 + if (nested_svm_check_cached_vmcb12(vcpu) < 0) { 1042 1032 vmcb12->control.exit_code = SVM_EXIT_ERR; 1043 1033 vmcb12->control.exit_info_1 = 0; 1044 1034 vmcb12->control.exit_info_2 = 0;
+11 -6
arch/x86/kvm/svm/svm.c
··· 1077 1077 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); 1078 1078 svm_set_intercept(svm, INTERCEPT_CR3_WRITE); 1079 1079 svm_set_intercept(svm, INTERCEPT_CR4_WRITE); 1080 - if (!kvm_vcpu_apicv_active(vcpu)) 1081 - svm_set_intercept(svm, INTERCEPT_CR8_WRITE); 1080 + svm_set_intercept(svm, INTERCEPT_CR8_WRITE); 1082 1081 1083 1082 set_dr_intercepts(svm); 1084 1083 ··· 1188 1189 if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS)) 1189 1190 svm->vmcb->control.erap_ctl |= ERAP_CONTROL_ALLOW_LARGER_RAP; 1190 1191 1191 - if (kvm_vcpu_apicv_active(vcpu)) 1192 + if (enable_apicv && irqchip_in_kernel(vcpu->kvm)) 1192 1193 avic_init_vmcb(svm, vmcb); 1193 1194 1194 1195 if (vnmi) ··· 2673 2674 2674 2675 static int cr8_write_interception(struct kvm_vcpu *vcpu) 2675 2676 { 2677 + u8 cr8_prev = kvm_get_cr8(vcpu); 2676 2678 int r; 2677 2679 2678 - u8 cr8_prev = kvm_get_cr8(vcpu); 2680 + WARN_ON_ONCE(kvm_vcpu_apicv_active(vcpu)); 2681 + 2679 2682 /* instruction emulation calls kvm_set_cr8() */ 2680 2683 r = cr_interception(vcpu); 2681 2684 if (lapic_in_kernel(vcpu)) ··· 4880 4879 vmcb12 = map.hva; 4881 4880 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); 4882 4881 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); 4883 - ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false); 4884 4882 4885 - if (ret) 4883 + if (nested_svm_check_cached_vmcb12(vcpu) < 0) 4886 4884 goto unmap_save; 4887 4885 4886 + if (enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, 4887 + vmcb12, false) != 0) 4888 + goto unmap_save; 4889 + 4890 + ret = 0; 4888 4891 svm->nested.nested_run_pending = 1; 4889 4892 4890 4893 unmap_save:
+1
arch/x86/kvm/svm/svm.h
··· 797 797 798 798 int nested_svm_exit_handled(struct vcpu_svm *svm); 799 799 int nested_svm_check_permissions(struct kvm_vcpu *vcpu); 800 + int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu); 800 801 int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, 801 802 bool has_error_code, u32 error_code); 802 803 int nested_svm_exit_special(struct vcpu_svm *svm);
+45 -16
arch/x86/kvm/vmx/nested.c
··· 3300 3300 if (CC(vmcs12->guest_cr4 & X86_CR4_CET && !(vmcs12->guest_cr0 & X86_CR0_WP))) 3301 3301 return -EINVAL; 3302 3302 3303 - if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && 3304 - (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) || 3305 - CC(!vmx_is_valid_debugctl(vcpu, vmcs12->guest_ia32_debugctl, false)))) 3306 - return -EINVAL; 3303 + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { 3304 + u64 debugctl = vmcs12->guest_ia32_debugctl; 3305 + 3306 + /* 3307 + * FREEZE_IN_SMM is not virtualized, but allow L1 to set it in 3308 + * vmcs12's DEBUGCTL under a quirk for backwards compatibility. 3309 + * Note that the quirk only relaxes the consistency check. The 3310 + * vmcs02 bit is still under the control of the host. In 3311 + * particular, if a host administrator decides to clear the bit, 3312 + * then L1 has no say in the matter. 3313 + */ 3314 + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM)) 3315 + debugctl &= ~DEBUGCTLMSR_FREEZE_IN_SMM; 3316 + 3317 + if (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) || 3318 + CC(!vmx_is_valid_debugctl(vcpu, debugctl, false))) 3319 + return -EINVAL; 3320 + } 3307 3321 3308 3322 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && 3309 3323 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) ··· 6856 6842 free_nested(vcpu); 6857 6843 } 6858 6844 6845 + int nested_vmx_check_restored_vmcs12(struct kvm_vcpu *vcpu) 6846 + { 6847 + enum vm_entry_failure_code ignored; 6848 + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 6849 + 6850 + if (nested_cpu_has_shadow_vmcs(vmcs12) && 6851 + vmcs12->vmcs_link_pointer != INVALID_GPA) { 6852 + struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 6853 + 6854 + if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 6855 + !shadow_vmcs12->hdr.shadow_vmcs) 6856 + return -EINVAL; 6857 + } 6858 + 6859 + if (nested_vmx_check_controls(vcpu, vmcs12) || 6860 + nested_vmx_check_host_state(vcpu, vmcs12) || 6861 + nested_vmx_check_guest_state(vcpu, vmcs12, &ignored)) 6862 + return -EINVAL; 6863 + 6864 + return 0; 6865 + } 6866 + 6859 6867 static int vmx_set_nested_state(struct kvm_vcpu *vcpu, 6860 6868 struct kvm_nested_state __user *user_kvm_nested_state, 6861 6869 struct kvm_nested_state *kvm_state) 6862 6870 { 6863 6871 struct vcpu_vmx *vmx = to_vmx(vcpu); 6864 6872 struct vmcs12 *vmcs12; 6865 - enum vm_entry_failure_code ignored; 6866 6873 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = 6867 6874 &user_kvm_nested_state->data.vmx[0]; 6868 6875 int ret; ··· 7014 6979 vmx->nested.mtf_pending = 7015 6980 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); 7016 6981 7017 - ret = -EINVAL; 7018 6982 if (nested_cpu_has_shadow_vmcs(vmcs12) && 7019 6983 vmcs12->vmcs_link_pointer != INVALID_GPA) { 7020 6984 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); 7021 6985 6986 + ret = -EINVAL; 7022 6987 if (kvm_state->size < 7023 6988 sizeof(*kvm_state) + 7024 6989 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) 7025 6990 goto error_guest_mode; 7026 6991 6992 + ret = -EFAULT; 7027 6993 if (copy_from_user(shadow_vmcs12, 7028 6994 user_vmx_nested_state->shadow_vmcs12, 7029 - sizeof(*shadow_vmcs12))) { 7030 6995 sizeof(*shadow_vmcs12))) 7031 - ret = -EFAULT; 7032 - goto error_guest_mode; 7033 - } 7034 - 7035 - if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || 7036 - !shadow_vmcs12->hdr.shadow_vmcs 6996 goto error_guest_mode; 7037 6997 } ··· 7038 7008 kvm_state->hdr.vmx.preemption_timer_deadline; 7039 7009 } 7040 7010 7041 - if (nested_vmx_check_controls(vcpu, vmcs12) || 7042 - nested_vmx_check_host_state(vcpu, vmcs12) || 7043 - nested_vmx_check_guest_state(vcpu, vmcs12, &ignored)) 7011 + ret = nested_vmx_check_restored_vmcs12(vcpu); 7012 + if (ret < 0) 7044 7013 goto error_guest_mode; 7045 7014 7046 7015 vmx->nested.dirty_vmcs12 = true;
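A runnable model of the relaxed consistency check above: with the quirk enabled, FREEZE_IN_SMM (IA32_DEBUGCTL bit 14) is masked out of the guest value before validation, so an L1 that sets it no longer fails the check. The valid-bits mask below is a made-up stand-in for whatever vmx_is_valid_debugctl() would allow.

#include <stdio.h>
#include <stdint.h>

#define DEBUGCTLMSR_FREEZE_IN_SMM	(1ULL << 14)	/* IA32_DEBUGCTL bit 14 */

/* With the quirk, FREEZE_IN_SMM is ignored for the validity check. */
static int entry_fails(uint64_t guest_debugctl, uint64_t valid_bits, int quirk)
{
	if (quirk)
		guest_debugctl &= ~DEBUGCTLMSR_FREEZE_IN_SMM;
	return (guest_debugctl & ~valid_bits) != 0;
}

int main(void)
{
	uint64_t valid = 0x3;	/* made-up set of host-allowed bits */
	uint64_t guest = DEBUGCTLMSR_FREEZE_IN_SMM | 0x1;

	printf("quirk off: fails=%d\n", entry_fails(guest, valid, 0));	/* 1 */
	printf("quirk on:  fails=%d\n", entry_fails(guest, valid, 1));	/* 0 */
	return 0;
}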
+1
arch/x86/kvm/vmx/nested.h
··· 22 22 void nested_vmx_hardware_unsetup(void); 23 23 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)); 24 24 void nested_vmx_set_vmcs_shadowing_bitmap(void); 25 + int nested_vmx_check_restored_vmcs12(struct kvm_vcpu *vcpu); 25 26 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu); 26 27 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, 27 28 bool from_vmentry);
+7 -3
arch/x86/kvm/vmx/vmx.c
··· 1149 1149 } 1150 1150 1151 1151 vmx_add_auto_msr(&m->guest, msr, guest_val, VM_ENTRY_MSR_LOAD_COUNT, kvm); 1152 - vmx_add_auto_msr(&m->guest, msr, host_val, VM_EXIT_MSR_LOAD_COUNT, kvm); 1152 + vmx_add_auto_msr(&m->host, msr, host_val, VM_EXIT_MSR_LOAD_COUNT, kvm); 1153 1153 } 1154 1154 1155 1155 static bool update_transition_efer(struct vcpu_vmx *vmx) ··· 8528 8528 } 8529 8529 8530 8530 if (vmx->nested.smm.guest_mode) { 8531 + /* Triple fault if the state is invalid. */ 8532 + if (nested_vmx_check_restored_vmcs12(vcpu) < 0) 8533 + return 1; 8534 + 8531 8535 ret = nested_vmx_enter_non_root_mode(vcpu, false); 8532 - if (ret) 8533 - return ret; 8536 + if (ret != NVMX_VMENTRY_SUCCESS) 8537 + return 1; 8534 8538 8535 8539 vmx->nested.nested_run_pending = 1; 8536 8540 vmx->nested.smm.guest_mode = false;
+2 -2
arch/x86/kvm/x86.c
··· 243 243 bool __read_mostly enable_device_posted_irqs = true; 244 244 EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_device_posted_irqs); 245 245 246 - const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 246 + const struct kvm_stats_desc kvm_vm_stats_desc[] = { 247 247 KVM_GENERIC_VM_STATS(), 248 248 STATS_DESC_COUNTER(VM, mmu_shadow_zapped), 249 249 STATS_DESC_COUNTER(VM, mmu_pte_write), ··· 269 269 sizeof(kvm_vm_stats_desc), 270 270 }; 271 271 272 - const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { 272 + const struct kvm_stats_desc kvm_vcpu_stats_desc[] = { 273 273 KVM_GENERIC_VCPU_STATS(), 274 274 STATS_DESC_COUNTER(VCPU, pf_taken), 275 275 STATS_DESC_COUNTER(VCPU, pf_fixed),
+2 -12
drivers/accel/amdxdna/aie2_ctx.c
··· 165 165 166 166 trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq); 167 167 168 - amdxdna_pm_suspend_put(job->hwctx->client->xdna); 169 168 job->hwctx->priv->completed++; 170 169 dma_fence_signal(fence); 171 170 ··· 289 290 struct dma_fence *fence; 290 291 int ret; 291 292 292 - ret = amdxdna_pm_resume_get(hwctx->client->xdna); 293 - if (ret) 293 + if (!hwctx->priv->mbox_chann) 294 294 return NULL; 295 295 296 - if (!hwctx->priv->mbox_chann) { 297 - amdxdna_pm_suspend_put(hwctx->client->xdna); 298 - return NULL; 299 - } 300 - 301 - if (!mmget_not_zero(job->mm)) { 302 - amdxdna_pm_suspend_put(hwctx->client->xdna); 296 + if (!mmget_not_zero(job->mm)) 303 297 return ERR_PTR(-ESRCH); 304 - } 305 298 306 299 kref_get(&job->refcnt); 307 300 fence = dma_fence_get(job->fence); ··· 324 333 325 334 out: 326 335 if (ret) { 327 - amdxdna_pm_suspend_put(hwctx->client->xdna); 328 336 dma_fence_put(job->fence); 329 337 aie2_job_put(job); 330 338 mmput(job->mm);
+10
drivers/accel/amdxdna/amdxdna_ctx.c
··· 17 17 #include "amdxdna_ctx.h" 18 18 #include "amdxdna_gem.h" 19 19 #include "amdxdna_pci_drv.h" 20 + #include "amdxdna_pm.h" 20 21 21 22 #define MAX_HWCTX_ID 255 22 23 #define MAX_ARG_COUNT 4095 ··· 446 445 void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job) 447 446 { 448 447 trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release"); 448 + amdxdna_pm_suspend_put(job->hwctx->client->xdna); 449 449 amdxdna_arg_bos_put(job); 450 450 amdxdna_gem_put_obj(job->cmd_bo); 451 451 dma_fence_put(job->fence); ··· 482 480 if (ret) { 483 481 XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret); 484 482 goto cmd_put; 483 + } 484 + 485 + ret = amdxdna_pm_resume_get(xdna); 486 + if (ret) { 487 + XDNA_ERR(xdna, "Resume failed, ret %d", ret); 488 + goto put_bos; 485 489 } 486 490 487 491 idx = srcu_read_lock(&client->hwctx_srcu); ··· 530 522 dma_fence_put(job->fence); 531 523 unlock_srcu: 532 524 srcu_read_unlock(&client->hwctx_srcu, idx); 525 + amdxdna_pm_suspend_put(xdna); 526 + put_bos: 533 527 amdxdna_arg_bos_put(job); 534 528 cmd_put: 535 529 amdxdna_gem_put_obj(job->cmd_bo);
-6
drivers/accel/ivpu/ivpu_hw_40xx_reg.h
··· 121 121 #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY 0x0003006cu 122 122 #define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK GENMASK(7, 0) 123 123 124 - #define VPU_40XX_HOST_SS_AON_RETENTION0 0x0003000cu 125 - #define VPU_40XX_HOST_SS_AON_RETENTION1 0x00030010u 126 - #define VPU_40XX_HOST_SS_AON_RETENTION2 0x00030014u 127 - #define VPU_40XX_HOST_SS_AON_RETENTION3 0x00030018u 128 - #define VPU_40XX_HOST_SS_AON_RETENTION4 0x0003001cu 129 - 130 124 #define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u 131 125 #define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0) 132 126 #define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1)
-1
drivers/accel/ivpu/ivpu_hw_ip.c
··· 931 931 932 932 static int soc_cpu_boot_60xx(struct ivpu_device *vdev) 933 933 { 934 - REGV_WR64(VPU_40XX_HOST_SS_AON_RETENTION1, vdev->fw->mem_bp->vpu_addr); 935 934 soc_cpu_set_entry_point_40xx(vdev, vdev->fw->cold_boot_entry_point); 936 935 937 936 return 0;
+1
drivers/acpi/Kconfig
··· 9 9 menuconfig ACPI 10 10 bool "ACPI (Advanced Configuration and Power Interface) Support" 11 11 depends on ARCH_SUPPORTS_ACPI 12 + select AUXILIARY_BUS 12 13 select PNP 13 14 select NLS 14 15 select CRC32
+1 -1
drivers/acpi/acpi_platform.c
··· 135 135 } 136 136 } 137 137 138 - if (adev->device_type == ACPI_BUS_TYPE_DEVICE && !adev->pnp.type.backlight) { 138 + if (adev->device_type == ACPI_BUS_TYPE_DEVICE) { 139 139 LIST_HEAD(resource_list); 140 140 141 141 count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+8 -7
drivers/acpi/acpi_processor.c
··· 113 113 PCI_ANY_ID, PCI_ANY_ID, NULL); 114 114 if (ide_dev) { 115 115 errata.piix4.bmisx = pci_resource_start(ide_dev, 4); 116 + if (errata.piix4.bmisx) 117 + dev_dbg(&ide_dev->dev, 118 + "Bus master activity detection (BM-IDE) erratum enabled\n"); 119 + 116 120 pci_dev_put(ide_dev); 117 121 } 118 122 ··· 135 131 if (isa_dev) { 136 132 pci_read_config_byte(isa_dev, 0x76, &value1); 137 133 pci_read_config_byte(isa_dev, 0x77, &value2); 138 - if ((value1 & 0x80) || (value2 & 0x80)) 134 + if ((value1 & 0x80) || (value2 & 0x80)) { 139 135 errata.piix4.fdma = 1; 136 + dev_dbg(&isa_dev->dev, 137 + "Type-F DMA livelock erratum (C3 disabled)\n"); 138 + } 140 139 pci_dev_put(isa_dev); 141 140 } 142 141 143 142 break; 144 143 } 145 - 146 - if (ide_dev) 147 - dev_dbg(&ide_dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n"); 148 - 149 - if (isa_dev) 150 - dev_dbg(&isa_dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n"); 151 144 152 145 return 0; 153 146 }
+22 -23
drivers/acpi/acpi_video.c
··· 9 9 10 10 #define pr_fmt(fmt) "ACPI: video: " fmt 11 11 12 + #include <linux/auxiliary_bus.h> 12 13 #include <linux/kernel.h> 13 14 #include <linux/module.h> 14 15 #include <linux/init.h> ··· 22 21 #include <linux/sort.h> 23 22 #include <linux/pci.h> 24 23 #include <linux/pci_ids.h> 25 - #include <linux/platform_device.h> 26 24 #include <linux/slab.h> 27 25 #include <linux/dmi.h> 28 26 #include <linux/suspend.h> ··· 77 77 static DEFINE_MUTEX(register_count_mutex); 78 78 static DEFINE_MUTEX(video_list_lock); 79 79 static LIST_HEAD(video_bus_head); 80 - static int acpi_video_bus_probe(struct platform_device *pdev); 81 - static void acpi_video_bus_remove(struct platform_device *pdev); 80 + static int acpi_video_bus_probe(struct auxiliary_device *aux_dev, 81 + const struct auxiliary_device_id *id); 82 + static void acpi_video_bus_remove(struct auxiliary_device *aux); 82 83 static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data); 83 84 84 85 /* ··· 94 93 ACPI_VIDEO_FIRST_LEVEL, /* actual supported levels begin here */ 95 94 }; 96 95 97 - static const struct acpi_device_id video_device_ids[] = { 98 - {ACPI_VIDEO_HID, 0}, 99 - {"", 0}, 96 + static const struct auxiliary_device_id video_bus_auxiliary_id_table[] = { 97 + { .name = "acpi.video_bus" }, 98 + {}, 100 99 }; 101 - MODULE_DEVICE_TABLE(acpi, video_device_ids); 100 + MODULE_DEVICE_TABLE(auxiliary, video_bus_auxiliary_id_table); 102 101 103 - static struct platform_driver acpi_video_bus = { 102 + static struct auxiliary_driver acpi_video_bus = { 104 103 .probe = acpi_video_bus_probe, 105 104 .remove = acpi_video_bus_remove, 106 - .driver = { 107 - .name = "acpi-video", 108 - .acpi_match_table = video_device_ids, 109 - }, 105 + .id_table = video_bus_auxiliary_id_table, 110 106 }; 111 107 112 108 struct acpi_video_bus_flags { ··· 1883 1885 } 1884 1886 1885 1887 static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video, 1886 - struct platform_device *pdev) 1888 + struct device *parent) 1887 1889 { 1888 1890 struct input_dev *input; 1889 1891 struct acpi_video_device *dev; ··· 1906 1908 input->phys = video->phys; 1907 1909 input->id.bustype = BUS_HOST; 1908 1910 input->id.product = 0x06; 1909 - input->dev.parent = &pdev->dev; 1911 + input->dev.parent = parent; 1910 1912 input->evbit[0] = BIT(EV_KEY); 1911 1913 set_bit(KEY_SWITCHVIDEOMODE, input->keybit); 1912 1914 set_bit(KEY_VIDEO_NEXT, input->keybit); ··· 1978 1980 1979 1981 static int instance; 1980 1982 1981 - static int acpi_video_bus_probe(struct platform_device *pdev) 1983 + static int acpi_video_bus_probe(struct auxiliary_device *aux_dev, 1984 + const struct auxiliary_device_id *id_unused) 1982 1985 { 1983 - struct acpi_device *device = ACPI_COMPANION(&pdev->dev); 1986 + struct acpi_device *device = ACPI_COMPANION(&aux_dev->dev); 1984 1987 struct acpi_video_bus *video; 1985 1988 bool auto_detect; 1986 1989 int error; ··· 2018 2019 instance++; 2019 2020 } 2020 2021 2021 - platform_set_drvdata(pdev, video); 2022 + auxiliary_set_drvdata(aux_dev, video); 2022 2023 2023 2024 video->device = device; 2024 2025 strscpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME); ··· 2067 2068 !auto_detect) 2068 2069 acpi_video_bus_register_backlight(video); 2069 2070 2070 - error = acpi_video_bus_add_notify_handler(video, pdev); 2071 + error = acpi_video_bus_add_notify_handler(video, &aux_dev->dev); 2071 2072 if (error) 2072 2073 goto err_del; 2073 2074 ··· 2095 2096 return error; 2096 2097 } 2097 2098 2098 - static void acpi_video_bus_remove(struct platform_device *pdev) 2099 + static void acpi_video_bus_remove(struct auxiliary_device *aux_dev) 2099 2100 { 2100 - struct acpi_video_bus *video = platform_get_drvdata(pdev); 2101 - struct acpi_device *device = ACPI_COMPANION(&pdev->dev); 2101 + struct acpi_video_bus *video = auxiliary_get_drvdata(aux_dev); 2102 + struct acpi_device *device = ACPI_COMPANION(&aux_dev->dev); 2102 2103 2103 2104 acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY, 2104 2105 acpi_video_bus_notify); ··· 2162 2163 2163 2164 dmi_check_system(video_dmi_table); 2164 2165 2165 - ret = platform_driver_register(&acpi_video_bus); 2166 + ret = auxiliary_driver_register(&acpi_video_bus); 2166 2167 if (ret) 2167 2168 goto leave; 2168 2169 ··· 2182 2183 { 2183 2184 mutex_lock(&register_count_mutex); 2184 2185 if (register_count) { 2185 - platform_driver_unregister(&acpi_video_bus); 2186 + auxiliary_driver_unregister(&acpi_video_bus); 2186 2187 register_count = 0; 2187 2188 may_report_brightness_keys = false; 2188 2189 }
+1 -1
drivers/acpi/acpica/acpredef.h
··· 451 451 452 452 {{"_DSM", 453 453 METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, 454 - ACPI_TYPE_ANY | ACPI_TYPE_PACKAGE) | 454 + ACPI_TYPE_PACKAGE | ACPI_TYPE_ANY) | 455 455 ARG_COUNT_IS_MINIMUM, 456 456 METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */ 457 457
-3
drivers/acpi/bus.c
··· 818 818 if (list_empty(&adev->pnp.ids)) 819 819 return NULL; 820 820 821 - if (adev->pnp.type.backlight) 822 - return adev; 823 - 824 821 return acpi_primary_dev_companion(adev, dev); 825 822 } 826 823
+1 -1
drivers/acpi/osl.c
··· 1681 1681 * Use acpi_os_map_generic_address to pre-map the reset 1682 1682 * register if it's in system memory. 1683 1683 */ 1684 - void *rv; 1684 + void __iomem *rv; 1685 1685 1686 1686 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register); 1687 1687 pr_debug("%s: Reset register mapping %s\n", __func__,
+45
drivers/acpi/scan.c
··· 6 6 #define pr_fmt(fmt) "ACPI: " fmt 7 7 8 8 #include <linux/async.h> 9 + #include <linux/auxiliary_bus.h> 9 10 #include <linux/module.h> 10 11 #include <linux/init.h> 11 12 #include <linux/slab.h> ··· 2193 2192 return acpi_bus_check_add(handle, false, (struct acpi_device **)ret_p); 2194 2193 } 2195 2194 2195 + static void acpi_video_bus_device_release(struct device *dev) 2196 + { 2197 + struct auxiliary_device *aux_dev = to_auxiliary_dev(dev); 2198 + 2199 + kfree(aux_dev); 2200 + } 2201 + 2202 + static void acpi_create_video_bus_device(struct acpi_device *adev, 2203 + struct acpi_device *parent) 2204 + { 2205 + struct auxiliary_device *aux_dev; 2206 + static unsigned int aux_dev_id; 2207 + 2208 + aux_dev = kzalloc_obj(*aux_dev); 2209 + if (!aux_dev) 2210 + return; 2211 + 2212 + aux_dev->id = aux_dev_id++; 2213 + aux_dev->name = "video_bus"; 2214 + aux_dev->dev.parent = acpi_get_first_physical_node(parent); 2215 + if (!aux_dev->dev.parent) 2216 + goto err; 2217 + 2218 + aux_dev->dev.release = acpi_video_bus_device_release; 2219 + 2220 + if (auxiliary_device_init(aux_dev)) 2221 + goto err; 2222 + 2223 + ACPI_COMPANION_SET(&aux_dev->dev, adev); 2224 + if (__auxiliary_device_add(aux_dev, "acpi")) 2225 + auxiliary_device_uninit(aux_dev); 2226 + 2227 + return; 2228 + 2229 + err: 2230 + kfree(aux_dev); 2231 + } 2232 + 2196 2233 struct acpi_scan_system_dev { 2197 2234 struct list_head node; 2198 2235 struct acpi_device *adev; ··· 2268 2229 sd->adev = device; 2269 2230 list_add_tail(&sd->node, &acpi_scan_system_dev_list); 2270 2231 } 2232 + } else if (device->pnp.type.backlight) { 2233 + struct acpi_device *parent; 2234 + 2235 + parent = acpi_dev_parent(device); 2236 + if (parent) 2237 + acpi_create_video_bus_device(device, parent); 2271 2238 } else { 2272 2239 /* For a regular device object, create a platform device. */ 2273 2240 acpi_create_platform_device(device, NULL);
+64 -20
drivers/android/binder/page_range.rs
··· 142 142 _pin: PhantomPinned, 143 143 } 144 144 145 + // We do not define any ops. For now, used only to check identity of vmas. 146 + static BINDER_VM_OPS: bindings::vm_operations_struct = pin_init::zeroed(); 147 + 148 + // To ensure that we do not accidentally install pages into or zap pages from the wrong vma, we 149 + // check its vm_ops and private data before using it. 150 + fn check_vma(vma: &virt::VmaRef, owner: *const ShrinkablePageRange) -> Option<&virt::VmaMixedMap> { 151 + // SAFETY: Just reading the vm_ops pointer of any active vma is safe. 152 + let vm_ops = unsafe { (*vma.as_ptr()).vm_ops }; 153 + if !ptr::eq(vm_ops, &BINDER_VM_OPS) { 154 + return None; 155 + } 156 + 157 + // SAFETY: Reading the vm_private_data pointer of a binder-owned vma is safe. 158 + let vm_private_data = unsafe { (*vma.as_ptr()).vm_private_data }; 159 + // The ShrinkablePageRange is only dropped when the Process is dropped, which only happens once 160 + // the file's ->release handler is invoked, which means the ShrinkablePageRange outlives any 161 + // VMA associated with it, so there can't be any false positives due to pointer reuse here. 162 + if !ptr::eq(vm_private_data, owner.cast()) { 163 + return None; 164 + } 165 + 166 + vma.as_mixedmap_vma() 167 + } 168 + 145 169 struct Inner { 146 170 /// Array of pages. 147 171 /// ··· 332 308 inner.size = num_pages; 333 309 inner.vma_addr = vma.start(); 334 310 311 + // This pointer is only used for comparison - it's not dereferenced. 312 + // 313 + // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on 314 + // `vm_private_data`. 315 + unsafe { 316 + (*vma.as_ptr()).vm_private_data = ptr::from_ref(self).cast_mut().cast::<c_void>() 317 + }; 318 + 319 + // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on 320 + // `vm_ops`. 321 + unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS }; 322 + 335 323 Ok(num_pages) 336 324 } 337 325 ··· 435 399 // 436 400 // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a 437 401 // workqueue. 438 - MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?) 439 - .mmap_read_lock() 440 - .vma_lookup(vma_addr) 441 - .ok_or(ESRCH)? 442 - .as_mixedmap_vma() 443 - .ok_or(ESRCH)? 444 - .vm_insert_page(user_page_addr, &new_page) 445 - .inspect_err(|err| { 446 - pr_warn!( 447 - "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}", 448 - user_page_addr, 449 - vma_addr, 450 - i, 451 - err 452 - ) 453 - })?; 402 + let mm = MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?); 403 + { 404 + let vma_read; 405 + let mmap_read; 406 + let vma = if let Some(ret) = mm.lock_vma_under_rcu(vma_addr) { 407 + vma_read = ret; 408 + check_vma(&vma_read, self) 409 + } else { 410 + mmap_read = mm.mmap_read_lock(); 411 + mmap_read 412 + .vma_lookup(vma_addr) 413 + .and_then(|vma| check_vma(vma, self)) 414 + }; 415 + 416 + match vma { 417 + Some(vma) => vma.vm_insert_page(user_page_addr, &new_page)?, 418 + None => return Err(ESRCH), 419 + } 420 + } 454 421 455 422 let inner = self.lock.lock(); 456 423 ··· 706 667 let mmap_read; 707 668 let mm_mutex; 708 669 let vma_addr; 709 670 + let range_ptr; 710 671 711 672 { 712 673 // CAST: The `list_head` field is first in `PageInfo`. 713 674 let info = item as *mut PageInfo; 714 675 // SAFETY: The `range` field of `PageInfo` is immutable. 676 + range_ptr = unsafe { (*info).range }; 677 + // SAFETY: The `range` outlives its `PageInfo` values. 678 + let range = unsafe { &*range_ptr }; 715 679 716 680 mm = match range.mm.mmget_not_zero() { 717 681 Some(mm) => MmWithUser::into_mmput_async(mm), ··· 759 717 // SAFETY: The lru lock is locked when this method is called. 760 718 unsafe { bindings::spin_unlock(&raw mut (*lru).lock) }; 761 719 762 - if let Some(vma) = mmap_read.vma_lookup(vma_addr) { 763 - let user_page_addr = vma_addr + (page_index << PAGE_SHIFT); 764 - vma.zap_page_range_single(user_page_addr, PAGE_SIZE); 720 + if let Some(unchecked_vma) = mmap_read.vma_lookup(vma_addr) { 721 + if let Some(vma) = check_vma(unchecked_vma, range_ptr) { 722 + let user_page_addr = vma_addr + (page_index << PAGE_SHIFT); 723 + vma.zap_page_range_single(user_page_addr, PAGE_SIZE); 724 + } 765 725 } 766 726 767 727 drop(mmap_read);
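In C, the same identity check reduces to comparing the vma's ops table and private data against the owner before touching it. A minimal kernel-style sketch; the names below are illustrative, not actual binder symbols.

#include <linux/mm.h>

/* Empty ops table used purely as an identity tag for our mappings. */
static const struct vm_operations_struct my_vm_ops;

/* Only touch a vma found by address if both its ops table and its
 * private data point back at us; a foreign mapping that reused the
 * same address range fails the check and is left alone. */
static bool vma_is_ours(const struct vm_area_struct *vma, void *owner)
{
	return vma->vm_ops == &my_vm_ops && vma->vm_private_data == owner;
}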
+2 -1
drivers/android/binder/process.rs
··· 1295 1295 } 1296 1296 1297 1297 pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) { 1298 - if let Some(death) = self.inner.lock().pull_delivered_death(cookie) { 1298 + let death = self.inner.lock().pull_delivered_death(cookie); 1299 + if let Some(death) = death { 1299 1300 death.set_notification_done(thread); 1300 1301 } 1301 1302 }
+33 -2
drivers/android/binder/range_alloc/array.rs
··· 118 118 size: usize, 119 119 is_oneway: bool, 120 120 pid: Pid, 121 - ) -> Result<usize> { 121 + ) -> Result<(usize, bool)> { 122 122 // Compute new value of free_oneway_space, which is set only on success. 123 123 let new_oneway_space = if is_oneway { 124 124 match self.free_oneway_space.checked_sub(size) { ··· 146 146 .ok() 147 147 .unwrap(); 148 148 149 - Ok(insert_at_offset) 149 + // Start detecting spammers once we have less than 20% 150 + // of async space left (which is less than 10% of total 151 + // buffer size). 152 + // 153 + // (This will short-circuit, so `low_oneway_space` is 154 + // only called when necessary.) 155 + let oneway_spam_detected = 156 + is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid); 157 + 158 + Ok((insert_at_offset, oneway_spam_detected)) 159 + } 160 + 161 + /// Find the amount and size of buffers allocated by the current caller. 162 + /// 163 + /// The idea is that once we cross the threshold, whoever is responsible 164 + /// for the low async space is likely to try to send another async transaction, 165 + /// and at some point we'll catch them in the act. This is more efficient 166 + /// than keeping a map per pid. 167 + fn low_oneway_space(&self, calling_pid: Pid) -> bool { 168 + let mut total_alloc_size = 0; 169 + let mut num_buffers = 0; 170 + 171 + // Warn if this pid has more than 50 transactions, or more than 50% of 172 + // async space (which is 25% of total buffer size). Oneway spam is only 173 + // detected when the threshold is exceeded. 174 + for range in &self.ranges { 175 + if range.state.is_oneway() && range.state.pid() == calling_pid { 176 + total_alloc_size += range.size; 177 + num_buffers += 1; 178 + } 179 + } 180 + num_buffers > 50 || total_alloc_size > self.size / 4 150 181 } 151 182 152 183 pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
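The thresholds above mirror the C binder driver: detection is armed once less than 10% of the whole mapping remains free for async transactions, and it then fires for a caller holding more than 50 buffers or over a quarter of the mapping. A runnable model with made-up numbers:

#include <stdio.h>
#include <stddef.h>

struct range { int pid; int is_oneway; size_t size; };

/* Mirrors low_oneway_space(): more than 50 buffers or more than 25%
 * of the total buffer size held by one pid counts as spam. */
static int low_oneway_space(const struct range *r, int n, int pid, size_t total)
{
	size_t total_alloc_size = 0;
	int num_buffers = 0;

	for (int i = 0; i < n; i++) {
		if (r[i].is_oneway && r[i].pid == pid) {
			total_alloc_size += r[i].size;
			num_buffers++;
		}
	}
	return num_buffers > 50 || total_alloc_size > total / 4;
}

int main(void)
{
	struct range ranges[] = { { 42, 1, 300000 }, { 42, 1, 4096 }, { 7, 1, 4096 } };
	size_t total = 1 << 20;		/* 1 MiB mapping, made-up */
	size_t free_oneway = 90000;	/* below the 10%-of-total arming point */

	int detected = free_oneway < total / 10 &&
		       low_oneway_space(ranges, 3, 42, total);
	printf("oneway_spam_detected=%d\n", detected);	/* 1 */
	return 0;
}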
+2 -2
drivers/android/binder/range_alloc/mod.rs
··· 188 188 self.reserve_new(args) 189 189 } 190 190 Impl::Array(array) => { 191 - let offset = 191 + let (offset, oneway_spam_detected) = 192 192 array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?; 193 193 Ok(ReserveNew::Success(ReserveNewSuccess { 194 194 offset, 195 - oneway_spam_detected: false, 195 + oneway_spam_detected, 196 196 _empty_array_alloc: args.empty_array_alloc, 197 197 _new_tree_alloc: args.new_tree_alloc, 198 198 _tree_alloc: args.tree_alloc,
+9 -9
drivers/android/binder/range_alloc/tree.rs
··· 164 164 self.free_oneway_space 165 165 }; 166 166 167 - // Start detecting spammers once we have less than 20% 168 - // of async space left (which is less than 10% of total 169 - // buffer size). 170 - // 171 - // (This will short-circut, so `low_oneway_space` is 172 - // only called when necessary.) 173 - let oneway_spam_detected = 174 - is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid); 175 - 176 167 let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) { 177 168 None => { 178 169 pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size); ··· 193 202 self.tree.insert(tree_node); 194 203 self.free_tree.insert(free_tree_node); 195 204 } 205 + 206 + // Start detecting spammers once we have less than 20% 207 + // of async space left (which is less than 10% of total 208 + // buffer size). 209 + // 210 + // (This will short-circuit, so `low_oneway_space` is 211 + // only called when necessary.) 212 + let oneway_spam_detected = 213 + is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid); 196 214 197 215 Ok((found_off, oneway_spam_detected)) 198 216 }
+6 -11
drivers/android/binder/thread.rs
··· 1015 1015 1016 1016 // Copy offsets if there are any. 1017 1017 if offsets_size > 0 { 1018 - { 1019 - let mut reader = 1020 - UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size) 1021 - .reader(); 1022 - alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?; 1023 - } 1018 + let mut offsets_reader = 1019 + UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size) 1020 + .reader(); 1024 1021 1025 1022 let offsets_start = aligned_data_size; 1026 1023 let offsets_end = aligned_data_size + offsets_size; ··· 1038 1041 .step_by(size_of::<u64>()) 1039 1042 .enumerate() 1040 1043 { 1041 - let offset: usize = view 1042 - .alloc 1043 - .read::<u64>(index_offset)? 1044 - .try_into() 1045 - .map_err(|_| EINVAL)?; 1044 + let offset = offsets_reader.read::<u64>()?; 1045 + view.alloc.write(index_offset, &offset)?; 1046 + let offset: usize = offset.try_into().map_err(|_| EINVAL)?; 1046 1047 1047 1048 if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) { 1048 1049 pr_warn!("Got transaction with invalid offset.");
+3
drivers/ata/libata-core.c
··· 4188 4188 { "ST3320[68]13AS", "SD1[5-9]", ATA_QUIRK_NONCQ | 4189 4189 ATA_QUIRK_FIRMWARE_WARN }, 4190 4190 4191 + /* ADATA devices with LPM issues. */ 4192 + { "ADATA SU680", NULL, ATA_QUIRK_NOLPM }, 4193 + 4191 4194 /* Seagate disks with LPM issues */ 4192 4195 { "ST1000DM010-2EP102", NULL, ATA_QUIRK_NOLPM }, 4193 4196 { "ST2000DM008-2FR102", NULL, ATA_QUIRK_NOLPM },
+1 -1
drivers/ata/libata-scsi.c
··· 3600 3600 3601 3601 if (cdb[2] != 1 && cdb[2] != 3) { 3602 3602 ata_dev_warn(dev, "invalid command format %d\n", cdb[2]); 3603 - ata_scsi_set_invalid_field(dev, cmd, 1, 0xff); 3603 + ata_scsi_set_invalid_field(dev, cmd, 2, 0xff); 3604 3604 return 0; 3605 3605 } 3606 3606
+42 -1
drivers/base/bus.c
··· 504 504 } 505 505 EXPORT_SYMBOL_GPL(bus_for_each_drv); 506 506 507 + static ssize_t driver_override_store(struct device *dev, 508 + struct device_attribute *attr, 509 + const char *buf, size_t count) 510 + { 511 + int ret; 512 + 513 + ret = __device_set_driver_override(dev, buf, count); 514 + if (ret) 515 + return ret; 516 + 517 + return count; 518 + } 519 + 520 + static ssize_t driver_override_show(struct device *dev, 521 + struct device_attribute *attr, char *buf) 522 + { 523 + guard(spinlock)(&dev->driver_override.lock); 524 + return sysfs_emit(buf, "%s\n", dev->driver_override.name); 525 + } 526 + static DEVICE_ATTR_RW(driver_override); 527 + 528 + static struct attribute *driver_override_dev_attrs[] = { 529 + &dev_attr_driver_override.attr, 530 + NULL, 531 + }; 532 + 533 + static const struct attribute_group driver_override_dev_group = { 534 + .attrs = driver_override_dev_attrs, 535 + }; 536 + 507 537 /** 508 538 * bus_add_device - add device to bus 509 539 * @dev: device being added ··· 567 537 if (error) 568 538 goto out_put; 569 539 540 + if (dev->bus->driver_override) { 541 + error = device_add_group(dev, &driver_override_dev_group); 542 + if (error) 543 + goto out_groups; 544 + } 545 + 570 546 error = sysfs_create_link(&sp->devices_kset->kobj, &dev->kobj, dev_name(dev)); 571 547 if (error) 572 - goto out_groups; 548 + goto out_override; 573 549 574 550 error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem"); 575 551 if (error) ··· 586 550 587 551 out_subsys: 588 552 sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev)); 553 + out_override: 554 + if (dev->bus->driver_override) 555 + device_remove_group(dev, &driver_override_dev_group); 589 556 out_groups: 590 557 device_remove_groups(dev, sp->bus->dev_groups); 591 558 out_put: ··· 646 607 647 608 sysfs_remove_link(&dev->kobj, "subsystem"); 648 609 sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev)); 610 + if (dev->bus->driver_override) 611 + device_remove_group(dev, &driver_override_dev_group); 649 612 device_remove_groups(dev, dev->bus->dev_groups); 650 613 if (klist_node_attached(&dev->p->knode_bus)) 651 614 klist_del(&dev->p->knode_bus);
+2
drivers/base/core.c
··· 2556 2556 devres_release_all(dev); 2557 2557 2558 2558 kfree(dev->dma_range_map); 2559 + kfree(dev->driver_override.name); 2559 2560 2560 2561 if (dev->release) 2561 2562 dev->release(dev); ··· 3160 3159 kobject_init(&dev->kobj, &device_ktype); 3161 3160 INIT_LIST_HEAD(&dev->dma_pools); 3162 3161 mutex_init(&dev->mutex); 3162 + spin_lock_init(&dev->driver_override.lock); 3163 3163 lockdep_set_novalidate_class(&dev->mutex); 3164 3164 spin_lock_init(&dev->devres_lock); 3165 3165 INIT_LIST_HEAD(&dev->devres_head);
+60
drivers/base/dd.c
··· 381 381 } 382 382 __exitcall(deferred_probe_exit); 383 383 384 + int __device_set_driver_override(struct device *dev, const char *s, size_t len) 385 + { 386 + const char *new, *old; 387 + char *cp; 388 + 389 + if (!s) 390 + return -EINVAL; 391 + 392 + /* 393 + * The stored value will be used in sysfs show callback (sysfs_emit()), 394 + * which has a length limit of PAGE_SIZE and adds a trailing newline. 395 + * Thus we can store one character less to avoid truncation during sysfs 396 + * show. 397 + */ 398 + if (len >= (PAGE_SIZE - 1)) 399 + return -EINVAL; 400 + 401 + /* 402 + * Compute the real length of the string in case userspace sends us a 403 + * bunch of \0 characters like python likes to do. 404 + */ 405 + len = strlen(s); 406 + 407 + if (!len) { 408 + /* Empty string passed - clear override */ 409 + spin_lock(&dev->driver_override.lock); 410 + old = dev->driver_override.name; 411 + dev->driver_override.name = NULL; 412 + spin_unlock(&dev->driver_override.lock); 413 + kfree(old); 414 + 415 + return 0; 416 + } 417 + 418 + cp = strnchr(s, len, '\n'); 419 + if (cp) 420 + len = cp - s; 421 + 422 + new = kstrndup(s, len, GFP_KERNEL); 423 + if (!new) 424 + return -ENOMEM; 425 + 426 + spin_lock(&dev->driver_override.lock); 427 + old = dev->driver_override.name; 428 + if (cp != s) { 429 + dev->driver_override.name = new; 430 + spin_unlock(&dev->driver_override.lock); 431 + } else { 432 + /* "\n" passed - clear override */ 433 + dev->driver_override.name = NULL; 434 + spin_unlock(&dev->driver_override.lock); 435 + 436 + kfree(new); 437 + } 438 + kfree(old); 439 + 440 + return 0; 441 + } 442 + EXPORT_SYMBOL_GPL(__device_set_driver_override); 443 + 384 444 /** 385 445 * device_is_bound() - Check if device is bound to a driver 386 446 * @dev: device to check
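The store path above accepts a driver name with an optional trailing newline, and treats an empty string or a leading "\n" as a request to clear the override. A runnable model of just those parsing rules (strndup() is POSIX):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns a copy of the override name, or NULL when the input clears it. */
static char *parse_override(const char *s)
{
	size_t len = strlen(s);
	const char *nl = strchr(s, '\n');

	if (nl)
		len = (size_t)(nl - s);
	if (!len)
		return NULL;	/* "" or a leading '\n' clears the override */
	return strndup(s, len);
}

int main(void)
{
	const char *inputs[] = { "vfio-pci\n", "vfio-pci", "\n", "" };

	for (int i = 0; i < 4; i++) {
		char *name = parse_override(inputs[i]);

		printf("-> %s\n", name ? name : "(cleared)");
		free(name);
	}
	return 0;
}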
+5 -32
drivers/base/platform.c
··· 603 603 kfree(pa->pdev.dev.platform_data); 604 604 kfree(pa->pdev.mfd_cell); 605 605 kfree(pa->pdev.resource); 606 - kfree(pa->pdev.driver_override); 607 606 kfree(pa); 608 607 } 609 608 ··· 1305 1306 } 1306 1307 static DEVICE_ATTR_RO(numa_node); 1307 1308 1308 - static ssize_t driver_override_show(struct device *dev, 1309 - struct device_attribute *attr, char *buf) 1310 - { 1311 - struct platform_device *pdev = to_platform_device(dev); 1312 - ssize_t len; 1313 - 1314 - device_lock(dev); 1315 - len = sysfs_emit(buf, "%s\n", pdev->driver_override); 1316 - device_unlock(dev); 1317 - 1318 - return len; 1319 - } 1320 - 1321 - static ssize_t driver_override_store(struct device *dev, 1322 - struct device_attribute *attr, 1323 - const char *buf, size_t count) 1324 - { 1325 - struct platform_device *pdev = to_platform_device(dev); 1326 - int ret; 1327 - 1328 - ret = driver_set_override(dev, &pdev->driver_override, buf, count); 1329 - if (ret) 1330 - return ret; 1331 - 1332 - return count; 1333 - } 1334 - static DEVICE_ATTR_RW(driver_override); 1335 - 1336 1309 static struct attribute *platform_dev_attrs[] = { 1337 1310 &dev_attr_modalias.attr, 1338 1311 &dev_attr_numa_node.attr, 1339 - &dev_attr_driver_override.attr, 1340 1312 NULL, 1341 1313 }; 1342 1314 ··· 1347 1377 { 1348 1378 struct platform_device *pdev = to_platform_device(dev); 1349 1379 struct platform_driver *pdrv = to_platform_driver(drv); 1380 + int ret; 1350 1381 1351 1382 /* When driver_override is set, only bind to the matching driver */ 1352 - if (pdev->driver_override) 1353 - return !strcmp(pdev->driver_override, drv->name); 1383 + ret = device_match_driver_override(dev, drv); 1384 + if (ret >= 0) 1385 + return ret; 1354 1386 1355 1387 /* Attempt an OF style match first */ 1356 1388 if (of_driver_match_device(dev, drv)) ··· 1488 1516 const struct bus_type platform_bus_type = { 1489 1517 .name = "platform", 1490 1518 .dev_groups = platform_dev_groups, 1519 + .driver_override = true, 1491 1520 .match = platform_match, 1492 1521 .uevent = platform_uevent, 1493 1522 .probe = platform_probe,
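With the override now stored in struct device, a bus that sets .driver_override = true only needs its ->match() to consult the generic helper first. A sketch for a hypothetical bus, assuming the semantics visible in the hunk above: a negative return means no override is set, otherwise the answer is final.

#include <linux/device.h>
#include <linux/string.h>

/* Hypothetical bus match: the override, when set, decides alone. */
static int mybus_match(struct device *dev, struct device_driver *drv)
{
	int ret = device_match_driver_override(dev, drv);

	if (ret >= 0)
		return ret;

	/* No override set - fall back to a plain name match. */
	return !strcmp(dev_name(dev), drv->name);
}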
+1
drivers/base/power/runtime.c
··· 1895 1895 void pm_runtime_remove(struct device *dev) 1896 1896 { 1897 1897 __pm_runtime_disable(dev, false); 1898 + flush_work(&dev->power.work); 1898 1899 pm_runtime_reinit(dev); 1899 1900 } 1900 1901
+12 -4
drivers/block/ublk_drv.c
··· 4443 4443 4444 4444 /* Skip partition scan if disabled by user */ 4445 4445 if (ub->dev_info.flags & UBLK_F_NO_AUTO_PART_SCAN) { 4446 - clear_bit(GD_SUPPRESS_PART_SCAN, &disk->state); 4446 + /* Not clear for unprivileged daemons, see comment above */ 4447 + if (!ub->unprivileged_daemons) 4448 + clear_bit(GD_SUPPRESS_PART_SCAN, &disk->state); 4447 4449 } else { 4448 4450 /* Schedule async partition scan for trusted daemons */ 4449 4451 if (!ub->unprivileged_daemons) ··· 5008 5006 return 0; 5009 5007 } 5010 5008 5011 - static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header) 5009 + static int ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header) 5012 5010 { 5013 5011 struct ublk_param_basic *p = &ub->params.basic; 5014 5012 u64 new_size = header->data[0]; 5013 + int ret = 0; 5015 5014 5016 5015 mutex_lock(&ub->mutex); 5016 + if (!ub->ub_disk) { 5017 + ret = -ENODEV; 5018 + goto out; 5019 + } 5017 5020 p->dev_sectors = new_size; 5018 5021 set_capacity_and_notify(ub->ub_disk, p->dev_sectors); 5022 + out: 5019 5023 mutex_unlock(&ub->mutex); 5024 + return ret; 5020 5025 } 5021 5026 5022 5027 struct count_busy { ··· 5344 5335 ret = ublk_ctrl_end_recovery(ub, &header); 5345 5336 break; 5346 5337 case UBLK_CMD_UPDATE_SIZE: 5347 - ublk_ctrl_set_size(ub, &header); 5348 - ret = 0; 5338 + ret = ublk_ctrl_set_size(ub, &header); 5349 5339 break; 5350 5340 case UBLK_CMD_QUIESCE_DEV: 5351 5341 ret = ublk_ctrl_quiesce_dev(ub, &header);
+12 -12
drivers/block/zram/zram_drv.c
··· 549 549 return ret; 550 550 } 551 551 552 - static ssize_t writeback_compressed_store(struct device *dev, 552 + static ssize_t compressed_writeback_store(struct device *dev, 553 553 struct device_attribute *attr, 554 554 const char *buf, size_t len) 555 555 { ··· 564 564 return -EBUSY; 565 565 } 566 566 567 - zram->wb_compressed = val; 567 + zram->compressed_wb = val; 568 568 569 569 return len; 570 570 } 571 571 572 - static ssize_t writeback_compressed_show(struct device *dev, 572 + static ssize_t compressed_writeback_show(struct device *dev, 573 573 struct device_attribute *attr, 574 574 char *buf) 575 575 { ··· 577 577 struct zram *zram = dev_to_zram(dev); 578 578 579 579 guard(rwsem_read)(&zram->dev_lock); 580 - val = zram->wb_compressed; 580 + val = zram->compressed_wb; 581 581 582 582 return sysfs_emit(buf, "%d\n", val); 583 583 } ··· 946 946 goto out; 947 947 } 948 948 949 - if (zram->wb_compressed) { 949 + if (zram->compressed_wb) { 950 950 /* 951 951 * ZRAM_WB slots get freed, we need to preserve data required 952 952 * for read decompression. ··· 960 960 set_slot_flag(zram, index, ZRAM_WB); 961 961 set_slot_handle(zram, index, req->blk_idx); 962 962 963 - if (zram->wb_compressed) { 963 + if (zram->compressed_wb) { 964 964 if (huge) 965 965 set_slot_flag(zram, index, ZRAM_HUGE); 966 966 set_slot_size(zram, index, size); ··· 1100 1100 */ 1101 1101 if (!test_slot_flag(zram, index, ZRAM_PP_SLOT)) 1102 1102 goto next; 1103 - if (zram->wb_compressed) 1103 + if (zram->compressed_wb) 1104 1104 err = read_from_zspool_raw(zram, req->page, index); 1105 1105 else 1106 1106 err = read_from_zspool(zram, req->page, index); ··· 1429 1429 * 1430 1430 * Keep the existing behavior for now. 1431 1431 */ 1432 - if (zram->wb_compressed == false) { 1432 + if (zram->compressed_wb == false) { 1433 1433 /* No decompression needed, complete the parent IO */ 1434 1434 bio_endio(req->parent); 1435 1435 bio_put(bio); ··· 1508 1508 flush_work(&req.work); 1509 1509 destroy_work_on_stack(&req.work); 1510 1510 1511 - if (req.error || zram->wb_compressed == false) 1511 + if (req.error || zram->compressed_wb == false) 1512 1512 return req.error; 1513 1513 1514 1514 return decompress_bdev_page(zram, page, index); ··· 3007 3007 static DEVICE_ATTR_RW(writeback_limit); 3008 3008 static DEVICE_ATTR_RW(writeback_limit_enable); 3009 3009 static DEVICE_ATTR_RW(writeback_batch_size); 3010 - static DEVICE_ATTR_RW(writeback_compressed); 3010 + static DEVICE_ATTR_RW(compressed_writeback); 3011 3011 #endif 3012 3012 #ifdef CONFIG_ZRAM_MULTI_COMP 3013 3013 static DEVICE_ATTR_RW(recomp_algorithm); ··· 3031 3031 &dev_attr_writeback_limit.attr, 3032 3032 &dev_attr_writeback_limit_enable.attr, 3033 3033 &dev_attr_writeback_batch_size.attr, 3034 - &dev_attr_writeback_compressed.attr, 3034 + &dev_attr_compressed_writeback.attr, 3035 3035 #endif 3036 3036 &dev_attr_io_stat.attr, 3037 3037 &dev_attr_mm_stat.attr, ··· 3091 3091 init_rwsem(&zram->dev_lock); 3092 3092 #ifdef CONFIG_ZRAM_WRITEBACK 3093 3093 zram->wb_batch_size = 32; 3094 - zram->wb_compressed = false; 3094 + zram->compressed_wb = false; 3095 3095 #endif 3096 3096 3097 3097 /* gendisk structure */
+1 -1
drivers/block/zram/zram_drv.h
··· 133 133 #ifdef CONFIG_ZRAM_WRITEBACK 134 134 struct file *backing_dev; 135 135 bool wb_limit_enable; 136 - bool wb_compressed; 136 + bool compressed_wb; 137 137 u32 wb_batch_size; 138 138 u64 bd_wb_limit; 139 139 struct block_device *bdev;
+2
drivers/bluetooth/btqca.c
··· 787 787 */ 788 788 if (soc_type == QCA_WCN3988) 789 789 rom_ver = ((soc_ver & 0x00000f00) >> 0x05) | (soc_ver & 0x0000000f); 790 + else if (soc_type == QCA_WCN3998) 791 + rom_ver = ((soc_ver & 0x0000f000) >> 0x07) | (soc_ver & 0x0000000f); 790 792 else 791 793 rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f); 792 794
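Each SoC generation packs its ROM version differently; the WCN3998 case added above takes soc_ver bits 15:12 shifted down by 7 and ORs in bits 3:0. In isolation, with a made-up soc_ver:

#include <stdio.h>
#include <stdint.h>

/* The WCN3998 packing added above. */
static uint16_t rom_ver_wcn3998(uint32_t soc_ver)
{
	return ((soc_ver & 0x0000f000) >> 0x07) | (soc_ver & 0x0000000f);
}

int main(void)
{
	uint32_t soc_ver = 0x00004021;	/* made-up example value */

	printf("rom_ver=0x%04x\n", rom_ver_wcn3998(soc_ver));	/* 0x0081 */
	return 0;
}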
+2 -2
drivers/bus/simple-pm-bus.c
··· 36 36 * that's not listed in simple_pm_bus_of_match. We don't want to do any 37 37 * of the simple-pm-bus tasks for these devices, so return early. 38 38 */ 39 - if (pdev->driver_override) 39 + if (device_has_driver_override(&pdev->dev)) 40 40 return 0; 41 41 42 42 match = of_match_device(dev->driver->of_match_table, dev); ··· 78 78 { 79 79 const void *data = of_device_get_match_data(&pdev->dev); 80 80 81 - if (pdev->driver_override || data) 81 + if (device_has_driver_override(&pdev->dev) || data) 82 82 return; 83 83 84 84 dev_dbg(&pdev->dev, "%s\n", __func__);
+2 -2
drivers/cache/ax45mp_cache.c
··· 178 178 179 179 static int __init ax45mp_cache_init(void) 180 180 { 181 - struct device_node *np; 182 181 struct resource res; 183 182 int ret; 184 183 185 - np = of_find_matching_node(NULL, ax45mp_cache_ids); 184 + struct device_node *np __free(device_node) = 185 + of_find_matching_node(NULL, ax45mp_cache_ids); 186 186 if (!of_device_is_available(np)) 187 187 return -ENODEV; 188 188
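The conversion above relies on scope-based cleanup from <linux/cleanup.h>: the __free(device_node) annotation runs of_node_put() automatically when np leaves scope, so no return path needs an explicit put. A minimal kernel-style sketch with an illustrative compatible string:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/of.h>

static int probe_example_cache(void)
{
	struct device_node *np __free(device_node) =
		of_find_compatible_node(NULL, NULL, "vendor,example-cache");

	if (!of_device_is_available(np))
		return -ENODEV;	/* reference dropped automatically */

	/* ... use np ... */
	return 0;		/* and dropped here as well */
}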
+1 -2
drivers/clk/imx/clk-scu.c
··· 706 706 if (ret) 707 707 goto put_device; 708 708 709 - ret = driver_set_override(&pdev->dev, &pdev->driver_override, 710 - "imx-scu-clk", strlen("imx-scu-clk")); 709 + ret = device_set_driver_override(&pdev->dev, "imx-scu-clk"); 711 710 if (ret) 712 711 goto put_device; 713 712
-10
drivers/cpuidle/cpuidle.c
··· 359 359 int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, 360 360 bool *stop_tick) 361 361 { 362 - /* 363 - * If there is only a single idle state (or none), there is nothing 364 - * meaningful for the governor to choose. Skip the governor and 365 - * always use state 0 with the tick running. 366 - */ 367 - if (drv->state_count <= 1) { 368 - *stop_tick = false; 369 - return 0; 370 - } 371 - 372 362 return cpuidle_curr_governor->select(drv, dev, stop_tick); 373 363 } 374 364
+1 -3
drivers/crypto/ccp/sev-dev.c
··· 2408 2408 * in Firmware state on failure. Use snp_reclaim_pages() to 2409 2409 * transition either case back to Hypervisor-owned state. 2410 2410 */ 2411 - if (snp_reclaim_pages(__pa(data), 1, true)) { 2412 - snp_leak_pages(__page_to_pfn(status_page), 1); 2411 + if (snp_reclaim_pages(__pa(data), 1, true)) 2413 2412 return -EFAULT; 2414 - } 2415 2413 } 2416 2414 2417 2415 if (ret)
+7
drivers/crypto/padlock-sha.c
··· 332 332 if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN)) 333 333 return -ENODEV; 334 334 335 + /* 336 + * Skip family 0x07 and newer used by Zhaoxin processors, 337 + * as the driver's self-tests fail on these CPUs. 338 + */ 339 + if (c->x86 >= 0x07) 340 + return -ENODEV; 341 + 335 342 /* Register the newly added algorithm module if on * 336 343 * VIA Nano processor, or else just do as before */ 337 344 if (c->x86_model < 0x0f) {
+3 -2
drivers/firewire/net.c
··· 257 257 memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len); 258 258 } 259 259 260 - static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) 260 + static int fwnet_header_parse(const struct sk_buff *skb, const struct net_device *dev, 261 + unsigned char *haddr) 261 262 { 262 - memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN); 263 + memcpy(haddr, dev->dev_addr, FWNET_ALEN); 263 264 264 265 return FWNET_ALEN; 265 266 }
+4 -4
drivers/firmware/arm_ffa/driver.c
··· 205 205 return 0; 206 206 } 207 207 208 - static int ffa_rxtx_unmap(u16 vm_id) 208 + static int ffa_rxtx_unmap(void) 209 209 { 210 210 ffa_value_t ret; 211 211 212 212 invoke_ffa_fn((ffa_value_t){ 213 - .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0), 213 + .a0 = FFA_RXTX_UNMAP, 214 214 }, &ret); 215 215 216 216 if (ret.a0 == FFA_ERROR) ··· 2097 2097 2098 2098 pr_err("failed to setup partitions\n"); 2099 2099 ffa_notifications_cleanup(); 2100 - ffa_rxtx_unmap(drv_info->vm_id); 2100 + ffa_rxtx_unmap(); 2101 2101 free_pages: 2102 2102 if (drv_info->tx_buffer) 2103 2103 free_pages_exact(drv_info->tx_buffer, rxtx_bufsz); ··· 2112 2112 { 2113 2113 ffa_notifications_cleanup(); 2114 2114 ffa_partitions_cleanup(); 2115 - ffa_rxtx_unmap(drv_info->vm_id); 2115 + ffa_rxtx_unmap(); 2116 2116 free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz); 2117 2117 free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz); 2118 2118 kfree(drv_info);
+2 -2
drivers/firmware/arm_scmi/notify.c
··· 1066 1066 * since at creation time we usually want to have all setup and ready before 1067 1067 * events really start flowing. 1068 1068 * 1069 - * Return: A properly refcounted handler on Success, NULL on Failure 1069 + * Return: A properly refcounted handler on Success, ERR_PTR on Failure 1070 1070 */ 1071 1071 static inline struct scmi_event_handler * 1072 1072 __scmi_event_handler_get_ops(struct scmi_notify_instance *ni, ··· 1113 1113 } 1114 1114 mutex_unlock(&ni->pending_mtx); 1115 1115 1116 - return hndl; 1116 + return hndl ?: ERR_PTR(-ENODEV); 1117 1117 } 1118 1118 1119 1119 static struct scmi_event_handler *
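The `hndl ?: ERR_PTR(-ENODEV)` return adopts the kernel's encoded-error-pointer convention, so callers learn why the lookup failed instead of getting a bare NULL. An illustrative sketch; foo, foo_find() and foo_get() are hypothetical names:

    #include <linux/err.h>

    struct foo;
    struct foo *foo_find(int id);               /* hypothetical lookup */

    static struct foo *foo_get(int id)
    {
            struct foo *f = foo_find(id);

            /* encode the errno in the pointer rather than returning NULL */
            return f ?: ERR_PTR(-ENODEV);
    }

    static int foo_use(void)
    {
            struct foo *f = foo_get(3);

            if (IS_ERR(f))
                    return PTR_ERR(f);          /* propagate the real reason */

            return 0;
    }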
+2 -2
drivers/firmware/arm_scmi/protocols.h
··· 189 189 190 190 /** 191 191 * struct scmi_iterator_state - Iterator current state descriptor 192 - * @desc_index: Starting index for the current mulit-part request. 192 + * @desc_index: Starting index for the current multi-part request. 193 193 * @num_returned: Number of returned items in the last multi-part reply. 194 194 * @num_remaining: Number of remaining items in the multi-part message. 195 195 * @max_resources: Maximum acceptable number of items, configured by the caller 196 196 * depending on the underlying resources that it is querying. 197 197 * @loop_idx: The iterator loop index in the current multi-part reply. 198 - * @rx_len: Size in bytes of the currenly processed message; it can be used by 198 + * @rx_len: Size in bytes of the currently processed message; it can be used by 199 199 * the user of the iterator to verify a reply size. 200 200 * @priv: Optional pointer to some additional state-related private data setup 201 201 * by the caller during the iterations.
+3 -2
drivers/firmware/arm_scpi.c
··· 18 18 19 19 #include <linux/bitmap.h> 20 20 #include <linux/bitfield.h> 21 + #include <linux/cleanup.h> 21 22 #include <linux/device.h> 22 23 #include <linux/err.h> 23 24 #include <linux/export.h> ··· 941 940 int idx = scpi_drvinfo->num_chans; 942 941 struct scpi_chan *pchan = scpi_drvinfo->channels + idx; 943 942 struct mbox_client *cl = &pchan->cl; 944 - struct device_node *shmem = of_parse_phandle(np, "shmem", idx); 943 + struct device_node *shmem __free(device_node) = 944 + of_parse_phandle(np, "shmem", idx); 945 945 946 946 if (!of_match_node(shmem_of_match, shmem)) 947 947 return -ENXIO; 948 948 949 949 ret = of_address_to_resource(shmem, 0, &res); 950 - of_node_put(shmem); 951 950 if (ret) { 952 951 dev_err(dev, "failed to get SCPI payload mem resource\n"); 953 952 return ret;
+18 -6
drivers/firmware/cirrus/cs_dsp.c
··· 1610 1610 region_name); 1611 1611 1612 1612 if (reg) { 1613 + /* 1614 + * Although we expect the underlying bus does not require 1615 + * physically-contiguous buffers, we pessimistically use 1616 + * a temporary buffer instead of trusting that the 1617 + * alignment of region->data is ok. 1618 + */ 1613 1619 region_len = le32_to_cpu(region->len); 1614 1620 if (region_len > buf_len) { 1615 1621 buf_len = round_up(region_len, PAGE_SIZE); 1616 - kfree(buf); 1617 - buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA); 1622 + vfree(buf); 1623 + buf = vmalloc(buf_len); 1618 1624 if (!buf) { 1619 1625 ret = -ENOMEM; 1620 1626 goto out_fw; ··· 1649 1643 1650 1644 ret = 0; 1651 1645 out_fw: 1652 - kfree(buf); 1646 + vfree(buf); 1653 1647 1654 1648 if (ret == -EOVERFLOW) 1655 1649 cs_dsp_err(dsp, "%s: file content overflows file data\n", file); ··· 2337 2331 } 2338 2332 2339 2333 if (reg) { 2334 + /* 2335 + * Although we expect the underlying bus does not require 2336 + * physically-contiguous buffers, we pessimistically use 2337 + * a temporary buffer instead of trusting that the 2338 + * alignment of blk->data is ok. 2339 + */ 2340 2340 region_len = le32_to_cpu(blk->len); 2341 2341 if (region_len > buf_len) { 2342 2342 buf_len = round_up(region_len, PAGE_SIZE); 2343 - kfree(buf); 2344 - buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA); 2343 + vfree(buf); 2344 + buf = vmalloc(buf_len); 2345 2345 if (!buf) { 2346 2346 ret = -ENOMEM; 2347 2347 goto out_fw; ··· 2378 2366 2379 2367 ret = 0; 2380 2368 out_fw: 2381 - kfree(buf); 2369 + vfree(buf); 2382 2370 2383 2371 if (ret == -EOVERFLOW) 2384 2372 cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
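The cs_dsp hunks trade kmalloc(GFP_KERNEL | GFP_DMA) for vmalloc(): the bounce buffer can be large and only has to be virtually contiguous. A sketch of the grow-and-reuse idiom used above, with next_region_len() and load_block() as hypothetical stand-ins:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    size_t next_region_len(void);               /* hypothetical */
    void load_block(void *buf, size_t len);     /* hypothetical */

    static int load_all_regions(void)
    {
            void *buf = NULL;
            size_t buf_len = 0, need;

            while ((need = next_region_len())) {
                    if (need > buf_len) {
                            /* grow in whole pages so reallocation is rare */
                            buf_len = round_up(need, PAGE_SIZE);
                            vfree(buf);             /* vfree(NULL) is a no-op */
                            buf = vmalloc(buf_len); /* virtually contiguous only */
                            if (!buf)
                                    return -ENOMEM;
                    }
                    load_block(buf, need);
            }

            vfree(buf);
            return 0;
    }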
+2
drivers/firmware/stratix10-rsu.c
··· 768 768 rsu_async_status_callback); 769 769 if (ret) { 770 770 dev_err(dev, "Error, getting RSU status %i\n", ret); 771 + stratix10_svc_remove_async_client(priv->chan); 771 772 stratix10_svc_free_channel(priv->chan); 773 + return ret; 772 774 } 773 775 774 776 /* get DCMF version from firmware */
+126 -102
drivers/firmware/stratix10-svc.c
··· 37 37 * service layer will return error to FPGA manager when timeout occurs, 38 38 * timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC. 39 39 */ 40 - #define SVC_NUM_DATA_IN_FIFO 32 40 + #define SVC_NUM_DATA_IN_FIFO 8 41 41 #define SVC_NUM_CHANNEL 4 42 - #define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200 42 + #define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 2000 43 43 #define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30 44 44 #define BYTE_TO_WORD_SIZE 4 45 45 46 46 /* stratix10 service layer clients */ 47 47 #define STRATIX10_RSU "stratix10-rsu" 48 - #define INTEL_FCS "intel-fcs" 49 48 50 49 /* Maximum number of SDM client IDs. */ 51 50 #define MAX_SDM_CLIENT_IDS 16 ··· 104 105 /** 105 106 * struct stratix10_svc - svc private data 106 107 * @stratix10_svc_rsu: pointer to stratix10 RSU device 107 - * @intel_svc_fcs: pointer to the FCS device 108 108 */ 109 109 struct stratix10_svc { 110 110 struct platform_device *stratix10_svc_rsu; 111 - struct platform_device *intel_svc_fcs; 112 111 }; 113 112 114 113 /** ··· 248 251 * @num_active_client: number of active service client 249 252 * @node: list management 250 253 * @genpool: memory pool pointing to the memory region 251 - * @task: pointer to the thread task which handles SMC or HVC call 252 - * @svc_fifo: a queue for storing service message data 253 254 * @complete_status: state for completion 254 - * @svc_fifo_lock: protect access to service message data queue 255 255 * @invoke_fn: function to issue secure monitor call or hypervisor call 256 256 * @svc: manages the list of client svc drivers 257 + * @sdm_lock: only allows a single command single response to SDM 257 258 * @actrl: async control structure 258 259 * 259 260 * This struct is used to create communication channels for service clients, to ··· 264 269 int num_active_client; 265 270 struct list_head node; 266 271 struct gen_pool *genpool; 267 - struct task_struct *task; 268 - struct kfifo svc_fifo; 269 272 struct completion complete_status; 270 - spinlock_t svc_fifo_lock; 271 273 svc_invoke_fn *invoke_fn; 272 274 struct stratix10_svc *svc; 275 + struct mutex sdm_lock; 273 276 struct stratix10_async_ctrl actrl; 274 277 }; 275 278 ··· 276 283 * @ctrl: pointer to service controller which is the provider of this channel 277 284 * @scl: pointer to service client which owns the channel 278 285 * @name: service client name associated with the channel 286 + * @task: pointer to the thread task which handles SMC or HVC call 287 + * @svc_fifo: a queue for storing service message data (separate fifo for every channel) 288 + * @svc_fifo_lock: protect access to service message data queue (locking pending fifo) 279 289 * @lock: protect access to the channel 280 290 * @async_chan: reference to asynchronous channel object for this channel 281 291 * ··· 289 293 struct stratix10_svc_controller *ctrl; 290 294 struct stratix10_svc_client *scl; 291 295 char *name; 296 + struct task_struct *task; 297 + struct kfifo svc_fifo; 298 + spinlock_t svc_fifo_lock; 292 299 spinlock_t lock; 293 300 struct stratix10_async_chan *async_chan; 294 301 }; ··· 526 527 */ 527 528 static int svc_normal_to_secure_thread(void *data) 528 529 { 529 - struct stratix10_svc_controller 530 - *ctrl = (struct stratix10_svc_controller *)data; 531 - struct stratix10_svc_data *pdata; 532 - struct stratix10_svc_cb_data *cbdata; 530 + struct stratix10_svc_chan *chan = (struct stratix10_svc_chan *)data; 531 + struct stratix10_svc_controller *ctrl = chan->ctrl; 532 + struct stratix10_svc_data *pdata = NULL; 533 + struct stratix10_svc_cb_data 
*cbdata = NULL; 533 534 struct arm_smccc_res res; 534 535 unsigned long a0, a1, a2, a3, a4, a5, a6, a7; 535 536 int ret_fifo = 0; ··· 554 555 a6 = 0; 555 556 a7 = 0; 556 557 557 - pr_debug("smc_hvc_shm_thread is running\n"); 558 + pr_debug("%s: %s: Thread is running!\n", __func__, chan->name); 558 559 559 560 while (!kthread_should_stop()) { 560 - ret_fifo = kfifo_out_spinlocked(&ctrl->svc_fifo, 561 + ret_fifo = kfifo_out_spinlocked(&chan->svc_fifo, 561 562 pdata, sizeof(*pdata), 562 - &ctrl->svc_fifo_lock); 563 + &chan->svc_fifo_lock); 563 564 564 565 if (!ret_fifo) 565 566 continue; ··· 568 569 (unsigned int)pdata->paddr, pdata->command, 569 570 (unsigned int)pdata->size); 570 571 572 + /* SDM can only process one command at a time */ 573 + pr_debug("%s: %s: Thread is waiting for mutex!\n", 574 + __func__, chan->name); 575 + if (mutex_lock_interruptible(&ctrl->sdm_lock)) { 576 + /* item already dequeued; notify client to unblock it */ 577 + cbdata->status = BIT(SVC_STATUS_ERROR); 578 + cbdata->kaddr1 = NULL; 579 + cbdata->kaddr2 = NULL; 580 + cbdata->kaddr3 = NULL; 581 + if (pdata->chan->scl) 582 + pdata->chan->scl->receive_cb(pdata->chan->scl, 583 + cbdata); 584 + break; 585 + } 586 + 571 587 switch (pdata->command) { 572 588 case COMMAND_RECONFIG_DATA_CLAIM: 573 589 svc_thread_cmd_data_claim(ctrl, pdata, cbdata); 590 + mutex_unlock(&ctrl->sdm_lock); 574 591 continue; 575 592 case COMMAND_RECONFIG: 576 593 a0 = INTEL_SIP_SMC_FPGA_CONFIG_START; ··· 715 700 break; 716 701 default: 717 702 pr_warn("it shouldn't happen\n"); 718 - break; 703 + mutex_unlock(&ctrl->sdm_lock); 704 + continue; 719 705 } 720 - pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x", 721 - __func__, 706 + pr_debug("%s: %s: before SMC call -- a0=0x%016x a1=0x%016x", 707 + __func__, chan->name, 722 708 (unsigned int)a0, 723 709 (unsigned int)a1); 724 710 pr_debug(" a2=0x%016x\n", (unsigned int)a2); ··· 728 712 pr_debug(" a5=0x%016x\n", (unsigned int)a5); 729 713 ctrl->invoke_fn(a0, a1, a2, a3, a4, a5, a6, a7, &res); 730 714 731 - pr_debug("%s: after SMC call -- res.a0=0x%016x", 732 - __func__, (unsigned int)res.a0); 715 + pr_debug("%s: %s: after SMC call -- res.a0=0x%016x", 716 + __func__, chan->name, (unsigned int)res.a0); 733 717 pr_debug(" res.a1=0x%016x, res.a2=0x%016x", 734 718 (unsigned int)res.a1, (unsigned int)res.a2); 735 719 pr_debug(" res.a3=0x%016x\n", (unsigned int)res.a3); ··· 744 728 cbdata->kaddr2 = NULL; 745 729 cbdata->kaddr3 = NULL; 746 730 pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); 731 + mutex_unlock(&ctrl->sdm_lock); 747 732 continue; 748 733 } 749 734 ··· 818 801 break; 819 802 820 803 } 804 + 805 + mutex_unlock(&ctrl->sdm_lock); 821 806 } 822 807 823 808 kfree(cbdata); ··· 1715 1696 if (!p_data) 1716 1697 return -ENOMEM; 1717 1698 1718 - /* first client will create kernel thread */ 1719 - if (!chan->ctrl->task) { 1720 - chan->ctrl->task = 1721 - kthread_run_on_cpu(svc_normal_to_secure_thread, 1722 - (void *)chan->ctrl, 1723 - cpu, "svc_smc_hvc_thread"); 1724 - if (IS_ERR(chan->ctrl->task)) { 1699 + /* first caller creates the per-channel kthread */ 1700 + if (!chan->task) { 1701 + struct task_struct *task; 1702 + 1703 + task = kthread_run_on_cpu(svc_normal_to_secure_thread, 1704 + (void *)chan, 1705 + cpu, "svc_smc_hvc_thread"); 1706 + if (IS_ERR(task)) { 1725 1707 dev_err(chan->ctrl->dev, 1726 1708 "failed to create svc_smc_hvc_thread\n"); 1727 1709 kfree(p_data); 1728 1710 return -EINVAL; 1729 1711 } 1712 + 1713 + spin_lock(&chan->lock); 1714 + if (chan->task) { 1715 + /* 
another caller won the race; discard our thread */ 1716 + spin_unlock(&chan->lock); 1717 + kthread_stop(task); 1718 + } else { 1719 + chan->task = task; 1720 + spin_unlock(&chan->lock); 1721 + } 1730 1722 } 1731 1723 1732 - pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__, 1733 - p_msg->payload, p_msg->command, 1724 + pr_debug("%s: %s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__, 1725 + chan->name, p_msg->payload, p_msg->command, 1734 1726 (unsigned int)p_msg->payload_length); 1735 1727 1736 1728 if (list_empty(&svc_data_mem)) { ··· 1777 1747 p_data->arg[2] = p_msg->arg[2]; 1778 1748 p_data->size = p_msg->payload_length; 1779 1749 p_data->chan = chan; 1780 - pr_debug("%s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", __func__, 1781 - (unsigned int)p_data->paddr, p_data->command, 1782 - (unsigned int)p_data->size); 1783 - ret = kfifo_in_spinlocked(&chan->ctrl->svc_fifo, p_data, 1750 + pr_debug("%s: %s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", 1751 + __func__, 1752 + chan->name, 1753 + (unsigned int)p_data->paddr, 1754 + p_data->command, 1755 + (unsigned int)p_data->size); 1756 + 1757 + ret = kfifo_in_spinlocked(&chan->svc_fifo, p_data, 1784 1758 sizeof(*p_data), 1785 - &chan->ctrl->svc_fifo_lock); 1759 + &chan->svc_fifo_lock); 1786 1760 1787 1761 kfree(p_data); 1788 1762 ··· 1807 1773 */ 1808 1774 void stratix10_svc_done(struct stratix10_svc_chan *chan) 1809 1775 { 1810 - /* stop thread when thread is running AND only one active client */ 1811 - if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) { 1812 - pr_debug("svc_smc_hvc_shm_thread is stopped\n"); 1813 - kthread_stop(chan->ctrl->task); 1814 - chan->ctrl->task = NULL; 1776 + /* stop thread when thread is running */ 1777 + if (chan->task) { 1778 + pr_debug("%s: %s: svc_smc_hvc_shm_thread is stopping\n", 1779 + __func__, chan->name); 1780 + kthread_stop(chan->task); 1781 + chan->task = NULL; 1815 1782 } 1816 1783 } 1817 1784 EXPORT_SYMBOL_GPL(stratix10_svc_done); ··· 1852 1817 pmem->paddr = pa; 1853 1818 pmem->size = s; 1854 1819 list_add_tail(&pmem->node, &svc_data_mem); 1855 - pr_debug("%s: va=%p, pa=0x%016x\n", __func__, 1856 - pmem->vaddr, (unsigned int)pmem->paddr); 1820 + pr_debug("%s: %s: va=%p, pa=0x%016x\n", __func__, 1821 + chan->name, pmem->vaddr, (unsigned int)pmem->paddr); 1857 1822 1858 1823 return (void *)va; 1859 1824 } ··· 1890 1855 {}, 1891 1856 }; 1892 1857 1858 + static const char * const chan_names[SVC_NUM_CHANNEL] = { 1859 + SVC_CLIENT_FPGA, 1860 + SVC_CLIENT_RSU, 1861 + SVC_CLIENT_FCS, 1862 + SVC_CLIENT_HWMON 1863 + }; 1864 + 1893 1865 static int stratix10_svc_drv_probe(struct platform_device *pdev) 1894 1866 { 1895 1867 struct device *dev = &pdev->dev; ··· 1904 1862 struct stratix10_svc_chan *chans; 1905 1863 struct gen_pool *genpool; 1906 1864 struct stratix10_svc_sh_memory *sh_memory; 1907 - struct stratix10_svc *svc; 1865 + struct stratix10_svc *svc = NULL; 1908 1866 1909 1867 svc_invoke_fn *invoke_fn; 1910 1868 size_t fifo_size; 1911 - int ret; 1869 + int ret, i = 0; 1912 1870 1913 1871 /* get SMC or HVC function */ 1914 1872 invoke_fn = get_invoke_func(dev); ··· 1947 1905 controller->num_active_client = 0; 1948 1906 controller->chans = chans; 1949 1907 controller->genpool = genpool; 1950 - controller->task = NULL; 1951 1908 controller->invoke_fn = invoke_fn; 1909 + INIT_LIST_HEAD(&controller->node); 1952 1910 init_completion(&controller->complete_status); 1953 1911 1954 1912 ret = stratix10_svc_async_init(controller); ··· 1959 1917 } 1960 1918 1961 1919 fifo_size = sizeof(struct 
stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO; 1962 - ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL); 1963 - if (ret) { 1964 - dev_err(dev, "failed to allocate FIFO\n"); 1965 - goto err_async_exit; 1920 + mutex_init(&controller->sdm_lock); 1921 + 1922 + for (i = 0; i < SVC_NUM_CHANNEL; i++) { 1923 + chans[i].scl = NULL; 1924 + chans[i].ctrl = controller; 1925 + chans[i].name = (char *)chan_names[i]; 1926 + spin_lock_init(&chans[i].lock); 1927 + ret = kfifo_alloc(&chans[i].svc_fifo, fifo_size, GFP_KERNEL); 1928 + if (ret) { 1929 + dev_err(dev, "failed to allocate FIFO %d\n", i); 1930 + goto err_free_fifos; 1931 + } 1932 + spin_lock_init(&chans[i].svc_fifo_lock); 1966 1933 } 1967 - spin_lock_init(&controller->svc_fifo_lock); 1968 - 1969 - chans[0].scl = NULL; 1970 - chans[0].ctrl = controller; 1971 - chans[0].name = SVC_CLIENT_FPGA; 1972 - spin_lock_init(&chans[0].lock); 1973 - 1974 - chans[1].scl = NULL; 1975 - chans[1].ctrl = controller; 1976 - chans[1].name = SVC_CLIENT_RSU; 1977 - spin_lock_init(&chans[1].lock); 1978 - 1979 - chans[2].scl = NULL; 1980 - chans[2].ctrl = controller; 1981 - chans[2].name = SVC_CLIENT_FCS; 1982 - spin_lock_init(&chans[2].lock); 1983 - 1984 - chans[3].scl = NULL; 1985 - chans[3].ctrl = controller; 1986 - chans[3].name = SVC_CLIENT_HWMON; 1987 - spin_lock_init(&chans[3].lock); 1988 1934 1989 1935 list_add_tail(&controller->node, &svc_ctrl); 1990 1936 platform_set_drvdata(pdev, controller); ··· 1981 1951 svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL); 1982 1952 if (!svc) { 1983 1953 ret = -ENOMEM; 1984 - goto err_free_kfifo; 1954 + goto err_free_fifos; 1985 1955 } 1986 1956 controller->svc = svc; 1987 1957 ··· 1989 1959 if (!svc->stratix10_svc_rsu) { 1990 1960 dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU); 1991 1961 ret = -ENOMEM; 1992 - goto err_free_kfifo; 1962 + goto err_free_fifos; 1993 1963 } 1994 1964 1995 1965 ret = platform_device_add(svc->stratix10_svc_rsu); 1996 - if (ret) { 1997 - platform_device_put(svc->stratix10_svc_rsu); 1998 - goto err_free_kfifo; 1999 - } 2000 - 2001 - svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1); 2002 - if (!svc->intel_svc_fcs) { 2003 - dev_err(dev, "failed to allocate %s device\n", INTEL_FCS); 2004 - ret = -ENOMEM; 2005 - goto err_unregister_rsu_dev; 2006 - } 2007 - 2008 - ret = platform_device_add(svc->intel_svc_fcs); 2009 - if (ret) { 2010 - platform_device_put(svc->intel_svc_fcs); 2011 - goto err_unregister_rsu_dev; 2012 - } 1966 + if (ret) 1967 + goto err_put_device; 2013 1968 2014 1969 ret = of_platform_default_populate(dev_of_node(dev), NULL, dev); 2015 1970 if (ret) 2016 - goto err_unregister_fcs_dev; 1971 + goto err_unregister_rsu_dev; 2017 1972 2018 1973 pr_info("Intel Service Layer Driver Initialized\n"); 2019 1974 2020 1975 return 0; 2021 1976 2022 - err_unregister_fcs_dev: 2023 - platform_device_unregister(svc->intel_svc_fcs); 2024 1977 err_unregister_rsu_dev: 2025 1978 platform_device_unregister(svc->stratix10_svc_rsu); 2026 - err_free_kfifo: 2027 - kfifo_free(&controller->svc_fifo); 2028 - err_async_exit: 1979 + goto err_free_fifos; 1980 + err_put_device: 1981 + platform_device_put(svc->stratix10_svc_rsu); 1982 + err_free_fifos: 1983 + /* only remove from list if list_add_tail() was reached */ 1984 + if (!list_empty(&controller->node)) 1985 + list_del(&controller->node); 1986 + /* free only the FIFOs that were successfully allocated */ 1987 + while (i--) 1988 + kfifo_free(&chans[i].svc_fifo); 2029 1989 stratix10_svc_async_exit(controller); 2030 1990 err_destroy_pool: 
2031 1991 gen_pool_destroy(genpool); 1992 + 2032 1993 return ret; 2033 1994 } 2034 1995 2035 1996 static void stratix10_svc_drv_remove(struct platform_device *pdev) 2036 1997 { 1998 + int i; 2037 1999 struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); 2038 2000 struct stratix10_svc *svc = ctrl->svc; 2039 2001 ··· 2033 2011 2034 2012 of_platform_depopulate(ctrl->dev); 2035 2013 2036 - platform_device_unregister(svc->intel_svc_fcs); 2037 2014 platform_device_unregister(svc->stratix10_svc_rsu); 2038 2015 2039 - kfifo_free(&ctrl->svc_fifo); 2040 - if (ctrl->task) { 2041 - kthread_stop(ctrl->task); 2042 - ctrl->task = NULL; 2016 + for (i = 0; i < SVC_NUM_CHANNEL; i++) { 2017 + if (ctrl->chans[i].task) { 2018 + kthread_stop(ctrl->chans[i].task); 2019 + ctrl->chans[i].task = NULL; 2020 + } 2021 + kfifo_free(&ctrl->chans[i].svc_fifo); 2043 2022 } 2023 + 2044 2024 if (ctrl->genpool) 2045 2025 gen_pool_destroy(ctrl->genpool); 2046 2026 list_del(&ctrl->node);
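The stratix10-svc rework gives every channel its own kfifo and kthread, while the new sdm_lock mutex keeps the actual SDM transactions one-at-a-time. A stripped-down sketch of the locked-kfifo handoff between client context and a worker thread; struct msg and handle_msg() are illustrative only:

    #include <linux/kfifo.h>
    #include <linux/kthread.h>
    #include <linux/spinlock.h>

    struct msg { u32 cmd; u32 size; };          /* illustrative payload */

    static struct kfifo fifo;                   /* kfifo_alloc()'d at init */
    static DEFINE_SPINLOCK(fifo_lock);

    void handle_msg(const struct msg *m);       /* hypothetical */

    /* producer: client context queues one request */
    static int queue_msg(const struct msg *m)
    {
            unsigned int n;

            n = kfifo_in_spinlocked(&fifo, m, sizeof(*m), &fifo_lock);
            return n == sizeof(*m) ? 0 : -ENOSPC;
    }

    /* consumer: the per-channel worker drains requests one at a time */
    static int worker(void *unused)
    {
            struct msg m;

            while (!kthread_should_stop()) {
                    if (!kfifo_out_spinlocked(&fifo, &m, sizeof(m), &fifo_lock))
                            continue;           /* the real thread paces itself here */
                    handle_msg(&m);
            }

            return 0;
    }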
+4 -3
drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
··· 38 38 /* 39 39 * Table of devices that work with this driver. 40 40 * 41 - * Currently, only one device is known to be used in the 42 - * lpvo_usb_gpib adapter (FTDI 0403:6001). 41 + * Currently, only one device is known to be used in the lpvo_usb_gpib 42 + * adapter (FTDI 0403:6001), but as this device id is already handled by the 43 + * ftdi_sio USB serial driver, the LPVO driver must not bind to it by default. 44 + * 43 45 * If your adapter uses a different chip, insert a line 44 46 * in the following table with proper <Vendor-id>, <Product-id>. 45 47 * ··· 52 50 */ 53 51 54 52 static const struct usb_device_id skel_table[] = { 55 - { USB_DEVICE(0x0403, 0x6001) }, 56 53 { } /* Terminating entry */ 57 54 }; 58 55 MODULE_DEVICE_TABLE(usb, skel_table);
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
··· 36 36 37 37 #define AMDGPU_BO_LIST_MAX_PRIORITY 32u 38 38 #define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1) 39 + #define AMDGPU_BO_LIST_MAX_ENTRIES (128 * 1024) 39 40 40 41 static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu) 41 42 { ··· 188 187 const uint32_t bo_info_size = in->bo_info_size; 189 188 const uint32_t bo_number = in->bo_number; 190 189 struct drm_amdgpu_bo_list_entry *info; 190 + 191 + if (bo_number > AMDGPU_BO_LIST_MAX_ENTRIES) 192 + return -EINVAL; 191 193 192 194 /* copy the handle array from userspace to a kernel buffer */ 193 195 if (likely(info_size == bo_info_size)) {
+13 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 2690 2690 break; 2691 2691 default: 2692 2692 r = amdgpu_discovery_set_ip_blocks(adev); 2693 - if (r) 2693 + if (r) { 2694 + adev->num_ip_blocks = 0; 2694 2695 return r; 2696 + } 2695 2697 break; 2696 2698 } 2697 2699 ··· 3249 3247 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 3250 3248 if (!adev->ip_blocks[i].status.late_initialized) 3251 3249 continue; 3250 + if (!adev->ip_blocks[i].version) 3251 + continue; 3252 3252 /* skip CG for GFX, SDMA on S0ix */ 3253 3253 if (adev->in_s0ix && 3254 3254 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || ··· 3289 3285 for (j = 0; j < adev->num_ip_blocks; j++) { 3290 3286 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 3291 3287 if (!adev->ip_blocks[i].status.late_initialized) 3288 + continue; 3289 + if (!adev->ip_blocks[i].version) 3292 3290 continue; 3293 3291 /* skip PG for GFX, SDMA on S0ix */ 3294 3292 if (adev->in_s0ix && ··· 3499 3493 int i, r; 3500 3494 3501 3495 for (i = 0; i < adev->num_ip_blocks; i++) { 3496 + if (!adev->ip_blocks[i].version) 3497 + continue; 3502 3498 if (!adev->ip_blocks[i].version->funcs->early_fini) 3503 3499 continue; 3504 3500 ··· 3578 3570 if (!adev->ip_blocks[i].status.sw) 3579 3571 continue; 3580 3572 3573 + if (!adev->ip_blocks[i].version) 3574 + continue; 3581 3575 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 3582 3576 amdgpu_ucode_free_bo(adev); 3583 3577 amdgpu_free_static_csa(&adev->virt.csa_obj); ··· 3605 3595 3606 3596 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 3607 3597 if (!adev->ip_blocks[i].status.late_initialized) 3598 + continue; 3599 + if (!adev->ip_blocks[i].version) 3608 3600 continue; 3609 3601 if (adev->ip_blocks[i].version->funcs->late_fini) 3610 3602 adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 83 83 { 84 84 struct amdgpu_device *adev = drm_to_adev(dev); 85 85 86 - if (adev == NULL) 86 + if (adev == NULL || !adev->num_ip_blocks) 87 87 return; 88 88 89 89 amdgpu_unregister_gpu_instance(adev);
+8 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
··· 368 368 369 369 struct drm_property *plane_ctm_property; 370 370 /** 371 - * @shaper_lut_property: Plane property to set pre-blending shaper LUT 372 - * that converts color content before 3D LUT. If 373 - * plane_shaper_tf_property != Identity TF, AMD color module will 371 + * @plane_shaper_lut_property: Plane property to set pre-blending 372 + * shaper LUT that converts color content before 3D LUT. 373 + * If plane_shaper_tf_property != Identity TF, AMD color module will 374 374 * combine the user LUT values with pre-defined TF into the LUT 375 375 * parameters to be programmed. 376 376 */ 377 377 struct drm_property *plane_shaper_lut_property; 378 378 /** 379 - * @shaper_lut_size_property: Plane property for the size of 379 + * @plane_shaper_lut_size_property: Plane property for the size of 380 380 * pre-blending shaper LUT as supported by the driver (read-only). 381 381 */ 382 382 struct drm_property *plane_shaper_lut_size_property; ··· 400 400 */ 401 401 struct drm_property *plane_lut3d_property; 402 402 /** 403 - * @plane_degamma_lut_size_property: Plane property to define the max 404 - * size of 3D LUT as supported by the driver (read-only). The max size 405 - * is the max size of one dimension and, therefore, the max number of 406 - * entries for 3D LUT array is the 3D LUT size cubed; 403 + * @plane_lut3d_size_property: Plane property to define the max size 404 + * of 3D LUT as supported by the driver (read-only). The max size is 405 + * the max size of one dimension and, therefore, the max number of 406 + * entries for 3D LUT array is the 3D LUT size cubed. 407 407 */ 408 408 struct drm_property *plane_lut3d_size_property; 409 409 /**
+6 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 1069 1069 } 1070 1070 1071 1071 /* Prepare a TLB flush fence to be attached to PTs */ 1072 - if (!params->unlocked) { 1072 + /* The check for need_tlb_fence should be dropped once we 1073 + * sort out the issues with KIQ/MES TLB invalidation timeouts. 1074 + */ 1075 + if (!params->unlocked && vm->need_tlb_fence) { 1073 1076 amdgpu_vm_tlb_fence_create(params->adev, vm, fence); 1074 1077 1075 1078 /* Makes sure no PD/PT is freed before the flush */ ··· 2605 2602 ttm_lru_bulk_move_init(&vm->lru_bulk_move); 2606 2603 2607 2604 vm->is_compute_context = false; 2605 + vm->need_tlb_fence = amdgpu_userq_enabled(&adev->ddev); 2608 2606 2609 2607 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2610 2608 AMDGPU_VM_USE_CPU_FOR_GFX); ··· 2743 2739 dma_fence_put(vm->last_update); 2744 2740 vm->last_update = dma_fence_get_stub(); 2745 2741 vm->is_compute_context = true; 2742 + vm->need_tlb_fence = true; 2746 2743 2747 2744 unreserve_bo: 2748 2745 amdgpu_bo_unreserve(vm->root.bo);
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 441 441 struct ttm_lru_bulk_move lru_bulk_move; 442 442 /* Flag to indicate if VM is used for compute */ 443 443 bool is_compute_context; 444 + /* Flag to indicate if VM needs a TLB fence (KFD or KGD) */ 445 + bool need_tlb_fence; 444 446 445 447 /* Memory partition number, -1 means any partition */ 446 448 int8_t mem_id;
+14 -7
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 662 662 } else { 663 663 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 664 664 case IP_VERSION(9, 0, 0): 665 - mmhub_cid = mmhub_client_ids_vega10[cid][rw]; 665 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega10) ? 666 + mmhub_client_ids_vega10[cid][rw] : NULL; 666 667 break; 667 668 case IP_VERSION(9, 3, 0): 668 - mmhub_cid = mmhub_client_ids_vega12[cid][rw]; 669 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega12) ? 670 + mmhub_client_ids_vega12[cid][rw] : NULL; 669 671 break; 670 672 case IP_VERSION(9, 4, 0): 671 - mmhub_cid = mmhub_client_ids_vega20[cid][rw]; 673 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vega20) ? 674 + mmhub_client_ids_vega20[cid][rw] : NULL; 672 675 break; 673 676 case IP_VERSION(9, 4, 1): 674 - mmhub_cid = mmhub_client_ids_arcturus[cid][rw]; 677 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_arcturus) ? 678 + mmhub_client_ids_arcturus[cid][rw] : NULL; 675 679 break; 676 680 case IP_VERSION(9, 1, 0): 677 681 case IP_VERSION(9, 2, 0): 678 - mmhub_cid = mmhub_client_ids_raven[cid][rw]; 682 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_raven) ? 683 + mmhub_client_ids_raven[cid][rw] : NULL; 679 684 break; 680 685 case IP_VERSION(1, 5, 0): 681 686 case IP_VERSION(2, 4, 0): 682 - mmhub_cid = mmhub_client_ids_renoir[cid][rw]; 687 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_renoir) ? 688 + mmhub_client_ids_renoir[cid][rw] : NULL; 683 689 break; 684 690 case IP_VERSION(1, 8, 0): 685 691 case IP_VERSION(9, 4, 2): 686 - mmhub_cid = mmhub_client_ids_aldebaran[cid][rw]; 692 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_aldebaran) ? 693 + mmhub_client_ids_aldebaran[cid][rw] : NULL; 687 694 break; 688 695 default: 689 696 mmhub_cid = NULL;
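The mmhub/gmc fault decoders in this series all gain the same guard: the client id comes from a fault status register, i.e. it is hardware-supplied and untrusted, yet it indexes a fixed string table. The generic shape of the fix, with names[] as a stand-in table:

    #include <linux/device.h>
    #include <linux/kernel.h>

    static const char * const names[] = { "CB", "DB", "IA" };  /* stand-in */

    static void report_fault(struct device *dev, unsigned int cid)
    {
            /* range-check before indexing; out-of-range decodes as NULL */
            const char *name = cid < ARRAY_SIZE(names) ? names[cid] : NULL;

            dev_err(dev, "Faulty client ID: %s (0x%x)\n",
                    name ? name : "unknown", cid);
    }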
+2 -2
drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
··· 129 129 if (!pdev) 130 130 return -EINVAL; 131 131 132 - if (!dev->type->name) { 132 + if (!dev->type || !dev->type->name) { 133 133 drm_dbg(&adev->ddev, "Invalid device type to add\n"); 134 134 goto exit; 135 135 } ··· 165 165 if (!pdev) 166 166 return -EINVAL; 167 167 168 - if (!dev->type->name) { 168 + if (!dev->type || !dev->type->name) { 169 169 drm_dbg(&adev->ddev, "Invalid device type to remove\n"); 170 170 goto exit; 171 171 }
+4 -1
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
··· 731 731 int i; 732 732 struct amdgpu_device *adev = mes->adev; 733 733 union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt; 734 + uint32_t mes_rev = (pipe == AMDGPU_MES_SCHED_PIPE) ? 735 + (mes->sched_version & AMDGPU_MES_VERSION_MASK) : 736 + (mes->kiq_version & AMDGPU_MES_VERSION_MASK); 734 737 735 738 memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt)); 736 739 ··· 788 785 * handling support, other queue will not use the oversubscribe timer. 789 786 * handling mode - 0: disabled; 1: basic version; 2: basic+ version 790 787 */ 791 - mes_set_hw_res_pkt.oversubscription_timer = 50; 788 + mes_set_hw_res_pkt.oversubscription_timer = mes_rev < 0x8b ? 0 : 50; 792 789 mes_set_hw_res_pkt.unmapped_doorbell_handling = 1; 793 790 794 791 if (amdgpu_mes_log_enable) {
+6 -3
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
··· 154 154 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 155 155 case IP_VERSION(2, 0, 0): 156 156 case IP_VERSION(2, 0, 2): 157 - mmhub_cid = mmhub_client_ids_navi1x[cid][rw]; 157 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_navi1x) ? 158 + mmhub_client_ids_navi1x[cid][rw] : NULL; 158 159 break; 159 160 case IP_VERSION(2, 1, 0): 160 161 case IP_VERSION(2, 1, 1): 161 - mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw]; 162 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_sienna_cichlid) ? 163 + mmhub_client_ids_sienna_cichlid[cid][rw] : NULL; 162 164 break; 163 165 case IP_VERSION(2, 1, 2): 164 - mmhub_cid = mmhub_client_ids_beige_goby[cid][rw]; 166 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_beige_goby) ? 167 + mmhub_client_ids_beige_goby[cid][rw] : NULL; 165 168 break; 166 169 default: 167 170 mmhub_cid = NULL;
+2 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
··· 94 94 case IP_VERSION(2, 3, 0): 95 95 case IP_VERSION(2, 4, 0): 96 96 case IP_VERSION(2, 4, 1): 97 - mmhub_cid = mmhub_client_ids_vangogh[cid][rw]; 97 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_vangogh) ? 98 + mmhub_client_ids_vangogh[cid][rw] : NULL; 98 99 break; 99 100 default: 100 101 mmhub_cid = NULL;
+2 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
··· 110 110 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 111 111 case IP_VERSION(3, 0, 0): 112 112 case IP_VERSION(3, 0, 1): 113 - mmhub_cid = mmhub_client_ids_v3_0_0[cid][rw]; 113 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_0_0) ? 114 + mmhub_client_ids_v3_0_0[cid][rw] : NULL; 114 115 break; 115 116 default: 116 117 mmhub_cid = NULL;
+2 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
··· 117 117 118 118 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 119 119 case IP_VERSION(3, 0, 1): 120 - mmhub_cid = mmhub_client_ids_v3_0_1[cid][rw]; 120 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_0_1) ? 121 + mmhub_client_ids_v3_0_1[cid][rw] : NULL; 121 122 break; 122 123 default: 123 124 mmhub_cid = NULL;
+2 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
··· 108 108 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", 109 109 status); 110 110 111 - mmhub_cid = mmhub_client_ids_v3_0_2[cid][rw]; 111 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_0_2) ? 112 + mmhub_client_ids_v3_0_2[cid][rw] : NULL; 112 113 dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", 113 114 mmhub_cid ? mmhub_cid : "unknown", cid); 114 115 dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+2 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
··· 102 102 status); 103 103 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 104 104 case IP_VERSION(4, 1, 0): 105 - mmhub_cid = mmhub_client_ids_v4_1_0[cid][rw]; 105 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v4_1_0) ? 106 + mmhub_client_ids_v4_1_0[cid][rw] : NULL; 106 107 break; 107 108 default: 108 109 mmhub_cid = NULL;
+2 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v4_2_0.c
··· 688 688 status); 689 689 switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 690 690 case IP_VERSION(4, 2, 0): 691 - mmhub_cid = mmhub_client_ids_v4_2_0[cid][rw]; 691 + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v4_2_0) ? 692 + mmhub_client_ids_v4_2_0[cid][rw] : NULL; 692 693 break; 693 694 default: 694 695 mmhub_cid = NULL;
+1
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
··· 593 593 p->queue_size)) { 594 594 pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n", 595 595 p->queue_address, p->queue_size); 596 + amdgpu_bo_unreserve(vm->root.bo); 596 597 return -EFAULT; 597 598 } 598 599
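The added unreserve above plugs a classic leak: an early return between acquire and release. The conventional defensive shape funnels every failure path through a single unwind label; every name in this sketch is hypothetical:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct thing;
    int reserve(struct thing *t);
    bool mapped(struct thing *t);
    int do_work(struct thing *t);
    void unreserve(struct thing *t);

    static int check_and_work(struct thing *t)
    {
            int ret;

            ret = reserve(t);
            if (ret)
                    return ret;     /* nothing held yet, plain return is fine */

            if (!mapped(t)) {
                    ret = -EFAULT;
                    goto out_unreserve;     /* held resource: must unwind */
            }

            ret = do_work(t);

    out_unreserve:
            unreserve(t);
            return ret;
    }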
+3 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 2554 2554 fw_meta_info_params.fw_inst_const = adev->dm.dmub_fw->data + 2555 2555 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2556 2556 PSP_HEADER_BYTES_256; 2557 - fw_meta_info_params.fw_bss_data = region_params.bss_data_size ? adev->dm.dmub_fw->data + 2557 + fw_meta_info_params.fw_bss_data = fw_meta_info_params.bss_data_size ? adev->dm.dmub_fw->data + 2558 2558 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2559 2559 le32_to_cpu(hdr->inst_const_bytes) : NULL; 2560 2560 fw_meta_info_params.custom_psp_footer_size = 0; ··· 13119 13119 u16 min_vfreq; 13120 13120 u16 max_vfreq; 13121 13121 13122 - if (edid == NULL || edid->extensions == 0) 13122 + if (!edid || !edid->extensions) 13123 13123 return; 13124 13124 13125 13125 /* Find DisplayID extension */ ··· 13129 13129 break; 13130 13130 } 13131 13131 13132 - if (edid_ext == NULL) 13132 + if (i == edid->extensions) 13133 13133 return; 13134 13134 13135 13135 while (j < EDID_LENGTH) {
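The DisplayID lookup fix in the same hunk replaces a never-true NULL test with a loop-counter check: a cursor assigned inside a search loop still points at the last candidate after a full scan, so only the index can signal "not found". The idiom in isolation, with a made-up extension type and predicate:

    #include <linux/types.h>

    struct ext { u8 tag; };                     /* made-up extension record */

    static bool match(const struct ext *e)
    {
            return e->tag == 0x70;              /* hypothetical tag value */
    }

    static const struct ext *find_ext(const struct ext *exts, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (match(&exts[i]))
                            break;

            /* exts[i] is valid only when the loop broke early */
            return i == n ? NULL : &exts[i];
    }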
+3 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
··· 37 37 BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | 38 38 BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | 39 39 BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | 40 - BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); 40 + BIT(DRM_COLOROP_1D_CURVE_GAMMA22); 41 41 42 42 const u64 amdgpu_dm_supported_shaper_tfs = 43 43 BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) | 44 44 BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) | 45 45 BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) | 46 - BIT(DRM_COLOROP_1D_CURVE_GAMMA22); 46 + BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); 47 47 48 48 const u64 amdgpu_dm_supported_blnd_tfs = 49 49 BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | 50 50 BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | 51 51 BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | 52 - BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); 52 + BIT(DRM_COLOROP_1D_CURVE_GAMMA22); 53 53 54 54 #define MAX_COLOR_PIPELINE_OPS 10 55 55
+4 -4
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
··· 255 255 BREAK_TO_DEBUGGER(); 256 256 return NULL; 257 257 } 258 + if (ctx->dce_version == DCN_VERSION_2_01) { 259 + dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); 260 + return &clk_mgr->base; 261 + } 258 262 if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) { 259 263 dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); 260 264 return &clk_mgr->base; ··· 269 265 } 270 266 if (ASICREV_IS_BEIGE_GOBY_P(asic_id.hw_internal_rev)) { 271 267 dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); 272 - return &clk_mgr->base; 273 - } 274 - if (ctx->dce_version == DCN_VERSION_2_01) { 275 - dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); 276 268 return &clk_mgr->base; 277 269 } 278 270 dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+5 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
··· 38 38 DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\ 39 39 DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\ 40 40 SR(DISPCLK_FREQ_CHANGE_CNTL),\ 41 - SR(DC_MEM_GLOBAL_PWR_REQ_CNTL) 41 + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\ 42 + SR(MICROSECOND_TIME_BASE_DIV),\ 43 + SR(MILLISECOND_TIME_BASE_DIV),\ 44 + SR(DCCG_GATE_DISABLE_CNTL),\ 45 + SR(DCCG_GATE_DISABLE_CNTL2) 42 46 43 47 #define DCCG_REG_LIST_DCN2() \ 44 48 DCCG_COMMON_REG_LIST_DCN_BASE(),\
+20 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
··· 96 96 dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; 97 97 } 98 98 99 + /* 100 + * On DCN21 S0i3 resume, BIOS programs MICROSECOND_TIME_BASE_DIV to 101 + * 0x00120464 as a marker that golden init has already been done. 102 + * dcn21_s0i3_golden_init_wa() reads this marker later in bios_golden_init() 103 + * to decide whether to skip golden init. 104 + * 105 + * dccg2_init() unconditionally overwrites MICROSECOND_TIME_BASE_DIV to 106 + * 0x00120264, destroying the marker before it can be read. 107 + * 108 + * Guard the call: if the S0i3 marker is present, skip dccg2_init() so the 109 + * WA can function correctly. bios_golden_init() will handle init in that case. 110 + */ 111 + static void dccg21_init(struct dccg *dccg) 112 + { 113 + if (dccg2_is_s0i3_golden_init_wa_done(dccg)) 114 + return; 115 + 116 + dccg2_init(dccg); 117 + } 99 118 100 119 static const struct dccg_funcs dccg21_funcs = { 101 120 .update_dpp_dto = dccg21_update_dpp_dto, ··· 122 103 .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, 123 104 .otg_add_pixel = dccg2_otg_add_pixel, 124 105 .otg_drop_pixel = dccg2_otg_drop_pixel, 125 - .dccg_init = dccg2_init, 106 + .dccg_init = dccg21_init, 126 107 .refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */ 127 108 .allow_clock_gating = dccg2_allow_clock_gating, 128 109 .enable_memory_low_power = dccg2_enable_memory_low_power,
+7 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h
··· 34 34 DCCG_SRII(DTO_PARAM, DPPCLK, 1),\ 35 35 DCCG_SRII(DTO_PARAM, DPPCLK, 2),\ 36 36 DCCG_SRII(DTO_PARAM, DPPCLK, 3),\ 37 - SR(REFCLK_CNTL) 37 + SR(REFCLK_CNTL),\ 38 + SR(DISPCLK_FREQ_CHANGE_CNTL),\ 39 + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\ 40 + SR(MICROSECOND_TIME_BASE_DIV),\ 41 + SR(MILLISECOND_TIME_BASE_DIV),\ 42 + SR(DCCG_GATE_DISABLE_CNTL),\ 43 + SR(DCCG_GATE_DISABLE_CNTL2) 38 44 39 45 #define DCCG_MASK_SH_LIST_DCN301(mask_sh) \ 40 46 DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
+4 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
··· 64 64 SR(DSCCLK1_DTO_PARAM),\ 65 65 SR(DSCCLK2_DTO_PARAM),\ 66 66 SR(DSCCLK_DTO_CTRL),\ 67 + SR(DCCG_GATE_DISABLE_CNTL),\ 67 68 SR(DCCG_GATE_DISABLE_CNTL2),\ 68 69 SR(DCCG_GATE_DISABLE_CNTL3),\ 69 - SR(HDMISTREAMCLK0_DTO_PARAM) 70 + SR(HDMISTREAMCLK0_DTO_PARAM),\ 71 + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\ 72 + SR(MICROSECOND_TIME_BASE_DIV) 70 73 71 74 72 75 #define DCCG_MASK_SH_LIST_DCN31(mask_sh) \
+4 -1
drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
··· 70 70 SR(DSCCLK2_DTO_PARAM),\ 71 71 SR(DSCCLK3_DTO_PARAM),\ 72 72 SR(DSCCLK_DTO_CTRL),\ 73 + SR(DCCG_GATE_DISABLE_CNTL),\ 73 74 SR(DCCG_GATE_DISABLE_CNTL2),\ 74 75 SR(DCCG_GATE_DISABLE_CNTL3),\ 75 76 SR(HDMISTREAMCLK0_DTO_PARAM),\ 76 77 SR(OTG_PIXEL_RATE_DIV),\ 77 - SR(DTBCLK_P_CNTL) 78 + SR(DTBCLK_P_CNTL),\ 79 + SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\ 80 + SR(MICROSECOND_TIME_BASE_DIV) 78 81 79 82 #define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \ 80 83 DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
+3
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 1785 1785 1786 1786 dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); 1787 1787 1788 + DC_FP_START(); 1788 1789 dcn32_override_min_req_memclk(dc, context); 1790 + DC_FP_END(); 1791 + 1789 1792 dcn32_override_min_req_dcfclk(dc, context); 1790 1793 1791 1794 BW_VAL_TRACE_END_WATERMARKS();
+3 -1
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 3454 3454 if (adev->asic_type == CHIP_HAINAN) { 3455 3455 if ((adev->pdev->revision == 0x81) || 3456 3456 (adev->pdev->revision == 0xC3) || 3457 + (adev->pdev->device == 0x6660) || 3457 3458 (adev->pdev->device == 0x6664) || 3458 3459 (adev->pdev->device == 0x6665) || 3459 - (adev->pdev->device == 0x6667)) { 3460 + (adev->pdev->device == 0x6667) || 3461 + (adev->pdev->device == 0x666F)) { 3460 3462 max_sclk = 75000; 3461 3463 } 3462 3464 if ((adev->pdev->revision == 0xC3) ||
+2 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 2222 2222 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) | 2223 2223 BIT(PP_OD_FEATURE_UCLK_BIT) | 2224 2224 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) | 2225 - BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2225 + BIT(PP_OD_FEATURE_FAN_CURVE_BIT) | 2226 + BIT(PP_OD_FEATURE_ZERO_FAN_BIT); 2226 2227 res = smu_v13_0_0_upload_overdrive_table(smu, user_od_table); 2227 2228 user_od_table->OverDriveTable.FeatureCtrlMask = 0; 2228 2229 if (res == 0)
+2 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
··· 2224 2224 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) | 2225 2225 BIT(PP_OD_FEATURE_UCLK_BIT) | 2226 2226 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) | 2227 - BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2227 + BIT(PP_OD_FEATURE_FAN_CURVE_BIT) | 2228 + BIT(PP_OD_FEATURE_ZERO_FAN_BIT); 2228 2229 res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table); 2229 2230 user_od_table->OverDriveTable.FeatureCtrlMask = 0; 2230 2231 if (res == 0)
+2 -1
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
··· 2311 2311 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) | 2312 2312 BIT(PP_OD_FEATURE_UCLK_BIT) | 2313 2313 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) | 2314 - BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2314 + BIT(PP_OD_FEATURE_FAN_CURVE_BIT) | 2315 + BIT(PP_OD_FEATURE_ZERO_FAN_BIT); 2315 2316 res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table); 2316 2317 user_od_table->OverDriveTable.FeatureCtrlMask = 0; 2317 2318 if (res == 0)
+1 -1
drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
··· 848 848 849 849 regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS0, &header_bytes, 1); 850 850 regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS1, &buffer[3], 1); 851 - regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[4], 1); 851 + regmap_bulk_write(hdmi->regm, PKT_AUDI_CONTENTS2, &buffer[7], 1); 852 852 853 853 /* Enable ACR, AUDI, AMD */ 854 854 dw_hdmi_qp_mod(hdmi,
+7 -6
drivers/gpu/drm/bridge/ti-sn65dsi83.c
··· 351 351 * DSI_CLK = mode clock * bpp / dsi_data_lanes / 2 352 352 * the 2 is there because the bus is DDR. 353 353 */ 354 - return DIV_ROUND_UP(clamp((unsigned int)mode->clock * 355 - mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) / 356 - ctx->dsi->lanes / 2, 40000U, 500000U), 5000U); 354 + return clamp((unsigned int)mode->clock * 355 + mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) / 356 + ctx->dsi->lanes / 2, 40000U, 500000U) / 5000U; 357 357 } 358 358 359 359 static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx) ··· 517 517 struct drm_atomic_state *state) 518 518 { 519 519 struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); 520 + const unsigned int dual_factor = ctx->lvds_dual_link ? 2 : 1; 520 521 const struct drm_bridge_state *bridge_state; 521 522 const struct drm_crtc_state *crtc_state; 522 523 const struct drm_display_mode *mode; ··· 654 653 /* 32 + 1 pixel clock to ensure proper operation */ 655 654 le16val = cpu_to_le16(32 + 1); 656 655 regmap_bulk_write(ctx->regmap, REG_VID_CHA_SYNC_DELAY_LOW, &le16val, 2); 657 - le16val = cpu_to_le16(mode->hsync_end - mode->hsync_start); 656 + le16val = cpu_to_le16((mode->hsync_end - mode->hsync_start) / dual_factor); 658 657 regmap_bulk_write(ctx->regmap, REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW, 659 658 &le16val, 2); 660 659 le16val = cpu_to_le16(mode->vsync_end - mode->vsync_start); 661 660 regmap_bulk_write(ctx->regmap, REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW, 662 661 &le16val, 2); 663 662 regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_BACK_PORCH, 664 - mode->htotal - mode->hsync_end); 663 + (mode->htotal - mode->hsync_end) / dual_factor); 665 664 regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_BACK_PORCH, 666 665 mode->vtotal - mode->vsync_end); 667 666 regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_FRONT_PORCH, 668 - mode->hsync_start - mode->hdisplay); 667 + (mode->hsync_start - mode->hdisplay) / dual_factor); 669 668 regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_FRONT_PORCH, 670 669 mode->vsync_start - mode->vdisplay); 671 670 regmap_write(ctx->regmap, REG_VID_CHA_TEST_PATTERN, 0x00);
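A worked example of the divider change, using made-up but plausible numbers (148.5 MHz pixel clock, RGB888 so 24 bpp, 4 DSI lanes): DSI_CLK = 148500 * 24 / 4 / 2 = 445500 kHz. The fix converts that figure to 5 MHz units by truncation instead of rounding up, which suggests the divider field wants the floor of the actual clock; that reading is inferred from the change itself, not from the datasheet:

    #include <linux/math.h>
    #include <linux/minmax.h>
    #include <linux/types.h>

    static void divider_example(void)
    {
            unsigned int dsi_clk_khz = 148500u * 24 / 4 / 2;    /* 445500 */
            u8 old_div, new_div;

            old_div = DIV_ROUND_UP(clamp(dsi_clk_khz, 40000U, 500000U), 5000U);
            new_div = clamp(dsi_clk_khz, 40000U, 500000U) / 5000U;

            /* old_div == 90, i.e. 450 MHz: overstates the real clock */
            /* new_div == 89: floor value, never above the real clock */
    }

The same hunk also halves the horizontal sync and porch values for dual-link LVDS, since each LVDS channel then carries half of every line.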
+4 -1
drivers/gpu/drm/drm_file.c
··· 233 233 void drm_file_free(struct drm_file *file) 234 234 { 235 235 struct drm_device *dev; 236 + int idx; 236 237 237 238 if (!file) 238 239 return; ··· 250 249 251 250 drm_events_release(file); 252 251 253 - if (drm_core_check_feature(dev, DRIVER_MODESET)) { 252 + if (drm_core_check_feature(dev, DRIVER_MODESET) && 253 + drm_dev_enter(dev, &idx)) { 254 254 drm_fb_release(file); 255 255 drm_property_destroy_user_blobs(dev, file); 256 + drm_dev_exit(idx); 256 257 } 257 258 258 259 if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
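This drm_file change and the gud rework later in the series both bracket device access with drm_dev_enter()/drm_dev_exit(), the standard DRM guard against touching a device that has already been unplugged. Its minimal shape:

    #include <drm/drm_drv.h>

    static void touch_hw(struct drm_device *drm)
    {
            int idx;

            /* fails once drm_dev_unplug() has run */
            if (!drm_dev_enter(drm, &idx))
                    return;

            /* ... device/hardware access is safe in here ... */

            drm_dev_exit(idx);
    }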
+6 -3
drivers/gpu/drm/drm_mode_config.c
··· 577 577 */ 578 578 WARN_ON(!list_empty(&dev->mode_config.fb_list)); 579 579 list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { 580 - struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]"); 580 + if (list_empty(&fb->filp_head) || drm_framebuffer_read_refcount(fb) > 1) { 581 + struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]"); 581 582 582 - drm_printf(&p, "framebuffer[%u]:\n", fb->base.id); 583 - drm_framebuffer_print_info(&p, 1, fb); 583 + drm_printf(&p, "framebuffer[%u]:\n", fb->base.id); 584 + drm_framebuffer_print_info(&p, 1, fb); 585 + } 586 + list_del_init(&fb->filp_head); 584 587 drm_framebuffer_free(&fb->base.refcount); 585 588 } 586 589
+5 -9
drivers/gpu/drm/drm_pagemap_util.c
··· 65 65 drm_dbg(cache->shrinker->drm, "Destroying dpagemap cache.\n"); 66 66 spin_lock(&cache->lock); 67 67 dpagemap = cache->dpagemap; 68 - if (!dpagemap) { 69 - spin_unlock(&cache->lock); 70 - goto out; 71 - } 68 + cache->dpagemap = NULL; 69 + if (dpagemap && !drm_pagemap_shrinker_cancel(dpagemap)) 70 + dpagemap = NULL; 71 + spin_unlock(&cache->lock); 72 72 73 - if (drm_pagemap_shrinker_cancel(dpagemap)) { 74 - cache->dpagemap = NULL; 75 - spin_unlock(&cache->lock); 73 + if (dpagemap) 76 74 drm_pagemap_destroy(dpagemap, false); 77 - } 78 75 79 - out: 80 76 mutex_destroy(&cache->lookup_mutex); 81 77 kfree(cache); 82 78 }
+8 -1
drivers/gpu/drm/gud/gud_drv.c
··· 339 339 } 340 340 341 341 static const struct drm_crtc_helper_funcs gud_crtc_helper_funcs = { 342 - .atomic_check = drm_crtc_helper_atomic_check 342 + .atomic_check = drm_crtc_helper_atomic_check, 343 + .atomic_enable = gud_crtc_atomic_enable, 344 + .atomic_disable = gud_crtc_atomic_disable, 343 345 }; 344 346 345 347 static const struct drm_crtc_funcs gud_crtc_funcs = { ··· 364 362 .disable_plane = drm_atomic_helper_disable_plane, 365 363 .destroy = drm_plane_cleanup, 366 364 DRM_GEM_SHADOW_PLANE_FUNCS, 365 + }; 366 + 367 + static const struct drm_mode_config_helper_funcs gud_mode_config_helpers = { 368 + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, 367 369 }; 368 370 369 371 static const struct drm_mode_config_funcs gud_mode_config_funcs = { ··· 505 499 drm->mode_config.min_height = le32_to_cpu(desc.min_height); 506 500 drm->mode_config.max_height = le32_to_cpu(desc.max_height); 507 501 drm->mode_config.funcs = &gud_mode_config_funcs; 502 + drm->mode_config.helper_private = &gud_mode_config_helpers; 508 503 509 504 /* Format init */ 510 505 formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
+4
drivers/gpu/drm/gud/gud_internal.h
··· 62 62 63 63 void gud_clear_damage(struct gud_device *gdrm); 64 64 void gud_flush_work(struct work_struct *work); 65 + void gud_crtc_atomic_enable(struct drm_crtc *crtc, 66 + struct drm_atomic_state *state); 67 + void gud_crtc_atomic_disable(struct drm_crtc *crtc, 68 + struct drm_atomic_state *state); 65 69 int gud_plane_atomic_check(struct drm_plane *plane, 66 70 struct drm_atomic_state *state); 67 71 void gud_plane_atomic_update(struct drm_plane *plane,
+36 -18
drivers/gpu/drm/gud/gud_pipe.c
··· 580 580 return ret; 581 581 } 582 582 583 + void gud_crtc_atomic_enable(struct drm_crtc *crtc, 584 + struct drm_atomic_state *state) 585 + { 586 + struct drm_device *drm = crtc->dev; 587 + struct gud_device *gdrm = to_gud_device(drm); 588 + int idx; 589 + 590 + if (!drm_dev_enter(drm, &idx)) 591 + return; 592 + 593 + gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1); 594 + gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0); 595 + gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, 1); 596 + 597 + drm_dev_exit(idx); 598 + } 599 + 600 + void gud_crtc_atomic_disable(struct drm_crtc *crtc, 601 + struct drm_atomic_state *state) 602 + { 603 + struct drm_device *drm = crtc->dev; 604 + struct gud_device *gdrm = to_gud_device(drm); 605 + int idx; 606 + 607 + if (!drm_dev_enter(drm, &idx)) 608 + return; 609 + 610 + gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, 0); 611 + gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0); 612 + 613 + drm_dev_exit(idx); 614 + } 615 + 583 616 void gud_plane_atomic_update(struct drm_plane *plane, 584 617 struct drm_atomic_state *atomic_state) 585 618 { ··· 640 607 mutex_unlock(&gdrm->damage_lock); 641 608 } 642 609 643 - if (!drm_dev_enter(drm, &idx)) 610 + if (!crtc || !drm_dev_enter(drm, &idx)) 644 611 return; 645 - 646 - if (!old_state->fb) 647 - gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1); 648 - 649 - if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed)) 650 - gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0); 651 - 652 - if (crtc->state->active_changed) 653 - gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active); 654 - 655 - if (!fb) 656 - goto ctrl_disable; 657 612 658 613 ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); 659 614 if (ret) 660 - goto ctrl_disable; 615 + goto out; 661 616 662 617 drm_atomic_helper_damage_iter_init(&iter, old_state, new_state); 663 618 drm_atomic_for_each_plane_damage(&iter, &damage) ··· 653 632 654 633 drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); 655 634 656 - ctrl_disable: 657 - if (!crtc->state->enable) 658 - gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0); 659 - 635 + out: 660 636 drm_dev_exit(idx); 661 637 }
-6
drivers/gpu/drm/i915/display/intel_alpm.c
··· 43 43 44 44 void intel_alpm_init(struct intel_dp *intel_dp) 45 45 { 46 - u8 dpcd; 47 - 48 - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &dpcd) < 0) 49 - return; 50 - 51 - intel_dp->alpm_dpcd = dpcd; 52 46 mutex_init(&intel_dp->alpm.lock); 53 47 } 54 48
-1
drivers/gpu/drm/i915/display/intel_display.c
··· 1614 1614 } 1615 1615 1616 1616 intel_set_transcoder_timings(crtc_state); 1617 - intel_vrr_set_transcoder_timings(crtc_state); 1618 1617 1619 1618 if (cpu_transcoder != TRANSCODER_EDP) 1620 1619 intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
+1 -1
drivers/gpu/drm/i915/display/intel_display_power_well.c
··· 806 806 power_domains->dc_state, val & mask); 807 807 808 808 enable_dc6 = state & DC_STATE_EN_UPTO_DC6; 809 - dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6; 809 + dc6_was_enabled = power_domains->dc_state & DC_STATE_EN_UPTO_DC6; 810 810 if (!dc6_was_enabled && enable_dc6) 811 811 intel_dmc_update_dc6_allowed_count(display, true); 812 812
+1
drivers/gpu/drm/i915/display/intel_display_types.h
··· 1186 1186 u32 dc3co_exitline; 1187 1187 u16 su_y_granularity; 1188 1188 u8 active_non_psr_pipes; 1189 + u8 entry_setup_frames; 1189 1190 const char *no_psr_reason; 1190 1191 1191 1192 /*
+1 -2
drivers/gpu/drm/i915/display/intel_dmc.c
··· 1599 1599 return false; 1600 1600 1601 1601 mutex_lock(&power_domains->lock); 1602 - dc6_enabled = intel_de_read(display, DC_STATE_EN) & 1603 - DC_STATE_EN_UPTO_DC6; 1602 + dc6_enabled = power_domains->dc_state & DC_STATE_EN_UPTO_DC6; 1604 1603 if (dc6_enabled) 1605 1604 intel_dmc_update_dc6_allowed_count(display, false); 1606 1605
+7
drivers/gpu/drm/i915/display/intel_dp.c
··· 4577 4577 intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector) 4578 4578 { 4579 4579 struct intel_display *display = to_intel_display(intel_dp); 4580 + int ret; 4580 4581 4581 4582 /* this function is meant to be called only once */ 4582 4583 drm_WARN_ON(display->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); ··· 4616 4615 * available (such as HDR backlight controls) 4617 4616 */ 4618 4617 intel_dp_init_source_oui(intel_dp); 4618 + 4619 + /* Read the ALPM DPCD caps */ 4620 + ret = drm_dp_dpcd_read_byte(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, 4621 + &intel_dp->alpm_dpcd); 4622 + if (ret < 0) 4623 + return false; 4619 4624 4620 4625 /* 4621 4626 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
+53 -14
drivers/gpu/drm/i915/display/intel_psr.c
··· 1717 1717 entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, conn_state, adjusted_mode); 1718 1718 1719 1719 if (entry_setup_frames >= 0) { 1720 - intel_dp->psr.entry_setup_frames = entry_setup_frames; 1720 + crtc_state->entry_setup_frames = entry_setup_frames; 1721 1721 } else { 1722 1722 crtc_state->no_psr_reason = "PSR setup timing not met"; 1723 1723 drm_dbg_kms(display->drm, ··· 1815 1815 { 1816 1816 struct intel_display *display = to_intel_display(intel_dp); 1817 1817 1818 - return (DISPLAY_VER(display) == 20 && intel_dp->psr.entry_setup_frames > 0 && 1818 + return (DISPLAY_VER(display) == 20 && crtc_state->entry_setup_frames > 0 && 1819 1819 !crtc_state->has_sel_update); 1820 1820 } 1821 1821 ··· 2189 2189 intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used; 2190 2190 intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines; 2191 2191 intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines; 2192 + intel_dp->psr.entry_setup_frames = crtc_state->entry_setup_frames; 2192 2193 2193 2194 if (!psr_interrupt_error_check(intel_dp)) 2194 2195 return; ··· 2620 2619 2621 2620 intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 2622 2621 crtc_state->pipe_srcsz_early_tpt); 2622 + 2623 + if (!crtc_state->dsc.compression_enable) 2624 + return; 2625 + 2626 + intel_dsc_su_et_parameters_configure(dsb, encoder, crtc_state, 2627 + drm_rect_height(&crtc_state->psr2_su_area)); 2623 2628 } 2624 2629 2625 2630 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, ··· 2696 2689 overlap_damage_area->y2 = damage_area->y2; 2697 2690 } 2698 2691 2699 - static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state) 2692 + static bool intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state) 2700 2693 { 2701 2694 struct intel_display *display = to_intel_display(crtc_state); 2702 2695 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2703 2696 u16 y_alignment; 2697 + bool su_area_changed = false; 2704 2698 2705 2699 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */ 2706 2700 if (crtc_state->dsc.compression_enable && ··· 2710 2702 else 2711 2703 y_alignment = crtc_state->su_y_granularity; 2712 2704 2713 - crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment; 2714 - if (crtc_state->psr2_su_area.y2 % y_alignment) 2705 + if (crtc_state->psr2_su_area.y1 % y_alignment) { 2706 + crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment; 2707 + su_area_changed = true; 2708 + } 2709 + 2710 + if (crtc_state->psr2_su_area.y2 % y_alignment) { 2715 2711 crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 / 2716 2712 y_alignment) + 1) * y_alignment; 2713 + su_area_changed = true; 2714 + } 2715 + 2716 + return su_area_changed; 2717 2717 } 2718 2718 2719 2719 /* ··· 2855 2839 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 2856 2840 struct intel_plane_state *new_plane_state, *old_plane_state; 2857 2841 struct intel_plane *plane; 2858 - bool full_update = false, cursor_in_su_area = false; 2842 + bool full_update = false, su_area_changed; 2859 2843 int i, ret; 2860 2844 2861 2845 if (!crtc_state->enable_psr2_sel_fetch) ··· 2962 2946 if (ret) 2963 2947 return ret; 2964 2948 2965 - /* 2966 - * Adjust su area to cover cursor fully as necessary (early 2967 - * transport). 
This needs to be done after 2968 - * drm_atomic_add_affected_planes to ensure visible cursor is added into 2969 - * affected planes even when cursor is not updated by itself. 2970 - */ 2971 - intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area); 2949 + do { 2950 + bool cursor_in_su_area; 2972 2951 2973 - intel_psr2_sel_fetch_pipe_alignment(crtc_state); 2952 + /* 2953 + * Adjust su area to cover cursor fully as necessary 2954 + * (early transport). This needs to be done after 2955 + * drm_atomic_add_affected_planes to ensure visible 2956 + * cursor is added into affected planes even when 2957 + * cursor is not updated by itself. 2958 + */ 2959 + intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area); 2960 + 2961 + su_area_changed = intel_psr2_sel_fetch_pipe_alignment(crtc_state); 2962 + 2963 + /* 2964 + * If the cursor was outside the SU area before 2965 + * alignment, the alignment step (which only expands 2966 + * SU) may pull the cursor partially inside, so we 2967 + * must run ET alignment again to fully cover it. But 2968 + * if the cursor was already fully inside before 2969 + * alignment, expanding the SU area won't change that, 2970 + * so no further work is needed. 2971 + */ 2972 + if (cursor_in_su_area) 2973 + break; 2974 + } while (su_area_changed); 2974 2975 2975 2976 /* 2976 2977 * Now that we have the pipe damaged area check if it intersect with ··· 3047 3014 } 3048 3015 3049 3016 skip_sel_fetch_set_loop: 3017 + if (full_update) 3018 + clip_area_update(&crtc_state->psr2_su_area, &crtc_state->pipe_src, 3019 + &crtc_state->pipe_src); 3020 + 3050 3021 psr2_man_trk_ctl_calc(crtc_state, full_update); 3051 3022 crtc_state->pipe_srcsz_early_tpt = 3052 3023 psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update); ··· 3110 3073 * - Display WA #1136: skl, bxt 3111 3074 */ 3112 3075 if (intel_crtc_needs_modeset(new_crtc_state) || 3076 + new_crtc_state->update_m_n || 3077 + new_crtc_state->update_lrr || 3113 3078 !new_crtc_state->has_psr || 3114 3079 !new_crtc_state->active_planes || 3115 3080 new_crtc_state->has_sel_update != psr->sel_update_enabled ||
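The new do/while in the selective-fetch path is a small fixed-point iteration: a pass may grow the SU area, growth may newly intersect the cursor, and iteration stops once the cursor is covered or a pass changes nothing. A skeleton with hypothetical helpers mirroring that shape:

    #include <drm/drm_rect.h>
    #include <linux/types.h>

    /* hypothetical helpers standing in for the PSR ones */
    void expand_to_cover_cursor(struct drm_rect *area, bool *covered);
    bool align_area(struct drm_rect *area);     /* true if the area grew */

    static void settle_su_area(struct drm_rect *area)
    {
            bool grew;

            do {
                    bool covered;

                    expand_to_cover_cursor(area, &covered);
                    grew = align_area(area);

                    /* pure growth cannot un-cover a covered cursor */
                    if (covered)
                            break;
            } while (grew);
    }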
+23
drivers/gpu/drm/i915/display/intel_vdsc.c
··· 767 767 sizeof(dp_dsc_pps_sdp)); 768 768 } 769 769 770 + void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder, 771 + const struct intel_crtc_state *crtc_state, int su_lines) 772 + { 773 + struct intel_display *display = to_intel_display(crtc_state); 774 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 775 + const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 776 + enum pipe pipe = crtc->pipe; 777 + int vdsc_instances_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state); 778 + int slice_row_per_frame = su_lines / vdsc_cfg->slice_height; 779 + u32 val; 780 + 781 + drm_WARN_ON_ONCE(display->drm, su_lines % vdsc_cfg->slice_height); 782 + drm_WARN_ON_ONCE(display->drm, vdsc_instances_per_pipe > 2); 783 + 784 + val = DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(slice_row_per_frame); 785 + val |= DSC_SUPS0_SU_PIC_HEIGHT(su_lines); 786 + 787 + intel_de_write_dsb(display, dsb, LNL_DSC0_SU_PARAMETER_SET_0(pipe), val); 788 + 789 + if (vdsc_instances_per_pipe == 2) 790 + intel_de_write_dsb(display, dsb, LNL_DSC1_SU_PARAMETER_SET_0(pipe), val); 791 + } 792 + 770 793 static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder) 771 794 { 772 795 return is_pipe_dsc(crtc, cpu_transcoder) ?
+3
drivers/gpu/drm/i915/display/intel_vdsc.h
··· 13 13 enum transcoder; 14 14 struct intel_crtc; 15 15 struct intel_crtc_state; 16 + struct intel_dsb; 16 17 struct intel_encoder; 17 18 18 19 bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state); ··· 32 31 const struct intel_crtc_state *crtc_state); 33 32 void intel_dsc_dp_pps_write(struct intel_encoder *encoder, 34 33 const struct intel_crtc_state *crtc_state); 34 + void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder, 35 + const struct intel_crtc_state *crtc_state, int su_lines); 35 36 void intel_vdsc_state_dump(struct drm_printer *p, int indent, 36 37 const struct intel_crtc_state *crtc_state); 37 38 int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
+12
drivers/gpu/drm/i915/display/intel_vdsc_regs.h
··· 196 196 #define DSC_PPS18_NSL_BPG_OFFSET(offset) REG_FIELD_PREP(DSC_PPS18_NSL_BPG_OFFSET_MASK, offset) 197 197 #define DSC_PPS18_SL_OFFSET_ADJ(offset) REG_FIELD_PREP(DSC_PPS18_SL_OFFSET_ADJ_MASK, offset) 198 198 199 + #define _LNL_DSC0_SU_PARAMETER_SET_0_PA 0x78064 200 + #define _LNL_DSC1_SU_PARAMETER_SET_0_PA 0x78164 201 + #define _LNL_DSC0_SU_PARAMETER_SET_0_PB 0x78264 202 + #define _LNL_DSC1_SU_PARAMETER_SET_0_PB 0x78364 203 + #define LNL_DSC0_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC0_SU_PARAMETER_SET_0_PA, _LNL_DSC0_SU_PARAMETER_SET_0_PB) 204 + #define LNL_DSC1_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC1_SU_PARAMETER_SET_0_PA, _LNL_DSC1_SU_PARAMETER_SET_0_PB) 205 + 206 + #define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK REG_GENMASK(31, 20) 207 + #define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(rows) REG_FIELD_PREP(DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK, (rows)) 208 + #define DSC_SUPS0_SU_PIC_HEIGHT_MASK REG_GENMASK(15, 0) 209 + #define DSC_SUPS0_SU_PIC_HEIGHT(h) REG_FIELD_PREP(DSC_SUPS0_SU_PIC_HEIGHT_MASK, (h)) 210 + 199 211 /* Icelake Rate Control Buffer Threshold Registers */ 200 212 #define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) 201 213 #define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
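Editor's note: these definitions follow the usual REG_GENMASK()/REG_FIELD_PREP() pattern, and the intel_vdsc.c hunk earlier packs slice_row_per_frame = su_lines / slice_height into the high field. A userspace sketch of the same field packing, with GENMASK/FIELD_PREP re-implemented locally (not the kernel macros) and example values:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
/* Shift a value into the contiguous field described by mask. */
#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define SU_SLICE_ROW_PER_FRAME_MASK	GENMASK(31, 20)
#define SU_PIC_HEIGHT_MASK		GENMASK(15, 0)

int main(void)
{
	unsigned int su_lines = 960;		/* example SU height */
	unsigned int slice_height = 120;	/* example DSC slice height */
	uint32_t val;

	/* Mirrors the intel_vdsc.c hunk: rows = su_lines / slice_height,
	 * both packed into one register value. */
	val = FIELD_PREP(SU_SLICE_ROW_PER_FRAME_MASK, su_lines / slice_height) |
	      FIELD_PREP(SU_PIC_HEIGHT_MASK, su_lines);

	printf("SU_PARAMETER_SET_0 = 0x%08x\n", (unsigned int)val);
	return 0;
}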
+14
drivers/gpu/drm/i915/display/intel_vrr.c
··· 598 598 return; 599 599 600 600 /* 601 + * Bspec says: 602 + * "(note: VRR needs to be programmed after 603 + * TRANS_DDI_FUNC_CTL and before TRANS_CONF)." 604 + * 605 + * In practice it turns out that ICL can hang if 606 + * TRANS_VRR_VMAX/FLIPLINE are written before 607 + * enabling TRANS_DDI_FUNC_CTL. 608 + */ 609 + drm_WARN_ON(display->drm, 610 + !(intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE)); 611 + 612 + /* 601 613 * This bit seems to have two meanings depending on the platform: 602 614 * TGL: generate VRR "safe window" for DSB vblank waits 603 615 * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR ··· 950 938 void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state) 951 939 { 952 940 struct intel_display *display = to_intel_display(crtc_state); 941 + 942 + intel_vrr_set_transcoder_timings(crtc_state); 953 943 954 944 if (!intel_vrr_possible(crtc_state)) 955 945 return;
+9 -3
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
··· 153 153 } 154 154 } while (1); 155 155 156 - nr_pages = min_t(unsigned long, 157 - folio_nr_pages(folio), page_count - i); 156 + nr_pages = min_array(((unsigned long[]) { 157 + folio_nr_pages(folio), 158 + page_count - i, 159 + max_segment / PAGE_SIZE, 160 + }), 3); 161 + 158 162 if (!i || 159 163 sg->length >= max_segment || 160 164 folio_pfn(folio) != next_pfn) { ··· 168 164 st->nents++; 169 165 sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0); 170 166 } else { 171 - /* XXX: could overflow? */ 167 + nr_pages = min_t(unsigned long, nr_pages, 168 + (max_segment - sg->length) / PAGE_SIZE); 169 + 172 170 sg->length += nr_pages * PAGE_SIZE; 173 171 } 174 172 next_pfn = folio_pfn(folio) + nr_pages;
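Editor's note: the shmem change bounds each scatterlist chunk three ways instead of one, and replaces the old "XXX: could overflow?" append with an explicit clamp to the segment's remaining capacity. A reduced C model of both clamps; the sizes and names are made up for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long min3ul(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	unsigned long folio_pages = 512;		/* pages in this folio */
	unsigned long pages_wanted = 300;		/* page_count - i */
	unsigned long max_segment = 1024 * 1024;	/* DMA segment limit, bytes */
	unsigned long seg_len = 768 * 1024;		/* bytes already in open segment */
	unsigned long nr, room;

	/* First clamp: folio size, pages still needed, whole-segment limit. */
	nr = min3ul(folio_pages, pages_wanted, max_segment / PAGE_SIZE);

	/* Second clamp when appending to an open segment: its remaining
	 * capacity, which the old code never checked. */
	room = (max_segment - seg_len) / PAGE_SIZE;
	if (nr > room)
		nr = room;

	printf("append %lu pages (%lu bytes)\n", nr, nr * PAGE_SIZE);
	return 0;
}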
+2 -1
drivers/gpu/drm/i915/gt/intel_engine_cs.c
··· 1967 1967 if (engine->sanitize) 1968 1968 engine->sanitize(engine); 1969 1969 1970 - engine->set_default_submission(engine); 1970 + if (engine->set_default_submission) 1971 + engine->set_default_submission(engine); 1971 1972 } 1972 1973 } 1973 1974
-17
drivers/gpu/drm/imagination/pvr_device.c
··· 225 225 } 226 226 227 227 if (pvr_dev->has_safety_events) { 228 - int err; 229 - 230 - /* 231 - * Ensure the GPU is powered on since some safety events (such 232 - * as ECC faults) can happen outside of job submissions, which 233 - * are otherwise the only time a power reference is held. 234 - */ 235 - err = pvr_power_get(pvr_dev); 236 - if (err) { 237 - drm_err_ratelimited(drm_dev, 238 - "%s: could not take power reference (%d)\n", 239 - __func__, err); 240 - return ret; 241 - } 242 - 243 228 while (pvr_device_safety_irq_pending(pvr_dev)) { 244 229 pvr_device_safety_irq_clear(pvr_dev); 245 230 pvr_device_handle_safety_events(pvr_dev); 246 231 247 232 ret = IRQ_HANDLED; 248 233 } 249 - 250 - pvr_power_put(pvr_dev); 251 234 } 252 235 253 236 return ret;
+39 -12
drivers/gpu/drm/imagination/pvr_power.c
··· 90 90 } 91 91 92 92 static int 93 - pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset) 93 + pvr_power_fw_disable(struct pvr_device *pvr_dev, bool hard_reset, bool rpm_suspend) 94 94 { 95 - if (!hard_reset) { 96 - int err; 95 + int err; 97 96 97 + if (!hard_reset) { 98 98 cancel_delayed_work_sync(&pvr_dev->watchdog.work); 99 99 100 100 err = pvr_power_request_idle(pvr_dev); ··· 106 106 return err; 107 107 } 108 108 109 - return pvr_fw_stop(pvr_dev); 109 + if (rpm_suspend) { 110 + /* This also waits for late processing of GPU or firmware IRQs in other cores */ 111 + disable_irq(pvr_dev->irq); 112 + } 113 + 114 + err = pvr_fw_stop(pvr_dev); 115 + if (err && rpm_suspend) 116 + enable_irq(pvr_dev->irq); 117 + 118 + return err; 110 119 } 111 120 112 121 static int 113 - pvr_power_fw_enable(struct pvr_device *pvr_dev) 122 + pvr_power_fw_enable(struct pvr_device *pvr_dev, bool rpm_resume) 114 123 { 115 124 int err; 116 125 126 + if (rpm_resume) 127 + enable_irq(pvr_dev->irq); 128 + 117 129 err = pvr_fw_start(pvr_dev); 118 130 if (err) 119 - return err; 131 + goto out; 120 132 121 133 err = pvr_wait_for_fw_boot(pvr_dev); 122 134 if (err) { 123 135 drm_err(from_pvr_device(pvr_dev), "Firmware failed to boot\n"); 124 136 pvr_fw_stop(pvr_dev); 125 - return err; 137 + goto out; 126 138 } 127 139 128 140 queue_delayed_work(pvr_dev->sched_wq, &pvr_dev->watchdog.work, 129 141 msecs_to_jiffies(WATCHDOG_TIME_MS)); 130 142 131 143 return 0; 144 + 145 + out: 146 + if (rpm_resume) 147 + disable_irq(pvr_dev->irq); 148 + 149 + return err; 132 150 } 133 151 134 152 bool ··· 379 361 return -EIO; 380 362 381 363 if (pvr_dev->fw_dev.booted) { 382 - err = pvr_power_fw_disable(pvr_dev, false); 364 + err = pvr_power_fw_disable(pvr_dev, false, true); 383 365 if (err) 384 366 goto err_drm_dev_exit; 385 367 } ··· 409 391 goto err_drm_dev_exit; 410 392 411 393 if (pvr_dev->fw_dev.booted) { 412 - err = pvr_power_fw_enable(pvr_dev); 394 + err = pvr_power_fw_enable(pvr_dev, true); 413 395 if (err) 414 396 goto err_power_off; 415 397 } ··· 528 510 } 529 511 530 512 /* Disable IRQs for the duration of the reset. */ 531 - disable_irq(pvr_dev->irq); 513 + if (hard_reset) { 514 + disable_irq(pvr_dev->irq); 515 + } else { 516 + /* 517 + * Soft reset is triggered as a response to a FW command to the Host and is 518 + * processed from the threaded IRQ handler. This code cannot (nor needs to) 519 + * wait for any IRQ processing to complete. 520 + */ 521 + disable_irq_nosync(pvr_dev->irq); 522 + } 532 523 533 524 do { 534 525 if (hard_reset) { ··· 545 518 queues_disabled = true; 546 519 } 547 520 548 - err = pvr_power_fw_disable(pvr_dev, hard_reset); 521 + err = pvr_power_fw_disable(pvr_dev, hard_reset, false); 549 522 if (!err) { 550 523 if (hard_reset) { 551 524 pvr_dev->fw_dev.booted = false; ··· 568 541 569 542 pvr_fw_irq_clear(pvr_dev); 570 543 571 - err = pvr_power_fw_enable(pvr_dev); 544 + err = pvr_power_fw_enable(pvr_dev, false); 572 545 } 573 546 574 547 if (err && hard_reset)
+1 -1
drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
··· 78 78 { 79 79 struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu); 80 80 81 - dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base, 81 + dma_free_attrs(mmu->dev, TABLE_SIZE + 32, gpummu->table, gpummu->pt_base, 82 82 DMA_ATTR_FORCE_CONTIGUOUS); 83 83 84 84 kfree(gpummu);
+1 -2
drivers/gpu/drm/msm/adreno/a6xx_catalog.c
··· 1759 1759 A6XX_PROTECT_NORDWR(0x27c06, 0x0000), 1760 1760 }; 1761 1761 1762 - DECLARE_ADRENO_PROTECT(x285_protect, 64); 1762 + DECLARE_ADRENO_PROTECT(x285_protect, 15); 1763 1763 1764 1764 static const struct adreno_reglist_pipe a840_nonctxt_regs[] = { 1765 1765 { REG_A8XX_CP_SMMU_STREAM_ID_LPAC, 0x00000101, BIT(PIPE_NONE) }, ··· 1966 1966 BUILD_BUG_ON(a660_protect.count > a660_protect.count_max); 1967 1967 BUILD_BUG_ON(a690_protect.count > a690_protect.count_max); 1968 1968 BUILD_BUG_ON(a730_protect.count > a730_protect.count_max); 1969 - BUILD_BUG_ON(a840_protect.count > a840_protect.count_max); 1970 1969 }
+12 -2
drivers/gpu/drm/msm/adreno/a8xx_gpu.c
··· 310 310 hbb = cfg->highest_bank_bit - 13; 311 311 hbb_hi = hbb >> 2; 312 312 hbb_lo = hbb & 3; 313 - a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5); 314 - a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5); 313 + 314 + a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_GRAS_NC_MODE_CNTL, 315 + hbb << 5 | 316 + level3_swizzling_dis << 4 | 317 + level2_swizzling_dis << 3); 318 + 319 + a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_GRAS_NC_MODE_CNTL, 320 + hbb << 5 | 321 + level3_swizzling_dis << 4 | 322 + level2_swizzling_dis << 3); 315 323 316 324 a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_CCU_NC_MODE_CNTL, 317 325 yuvnotcomptofc << 6 | 326 + level3_swizzling_dis << 5 | 327 + level2_swizzling_dis << 4 | 318 328 hbb_hi << 3 | 319 329 hbb_lo << 1); 320 330
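Editor's note: the new register value is plain bitfield composition: hbb is split into a 2-bit low part and a high part that land at different shift positions, alongside the swizzling-disable bits. A tiny standalone check of the packing; the config values are examples and no register is touched:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int highest_bank_bit = 16;	/* example config value */
	unsigned int hbb = highest_bank_bit - 13;
	unsigned int hbb_hi = hbb >> 2;
	unsigned int hbb_lo = hbb & 3;
	unsigned int yuvnotcomptofc = 0;
	unsigned int level2_swizzling_dis = 0;
	unsigned int level3_swizzling_dis = 0;
	uint32_t val;

	/* Same composition as the RB_CCU_NC_MODE_CNTL write above. */
	val = yuvnotcomptofc << 6 |
	      level3_swizzling_dis << 5 |
	      level2_swizzling_dis << 4 |
	      hbb_hi << 3 |
	      hbb_lo << 1;

	printf("hbb=%u hi=%u lo=%u -> 0x%x\n", hbb, hbb_hi, hbb_lo,
	       (unsigned int)val);
	return 0;
}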
+1
drivers/gpu/drm/msm/adreno/adreno_device.c
··· 302 302 { .compatible = "qcom,kgsl-3d0" }, 303 303 {} 304 304 }; 305 + MODULE_DEVICE_TABLE(of, dt_match); 305 306 306 307 static int adreno_runtime_resume(struct device *dev) 307 308 {
+6 -6
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
··· 133 133 static const struct dpu_lm_cfg sc8280xp_lm[] = { 134 134 { 135 135 .name = "lm_0", .id = LM_0, 136 - .base = 0x44000, .len = 0x320, 136 + .base = 0x44000, .len = 0x400, 137 137 .features = MIXER_MSM8998_MASK, 138 138 .sblk = &sdm845_lm_sblk, 139 139 .lm_pair = LM_1, ··· 141 141 .dspp = DSPP_0, 142 142 }, { 143 143 .name = "lm_1", .id = LM_1, 144 - .base = 0x45000, .len = 0x320, 144 + .base = 0x45000, .len = 0x400, 145 145 .features = MIXER_MSM8998_MASK, 146 146 .sblk = &sdm845_lm_sblk, 147 147 .lm_pair = LM_0, ··· 149 149 .dspp = DSPP_1, 150 150 }, { 151 151 .name = "lm_2", .id = LM_2, 152 - .base = 0x46000, .len = 0x320, 152 + .base = 0x46000, .len = 0x400, 153 153 .features = MIXER_MSM8998_MASK, 154 154 .sblk = &sdm845_lm_sblk, 155 155 .lm_pair = LM_3, ··· 157 157 .dspp = DSPP_2, 158 158 }, { 159 159 .name = "lm_3", .id = LM_3, 160 - .base = 0x47000, .len = 0x320, 160 + .base = 0x47000, .len = 0x400, 161 161 .features = MIXER_MSM8998_MASK, 162 162 .sblk = &sdm845_lm_sblk, 163 163 .lm_pair = LM_2, ··· 165 165 .dspp = DSPP_3, 166 166 }, { 167 167 .name = "lm_4", .id = LM_4, 168 - .base = 0x48000, .len = 0x320, 168 + .base = 0x48000, .len = 0x400, 169 169 .features = MIXER_MSM8998_MASK, 170 170 .sblk = &sdm845_lm_sblk, 171 171 .lm_pair = LM_5, 172 172 .pingpong = PINGPONG_4, 173 173 }, { 174 174 .name = "lm_5", .id = LM_5, 175 - .base = 0x49000, .len = 0x320, 175 + .base = 0x49000, .len = 0x400, 176 176 .features = MIXER_MSM8998_MASK, 177 177 .sblk = &sdm845_lm_sblk, 178 178 .lm_pair = LM_4,
+6 -6
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
··· 134 134 static const struct dpu_lm_cfg sm8450_lm[] = { 135 135 { 136 136 .name = "lm_0", .id = LM_0, 137 - .base = 0x44000, .len = 0x320, 137 + .base = 0x44000, .len = 0x400, 138 138 .features = MIXER_MSM8998_MASK, 139 139 .sblk = &sdm845_lm_sblk, 140 140 .lm_pair = LM_1, ··· 142 142 .dspp = DSPP_0, 143 143 }, { 144 144 .name = "lm_1", .id = LM_1, 145 - .base = 0x45000, .len = 0x320, 145 + .base = 0x45000, .len = 0x400, 146 146 .features = MIXER_MSM8998_MASK, 147 147 .sblk = &sdm845_lm_sblk, 148 148 .lm_pair = LM_0, ··· 150 150 .dspp = DSPP_1, 151 151 }, { 152 152 .name = "lm_2", .id = LM_2, 153 - .base = 0x46000, .len = 0x320, 153 + .base = 0x46000, .len = 0x400, 154 154 .features = MIXER_MSM8998_MASK, 155 155 .sblk = &sdm845_lm_sblk, 156 156 .lm_pair = LM_3, ··· 158 158 .dspp = DSPP_2, 159 159 }, { 160 160 .name = "lm_3", .id = LM_3, 161 - .base = 0x47000, .len = 0x320, 161 + .base = 0x47000, .len = 0x400, 162 162 .features = MIXER_MSM8998_MASK, 163 163 .sblk = &sdm845_lm_sblk, 164 164 .lm_pair = LM_2, ··· 166 166 .dspp = DSPP_3, 167 167 }, { 168 168 .name = "lm_4", .id = LM_4, 169 - .base = 0x48000, .len = 0x320, 169 + .base = 0x48000, .len = 0x400, 170 170 .features = MIXER_MSM8998_MASK, 171 171 .sblk = &sdm845_lm_sblk, 172 172 .lm_pair = LM_5, 173 173 .pingpong = PINGPONG_4, 174 174 }, { 175 175 .name = "lm_5", .id = LM_5, 176 - .base = 0x49000, .len = 0x320, 176 + .base = 0x49000, .len = 0x400, 177 177 .features = MIXER_MSM8998_MASK, 178 178 .sblk = &sdm845_lm_sblk, 179 179 .lm_pair = LM_4,
+2 -2
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
··· 366 366 .type = INTF_NONE, 367 367 .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */ 368 368 .prog_fetch_lines_worst_case = 24, 369 - .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17), 370 - .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16), 369 + .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16), 370 + .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17), 371 371 }, { 372 372 .name = "intf_7", .id = INTF_7, 373 373 .base = 0x3b000, .len = 0x280,
+6 -6
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
··· 131 131 static const struct dpu_lm_cfg sm8550_lm[] = { 132 132 { 133 133 .name = "lm_0", .id = LM_0, 134 - .base = 0x44000, .len = 0x320, 134 + .base = 0x44000, .len = 0x400, 135 135 .features = MIXER_MSM8998_MASK, 136 136 .sblk = &sdm845_lm_sblk, 137 137 .lm_pair = LM_1, ··· 139 139 .dspp = DSPP_0, 140 140 }, { 141 141 .name = "lm_1", .id = LM_1, 142 - .base = 0x45000, .len = 0x320, 142 + .base = 0x45000, .len = 0x400, 143 143 .features = MIXER_MSM8998_MASK, 144 144 .sblk = &sdm845_lm_sblk, 145 145 .lm_pair = LM_0, ··· 147 147 .dspp = DSPP_1, 148 148 }, { 149 149 .name = "lm_2", .id = LM_2, 150 - .base = 0x46000, .len = 0x320, 150 + .base = 0x46000, .len = 0x400, 151 151 .features = MIXER_MSM8998_MASK, 152 152 .sblk = &sdm845_lm_sblk, 153 153 .lm_pair = LM_3, ··· 155 155 .dspp = DSPP_2, 156 156 }, { 157 157 .name = "lm_3", .id = LM_3, 158 - .base = 0x47000, .len = 0x320, 158 + .base = 0x47000, .len = 0x400, 159 159 .features = MIXER_MSM8998_MASK, 160 160 .sblk = &sdm845_lm_sblk, 161 161 .lm_pair = LM_2, ··· 163 163 .dspp = DSPP_3, 164 164 }, { 165 165 .name = "lm_4", .id = LM_4, 166 - .base = 0x48000, .len = 0x320, 166 + .base = 0x48000, .len = 0x400, 167 167 .features = MIXER_MSM8998_MASK, 168 168 .sblk = &sdm845_lm_sblk, 169 169 .lm_pair = LM_5, 170 170 .pingpong = PINGPONG_4, 171 171 }, { 172 172 .name = "lm_5", .id = LM_5, 173 - .base = 0x49000, .len = 0x320, 173 + .base = 0x49000, .len = 0x400, 174 174 .features = MIXER_MSM8998_MASK, 175 175 .sblk = &sdm845_lm_sblk, 176 176 .lm_pair = LM_4,
+6 -6
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
··· 131 131 static const struct dpu_lm_cfg sar2130p_lm[] = { 132 132 { 133 133 .name = "lm_0", .id = LM_0, 134 - .base = 0x44000, .len = 0x320, 134 + .base = 0x44000, .len = 0x400, 135 135 .features = MIXER_MSM8998_MASK, 136 136 .sblk = &sdm845_lm_sblk, 137 137 .lm_pair = LM_1, ··· 139 139 .dspp = DSPP_0, 140 140 }, { 141 141 .name = "lm_1", .id = LM_1, 142 - .base = 0x45000, .len = 0x320, 142 + .base = 0x45000, .len = 0x400, 143 143 .features = MIXER_MSM8998_MASK, 144 144 .sblk = &sdm845_lm_sblk, 145 145 .lm_pair = LM_0, ··· 147 147 .dspp = DSPP_1, 148 148 }, { 149 149 .name = "lm_2", .id = LM_2, 150 - .base = 0x46000, .len = 0x320, 150 + .base = 0x46000, .len = 0x400, 151 151 .features = MIXER_MSM8998_MASK, 152 152 .sblk = &sdm845_lm_sblk, 153 153 .lm_pair = LM_3, ··· 155 155 .dspp = DSPP_2, 156 156 }, { 157 157 .name = "lm_3", .id = LM_3, 158 - .base = 0x47000, .len = 0x320, 158 + .base = 0x47000, .len = 0x400, 159 159 .features = MIXER_MSM8998_MASK, 160 160 .sblk = &sdm845_lm_sblk, 161 161 .lm_pair = LM_2, ··· 163 163 .dspp = DSPP_3, 164 164 }, { 165 165 .name = "lm_4", .id = LM_4, 166 - .base = 0x48000, .len = 0x320, 166 + .base = 0x48000, .len = 0x400, 167 167 .features = MIXER_MSM8998_MASK, 168 168 .sblk = &sdm845_lm_sblk, 169 169 .lm_pair = LM_5, 170 170 .pingpong = PINGPONG_4, 171 171 }, { 172 172 .name = "lm_5", .id = LM_5, 173 - .base = 0x49000, .len = 0x320, 173 + .base = 0x49000, .len = 0x400, 174 174 .features = MIXER_MSM8998_MASK, 175 175 .sblk = &sdm845_lm_sblk, 176 176 .lm_pair = LM_4,
+6 -6
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
··· 130 130 static const struct dpu_lm_cfg x1e80100_lm[] = { 131 131 { 132 132 .name = "lm_0", .id = LM_0, 133 - .base = 0x44000, .len = 0x320, 133 + .base = 0x44000, .len = 0x400, 134 134 .features = MIXER_MSM8998_MASK, 135 135 .sblk = &sdm845_lm_sblk, 136 136 .lm_pair = LM_1, ··· 138 138 .dspp = DSPP_0, 139 139 }, { 140 140 .name = "lm_1", .id = LM_1, 141 - .base = 0x45000, .len = 0x320, 141 + .base = 0x45000, .len = 0x400, 142 142 .features = MIXER_MSM8998_MASK, 143 143 .sblk = &sdm845_lm_sblk, 144 144 .lm_pair = LM_0, ··· 146 146 .dspp = DSPP_1, 147 147 }, { 148 148 .name = "lm_2", .id = LM_2, 149 - .base = 0x46000, .len = 0x320, 149 + .base = 0x46000, .len = 0x400, 150 150 .features = MIXER_MSM8998_MASK, 151 151 .sblk = &sdm845_lm_sblk, 152 152 .lm_pair = LM_3, ··· 154 154 .dspp = DSPP_2, 155 155 }, { 156 156 .name = "lm_3", .id = LM_3, 157 - .base = 0x47000, .len = 0x320, 157 + .base = 0x47000, .len = 0x400, 158 158 .features = MIXER_MSM8998_MASK, 159 159 .sblk = &sdm845_lm_sblk, 160 160 .lm_pair = LM_2, ··· 162 162 .dspp = DSPP_3, 163 163 }, { 164 164 .name = "lm_4", .id = LM_4, 165 - .base = 0x48000, .len = 0x320, 165 + .base = 0x48000, .len = 0x400, 166 166 .features = MIXER_MSM8998_MASK, 167 167 .sblk = &sdm845_lm_sblk, 168 168 .lm_pair = LM_5, 169 169 .pingpong = PINGPONG_4, 170 170 }, { 171 171 .name = "lm_5", .id = LM_5, 172 - .base = 0x49000, .len = 0x320, 172 + .base = 0x49000, .len = 0x400, 173 173 .features = MIXER_MSM8998_MASK, 174 174 .sblk = &sdm845_lm_sblk, 175 175 .lm_pair = LM_4,
+1 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
··· 89 89 base = ctx->cap->sblk->gc.base; 90 90 91 91 if (!base) { 92 - DRM_ERROR("invalid ctx %pK gc base\n", ctx); 92 + DRM_ERROR("invalid ctx %p gc base\n", ctx); 93 93 return; 94 94 } 95 95
+3 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp_v13.c
··· 156 156 u8 color; 157 157 u32 lr_pe[4], tb_pe[4]; 158 158 const u32 bytemask = 0xff; 159 - u32 offset = ctx->cap->sblk->sspp_rec0_blk.base; 159 + u32 offset; 160 160 161 161 if (!ctx || !pe_ext) 162 162 return; 163 + 164 + offset = ctx->cap->sblk->sspp_rec0_blk.base; 163 165 164 166 c = &ctx->hw; 165 167 /* program SW pixel extension override for all pipes*/
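Editor's note: the fix here is ordering, not arithmetic: the old code computed the offset from ctx->cap in an initializer, before the !ctx guard could run. A minimal reproduction of the pattern, with hypothetical struct names:

#include <stddef.h>
#include <stdio.h>

struct sblk { unsigned int base; };
struct ctx { const struct sblk *sblk; };

static unsigned int get_base(const struct ctx *ctx)
{
	unsigned int offset;

	/* BAD (the old shape): initializing offset from ctx->sblk at
	 * declaration dereferences ctx before the guard below runs. */
	if (!ctx)
		return 0;

	offset = ctx->sblk->base;	/* safe: after the NULL check */
	return offset;
}

int main(void)
{
	printf("%u\n", get_base(NULL));
	return 0;
}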
+14 -38
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
··· 350 350 return true; 351 351 } 352 352 353 - static bool dpu_rm_find_lms(struct dpu_rm *rm, 354 - struct dpu_global_state *global_state, 355 - uint32_t crtc_id, bool skip_dspp, 356 - struct msm_display_topology *topology, 357 - int *lm_idx, int *pp_idx, int *dspp_idx) 353 + static int _dpu_rm_reserve_lms(struct dpu_rm *rm, 354 + struct dpu_global_state *global_state, 355 + uint32_t crtc_id, 356 + struct msm_display_topology *topology) 358 357 359 358 { 359 + int lm_idx[MAX_BLOCKS]; 360 + int pp_idx[MAX_BLOCKS]; 361 + int dspp_idx[MAX_BLOCKS] = {0}; 360 362 int i, lm_count = 0; 363 + 364 + if (!topology->num_lm) { 365 + DPU_ERROR("zero LMs in topology\n"); 366 + return -EINVAL; 367 + } 361 368 362 369 /* Find a primary mixer */ 363 370 for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) && 364 371 lm_count < topology->num_lm; i++) { 365 372 if (!rm->mixer_blks[i]) 366 373 continue; 367 - 368 - if (skip_dspp && to_dpu_hw_mixer(rm->mixer_blks[i])->cap->dspp) { 369 - DPU_DEBUG("Skipping LM_%d, skipping LMs with DSPPs\n", i); 370 - continue; 371 - } 372 374 373 375 /* 374 376 * Reset lm_count to an even index. This will drop the previous ··· 410 408 } 411 409 } 412 410 413 - return lm_count == topology->num_lm; 414 - } 415 - 416 - static int _dpu_rm_reserve_lms(struct dpu_rm *rm, 417 - struct dpu_global_state *global_state, 418 - uint32_t crtc_id, 419 - struct msm_display_topology *topology) 420 - 421 - { 422 - int lm_idx[MAX_BLOCKS]; 423 - int pp_idx[MAX_BLOCKS]; 424 - int dspp_idx[MAX_BLOCKS] = {0}; 425 - int i; 426 - bool found; 427 - 428 - if (!topology->num_lm) { 429 - DPU_ERROR("zero LMs in topology\n"); 430 - return -EINVAL; 431 - } 432 - 433 - /* Try using non-DSPP LM blocks first */ 434 - found = dpu_rm_find_lms(rm, global_state, crtc_id, !topology->num_dspp, 435 - topology, lm_idx, pp_idx, dspp_idx); 436 - if (!found && !topology->num_dspp) 437 - found = dpu_rm_find_lms(rm, global_state, crtc_id, false, 438 - topology, lm_idx, pp_idx, dspp_idx); 439 - if (!found) { 411 + if (lm_count != topology->num_lm) { 440 412 DPU_DEBUG("unable to find appropriate mixers\n"); 441 413 return -ENAVAIL; 442 414 } 443 415 444 - for (i = 0; i < topology->num_lm; i++) { 416 + for (i = 0; i < lm_count; i++) { 445 417 global_state->mixer_to_crtc_id[lm_idx[i]] = crtc_id; 446 418 global_state->pingpong_to_crtc_id[pp_idx[i]] = crtc_id; 447 419 global_state->dspp_to_crtc_id[dspp_idx[i]] =
+31 -12
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 584 584 * FIXME: Reconsider this if/when CMD mode handling is rewritten to use 585 585 * transfer time and data overhead as a starting point of the calculations. 586 586 */ 587 - static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode, 588 - const struct drm_dsc_config *dsc) 587 + static unsigned long 588 + dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode, 589 + const struct drm_dsc_config *dsc, 590 + bool is_bonded_dsi) 589 591 { 590 - int new_hdisplay = DIV_ROUND_UP(mode->hdisplay * drm_dsc_get_bpp_int(dsc), 591 - dsc->bits_per_component * 3); 592 + int hdisplay, new_hdisplay, new_htotal; 592 593 593 - int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay; 594 + /* 595 + * For bonded DSI, split hdisplay across two links and round up each 596 + * half separately, passing the full hdisplay would only round up once. 597 + * This also aligns with the hdisplay we program later in 598 + * dsi_timing_setup() 599 + */ 600 + hdisplay = mode->hdisplay; 601 + if (is_bonded_dsi) 602 + hdisplay /= 2; 603 + 604 + new_hdisplay = DIV_ROUND_UP(hdisplay * drm_dsc_get_bpp_int(dsc), 605 + dsc->bits_per_component * 3); 606 + 607 + if (is_bonded_dsi) 608 + new_hdisplay *= 2; 609 + 610 + new_htotal = mode->htotal - mode->hdisplay + new_hdisplay; 594 611 595 612 return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal); 596 613 } ··· 620 603 pclk_rate = mode->clock * 1000u; 621 604 622 605 if (dsc) 623 - pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc); 606 + pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc, is_bonded_dsi); 624 607 625 608 /* 626 609 * For bonded DSI mode, the current DRM mode has the complete width of the ··· 1010 993 1011 994 if (msm_host->dsc) { 1012 995 struct drm_dsc_config *dsc = msm_host->dsc; 1013 - u32 bytes_per_pclk; 996 + u32 bits_per_pclk; 1014 997 1015 998 /* update dsc params with timing params */ 1016 999 if (!dsc || !mode->hdisplay || !mode->vdisplay) { ··· 1032 1015 1033 1016 /* 1034 1017 * DPU sends 3 bytes per pclk cycle to DSI. If widebus is 1035 - * enabled, bus width is extended to 6 bytes. 1018 + * enabled, MDP always sends out 48-bit compressed data per 1019 + * pclk and on average, DSI consumes an amount of compressed 1020 + * data equivalent to the uncompressed pixel depth per pclk. 1036 1021 * 1037 1022 * Calculate the number of pclks needed to transmit one line of 1038 1023 * the compressed data. ··· 1046 1027 * unused anyway. 1047 1028 */ 1048 1029 h_total -= hdisplay; 1049 - if (wide_bus_enabled && !(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) 1050 - bytes_per_pclk = 6; 1030 + if (wide_bus_enabled) 1031 + bits_per_pclk = mipi_dsi_pixel_format_to_bpp(msm_host->format); 1051 1032 else 1052 - bytes_per_pclk = 3; 1033 + bits_per_pclk = 24; 1053 1034 1054 - hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), bytes_per_pclk); 1035 + hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc) * 8, bits_per_pclk); 1055 1036 1056 1037 h_total += hdisplay; 1057 1038 ha_end = ha_start + hdisplay;
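Editor's note: the bonded-DSI rounding subtlety is easiest to see with numbers: rounding the full width once is not the same as rounding each link's half and doubling. A worked example with illustrative values, DIV_ROUND_UP re-implemented locally:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int hdisplay = 1082;	/* example full panel width */
	int bpp = 8;		/* compressed bits per pixel */
	int bpc = 8;		/* bits per component */

	/* Old: one rounding over the full width. */
	int whole = DIV_ROUND_UP(hdisplay * bpp, bpc * 3);

	/* New: each link carries hdisplay/2, rounded up independently. */
	int bonded = 2 * DIV_ROUND_UP((hdisplay / 2) * bpp, bpc * 3);

	printf("whole=%d bonded=%d\n", whole, bonded);
	return 0;
}

With these numbers the single rounding yields 361 while the per-link rounding yields 362, which is exactly the off-by-one the hunk removes and why it matches what dsi_timing_setup() programs.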
+11 -11
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
··· 51 51 #define DSI_PHY_7NM_QUIRK_V4_3 BIT(3) 52 52 /* Hardware is V5.2 */ 53 53 #define DSI_PHY_7NM_QUIRK_V5_2 BIT(4) 54 - /* Hardware is V7.0 */ 55 - #define DSI_PHY_7NM_QUIRK_V7_0 BIT(5) 54 + /* Hardware is V7.2 */ 55 + #define DSI_PHY_7NM_QUIRK_V7_2 BIT(5) 56 56 57 57 struct dsi_pll_config { 58 58 bool enable_ssc; ··· 143 143 144 144 if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) { 145 145 config->pll_clock_inverters = 0x28; 146 - } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { 146 + } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) { 147 147 if (pll_freq < 163000000ULL) 148 148 config->pll_clock_inverters = 0xa0; 149 149 else if (pll_freq < 175000000ULL) ··· 284 284 } 285 285 286 286 if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) || 287 - (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { 287 + (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) { 288 288 if (pll->vco_current_rate < 1557000000ULL) 289 289 vco_config_1 = 0x08; 290 290 else ··· 699 699 case MSM_DSI_PHY_MASTER: 700 700 pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX]; 701 701 /* v7.0: Enable ATB_EN0 and alternate clock output to external phy */ 702 - if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0) 702 + if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2) 703 703 writel(0x07, base + REG_DSI_7nm_PHY_CMN_CTRL_5); 704 704 break; 705 705 case MSM_DSI_PHY_SLAVE: ··· 987 987 /* Request for REFGEN READY */ 988 988 if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) || 989 989 (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) || 990 - (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { 990 + (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) { 991 991 writel(0x1, phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10); 992 992 udelay(500); 993 993 } ··· 1021 1021 lane_ctrl0 = 0x1f; 1022 1022 } 1023 1023 1024 - if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { 1024 + if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) { 1025 1025 if (phy->cphy_mode) { 1026 1026 /* TODO: different for second phy */ 1027 1027 vreg_ctrl_0 = 0x57; ··· 1097 1097 1098 1098 /* program CMN_CTRL_4 for minor_ver 2 chipsets*/ 1099 1099 if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) || 1100 - (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0) || 1100 + (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2) || 1101 1101 (readl(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20) 1102 1102 writel(0x04, base + REG_DSI_7nm_PHY_CMN_CTRL_4); 1103 1103 ··· 1213 1213 /* Turn off REFGEN Vote */ 1214 1214 if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) || 1215 1215 (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) || 1216 - (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) { 1216 + (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) { 1217 1217 writel(0x0, base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10); 1218 1218 wmb(); 1219 1219 /* Delay to ensure HW removes vote before PHY shut down */ ··· 1502 1502 #endif 1503 1503 .io_start = { 0xae95000, 0xae97000 }, 1504 1504 .num_dsi_phy = 2, 1505 - .quirks = DSI_PHY_7NM_QUIRK_V7_0, 1505 + .quirks = DSI_PHY_7NM_QUIRK_V7_2, 1506 1506 }; 1507 1507 1508 1508 const struct msm_dsi_phy_cfg dsi_phy_3nm_kaanapali_cfgs = { ··· 1525 1525 #endif 1526 1526 .io_start = { 0x9ac1000, 0x9ac4000 }, 1527 1527 .num_dsi_phy = 2, 1528 - .quirks = DSI_PHY_7NM_QUIRK_V7_0, 1528 + .quirks = DSI_PHY_7NM_QUIRK_V7_2, 1529 1529 };
+3 -1
drivers/gpu/drm/radeon/si_dpm.c
··· 2915 2915 if (rdev->family == CHIP_HAINAN) { 2916 2916 if ((rdev->pdev->revision == 0x81) || 2917 2917 (rdev->pdev->revision == 0xC3) || 2918 + (rdev->pdev->device == 0x6660) || 2918 2919 (rdev->pdev->device == 0x6664) || 2919 2920 (rdev->pdev->device == 0x6665) || 2920 - (rdev->pdev->device == 0x6667)) { 2921 + (rdev->pdev->device == 0x6667) || 2922 + (rdev->pdev->device == 0x666F)) { 2921 2923 max_sclk = 75000; 2922 2924 } 2923 2925 if ((rdev->pdev->revision == 0xC3) ||
+6 -9
drivers/gpu/drm/sitronix/st7586.c
··· 347 347 if (ret) 348 348 return ret; 349 349 350 + /* 351 + * Override value set by mipi_dbi_spi_init(). This driver is a bit 352 + * non-standard, so best to set it explicitly here. 353 + */ 354 + dbi->write_memory_bpw = 8; 355 + 350 356 /* Cannot read from this controller via SPI */ 351 357 dbi->read_commands = NULL; 352 358 ··· 361 355 &st7586_mode, rotation, bufsize); 362 356 if (ret) 363 357 return ret; 364 - 365 - /* 366 - * we are using 8-bit data, so we are not actually swapping anything, 367 - * but setting mipi->swap_bytes makes mipi_dbi_typec3_command() do the 368 - * right thing and not use 16-bit transfers (which results in swapped 369 - * bytes on little-endian systems and causes out of order data to be 370 - * sent to the display). 371 - */ 372 - dbi->swap_bytes = true; 373 358 374 359 drm_mode_config_reset(drm); 375 360
+57 -36
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 96 96 97 97 struct vmw_res_func; 98 98 99 + struct vmw_bo; 100 + struct vmw_bo; 101 + struct vmw_resource_dirty; 102 + 99 103 /** 100 - * struct vmw-resource - base class for hardware resources 104 + * struct vmw_resource - base class for hardware resources 101 105 * 102 106 * @kref: For refcounting. 103 107 * @dev_priv: Pointer to the device private for this resource. Immutable. 104 108 * @id: Device id. Protected by @dev_priv::resource_lock. 109 + * @used_prio: Priority for this resource. 105 110 * @guest_memory_size: Guest memory buffer size. Immutable. 106 111 * @res_dirty: Resource contains data not yet in the guest memory buffer. 107 112 * Protected by resource reserved. ··· 122 117 * pin-count greater than zero. It is not on the resource LRU lists and its 123 118 * guest memory buffer is pinned. Hence it can't be evicted. 124 119 * @func: Method vtable for this resource. Immutable. 125 - * @mob_node; Node for the MOB guest memory rbtree. Protected by 120 + * @mob_node: Node for the MOB guest memory rbtree. Protected by 126 121 * @guest_memory_bo reserved. 127 122 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock. 128 123 * @binding_head: List head for the context binding list. Protected by 129 124 * the @dev_priv::binding_mutex 125 + * @dirty: resource's dirty tracker 130 126 * @res_free: The resource destructor. 131 127 * @hw_destroy: Callback to destroy the resource on the device, as part of 132 128 * resource destruction. 133 129 */ 134 - struct vmw_bo; 135 - struct vmw_bo; 136 - struct vmw_resource_dirty; 137 130 struct vmw_resource { 138 131 struct kref kref; 139 132 struct vmw_private *dev_priv; ··· 199 196 * @quality_level: Quality level. 200 197 * @autogen_filter: Filter for automatically generated mipmaps. 201 198 * @array_size: Number of array elements for a 1D/2D texture. For cubemap 202 - texture number of faces * array_size. This should be 0 for pre 203 - SM4 device. 199 + * texture number of faces * array_size. This should be 0 for pre 200 + * SM4 device. 204 201 * @buffer_byte_stride: Buffer byte stride. 205 202 * @num_sizes: Size of @sizes. For GB surface this should always be 1. 206 203 * @base_size: Surface dimension. ··· 268 265 struct vmw_res_cache_entry { 269 266 uint32_t handle; 270 267 struct vmw_resource *res; 268 + /* private: */ 271 269 void *private; 270 + /* public: */ 272 271 unsigned short valid_handle; 273 272 unsigned short valid; 274 273 }; 275 274 276 275 /** 277 276 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings. 277 + * @vmw_dma_alloc_coherent: Use TTM coherent pages 278 + * @vmw_dma_map_populate: Unmap from DMA just after unpopulate 279 + * @vmw_dma_map_bind: Unmap from DMA just before unbind 278 280 */ 279 281 enum vmw_dma_map_mode { 280 - vmw_dma_alloc_coherent, /* Use TTM coherent pages */ 281 - vmw_dma_map_populate, /* Unmap from DMA just after unpopulate */ 282 - vmw_dma_map_bind, /* Unmap from DMA just before unbind */ 282 + vmw_dma_alloc_coherent, 283 + vmw_dma_map_populate, 284 + vmw_dma_map_bind, 285 + /* private: */ 283 286 vmw_dma_map_max 284 287 }; 285 288 ··· 293 284 * struct vmw_sg_table - Scatter/gather table for binding, with additional 294 285 * device-specific information. 295 286 * 287 + * @mode: which page mapping mode to use 288 + * @pages: Array of page pointers to the pages. 289 + * @addrs: DMA addresses to the pages if coherent pages are used. 
296 290 * @sgt: Pointer to a struct sg_table with binding information 297 - * @num_regions: Number of regions with device-address contiguous pages 291 + * @num_pages: Number of @pages 298 292 */ 299 293 struct vmw_sg_table { 300 294 enum vmw_dma_map_mode mode; ··· 365 353 * than from user-space 366 354 * @fp: If @kernel is false, points to the file of the client. Otherwise 367 355 * NULL 356 + * @filp: DRM state for this file 368 357 * @cmd_bounce: Command bounce buffer used for command validation before 369 358 * copying to fifo space 370 359 * @cmd_bounce_size: Current command bounce buffer size ··· 742 729 bool vmwgfx_supported(struct vmw_private *vmw); 743 730 744 731 745 - /** 732 + /* 746 733 * GMR utilities - vmwgfx_gmr.c 747 734 */ 748 735 ··· 752 739 int gmr_id); 753 740 extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id); 754 741 755 - /** 742 + /* 756 743 * User handles 757 744 */ 758 745 struct vmw_user_object { ··· 772 759 void vmw_user_object_unmap(struct vmw_user_object *uo); 773 760 bool vmw_user_object_is_mapped(struct vmw_user_object *uo); 774 761 775 - /** 762 + /* 776 763 * Resource utilities - vmwgfx_resource.c 777 764 */ 778 765 struct vmw_user_resource_conv; ··· 832 819 return !RB_EMPTY_NODE(&res->mob_node); 833 820 } 834 821 835 - /** 822 + /* 836 823 * GEM related functionality - vmwgfx_gem.c 837 824 */ 838 825 struct vmw_bo_params; ··· 846 833 struct drm_file *filp); 847 834 extern void vmw_debugfs_gem_init(struct vmw_private *vdev); 848 835 849 - /** 836 + /* 850 837 * Misc Ioctl functionality - vmwgfx_ioctl.c 851 838 */ 852 839 ··· 859 846 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data, 860 847 struct drm_file *file_priv); 861 848 862 - /** 849 + /* 863 850 * Fifo utilities - vmwgfx_fifo.c 864 851 */ 865 852 ··· 893 880 894 881 895 882 /** 896 - * vmw_fifo_caps - Returns the capabilities of the FIFO command 883 + * vmw_fifo_caps - Get the capabilities of the FIFO command 897 884 * queue or 0 if fifo memory isn't present. 898 885 * @dev_priv: The device private context 886 + * 887 + * Returns: capabilities of the FIFO command or %0 if fifo memory not present 899 888 */ 900 889 static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv) 901 890 { ··· 908 893 909 894 910 895 /** 911 - * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3 912 - * is enabled in the FIFO. 896 + * vmw_is_cursor_bypass3_enabled - check Cursor Bypass 3 enabled setting 897 + * in the FIFO. 913 898 * @dev_priv: The device private context 899 + * 900 + * Returns: %true iff Cursor Bypass 3 is enabled in the FIFO 914 901 */ 915 902 static inline bool 916 903 vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv) ··· 920 903 return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0; 921 904 } 922 905 923 - /** 906 + /* 924 907 * TTM buffer object driver - vmwgfx_ttm_buffer.c 925 908 */ 926 909 ··· 944 927 * 945 928 * @viter: Pointer to the iterator to advance. 946 929 * 947 - * Returns false if past the list of pages, true otherwise. 930 + * Returns: false if past the list of pages, true otherwise. 948 931 */ 949 932 static inline bool vmw_piter_next(struct vmw_piter *viter) 950 933 { ··· 956 939 * 957 940 * @viter: Pointer to the iterator 958 941 * 959 - * Returns the DMA address of the page pointed to by @viter. 942 + * Returns: the DMA address of the page pointed to by @viter. 
960 943 */ 961 944 static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter) 962 945 { ··· 968 951 * 969 952 * @viter: Pointer to the iterator 970 953 * 971 - * Returns the DMA address of the page pointed to by @viter. 954 + * Returns: the DMA address of the page pointed to by @viter. 972 955 */ 973 956 static inline struct page *vmw_piter_page(struct vmw_piter *viter) 974 957 { 975 958 return viter->pages[viter->i]; 976 959 } 977 960 978 - /** 961 + /* 979 962 * Command submission - vmwgfx_execbuf.c 980 963 */ 981 964 ··· 1010 993 int32_t out_fence_fd); 1011 994 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd); 1012 995 1013 - /** 996 + /* 1014 997 * IRQs and wating - vmwgfx_irq.c 1015 998 */ 1016 999 ··· 1033 1016 bool vmw_generic_waiter_remove(struct vmw_private *dev_priv, 1034 1017 u32 flag, int *waiter_count); 1035 1018 1036 - /** 1019 + /* 1037 1020 * Kernel modesetting - vmwgfx_kms.c 1038 1021 */ 1039 1022 ··· 1065 1048 extern void vmw_resource_unpin(struct vmw_resource *res); 1066 1049 extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res); 1067 1050 1068 - /** 1051 + /* 1069 1052 * Overlay control - vmwgfx_overlay.c 1070 1053 */ 1071 1054 ··· 1080 1063 int vmw_overlay_num_overlays(struct vmw_private *dev_priv); 1081 1064 int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv); 1082 1065 1083 - /** 1066 + /* 1084 1067 * GMR Id manager 1085 1068 */ 1086 1069 1087 1070 int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type); 1088 1071 void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type); 1089 1072 1090 - /** 1073 + /* 1091 1074 * System memory manager 1092 1075 */ 1093 1076 int vmw_sys_man_init(struct vmw_private *dev_priv); 1094 1077 void vmw_sys_man_fini(struct vmw_private *dev_priv); 1095 1078 1096 - /** 1079 + /* 1097 1080 * Prime - vmwgfx_prime.c 1098 1081 */ 1099 1082 ··· 1309 1292 * @line: The current line of the blit. 1310 1293 * @line_offset: Offset of the current line segment. 1311 1294 * @cpp: Bytes per pixel (granularity information). 1312 - * @memcpy: Which memcpy function to use. 1295 + * @do_cpy: Which memcpy function to use. 1313 1296 */ 1314 1297 struct vmw_diff_cpy { 1315 1298 struct drm_rect rect; ··· 1397 1380 1398 1381 /** 1399 1382 * VMW_DEBUG_KMS - Debug output for kernel mode-setting 1383 + * @fmt: format string for the args 1400 1384 * 1401 1385 * This macro is for debugging vmwgfx mode-setting code. 1402 1386 */ 1403 1387 #define VMW_DEBUG_KMS(fmt, ...) \ 1404 1388 DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) 1405 1389 1406 - /** 1390 + /* 1407 1391 * Inline helper functions 1408 1392 */ 1409 1393 ··· 1435 1417 1436 1418 /** 1437 1419 * vmw_fifo_mem_read - Perform a MMIO read from the fifo memory 1438 - * 1420 + * @vmw: The device private structure 1439 1421 * @fifo_reg: The fifo register to read from 1440 1422 * 1441 1423 * This function is intended to be equivalent to ioread32() on 1442 1424 * memremap'd memory, but without byteswapping. 
1425 + * 1426 + * Returns: the value read 1443 1427 */ 1444 1428 static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg) 1445 1429 { ··· 1451 1431 1452 1432 /** 1453 1433 * vmw_fifo_mem_write - Perform a MMIO write to volatile memory 1454 - * 1455 - * @addr: The fifo register to write to 1434 + * @vmw: The device private structure 1435 + * @fifo_reg: The fifo register to write to 1436 + * @value: The value to write 1456 1437 * 1457 1438 * This function is intended to be equivalent to iowrite32 on 1458 1439 * memremap'd memory, but without byteswapping.
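Editor's note: most of this vmwgfx churn is kernel-doc hygiene: a comment opened with a double-star introduces a kernel-doc block that must document every member and use "Returns:", a single-star comment is ignored by the tooling, and private:/public: markers hide members from the generated docs. A compilable toy struct showing the shape, with hypothetical names:

#include <stddef.h>
#include <stdio.h>

/**
 * struct demo_entry - cached lookup result (hypothetical)
 * @handle: Userspace handle this entry caches.
 * @valid: Non-zero when @handle currently resolves.
 */
struct demo_entry {
	unsigned int handle;
	/* private: */
	void *priv;		/* omitted from the generated docs */
	/* public: */
	unsigned short valid;
};

int main(void)
{
	struct demo_entry e = { .handle = 7, .priv = NULL, .valid = 1 };

	printf("handle=%u valid=%u\n", e.handle, e.valid);
	return 0;
}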
+2 -1
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 771 771 ret = vmw_bo_dirty_add(bo); 772 772 if (!ret && surface && surface->res.func->dirty_alloc) { 773 773 surface->res.coherent = true; 774 - ret = surface->res.func->dirty_alloc(&surface->res); 774 + if (surface->res.dirty == NULL) 775 + ret = surface->res.func->dirty_alloc(&surface->res); 775 776 } 776 777 ttm_bo_unreserve(&bo->tbo); 777 778 }
+4 -6
drivers/gpu/drm/xe/xe_ggtt.c
··· 313 313 { 314 314 struct xe_ggtt *ggtt = arg; 315 315 316 + scoped_guard(mutex, &ggtt->lock) 317 + ggtt->flags &= ~XE_GGTT_FLAGS_ONLINE; 316 318 drain_workqueue(ggtt->wq); 317 319 } 318 320 ··· 379 377 if (err) 380 378 return err; 381 379 380 + ggtt->flags |= XE_GGTT_FLAGS_ONLINE; 382 381 err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt); 383 382 if (err) 384 383 return err; ··· 413 410 static void ggtt_node_remove(struct xe_ggtt_node *node) 414 411 { 415 412 struct xe_ggtt *ggtt = node->ggtt; 416 - struct xe_device *xe = tile_to_xe(ggtt->tile); 417 413 bool bound; 418 - int idx; 419 - 420 - bound = drm_dev_enter(&xe->drm, &idx); 421 414 422 415 mutex_lock(&ggtt->lock); 416 + bound = ggtt->flags & XE_GGTT_FLAGS_ONLINE; 423 417 if (bound) 424 418 xe_ggtt_clear(ggtt, node->base.start, node->base.size); 425 419 drm_mm_remove_node(&node->base); ··· 428 428 429 429 if (node->invalidate_on_remove) 430 430 xe_ggtt_invalidate(ggtt); 431 - 432 - drm_dev_exit(idx); 433 431 434 432 free_node: 435 433 xe_ggtt_node_fini(node);
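Editor's note: rather than drm_dev_enter(), node removal now keys off an online flag cleared under ggtt->lock before the workqueue is drained. A reduced userspace model of that teardown ordering, using a pthread mutex in place of ggtt->lock; everything else is illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ggtt {
	pthread_mutex_t lock;
	bool online;
};

/* fini: mark offline under the lock, then drain anything still queued. */
static void ggtt_fini(struct ggtt *g)
{
	pthread_mutex_lock(&g->lock);
	g->online = false;
	pthread_mutex_unlock(&g->lock);
	/* drain_workqueue() would go here: stragglers see online == false */
}

static void node_remove(struct ggtt *g)
{
	pthread_mutex_lock(&g->lock);
	if (g->online)
		printf("bound: clearing GGTT range on the HW\n");
	else
		printf("offline: skipping the HW clear\n");
	/* software-side node removal happens under the lock either way */
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct ggtt g = { PTHREAD_MUTEX_INITIALIZER, true };

	node_remove(&g);
	ggtt_fini(&g);
	node_remove(&g);
	return 0;
}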
+4 -1
drivers/gpu/drm/xe/xe_ggtt_types.h
··· 28 28 /** @size: Total usable size of this GGTT */ 29 29 u64 size; 30 30 31 - #define XE_GGTT_FLAGS_64K BIT(0) 31 + #define XE_GGTT_FLAGS_64K BIT(0) 32 + #define XE_GGTT_FLAGS_ONLINE BIT(1) 32 33 /** 33 34 * @flags: Flags for this GGTT 34 35 * Acceptable flags: 35 36 * - %XE_GGTT_FLAGS_64K - if PTE size is 64K. Otherwise, regular is 4K. 37 + * - %XE_GGTT_FLAGS_ONLINE - set while the GGTT is online; protected by 38 + * ggtt->lock after init 36 39 unsigned int flags; 37 40 /** @scratch: Internal object allocation used as a scratch page */
+2
drivers/gpu/drm/xe/xe_gt_ccs_mode.c
··· 12 12 #include "xe_gt_printk.h" 13 13 #include "xe_gt_sysfs.h" 14 14 #include "xe_mmio.h" 15 + #include "xe_pm.h" 15 16 #include "xe_sriov.h" 16 17 17 18 static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines) ··· 151 150 xe_gt_info(gt, "Setting compute mode to %d\n", num_engines); 152 151 gt->ccs_mode = num_engines; 153 152 xe_gt_record_user_engines(gt); 153 + guard(xe_pm_runtime)(xe); 154 154 xe_gt_reset(gt); 155 155 } 156 156
+27 -5
drivers/gpu/drm/xe/xe_guc.c
··· 1124 1124 struct xe_guc_pc *guc_pc = &gt->uc.guc.pc; 1125 1125 u32 before_freq, act_freq, cur_freq; 1126 1126 u32 status = 0, tries = 0; 1127 + int load_result, ret; 1127 1128 ktime_t before; 1128 1129 u64 delta_ms; 1129 - int ret; 1130 1130 1131 1131 before_freq = xe_guc_pc_get_act_freq(guc_pc); 1132 1132 before = ktime_get(); 1133 1133 1134 - ret = poll_timeout_us(ret = guc_load_done(gt, &status, &tries), ret, 1134 + ret = poll_timeout_us(load_result = guc_load_done(gt, &status, &tries), load_result, 1135 1135 10 * USEC_PER_MSEC, 1136 1136 GUC_LOAD_TIMEOUT_SEC * USEC_PER_SEC, false); 1137 1137 ··· 1139 1139 act_freq = xe_guc_pc_get_act_freq(guc_pc); 1140 1140 cur_freq = xe_guc_pc_get_cur_freq_fw(guc_pc); 1141 1141 1142 - if (ret) { 1142 + if (ret || load_result <= 0) { 1143 1143 xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz)\n", 1144 1144 status, delta_ms, xe_guc_pc_get_act_freq(guc_pc), 1145 1145 xe_guc_pc_get_cur_freq_fw(guc_pc)); ··· 1347 1347 return 0; 1348 1348 } 1349 1349 1350 - int xe_guc_suspend(struct xe_guc *guc) 1350 + /** 1351 + * xe_guc_softreset() - Soft reset GuC 1352 + * @guc: The GuC object 1353 + * 1354 + * Send soft reset command to GuC through mmio send. 1355 + * 1356 + * Return: 0 if success, otherwise error code 1357 + */ 1358 + int xe_guc_softreset(struct xe_guc *guc) 1351 1359 { 1352 - struct xe_gt *gt = guc_to_gt(guc); 1353 1360 u32 action[] = { 1354 1361 XE_GUC_ACTION_CLIENT_SOFT_RESET, 1355 1362 }; 1356 1363 int ret; 1357 1364 1365 + if (!xe_uc_fw_is_running(&guc->fw)) 1366 + return 0; 1367 + 1358 1368 ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action)); 1369 + if (ret) 1370 + return ret; 1371 + 1372 + return 0; 1373 + } 1374 + 1375 + int xe_guc_suspend(struct xe_guc *guc) 1376 + { 1377 + struct xe_gt *gt = guc_to_gt(guc); 1378 + int ret; 1379 + 1380 + ret = xe_guc_softreset(guc); 1359 1381 if (ret) { 1360 1382 xe_gt_err(gt, "GuC suspend failed: %pe\n", ERR_PTR(ret)); 1361 1383 return ret;
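Editor's note: the load-polling fix separates two failure signals that previously shared one variable: the timeout status returned by poll_timeout_us() and the value the polled expression produced. A small sketch of why the split matters, with poll_until() standing in for the kernel macro and a faked guc_load_done():

#include <stdio.h>

/* Fake load status: <0 error, 0 still loading, >0 done. */
static int guc_load_done_fake(int tick)
{
	return tick >= 3 ? 1 : 0;
}

static int poll_until(int (*fn)(int), int tries, int *result)
{
	for (int t = 0; t < tries; t++) {
		*result = fn(t);
		if (*result)
			return 0;	/* condition became non-zero */
	}
	return -1;			/* timed out */
}

int main(void)
{
	int load_result, ret;

	ret = poll_until(guc_load_done_fake, 10, &load_result);

	/* Two independent failure signals: a timeout (ret) and a bad load
	 * status (load_result <= 0). Reusing one variable for both, as the
	 * old code did, lets one outcome overwrite the other. */
	if (ret || load_result <= 0)
		printf("load failed\n");
	else
		printf("load ok\n");
	return 0;
}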
+1
drivers/gpu/drm/xe/xe_guc.h
··· 44 44 void xe_guc_runtime_suspend(struct xe_guc *guc); 45 45 void xe_guc_runtime_resume(struct xe_guc *guc); 46 46 int xe_guc_suspend(struct xe_guc *guc); 47 + int xe_guc_softreset(struct xe_guc *guc); 47 48 void xe_guc_notify(struct xe_guc *guc); 48 49 int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr); 49 50 int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len);
+1
drivers/gpu/drm/xe/xe_guc_ct.c
··· 345 345 { 346 346 struct xe_guc_ct *ct = arg; 347 347 348 + xe_guc_ct_stop(ct); 348 349 guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED); 349 350 } 350 351
+61 -25
drivers/gpu/drm/xe/xe_guc_submit.c
··· 48 48 49 49 #define XE_GUC_EXEC_QUEUE_CGP_CONTEXT_ERROR_LEN 6 50 50 51 + static int guc_submit_reset_prepare(struct xe_guc *guc); 52 + 51 53 static struct xe_guc * 52 54 exec_queue_to_guc(struct xe_exec_queue *q) 53 55 { ··· 241 239 EXEC_QUEUE_STATE_BANNED)); 242 240 } 243 241 244 - static void guc_submit_fini(struct drm_device *drm, void *arg) 242 + static void guc_submit_sw_fini(struct drm_device *drm, void *arg) 245 243 { 246 244 struct xe_guc *guc = arg; 247 245 struct xe_device *xe = guc_to_xe(guc); ··· 257 255 xe_gt_assert(gt, ret); 258 256 259 257 xa_destroy(&guc->submission_state.exec_queue_lookup); 258 + } 259 + 260 + static void guc_submit_fini(void *arg) 261 + { 262 + struct xe_guc *guc = arg; 263 + 264 + /* Forcefully kill any remaining exec queues */ 265 + xe_guc_ct_stop(&guc->ct); 266 + guc_submit_reset_prepare(guc); 267 + xe_guc_softreset(guc); 268 + xe_guc_submit_stop(guc); 269 + xe_uc_fw_sanitize(&guc->fw); 270 + xe_guc_submit_pause_abort(guc); 260 271 } 261 272 262 273 static void guc_submit_wedged_fini(void *arg) ··· 341 326 342 327 guc->submission_state.initialized = true; 343 328 344 - return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc); 329 + err = drmm_add_action_or_reset(&xe->drm, guc_submit_sw_fini, guc); 330 + if (err) 331 + return err; 332 + 333 + return devm_add_action_or_reset(xe->drm.dev, guc_submit_fini, guc); 345 334 } 346 335 347 336 /* ··· 1271 1252 */ 1272 1253 void xe_guc_submit_wedge(struct xe_guc *guc) 1273 1254 { 1255 + struct xe_device *xe = guc_to_xe(guc); 1274 1256 struct xe_gt *gt = guc_to_gt(guc); 1275 1257 struct xe_exec_queue *q; 1276 1258 unsigned long index; ··· 1286 1266 if (!guc->submission_state.initialized) 1287 1267 return; 1288 1268 1289 - err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev, 1290 - guc_submit_wedged_fini, guc); 1291 - if (err) { 1292 - xe_gt_err(gt, "Failed to register clean-up in wedged.mode=%s; " 1293 - "Although device is wedged.\n", 1294 - xe_wedged_mode_to_string(XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET)); 1295 - return; 1296 - } 1269 + if (xe->wedged.mode == 2) { 1270 + err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev, 1271 + guc_submit_wedged_fini, guc); 1272 + if (err) { 1273 + xe_gt_err(gt, "Failed to register clean-up on wedged.mode=2; " 1274 + "Although device is wedged.\n"); 1275 + return; 1276 + } 1297 1277 1298 - mutex_lock(&guc->submission_state.lock); 1299 - xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) 1300 - if (xe_exec_queue_get_unless_zero(q)) 1301 - set_exec_queue_wedged(q); 1302 - mutex_unlock(&guc->submission_state.lock); 1278 + mutex_lock(&guc->submission_state.lock); 1279 + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) 1280 + if (xe_exec_queue_get_unless_zero(q)) 1281 + set_exec_queue_wedged(q); 1282 + mutex_unlock(&guc->submission_state.lock); 1283 + } else { 1284 + /* Forcefully kill any remaining exec queues, signal fences */ 1285 + guc_submit_reset_prepare(guc); 1286 + xe_guc_submit_stop(guc); 1287 + xe_guc_softreset(guc); 1288 + xe_uc_fw_sanitize(&guc->fw); 1289 + xe_guc_submit_pause_abort(guc); 1290 + } 1303 1291 } 1304 1292 1305 1293 static bool guc_submit_hint_wedged(struct xe_guc *guc) ··· 2258 2230 static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) 2259 2231 { 2260 2232 struct xe_gpu_scheduler *sched = &q->guc->sched; 2233 + bool do_destroy = false; 2261 2234 2262 2235 /* Stop scheduling + flush any DRM scheduler operations */ 2263 2236 xe_sched_submission_stop(sched); ··· 2266 2237 /* Clean up lost G2H 
+ reset engine state */ 2267 2238 if (exec_queue_registered(q)) { 2268 2239 if (exec_queue_destroyed(q)) 2269 - __guc_exec_queue_destroy(guc, q); 2240 + do_destroy = true; 2270 2241 } 2271 2242 if (q->guc->suspend_pending) { 2272 2243 set_exec_queue_suspended(q); ··· 2302 2273 xe_guc_exec_queue_trigger_cleanup(q); 2303 2274 } 2304 2275 } 2276 + 2277 + if (do_destroy) 2278 + __guc_exec_queue_destroy(guc, q); 2305 2279 } 2306 2280 2307 - int xe_guc_submit_reset_prepare(struct xe_guc *guc) 2281 + static int guc_submit_reset_prepare(struct xe_guc *guc) 2308 2282 { 2309 2283 int ret; 2310 - 2311 - if (xe_gt_WARN_ON(guc_to_gt(guc), vf_recovery(guc))) 2312 - return 0; 2313 - 2314 - if (!guc->submission_state.initialized) 2315 - return 0; 2316 2284 2317 2285 /* 2318 2286 * Using an atomic here rather than submission_state.lock as this ··· 2323 2297 wake_up_all(&guc->ct.wq); 2324 2298 2325 2299 return ret; 2300 + } 2301 + 2302 + int xe_guc_submit_reset_prepare(struct xe_guc *guc) 2303 + { 2304 + if (xe_gt_WARN_ON(guc_to_gt(guc), vf_recovery(guc))) 2305 + return 0; 2306 + 2307 + if (!guc->submission_state.initialized) 2308 + return 0; 2309 + 2310 + return guc_submit_reset_prepare(guc); 2326 2311 } 2327 2312 2328 2313 void xe_guc_submit_reset_wait(struct xe_guc *guc) ··· 2732 2695 continue; 2733 2696 2734 2697 xe_sched_submission_start(sched); 2735 - if (exec_queue_killed_or_banned_or_wedged(q)) 2736 - xe_guc_exec_queue_trigger_cleanup(q); 2698 + guc_exec_queue_kill(q); 2737 2699 } 2738 2700 mutex_unlock(&guc->submission_state.lock); 2739 2701 }
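Editor's note: one detail worth calling out in guc_exec_queue_stop() above: destruction is now recorded in a do_destroy flag and performed after the remaining state handling, so the queue stays valid for the later checks. A miniature of that defer-the-destructor pattern, names illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	bool registered, destroyed, suspend_pending;
};

static void queue_destroy(struct queue *q)
{
	printf("destroying queue\n");
	free(q);
}

static void queue_stop(struct queue *q)
{
	bool do_destroy = false;

	/* Decide early... */
	if (q->registered && q->destroyed)
		do_destroy = true;

	/* ...but keep q alive for the remaining state handling... */
	if (q->suspend_pending)
		printf("completing pending suspend\n");

	/* ...and only free it once nothing below still reads it. */
	if (do_destroy)
		queue_destroy(q);
}

int main(void)
{
	struct queue *q = malloc(sizeof(*q));

	if (!q)
		return 1;
	q->registered = q->destroyed = q->suspend_pending = true;
	queue_stop(q);
	return 0;
}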
+2 -2
drivers/gpu/drm/xe/xe_lrc.c
··· 2413 2413 * @lrc: Pointer to the lrc. 2414 2414 * 2415 2415 * Return latest ctx timestamp. With support for active contexts, the 2416 - * calculation may bb slightly racy, so follow a read-again logic to ensure that 2416 + * calculation may be slightly racy, so follow a read-again logic to ensure that 2417 2417 * the context is still active before returning the right timestamp. 2418 2418 * 2419 2419 * Returns: New ctx timestamp value 2420 2420 */ 2421 2421 u64 xe_lrc_timestamp(struct xe_lrc *lrc) 2422 2422 { 2423 - u64 lrc_ts, reg_ts, new_ts; 2423 + u64 lrc_ts, reg_ts, new_ts = lrc->ctx_timestamp; 2424 2424 u32 engine_id; 2425 2425 2426 2426 lrc_ts = xe_lrc_ctx_timestamp(lrc);
+5 -2
drivers/gpu/drm/xe/xe_oa.c
··· 543 543 size_t offset = 0; 544 544 int ret; 545 545 546 - /* Can't read from disabled streams */ 547 - if (!stream->enabled || !stream->sample) 546 + if (!stream->sample) 548 547 return -EINVAL; 549 548 550 549 if (!(file->f_flags & O_NONBLOCK)) { ··· 1459 1460 1460 1461 if (stream->sample) 1461 1462 hrtimer_cancel(&stream->poll_check_timer); 1463 + 1464 + /* Update stream->oa_buffer.tail to allow any final reports to be read */ 1465 + if (xe_oa_buffer_check_unlocked(stream)) 1466 + wake_up(&stream->poll_wq); 1462 1467 } 1463 1468 1464 1469 static int xe_oa_enable_preempt_timeslice(struct xe_oa_stream *stream)
+29 -9
drivers/gpu/drm/xe/xe_pt.c
··· 1655 1655 XE_WARN_ON(!level); 1656 1656 /* Check for leaf node */ 1657 1657 if (xe_walk->prl && xe_page_reclaim_list_valid(xe_walk->prl) && 1658 - (!xe_child->base.children || !xe_child->base.children[first])) { 1658 + xe_child->level <= MAX_HUGEPTE_LEVEL) { 1659 1659 struct iosys_map *leaf_map = &xe_child->bo->vmap; 1660 1660 pgoff_t count = xe_pt_num_entries(addr, next, xe_child->level, walk); 1661 1661 1662 1662 for (pgoff_t i = 0; i < count; i++) { 1663 - u64 pte = xe_map_rd(xe, leaf_map, (first + i) * sizeof(u64), u64); 1663 + u64 pte; 1664 1664 int ret; 1665 + 1666 + /* 1667 + * If not a leaf pt, skip unless non-leaf pt is interleaved between 1668 + * leaf ptes which causes the page walk to skip over the child leaves 1669 + */ 1670 + if (xe_child->base.children && xe_child->base.children[first + i]) { 1671 + u64 pt_size = 1ULL << walk->shifts[xe_child->level]; 1672 + bool edge_pt = (i == 0 && !IS_ALIGNED(addr, pt_size)) || 1673 + (i == count - 1 && !IS_ALIGNED(next, pt_size)); 1674 + 1675 + if (!edge_pt) { 1676 + xe_page_reclaim_list_abort(xe_walk->tile->primary_gt, 1677 + xe_walk->prl, 1678 + "PT is skipped by walk at level=%u offset=%lu", 1679 + xe_child->level, first + i); 1680 + break; 1681 + } 1682 + continue; 1683 + } 1684 + 1685 + pte = xe_map_rd(xe, leaf_map, (first + i) * sizeof(u64), u64); 1665 1686 1666 1687 /* 1667 1688 * In rare scenarios, pte may not be written yet due to racy conditions. ··· 1695 1674 } 1696 1675 1697 1676 /* Ensure it is a defined page */ 1698 - xe_tile_assert(xe_walk->tile, 1699 - xe_child->level == 0 || 1700 - (pte & (XE_PTE_PS64 | XE_PDE_PS_2M | XE_PDPE_PS_1G))); 1677 + xe_tile_assert(xe_walk->tile, xe_child->level == 0 || 1678 + (pte & (XE_PDE_PS_2M | XE_PDPE_PS_1G))); 1701 1679 1702 1680 /* An entry should be added for 64KB but contigious 4K have XE_PTE_PS64 */ 1703 1681 if (pte & XE_PTE_PS64) ··· 1721 1701 killed = xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk); 1722 1702 1723 1703 /* 1724 - * Verify PRL is active and if entry is not a leaf pte (base.children conditions), 1725 - * there is a potential need to invalidate the PRL if any PTE (num_live) are dropped. 1704 + * Verify if any PTE are potentially dropped at non-leaf levels, either from being 1705 + * killed or the page walk covers the region. 1726 1706 */ 1727 - if (xe_walk->prl && level > 1 && xe_child->num_live && 1728 - xe_child->base.children && xe_child->base.children[first]) { 1707 + if (xe_walk->prl && xe_page_reclaim_list_valid(xe_walk->prl) && 1708 + xe_child->level > MAX_HUGEPTE_LEVEL && xe_child->num_live) { 1729 1709 bool covered = xe_pt_covers(addr, next, xe_child->level, &xe_walk->base); 1730 1710 1731 1711 /*
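Editor's note: the edge_pt test above deserves unpacking: a child table at the first or last slot of the walk is only partially covered when the range boundary is not aligned to that level's size, and only then may a non-leaf entry be skipped without aborting the reclaim list. The predicate in isolation, with example numbers:

#include <stdbool.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long long pt_size = 1ULL << 21;	/* child span: 2 MiB */
	unsigned long long addr = 0x1f0000;		/* walk range start */
	unsigned long long next = 0x600000;		/* walk range end */
	unsigned long count = 3, i;			/* children touched */

	for (i = 0; i < count; i++) {
		bool edge_pt = (i == 0 && !IS_ALIGNED(addr, pt_size)) ||
			       (i == count - 1 && !IS_ALIGNED(next, pt_size));

		printf("child %lu: %s\n", i,
		       edge_pt ? "partially covered (edge, keep going)"
			       : "fully covered");
	}
	return 0;
}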
+24 -22
drivers/gpu/nova-core/gsp.rs
··· 47 47 unsafe impl<const NUM_ENTRIES: usize> AsBytes for PteArray<NUM_ENTRIES> {} 48 48 49 49 impl<const NUM_PAGES: usize> PteArray<NUM_PAGES> { 50 - /// Creates a new page table array mapping `NUM_PAGES` GSP pages starting at address `start`. 51 - fn new(start: DmaAddress) -> Result<Self> { 52 - let mut ptes = [0u64; NUM_PAGES]; 53 - for (i, pte) in ptes.iter_mut().enumerate() { 54 - *pte = start 55 - .checked_add(num::usize_as_u64(i) << GSP_PAGE_SHIFT) 56 - .ok_or(EOVERFLOW)?; 57 - } 58 - 59 - Ok(Self(ptes)) 50 + /// Returns the page table entry for `index`, for a mapping starting at `start`. 51 + // TODO: Replace with `IoView` projection once available. 52 + fn entry(start: DmaAddress, index: usize) -> Result<u64> { 53 + start 54 + .checked_add(num::usize_as_u64(index) << GSP_PAGE_SHIFT) 55 + .ok_or(EOVERFLOW) 60 56 } 61 57 } 62 58 ··· 82 86 NUM_PAGES * GSP_PAGE_SIZE, 83 87 GFP_KERNEL | __GFP_ZERO, 84 88 )?); 85 - let ptes = PteArray::<NUM_PAGES>::new(obj.0.dma_handle())?; 89 + 90 + let start_addr = obj.0.dma_handle(); 86 91 87 92 // SAFETY: `obj` has just been created and we are its sole user. 88 - unsafe { 89 - // Copy the self-mapping PTE at the expected location. 93 + let pte_region = unsafe { 90 94 obj.0 91 - .as_slice_mut(size_of::<u64>(), size_of_val(&ptes))? 92 - .copy_from_slice(ptes.as_bytes()) 95 + .as_slice_mut(size_of::<u64>(), NUM_PAGES * size_of::<u64>())? 93 96 }; 97 + 98 + // Write values one by one to avoid an on-stack instance of `PteArray`. 99 + for (i, chunk) in pte_region.chunks_exact_mut(size_of::<u64>()).enumerate() { 100 + let pte_value = PteArray::<0>::entry(start_addr, i)?; 101 + 102 + chunk.copy_from_slice(&pte_value.to_ne_bytes()); 103 + } 94 104 95 105 Ok(obj) 96 106 } ··· 145 143 // _kgspInitLibosLoggingStructures (allocates memory for buffers) 146 144 // kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array) 147 145 dma_write!( 148 - libos[0] = LibosMemoryRegionInitArgument::new("LOGINIT", &loginit.0) 149 - )?; 146 + libos, [0]?, LibosMemoryRegionInitArgument::new("LOGINIT", &loginit.0) 147 + ); 150 148 dma_write!( 151 - libos[1] = LibosMemoryRegionInitArgument::new("LOGINTR", &logintr.0) 152 - )?; 153 - dma_write!(libos[2] = LibosMemoryRegionInitArgument::new("LOGRM", &logrm.0))?; 154 - dma_write!(rmargs[0].inner = fw::GspArgumentsCached::new(cmdq))?; 155 - dma_write!(libos[3] = LibosMemoryRegionInitArgument::new("RMARGS", rmargs))?; 149 + libos, [1]?, LibosMemoryRegionInitArgument::new("LOGINTR", &logintr.0) 150 + ); 151 + dma_write!(libos, [2]?, LibosMemoryRegionInitArgument::new("LOGRM", &logrm.0)); 152 + dma_write!(rmargs, [0]?.inner, fw::GspArgumentsCached::new(cmdq)); 153 + dma_write!(libos, [3]?, LibosMemoryRegionInitArgument::new("RMARGS", rmargs)); 156 154 }, 157 155 })) 158 156 })
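Editor's note: the Rust change streams PTEs into the DMA buffer one entry at a time instead of materializing the whole PteArray on the stack. The same per-entry computation as a short C sketch; the constants are illustrative and the driver returns EOVERFLOW where this returns -1:

#include <stdint.h>
#include <stdio.h>

#define GSP_PAGE_SHIFT 12
#define NUM_PAGES 8

/* One self-mapping entry: start + (i << GSP_PAGE_SHIFT), overflow-checked. */
static int pte_entry(uint64_t start, unsigned int i, uint64_t *out)
{
	uint64_t off = (uint64_t)i << GSP_PAGE_SHIFT;

	if (start > UINT64_MAX - off)
		return -1;
	*out = start + off;
	return 0;
}

int main(void)
{
	uint64_t start = 0x1000000000ULL;	/* example DMA address */
	uint64_t pte;

	/* Stream entries out one at a time; no NUM_PAGES-sized array is
	 * ever built on the stack. */
	for (unsigned int i = 0; i < NUM_PAGES; i++) {
		if (pte_entry(start, i, &pte))
			return 1;
		printf("pte[%u] = 0x%llx\n", i, (unsigned long long)pte);
	}
	return 0;
}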
+1 -1
drivers/gpu/nova-core/gsp/boot.rs
··· 157 157 158 158 let wpr_meta = 159 159 CoherentAllocation::<GspFwWprMeta>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?; 160 - dma_write!(wpr_meta[0] = GspFwWprMeta::new(&gsp_fw, &fb_layout))?; 160 + dma_write!(wpr_meta, [0]?, GspFwWprMeta::new(&gsp_fw, &fb_layout)); 161 161 162 162 self.cmdq 163 163 .send_command(bar, commands::SetSystemInfo::new(pdev))?;
+33 -60
drivers/gpu/nova-core/gsp/cmdq.rs
··· 2 2 3 3 use core::{ 4 4 cmp, 5 - mem, 6 - sync::atomic::{ 7 - fence, 8 - Ordering, // 9 - }, // 5 + mem, // 10 6 }; 11 7 12 8 use kernel::{ ··· 142 146 #[repr(C)] 143 147 // There is no struct defined for this in the open-gpu-kernel-source headers. 144 148 // Instead it is defined by code in `GspMsgQueuesInit()`. 145 - struct Msgq { 149 + // TODO: Revert to private once `IoView` projections replace the `gsp_mem` module. 150 + pub(super) struct Msgq { 146 151 /// Header for sending messages, including the write pointer. 147 - tx: MsgqTxHeader, 152 + pub(super) tx: MsgqTxHeader, 148 153 /// Header for receiving messages, including the read pointer. 149 - rx: MsgqRxHeader, 154 + pub(super) rx: MsgqRxHeader, 150 155 /// The message queue proper. 151 156 msgq: MsgqData, 152 157 } 153 158 154 159 /// Structure shared between the driver and the GSP and containing the command and message queues. 155 160 #[repr(C)] 156 - struct GspMem { 161 + // TODO: Revert to private once `IoView` projections replace the `gsp_mem` module. 162 + pub(super) struct GspMem { 157 163 /// Self-mapping page table entries. 158 - ptes: PteArray<{ GSP_PAGE_SIZE / size_of::<u64>() }>, 164 + ptes: PteArray<{ Self::PTE_ARRAY_SIZE }>, 159 165 /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the 160 166 /// write and read pointers that the CPU updates. 161 167 /// 162 168 /// This member is read-only for the GSP. 163 - cpuq: Msgq, 169 + pub(super) cpuq: Msgq, 164 170 /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the 165 171 /// write and read pointers that the GSP updates. 166 172 /// 167 173 /// This member is read-only for the driver. 168 - gspq: Msgq, 174 + pub(super) gspq: Msgq, 175 + } 176 + 177 + impl GspMem { 178 + const PTE_ARRAY_SIZE: usize = GSP_PAGE_SIZE / size_of::<u64>(); 169 179 } 170 180 171 181 // SAFETY: These structs don't meet the no-padding requirements of AsBytes but ··· 203 201 204 202 let gsp_mem = 205 203 CoherentAllocation::<GspMem>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?; 206 - dma_write!(gsp_mem[0].ptes = PteArray::new(gsp_mem.dma_handle())?)?; 207 - dma_write!(gsp_mem[0].cpuq.tx = MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES))?; 208 - dma_write!(gsp_mem[0].cpuq.rx = MsgqRxHeader::new())?; 204 + 205 + let start = gsp_mem.dma_handle(); 206 + // Write values one by one to avoid an on-stack instance of `PteArray`. 207 + for i in 0..GspMem::PTE_ARRAY_SIZE { 208 + dma_write!(gsp_mem, [0]?.ptes.0[i], PteArray::<0>::entry(start, i)?); 209 + } 210 + 211 + dma_write!( 212 + gsp_mem, 213 + [0]?.cpuq.tx, 214 + MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES) 215 + ); 216 + dma_write!(gsp_mem, [0]?.cpuq.rx, MsgqRxHeader::new()); 209 217 210 218 Ok(Self(gsp_mem)) 211 219 } ··· 329 317 // 330 318 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 331 319 fn gsp_write_ptr(&self) -> u32 { 332 - let gsp_mem = self.0.start_ptr(); 333 - 334 - // SAFETY: 335 - // - The 'CoherentAllocation' contains at least one object. 336 - // - By the invariants of `CoherentAllocation` the pointer is valid. 337 - (unsafe { (*gsp_mem).gspq.tx.write_ptr() } % MSGQ_NUM_PAGES) 320 + super::fw::gsp_mem::gsp_write_ptr(&self.0) 338 321 } 339 322 340 323 // Returns the index of the memory page the GSP will read the next command from. ··· 338 331 // 339 332 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 
340 333 fn gsp_read_ptr(&self) -> u32 { 341 - let gsp_mem = self.0.start_ptr(); 342 - 343 - // SAFETY: 344 - // - The 'CoherentAllocation' contains at least one object. 345 - // - By the invariants of `CoherentAllocation` the pointer is valid. 346 - (unsafe { (*gsp_mem).gspq.rx.read_ptr() } % MSGQ_NUM_PAGES) 334 + super::fw::gsp_mem::gsp_read_ptr(&self.0) 347 335 } 348 336 349 337 // Returns the index of the memory page the CPU can read the next message from. ··· 347 345 // 348 346 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 349 347 fn cpu_read_ptr(&self) -> u32 { 350 - let gsp_mem = self.0.start_ptr(); 351 - 352 - // SAFETY: 353 - // - The ['CoherentAllocation'] contains at least one object. 354 - // - By the invariants of CoherentAllocation the pointer is valid. 355 - (unsafe { (*gsp_mem).cpuq.rx.read_ptr() } % MSGQ_NUM_PAGES) 348 + super::fw::gsp_mem::cpu_read_ptr(&self.0) 356 349 } 357 350 358 351 // Informs the GSP that it can send `elem_count` new pages into the message queue. 359 352 fn advance_cpu_read_ptr(&mut self, elem_count: u32) { 360 - let rptr = self.cpu_read_ptr().wrapping_add(elem_count) % MSGQ_NUM_PAGES; 361 - 362 - // Ensure read pointer is properly ordered. 363 - fence(Ordering::SeqCst); 364 - 365 - let gsp_mem = self.0.start_ptr_mut(); 366 - 367 - // SAFETY: 368 - // - The 'CoherentAllocation' contains at least one object. 369 - // - By the invariants of `CoherentAllocation` the pointer is valid. 370 - unsafe { (*gsp_mem).cpuq.rx.set_read_ptr(rptr) }; 353 + super::fw::gsp_mem::advance_cpu_read_ptr(&self.0, elem_count) 371 354 } 372 355 373 356 // Returns the index of the memory page the CPU can write the next command to. ··· 361 374 // 362 375 // - The returned value is between `0` and `MSGQ_NUM_PAGES`. 363 376 fn cpu_write_ptr(&self) -> u32 { 364 - let gsp_mem = self.0.start_ptr(); 365 - 366 - // SAFETY: 367 - // - The 'CoherentAllocation' contains at least one object. 368 - // - By the invariants of `CoherentAllocation` the pointer is valid. 369 - (unsafe { (*gsp_mem).cpuq.tx.write_ptr() } % MSGQ_NUM_PAGES) 377 + super::fw::gsp_mem::cpu_write_ptr(&self.0) 370 378 } 371 379 372 380 // Informs the GSP that it can process `elem_count` new pages from the command queue. 373 381 fn advance_cpu_write_ptr(&mut self, elem_count: u32) { 374 - let wptr = self.cpu_write_ptr().wrapping_add(elem_count) & MSGQ_NUM_PAGES; 375 - let gsp_mem = self.0.start_ptr_mut(); 376 - 377 - // SAFETY: 378 - // - The 'CoherentAllocation' contains at least one object. 379 - // - By the invariants of `CoherentAllocation` the pointer is valid. 380 - unsafe { (*gsp_mem).cpuq.tx.set_write_ptr(wptr) }; 381 - 382 - // Ensure all command data is visible before triggering the GSP read. 383 - fence(Ordering::SeqCst); 382 + super::fw::gsp_mem::advance_cpu_write_ptr(&self.0, elem_count) 384 383 } 385 384 } 386 385
+69 -32
drivers/gpu/nova-core/gsp/fw.rs
··· 40 40 }, 41 41 }; 42 42 43 + // TODO: Replace with `IoView` projections once available; the `unwrap()` calls go away once we 44 + // switch to the new `dma::Coherent` API. 45 + pub(super) mod gsp_mem { 46 + use core::sync::atomic::{ 47 + fence, 48 + Ordering, // 49 + }; 50 + 51 + use kernel::{ 52 + dma::CoherentAllocation, 53 + dma_read, 54 + dma_write, 55 + prelude::*, // 56 + }; 57 + 58 + use crate::gsp::cmdq::{ 59 + GspMem, 60 + MSGQ_NUM_PAGES, // 61 + }; 62 + 63 + pub(in crate::gsp) fn gsp_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { 64 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 65 + || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.tx.0.writePtr) % MSGQ_NUM_PAGES) }().unwrap() 66 + } 67 + 68 + pub(in crate::gsp) fn gsp_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { 69 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 70 + || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap() 71 + } 72 + 73 + pub(in crate::gsp) fn cpu_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { 74 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 75 + || -> Result<u32> { Ok(dma_read!(qs, [0]?.cpuq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap() 76 + } 77 + 78 + pub(in crate::gsp) fn advance_cpu_read_ptr(qs: &CoherentAllocation<GspMem>, count: u32) { 79 + let rptr = cpu_read_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES; 80 + 81 + // Ensure read pointer is properly ordered. 82 + fence(Ordering::SeqCst); 83 + 84 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 85 + || -> Result { 86 + dma_write!(qs, [0]?.cpuq.rx.0.readPtr, rptr); 87 + Ok(()) 88 + }() 89 + .unwrap() 90 + } 91 + 92 + pub(in crate::gsp) fn cpu_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 { 93 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 94 + || -> Result<u32> { Ok(dma_read!(qs, [0]?.cpuq.tx.0.writePtr) % MSGQ_NUM_PAGES) }().unwrap() 95 + } 96 + 97 + pub(in crate::gsp) fn advance_cpu_write_ptr(qs: &CoherentAllocation<GspMem>, count: u32) { 98 + let wptr = cpu_write_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES; 99 + 100 + // PANIC: A `dma::CoherentAllocation` always contains at least one element. 101 + || -> Result { 102 + dma_write!(qs, [0]?.cpuq.tx.0.writePtr, wptr); 103 + Ok(()) 104 + }() 105 + .unwrap(); 106 + 107 + // Ensure all command data is visible before triggering the GSP read. 108 + fence(Ordering::SeqCst); 109 + } 110 + } 111 + 43 112 /// Empty type to group methods related to heap parameters for running the GSP firmware. 44 113 enum GspFwHeapParams {} 45 114 ··· 777 708 entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(), 778 709 }) 779 710 } 780 - 781 - /// Returns the value of the write pointer for this queue. 782 - pub(crate) fn write_ptr(&self) -> u32 { 783 - let ptr = core::ptr::from_ref(&self.0.writePtr); 784 - 785 - // SAFETY: `ptr` is a valid pointer to a `u32`. 786 - unsafe { ptr.read_volatile() } 787 - } 788 - 789 - /// Sets the value of the write pointer for this queue. 790 - pub(crate) fn set_write_ptr(&mut self, val: u32) { 791 - let ptr = core::ptr::from_mut(&mut self.0.writePtr); 792 - 793 - // SAFETY: `ptr` is a valid pointer to a `u32`. 794 - unsafe { ptr.write_volatile(val) } 795 - } 796 711 } 797 712 798 713 // SAFETY: Padding is explicit and does not contain uninitialized data. ··· 791 738 /// Creates a new RX queue header. 
792 739 pub(crate) fn new() -> Self { 793 740 Self(Default::default()) 794 - } 795 - 796 - /// Returns the value of the read pointer for this queue. 797 - pub(crate) fn read_ptr(&self) -> u32 { 798 - let ptr = core::ptr::from_ref(&self.0.readPtr); 799 - 800 - // SAFETY: `ptr` is a valid pointer to a `u32`. 801 - unsafe { ptr.read_volatile() } 802 - } 803 - 804 - /// Sets the value of the read pointer for this queue. 805 - pub(crate) fn set_read_ptr(&mut self, val: u32) { 806 - let ptr = core::ptr::from_mut(&mut self.0.readPtr); 807 - 808 - // SAFETY: `ptr` is a valid pointer to a `u32`. 809 - unsafe { ptr.write_volatile(val) } 810 741 } 811 742 } 812 743
+2
drivers/hid/bpf/hid_bpf_dispatch.c
··· 444 444 (u64)(long)ctx, 445 445 true); /* prevent infinite recursions */ 446 446 447 + if (ret > size) 448 + ret = size; 447 449 if (ret > 0) 448 450 memcpy(buf, dma_data, ret); 449 451
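The two added lines clamp a length reported back by the BPF program before it is used for the copy. A generic sketch of that defensive pattern, using a hypothetical `copy_reported()` helper (not part of the driver):

```c
#include <stddef.h>
#include <string.h>

/* A length reported by a callee (here, the BPF program) is capped to
 * the destination buffer before the copy, so a misbehaving callee
 * cannot cause an out-of-bounds access. */
static size_t copy_reported(void *dst, size_t dst_size,
			    const void *src, long reported)
{
	size_t n;

	if (reported <= 0)
		return 0;
	n = (size_t)reported;
	if (n > dst_size)
		n = dst_size; /* the clamp added above */
	memcpy(dst, src, n);
	return n;
}
```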
+3 -2
drivers/hid/hid-appletb-kbd.c
··· 476 476 return 0; 477 477 } 478 478 479 - static int appletb_kbd_reset_resume(struct hid_device *hdev) 479 + static int appletb_kbd_resume(struct hid_device *hdev) 480 480 { 481 481 struct appletb_kbd *kbd = hid_get_drvdata(hdev); 482 482 ··· 500 500 .event = appletb_kbd_hid_event, 501 501 .input_configured = appletb_kbd_input_configured, 502 502 .suspend = pm_ptr(appletb_kbd_suspend), 503 - .reset_resume = pm_ptr(appletb_kbd_reset_resume), 503 + .resume = pm_ptr(appletb_kbd_resume), 504 + .reset_resume = pm_ptr(appletb_kbd_resume), 504 505 .driver.dev_groups = appletb_kbd_groups, 505 506 }; 506 507 module_hid_driver(appletb_kbd_hid_driver);
+3
drivers/hid/hid-asus.c
··· 1498 1498 USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X), 1499 1499 QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD }, 1500 1500 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, 1501 + USB_DEVICE_ID_ASUSTEK_XGM_2022), 1502 + }, 1503 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, 1501 1504 USB_DEVICE_ID_ASUSTEK_XGM_2023), 1502 1505 }, 1503 1506 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+4 -3
drivers/hid/hid-core.c
··· 2057 2057 rsize = max_buffer_size; 2058 2058 2059 2059 if (csize < rsize) { 2060 - dbg_hid("report %d is too short, (%d < %d)\n", report->id, 2061 - csize, rsize); 2062 - memset(cdata + csize, 0, rsize - csize); 2060 + hid_warn_ratelimited(hid, "Event data for report %d was too short (%d vs %d)\n", 2061 + report->id, rsize, csize); 2062 + ret = -EINVAL; 2063 + goto out; 2063 2064 } 2064 2065 2065 2066 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
+1 -2
drivers/hid/hid-ids.h
··· 229 229 #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X 0x1b4c 230 230 #define USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD 0x196b 231 231 #define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869 232 + #define USB_DEVICE_ID_ASUSTEK_XGM_2022 0x1970 232 233 #define USB_DEVICE_ID_ASUSTEK_XGM_2023 0x1a9a 233 234 234 235 #define USB_VENDOR_ID_ATEN 0x0557 ··· 455 454 #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401 456 455 #define USB_DEVICE_ID_HP_X2 0x074d 457 456 #define USB_DEVICE_ID_HP_X2_10_COVER 0x0755 458 - #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544 459 - #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 460 457 #define I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM 0x2F81 461 458 462 459 #define USB_VENDOR_ID_ELECOM 0x056e
+11 -7
drivers/hid/hid-input.c
··· 354 354 #define HID_BATTERY_QUIRK_FEATURE (1 << 1) /* ask for feature report */ 355 355 #define HID_BATTERY_QUIRK_IGNORE (1 << 2) /* completely ignore the battery */ 356 356 #define HID_BATTERY_QUIRK_AVOID_QUERY (1 << 3) /* do not query the battery */ 357 + #define HID_BATTERY_QUIRK_DYNAMIC (1 << 4) /* report present only after life signs */ 357 358 358 359 static const struct hid_device_id hid_battery_quirks[] = { 359 360 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, ··· 387 386 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 388 387 USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD), 389 388 HID_BATTERY_QUIRK_IGNORE }, 390 - { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN), 391 - HID_BATTERY_QUIRK_IGNORE }, 392 - { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN), 393 - HID_BATTERY_QUIRK_IGNORE }, 394 389 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L), 395 390 HID_BATTERY_QUIRK_AVOID_QUERY }, 396 391 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW), ··· 399 402 * Elan HID touchscreens seem to all report a non present battery, 400 403 * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C and USB HID devices. 401 404 */ 402 - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE }, 403 - { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE }, 405 + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_DYNAMIC }, 406 + { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_DYNAMIC }, 404 407 {} 405 408 }; 406 409 ··· 457 460 int ret = 0; 458 461 459 462 switch (prop) { 460 - case POWER_SUPPLY_PROP_PRESENT: 461 463 case POWER_SUPPLY_PROP_ONLINE: 462 464 val->intval = 1; 465 + break; 466 + 467 + case POWER_SUPPLY_PROP_PRESENT: 468 + val->intval = dev->battery_present; 463 469 break; 464 470 465 471 case POWER_SUPPLY_PROP_CAPACITY: ··· 577 577 if (quirks & HID_BATTERY_QUIRK_AVOID_QUERY) 578 578 dev->battery_avoid_query = true; 579 579 580 + dev->battery_present = (quirks & HID_BATTERY_QUIRK_DYNAMIC) ? false : true; 581 + 580 582 dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); 581 583 if (IS_ERR(dev->battery)) { 582 584 error = PTR_ERR(dev->battery); ··· 634 632 return; 635 633 636 634 if (hidinput_update_battery_charge_status(dev, usage, value)) { 635 + dev->battery_present = true; 637 636 power_supply_changed(dev->battery); 638 637 return; 639 638 } ··· 650 647 if (dev->battery_status != HID_BATTERY_REPORTED || 651 648 capacity != dev->battery_capacity || 652 649 ktime_after(ktime_get_coarse(), dev->battery_ratelimit_time)) { 650 + dev->battery_present = true; 653 651 dev->battery_capacity = capacity; 654 652 dev->battery_status = HID_BATTERY_REPORTED; 655 653 dev->battery_ratelimit_time =
+5 -1
drivers/hid/hid-logitech-hidpp.c
··· 4487 4487 if (!ret) 4488 4488 ret = hidpp_ff_init(hidpp, &data); 4489 4489 4490 - if (ret) 4490 + if (ret) { 4491 4491 hid_warn(hidpp->hid_dev, 4492 4492 "Unable to initialize force feedback support, errno %d\n", 4493 4493 ret); 4494 + ret = 0; 4495 + } 4494 4496 } 4495 4497 4496 4498 /* ··· 4670 4668 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb038) }, 4671 4669 { /* Slim Solar+ K980 Keyboard over Bluetooth */ 4672 4670 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb391) }, 4671 + { /* MX Master 4 mouse over Bluetooth */ 4672 + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb042) }, 4673 4673 {} 4674 4674 }; 4675 4675
+7
drivers/hid/hid-multitouch.c
··· 526 526 dev_warn(&hdev->dev, "failed to fetch feature %d\n", 527 527 report->id); 528 528 } else { 529 + /* The report ID in the request and the response should match */ 530 + if (report->id != buf[0]) { 531 + hid_err(hdev, "Returned feature report did not match the request\n"); 532 + goto free; 533 + } 534 + 529 535 ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, buf, 530 536 size, 0); 531 537 if (ret) 532 538 dev_warn(&hdev->dev, "failed to report feature\n"); 533 539 } 534 540 541 + free: 535 542 kfree(buf); 536 543 } 537 544
+1
drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
··· 127 127 hid->product = le16_to_cpu(qcdev->dev_desc.product_id); 128 128 snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quicki2c-hid", 129 129 hid->vendor, hid->product); 130 + strscpy(hid->phys, dev_name(qcdev->dev), sizeof(hid->phys)); 130 131 131 132 ret = hid_add_device(hid); 132 133 if (ret) {
+1
drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
··· 118 118 hid->product = le16_to_cpu(qsdev->dev_desc.product_id); 119 119 snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quickspi-hid", 120 120 hid->vendor, hid->product); 121 + strscpy(hid->phys, dev_name(qsdev->dev), sizeof(hid->phys)); 121 122 122 123 ret = hid_add_device(hid); 123 124 if (ret) {
+10
drivers/hid/wacom_wac.c
··· 1208 1208 1209 1209 switch (data[0]) { 1210 1210 case 0x04: 1211 + if (len < 32) { 1212 + dev_warn(wacom->pen_input->dev.parent, 1213 + "Report 0x04 too short: %zu bytes\n", len); 1214 + break; 1215 + } 1211 1216 wacom_intuos_bt_process_data(wacom, data + i); 1212 1217 i += 10; 1213 1218 fallthrough; 1214 1219 case 0x03: 1220 + if (i == 1 && len < 22) { 1221 + dev_warn(wacom->pen_input->dev.parent, 1222 + "Report 0x03 too short: %zu bytes\n", len); 1223 + break; 1224 + } 1215 1225 wacom_intuos_bt_process_data(wacom, data + i); 1216 1226 i += 10; 1217 1227 wacom_intuos_bt_process_data(wacom, data + i);
+4 -2
drivers/hv/mshv_regions.c
··· 314 314 ret = pin_user_pages_fast(userspace_addr, nr_pages, 315 315 FOLL_WRITE | FOLL_LONGTERM, 316 316 pages); 317 - if (ret < 0) 317 + if (ret != nr_pages) 318 318 goto release_pages; 319 319 } 320 320 321 321 return 0; 322 322 323 323 release_pages: 324 + if (ret > 0) 325 + done_count += ret; 324 326 mshv_region_invalidate_pages(region, 0, done_count); 325 - return ret; 327 + return ret < 0 ? ret : -ENOMEM; 326 328 } 327 329 328 330 static int mshv_region_chunk_unmap(struct mshv_mem_region *region,
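The change accounts for `pin_user_pages_fast()` returning a short count: fewer pages than requested can be pinned without a negative return, and those pages must still be released before failing. A condensed kernel-side sketch of the pattern (not the driver's actual helper, which unwinds via `mshv_region_invalidate_pages()`):

```c
#include <linux/mm.h>

/* Pin all nr_pages or fail, releasing any partial pin on the way out. */
static int pin_all_or_fail(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int ret = pin_user_pages_fast(uaddr, nr_pages,
				      FOLL_WRITE | FOLL_LONGTERM, pages);

	if (ret == nr_pages)
		return 0;
	if (ret > 0)
		unpin_user_pages(pages, ret); /* release the partial pin */
	return ret < 0 ? ret : -ENOMEM;    /* short pin maps to -ENOMEM */
}
```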
+2 -3
drivers/hv/mshv_root.h
··· 190 190 }; 191 191 192 192 struct mshv_root { 193 - struct hv_synic_pages __percpu *synic_pages; 194 193 spinlock_t pt_ht_lock; 195 194 DECLARE_HASHTABLE(pt_htable, MSHV_PARTITIONS_HASH_BITS); 196 195 struct hv_partition_property_vmm_capabilities vmm_caps; ··· 248 249 void mshv_unregister_doorbell(u64 partition_id, int doorbell_portid); 249 250 250 251 void mshv_isr(void); 251 - int mshv_synic_init(unsigned int cpu); 252 - int mshv_synic_cleanup(unsigned int cpu); 252 + int mshv_synic_init(struct device *dev); 253 + void mshv_synic_exit(void); 253 254 254 255 static inline bool mshv_partition_encrypted(struct mshv_partition *partition) 255 256 {
+22 -71
drivers/hv/mshv_root_main.c
··· 120 120 HVCALL_SET_VP_REGISTERS, 121 121 HVCALL_TRANSLATE_VIRTUAL_ADDRESS, 122 122 HVCALL_CLEAR_VIRTUAL_INTERRUPT, 123 - HVCALL_SCRUB_PARTITION, 124 123 HVCALL_REGISTER_INTERCEPT_RESULT, 125 124 HVCALL_ASSERT_VIRTUAL_INTERRUPT, 126 125 HVCALL_GET_GPA_PAGES_ACCESS_STATES, ··· 1288 1289 */ 1289 1290 static long 1290 1291 mshv_map_user_memory(struct mshv_partition *partition, 1291 - struct mshv_user_mem_region mem) 1292 + struct mshv_user_mem_region *mem) 1292 1293 { 1293 1294 struct mshv_mem_region *region; 1294 1295 struct vm_area_struct *vma; ··· 1296 1297 ulong mmio_pfn; 1297 1298 long ret; 1298 1299 1299 - if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP) || 1300 - !access_ok((const void __user *)mem.userspace_addr, mem.size)) 1300 + if (mem->flags & BIT(MSHV_SET_MEM_BIT_UNMAP) || 1301 + !access_ok((const void __user *)mem->userspace_addr, mem->size)) 1301 1302 return -EINVAL; 1302 1303 1303 1304 mmap_read_lock(current->mm); 1304 - vma = vma_lookup(current->mm, mem.userspace_addr); 1305 + vma = vma_lookup(current->mm, mem->userspace_addr); 1305 1306 is_mmio = vma ? !!(vma->vm_flags & (VM_IO | VM_PFNMAP)) : 0; 1306 1307 mmio_pfn = is_mmio ? vma->vm_pgoff : 0; 1307 1308 mmap_read_unlock(current->mm); ··· 1309 1310 if (!vma) 1310 1311 return -EINVAL; 1311 1312 1312 - ret = mshv_partition_create_region(partition, &mem, &region, 1313 + ret = mshv_partition_create_region(partition, mem, &region, 1313 1314 is_mmio); 1314 1315 if (ret) 1315 1316 return ret; ··· 1347 1348 return 0; 1348 1349 1349 1350 errout: 1350 - vfree(region); 1351 + mshv_region_put(region); 1351 1352 return ret; 1352 1353 } 1353 1354 1354 1355 /* Called for unmapping both the guest ram and the mmio space */ 1355 1356 static long 1356 1357 mshv_unmap_user_memory(struct mshv_partition *partition, 1357 - struct mshv_user_mem_region mem) 1358 + struct mshv_user_mem_region *mem) 1358 1359 { 1359 1360 struct mshv_mem_region *region; 1360 1361 1361 - if (!(mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP))) 1362 + if (!(mem->flags & BIT(MSHV_SET_MEM_BIT_UNMAP))) 1362 1363 return -EINVAL; 1363 1364 1364 1365 spin_lock(&partition->pt_mem_regions_lock); 1365 1366 1366 - region = mshv_partition_region_by_gfn(partition, mem.guest_pfn); 1367 + region = mshv_partition_region_by_gfn(partition, mem->guest_pfn); 1367 1368 if (!region) { 1368 1369 spin_unlock(&partition->pt_mem_regions_lock); 1369 1370 return -ENOENT; 1370 1371 } 1371 1372 1372 1373 /* Paranoia check */ 1373 - if (region->start_uaddr != mem.userspace_addr || 1374 - region->start_gfn != mem.guest_pfn || 1375 - region->nr_pages != HVPFN_DOWN(mem.size)) { 1374 + if (region->start_uaddr != mem->userspace_addr || 1375 + region->start_gfn != mem->guest_pfn || 1376 + region->nr_pages != HVPFN_DOWN(mem->size)) { 1376 1377 spin_unlock(&partition->pt_mem_regions_lock); 1377 1378 return -EINVAL; 1378 1379 } ··· 1403 1404 return -EINVAL; 1404 1405 1405 1406 if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP)) 1406 - return mshv_unmap_user_memory(partition, mem); 1407 + return mshv_unmap_user_memory(partition, &mem); 1407 1408 1408 - return mshv_map_user_memory(partition, mem); 1409 + return mshv_map_user_memory(partition, &mem); 1409 1410 } 1410 1411 1411 1412 static long ··· 2063 2064 return 0; 2064 2065 } 2065 2066 2066 - static int mshv_cpuhp_online; 2067 2067 static int mshv_root_sched_online; 2068 2068 2069 2069 static const char *scheduler_type_to_string(enum hv_scheduler_type type) ··· 2247 2249 free_percpu(root_scheduler_output); 2248 2250 } 2249 2251 2250 - static int mshv_reboot_notify(struct 
notifier_block *nb, 2251 - unsigned long code, void *unused) 2252 - { 2253 - cpuhp_remove_state(mshv_cpuhp_online); 2254 - return 0; 2255 - } 2256 - 2257 - struct notifier_block mshv_reboot_nb = { 2258 - .notifier_call = mshv_reboot_notify, 2259 - }; 2260 - 2261 - static void mshv_root_partition_exit(void) 2262 - { 2263 - unregister_reboot_notifier(&mshv_reboot_nb); 2264 - } 2265 - 2266 - static int __init mshv_root_partition_init(struct device *dev) 2267 - { 2268 - return register_reboot_notifier(&mshv_reboot_nb); 2269 - } 2270 - 2271 2252 static int __init mshv_init_vmm_caps(struct device *dev) 2272 2253 { 2273 2254 int ret; ··· 2291 2314 MSHV_HV_MAX_VERSION); 2292 2315 } 2293 2316 2294 - mshv_root.synic_pages = alloc_percpu(struct hv_synic_pages); 2295 - if (!mshv_root.synic_pages) { 2296 - dev_err(dev, "Failed to allocate percpu synic page\n"); 2297 - ret = -ENOMEM; 2317 + ret = mshv_synic_init(dev); 2318 + if (ret) 2298 2319 goto device_deregister; 2299 - } 2300 - 2301 - ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mshv_synic", 2302 - mshv_synic_init, 2303 - mshv_synic_cleanup); 2304 - if (ret < 0) { 2305 - dev_err(dev, "Failed to setup cpu hotplug state: %i\n", ret); 2306 - goto free_synic_pages; 2307 - } 2308 - 2309 - mshv_cpuhp_online = ret; 2310 2320 2311 2321 ret = mshv_init_vmm_caps(dev); 2312 2322 if (ret) 2313 - goto remove_cpu_state; 2323 + goto synic_cleanup; 2314 2324 2315 2325 ret = mshv_retrieve_scheduler_type(dev); 2316 2326 if (ret) 2317 - goto remove_cpu_state; 2318 - 2319 - if (hv_root_partition()) 2320 - ret = mshv_root_partition_init(dev); 2321 - if (ret) 2322 - goto remove_cpu_state; 2327 + goto synic_cleanup; 2323 2328 2324 2329 ret = root_scheduler_init(dev); 2325 2330 if (ret) 2326 - goto exit_partition; 2331 + goto synic_cleanup; 2327 2332 2328 2333 ret = mshv_debugfs_init(); 2329 2334 if (ret) ··· 2326 2367 mshv_debugfs_exit(); 2327 2368 deinit_root_scheduler: 2328 2369 root_scheduler_deinit(); 2329 - exit_partition: 2330 - if (hv_root_partition()) 2331 - mshv_root_partition_exit(); 2332 - remove_cpu_state: 2333 - cpuhp_remove_state(mshv_cpuhp_online); 2334 - free_synic_pages: 2335 - free_percpu(mshv_root.synic_pages); 2370 + synic_cleanup: 2371 + mshv_synic_exit(); 2336 2372 device_deregister: 2337 2373 misc_deregister(&mshv_dev); 2338 2374 return ret; ··· 2341 2387 misc_deregister(&mshv_dev); 2342 2388 mshv_irqfd_wq_cleanup(); 2343 2389 root_scheduler_deinit(); 2344 - if (hv_root_partition()) 2345 - mshv_root_partition_exit(); 2346 - cpuhp_remove_state(mshv_cpuhp_online); 2347 - free_percpu(mshv_root.synic_pages); 2390 + mshv_synic_exit(); 2348 2391 } 2349 2392 2350 2393 module_init(mshv_parent_partition_init);
+173 -15
drivers/hv/mshv_synic.c
··· 10 10 #include <linux/kernel.h> 11 11 #include <linux/slab.h> 12 12 #include <linux/mm.h> 13 + #include <linux/interrupt.h> 13 14 #include <linux/io.h> 14 15 #include <linux/random.h> 16 + #include <linux/cpuhotplug.h> 17 + #include <linux/reboot.h> 15 18 #include <asm/mshyperv.h> 19 + #include <linux/acpi.h> 16 20 17 21 #include "mshv_eventfd.h" 18 22 #include "mshv.h" 23 + 24 + static int synic_cpuhp_online; 25 + static struct hv_synic_pages __percpu *synic_pages; 26 + static int mshv_sint_vector = -1; /* hwirq for the SynIC SINTs */ 27 + static int mshv_sint_irq = -1; /* Linux IRQ for mshv_sint_vector */ 19 28 20 29 static u32 synic_event_ring_get_queued_port(u32 sint_index) 21 30 { ··· 35 26 u32 message; 36 27 u8 tail; 37 28 38 - spages = this_cpu_ptr(mshv_root.synic_pages); 29 + spages = this_cpu_ptr(synic_pages); 39 30 event_ring_page = &spages->synic_event_ring_page; 40 31 synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail); 41 32 ··· 402 393 403 394 void mshv_isr(void) 404 395 { 405 - struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages); 396 + struct hv_synic_pages *spages = this_cpu_ptr(synic_pages); 406 397 struct hv_message_page **msg_page = &spages->hyp_synic_message_page; 407 398 struct hv_message *msg; 408 399 bool handled; ··· 446 437 if (msg->header.message_flags.msg_pending) 447 438 hv_set_non_nested_msr(HV_MSR_EOM, 0); 448 439 449 - #ifdef HYPERVISOR_CALLBACK_VECTOR 450 - add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR); 451 - #endif 440 + add_interrupt_randomness(mshv_sint_vector); 452 441 } else { 453 442 pr_warn_once("%s: unknown message type 0x%x\n", __func__, 454 443 msg->header.message_type); 455 444 } 456 445 } 457 446 458 - int mshv_synic_init(unsigned int cpu) 447 + static int mshv_synic_cpu_init(unsigned int cpu) 459 448 { 460 449 union hv_synic_simp simp; 461 450 union hv_synic_siefp siefp; 462 451 union hv_synic_sirbp sirbp; 463 - #ifdef HYPERVISOR_CALLBACK_VECTOR 464 452 union hv_synic_sint sint; 465 - #endif 466 453 union hv_synic_scontrol sctrl; 467 - struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages); 454 + struct hv_synic_pages *spages = this_cpu_ptr(synic_pages); 468 455 struct hv_message_page **msg_page = &spages->hyp_synic_message_page; 469 456 struct hv_synic_event_flags_page **event_flags_page = 470 457 &spages->synic_event_flags_page; ··· 501 496 502 497 hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64); 503 498 504 - #ifdef HYPERVISOR_CALLBACK_VECTOR 499 + if (mshv_sint_irq != -1) 500 + enable_percpu_irq(mshv_sint_irq, 0); 501 + 505 502 /* Enable intercepts */ 506 503 sint.as_uint64 = 0; 507 - sint.vector = HYPERVISOR_CALLBACK_VECTOR; 504 + sint.vector = mshv_sint_vector; 508 505 sint.masked = false; 509 506 sint.auto_eoi = hv_recommend_using_aeoi(); 510 507 hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX, ··· 514 507 515 508 /* Doorbell SINT */ 516 509 sint.as_uint64 = 0; 517 - sint.vector = HYPERVISOR_CALLBACK_VECTOR; 510 + sint.vector = mshv_sint_vector; 518 511 sint.masked = false; 519 512 sint.as_intercept = 1; 520 513 sint.auto_eoi = hv_recommend_using_aeoi(); 521 514 hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX, 522 515 sint.as_uint64); 523 - #endif 524 516 525 517 /* Enable global synic bit */ 526 518 sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL); ··· 548 542 return -EFAULT; 549 543 } 550 544 551 - int mshv_synic_cleanup(unsigned int cpu) 545 + static int mshv_synic_cpu_exit(unsigned int cpu) 552 546 { 553 547 union hv_synic_sint 
sint; 554 548 union hv_synic_simp simp; 555 549 union hv_synic_siefp siefp; 556 550 union hv_synic_sirbp sirbp; 557 551 union hv_synic_scontrol sctrl; 558 - struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages); 552 + struct hv_synic_pages *spages = this_cpu_ptr(synic_pages); 559 553 struct hv_message_page **msg_page = &spages->hyp_synic_message_page; 560 554 struct hv_synic_event_flags_page **event_flags_page = 561 555 &spages->synic_event_flags_page; ··· 573 567 sint.masked = true; 574 568 hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX, 575 569 sint.as_uint64); 570 + 571 + if (mshv_sint_irq != -1) 572 + disable_percpu_irq(mshv_sint_irq); 576 573 577 574 /* Disable Synic's event ring page */ 578 575 sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP); ··· 671 662 hv_call_delete_port(hv_current_partition_id, port_id); 672 663 673 664 mshv_portid_free(doorbell_portid); 665 + } 666 + 667 + static int mshv_synic_reboot_notify(struct notifier_block *nb, 668 + unsigned long code, void *unused) 669 + { 670 + if (!hv_root_partition()) 671 + return 0; 672 + 673 + cpuhp_remove_state(synic_cpuhp_online); 674 + return 0; 675 + } 676 + 677 + static struct notifier_block mshv_synic_reboot_nb = { 678 + .notifier_call = mshv_synic_reboot_notify, 679 + }; 680 + 681 + #ifndef HYPERVISOR_CALLBACK_VECTOR 682 + static DEFINE_PER_CPU(long, mshv_evt); 683 + 684 + static irqreturn_t mshv_percpu_isr(int irq, void *dev_id) 685 + { 686 + mshv_isr(); 687 + return IRQ_HANDLED; 688 + } 689 + 690 + #ifdef CONFIG_ACPI 691 + static int __init mshv_acpi_setup_sint_irq(void) 692 + { 693 + return acpi_register_gsi(NULL, mshv_sint_vector, ACPI_EDGE_SENSITIVE, 694 + ACPI_ACTIVE_HIGH); 695 + } 696 + 697 + static void mshv_acpi_cleanup_sint_irq(void) 698 + { 699 + acpi_unregister_gsi(mshv_sint_vector); 700 + } 701 + #else 702 + static int __init mshv_acpi_setup_sint_irq(void) 703 + { 704 + return -ENODEV; 705 + } 706 + 707 + static void mshv_acpi_cleanup_sint_irq(void) 708 + { 709 + } 710 + #endif 711 + 712 + static int __init mshv_sint_vector_setup(void) 713 + { 714 + int ret; 715 + struct hv_register_assoc reg = { 716 + .name = HV_ARM64_REGISTER_SINT_RESERVED_INTERRUPT_ID, 717 + }; 718 + union hv_input_vtl input_vtl = { 0 }; 719 + 720 + if (acpi_disabled) 721 + return -ENODEV; 722 + 723 + ret = hv_call_get_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF, 724 + 1, input_vtl, &reg); 725 + if (ret || !reg.value.reg64) 726 + return -ENODEV; 727 + 728 + mshv_sint_vector = reg.value.reg64; 729 + ret = mshv_acpi_setup_sint_irq(); 730 + if (ret < 0) { 731 + pr_err("Failed to setup IRQ for MSHV SINT vector %d: %d\n", 732 + mshv_sint_vector, ret); 733 + goto out_fail; 734 + } 735 + 736 + mshv_sint_irq = ret; 737 + 738 + ret = request_percpu_irq(mshv_sint_irq, mshv_percpu_isr, "MSHV", 739 + &mshv_evt); 740 + if (ret) 741 + goto out_unregister; 742 + 743 + return 0; 744 + 745 + out_unregister: 746 + mshv_acpi_cleanup_sint_irq(); 747 + out_fail: 748 + return ret; 749 + } 750 + 751 + static void mshv_sint_vector_cleanup(void) 752 + { 753 + free_percpu_irq(mshv_sint_irq, &mshv_evt); 754 + mshv_acpi_cleanup_sint_irq(); 755 + } 756 + #else /* !HYPERVISOR_CALLBACK_VECTOR */ 757 + static int __init mshv_sint_vector_setup(void) 758 + { 759 + mshv_sint_vector = HYPERVISOR_CALLBACK_VECTOR; 760 + return 0; 761 + } 762 + 763 + static void mshv_sint_vector_cleanup(void) 764 + { 765 + } 766 + #endif /* HYPERVISOR_CALLBACK_VECTOR */ 767 + 768 + int __init mshv_synic_init(struct device *dev) 769 + { 770 + int ret = 
0; 771 + 772 + ret = mshv_sint_vector_setup(); 773 + if (ret) 774 + return ret; 775 + 776 + synic_pages = alloc_percpu(struct hv_synic_pages); 777 + if (!synic_pages) { 778 + dev_err(dev, "Failed to allocate percpu synic page\n"); 779 + ret = -ENOMEM; 780 + goto sint_vector_cleanup; 781 + } 782 + 783 + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mshv_synic", 784 + mshv_synic_cpu_init, 785 + mshv_synic_cpu_exit); 786 + if (ret < 0) { 787 + dev_err(dev, "Failed to setup cpu hotplug state: %i\n", ret); 788 + goto free_synic_pages; 789 + } 790 + 791 + synic_cpuhp_online = ret; 792 + 793 + ret = register_reboot_notifier(&mshv_synic_reboot_nb); 794 + if (ret) 795 + goto remove_cpuhp_state; 796 + 797 + return 0; 798 + 799 + remove_cpuhp_state: 800 + cpuhp_remove_state(synic_cpuhp_online); 801 + free_synic_pages: 802 + free_percpu(synic_pages); 803 + sint_vector_cleanup: 804 + mshv_sint_vector_cleanup(); 805 + return ret; 806 + } 807 + 808 + void mshv_synic_exit(void) 809 + { 810 + unregister_reboot_notifier(&mshv_synic_reboot_nb); 811 + cpuhp_remove_state(synic_cpuhp_online); 812 + free_percpu(synic_pages); 813 + mshv_sint_vector_cleanup(); 674 814 }
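The rework above moves the SynIC interrupt onto a dedicated per-CPU IRQ when no `HYPERVISOR_CALLBACK_VECTOR` exists. A minimal sketch of the per-CPU IRQ lifecycle it relies on, under hypothetical `demo_*` names: the IRQ is requested once with a per-CPU cookie, then enabled and disabled on each CPU (typically from hotplug callbacks), and freed with the same cookie:

```c
#include <linux/interrupt.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(long, demo_evt); /* per-CPU cookie, one slot per CPU */

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	/* dev_id points at this CPU's demo_evt slot */
	return IRQ_HANDLED;
}

static int demo_setup(int irq)
{
	/* requested once, globally, with the percpu cookie */
	return request_percpu_irq(irq, demo_isr, "demo", &demo_evt);
}

static void demo_cpu_online(int irq)  { enable_percpu_irq(irq, 0); }
static void demo_cpu_offline(int irq) { disable_percpu_irq(irq); }
static void demo_teardown(int irq)    { free_percpu_irq(irq, &demo_evt); }
```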
+2 -4
drivers/hwmon/Kconfig
··· 1493 1493 1494 1494 config SENSORS_LM75 1495 1495 tristate "National Semiconductor LM75 and compatibles" 1496 - depends on I2C 1497 - depends on I3C || !I3C 1496 + depends on I3C_OR_I2C 1498 1497 select REGMAP_I2C 1499 1498 select REGMAP_I3C if I3C 1500 1499 help ··· 2381 2382 2382 2383 config SENSORS_TMP108 2383 2384 tristate "Texas Instruments TMP108" 2384 - depends on I2C 2385 - depends on I3C || !I3C 2385 + depends on I3C_OR_I2C 2386 2386 select REGMAP_I2C 2387 2387 select REGMAP_I3C if I3C 2388 2388 help
+1 -1
drivers/hwmon/axi-fan-control.c
··· 507 507 ret = devm_request_threaded_irq(&pdev->dev, ctl->irq, NULL, 508 508 axi_fan_control_irq_handler, 509 509 IRQF_ONESHOT | IRQF_TRIGGER_HIGH, 510 - pdev->driver_override, ctl); 510 + NULL, ctl); 511 511 if (ret) 512 512 return dev_err_probe(&pdev->dev, ret, 513 513 "failed to request an irq\n");
+5 -5
drivers/hwmon/max6639.c
··· 232 232 static int max6639_set_ppr(struct max6639_data *data, int channel, u8 ppr) 233 233 { 234 234 /* Decrement the PPR value and shift left by 6 to match the register format */ 235 - return regmap_write(data->regmap, MAX6639_REG_FAN_PPR(channel), ppr-- << 6); 235 + return regmap_write(data->regmap, MAX6639_REG_FAN_PPR(channel), --ppr << 6); 236 236 } 237 237 238 238 static int max6639_write_fan(struct device *dev, u32 attr, int channel, ··· 524 524 525 525 { 526 526 struct device *dev = &client->dev; 527 - u32 i; 528 - int err, val; 527 + u32 i, val; 528 + int err; 529 529 530 530 err = of_property_read_u32(child, "reg", &i); 531 531 if (err) { ··· 540 540 541 541 err = of_property_read_u32(child, "pulses-per-revolution", &val); 542 542 if (!err) { 543 - if (val < 1 || val > 5) { 544 - dev_err(dev, "invalid pulses-per-revolution %d of %pOFn\n", val, child); 543 + if (val < 1 || val > 4) { 544 + dev_err(dev, "invalid pulses-per-revolution %u of %pOFn\n", val, child); 545 545 return -EINVAL; 546 546 } 547 547 data->ppr[i] = val;
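The `--ppr` fix matters because post-decrement evaluates to the operand's old value, so `ppr-- << 6` wrote the undecremented PPR into the register. A tiny runnable demonstration:

```c
#include <stdio.h>

int main(void)
{
	unsigned char ppr = 2; /* pulses per revolution from DT */

	/* Post-decrement yields the OLD value: 2 << 6, not (2 - 1) << 6. */
	printf("ppr-- << 6 = %u\n", ((unsigned)(ppr--)) << 6); /* 128 */

	ppr = 2;
	/* Pre-decrement yields the NEW value, matching the fix. */
	printf("--ppr << 6 = %u\n", ((unsigned)(--ppr)) << 6); /* 64 */
	return 0;
}
```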
+2
drivers/hwmon/pmbus/hac300s.c
··· 58 58 case PMBUS_MFR_VOUT_MIN: 59 59 case PMBUS_READ_VOUT: 60 60 rv = pmbus_read_word_data(client, page, phase, reg); 61 + if (rv < 0) 62 + return rv; 61 63 return FIELD_GET(LINEAR11_MANTISSA_MASK, rv); 62 64 default: 63 65 return -ENODATA;
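The added check matters because PMBus word reads return a negative errno on failure; without it the error code would be fed straight into `FIELD_GET()` and masked into a bogus mantissa. The same pattern recurs in the ina233, mp2869, and mp2975 hunks below. A condensed sketch reusing the names from this hunk:

```c
#include <linux/bitfield.h>
#include "pmbus.h" /* driver-internal header providing pmbus_read_word_data() */

/* LINEAR11_MANTISSA_MASK is the mask used by the hac300s hunk above. */
static int read_vout_mantissa(struct i2c_client *client, int page, int phase)
{
	int rv = pmbus_read_word_data(client, page, phase, PMBUS_READ_VOUT);

	if (rv < 0)
		return rv; /* e.g. -EIO: propagate, never mask an errno */
	return FIELD_GET(LINEAR11_MANTISSA_MASK, rv);
}
```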
+2
drivers/hwmon/pmbus/ina233.c
··· 67 67 switch (reg) { 68 68 case PMBUS_VIRT_READ_VMON: 69 69 ret = pmbus_read_word_data(client, 0, 0xff, MFR_READ_VSHUNT); 70 + if (ret < 0) 71 + return ret; 70 72 71 73 /* Adjust returned value to match VIN coefficients */ 72 74 /* VIN: 1.25 mV VSHUNT: 2.5 uV LSB */
+5 -2
drivers/hwmon/pmbus/isl68137.c
··· 98 98 { 99 99 int val = pmbus_read_byte_data(client, page, PMBUS_OPERATION); 100 100 101 - return sprintf(buf, "%d\n", 102 - (val & ISL68137_VOUT_AVS) == ISL68137_VOUT_AVS ? 1 : 0); 101 + if (val < 0) 102 + return val; 103 + 104 + return sysfs_emit(buf, "%d\n", 105 + (val & ISL68137_VOUT_AVS) == ISL68137_VOUT_AVS); 103 106 } 104 107 105 108 static ssize_t isl68137_avs_enable_store_page(struct i2c_client *client,
+21 -14
drivers/hwmon/pmbus/mp2869.c
··· 165 165 { 166 166 const struct pmbus_driver_info *info = pmbus_get_driver_info(client); 167 167 struct mp2869_data *data = to_mp2869_data(info); 168 - int ret; 168 + int ret, mfr; 169 169 170 170 switch (reg) { 171 171 case PMBUS_VOUT_MODE: ··· 188 188 if (ret < 0) 189 189 return ret; 190 190 191 + mfr = pmbus_read_byte_data(client, page, 192 + PMBUS_STATUS_MFR_SPECIFIC); 193 + if (mfr < 0) 194 + return mfr; 195 + 191 196 ret = (ret & ~GENMASK(2, 2)) | 192 197 FIELD_PREP(GENMASK(2, 2), 193 - FIELD_GET(GENMASK(1, 1), 194 - pmbus_read_byte_data(client, page, 195 - PMBUS_STATUS_MFR_SPECIFIC))); 198 + FIELD_GET(GENMASK(1, 1), mfr)); 196 199 break; 197 200 case PMBUS_STATUS_TEMPERATURE: 198 201 /* ··· 210 207 if (ret < 0) 211 208 return ret; 212 209 210 + mfr = pmbus_read_byte_data(client, page, 211 + PMBUS_STATUS_MFR_SPECIFIC); 212 + if (mfr < 0) 213 + return mfr; 214 + 213 215 ret = (ret & ~GENMASK(7, 6)) | 214 216 FIELD_PREP(GENMASK(6, 6), 215 - FIELD_GET(GENMASK(1, 1), 216 - pmbus_read_byte_data(client, page, 217 - PMBUS_STATUS_MFR_SPECIFIC))) | 217 + FIELD_GET(GENMASK(1, 1), mfr)) | 218 218 FIELD_PREP(GENMASK(7, 7), 219 - FIELD_GET(GENMASK(1, 1), 220 - pmbus_read_byte_data(client, page, 221 - PMBUS_STATUS_MFR_SPECIFIC))); 219 + FIELD_GET(GENMASK(1, 1), mfr)); 222 220 break; 223 221 default: 224 222 ret = -ENODATA; ··· 234 230 { 235 231 const struct pmbus_driver_info *info = pmbus_get_driver_info(client); 236 232 struct mp2869_data *data = to_mp2869_data(info); 237 - int ret; 233 + int ret, mfr; 238 234 239 235 switch (reg) { 240 236 case PMBUS_STATUS_WORD: ··· 250 246 if (ret < 0) 251 247 return ret; 252 248 249 + mfr = pmbus_read_byte_data(client, page, 250 + PMBUS_STATUS_MFR_SPECIFIC); 251 + if (mfr < 0) 252 + return mfr; 253 + 253 254 ret = (ret & ~GENMASK(2, 2)) | 254 255 FIELD_PREP(GENMASK(2, 2), 255 - FIELD_GET(GENMASK(1, 1), 256 - pmbus_read_byte_data(client, page, 257 - PMBUS_STATUS_MFR_SPECIFIC))); 256 + FIELD_GET(GENMASK(1, 1), mfr)); 258 257 break; 259 258 case PMBUS_READ_VIN: 260 259 /*
+2
drivers/hwmon/pmbus/mp2975.c
··· 313 313 case PMBUS_STATUS_WORD: 314 314 /* MP2973 & MP2971 return PGOOD instead of PB_STATUS_POWER_GOOD_N. */ 315 315 ret = pmbus_read_word_data(client, page, phase, reg); 316 + if (ret < 0) 317 + return ret; 316 318 ret ^= PB_STATUS_POWER_GOOD_N; 317 319 break; 318 320 case PMBUS_OT_FAULT_LIMIT:
+2
drivers/i2c/busses/Kconfig
··· 1213 1213 tristate "NVIDIA Tegra internal I2C controller" 1214 1214 depends on ARCH_TEGRA || (COMPILE_TEST && (ARC || ARM || ARM64 || M68K || RISCV || SUPERH || SPARC)) 1215 1215 # COMPILE_TEST needs architectures with readsX()/writesX() primitives 1216 + depends on PINCTRL 1217 + # ARCH_TEGRA implies PINCTRL, but the COMPILE_TEST side doesn't. 1216 1218 help 1217 1219 If you say yes to this option, support will be included for the 1218 1220 I2C controller embedded in NVIDIA Tegra SOCs
+3
drivers/i2c/busses/i2c-cp2615.c
··· 298 298 if (!adap) 299 299 return -ENOMEM; 300 300 301 + if (!usbdev->serial) 302 + return -EINVAL; 303 + 301 304 strscpy(adap->name, usbdev->serial, sizeof(adap->name)); 302 305 adap->owner = THIS_MODULE; 303 306 adap->dev.parent = &usbif->dev;
+1
drivers/i2c/busses/i2c-fsi.c
··· 729 729 rc = i2c_add_adapter(&port->adapter); 730 730 if (rc < 0) { 731 731 dev_err(dev, "Failed to register adapter: %d\n", rc); 732 + of_node_put(np); 732 733 kfree(port); 733 734 continue; 734 735 }
+16 -1
drivers/i2c/busses/i2c-pxa.c
··· 268 268 struct pinctrl *pinctrl; 269 269 struct pinctrl_state *pinctrl_default; 270 270 struct pinctrl_state *pinctrl_recovery; 271 + bool reset_before_xfer; 271 272 }; 272 273 273 274 #define _IBMR(i2c) ((i2c)->reg_ibmr) ··· 1145 1144 { 1146 1145 struct pxa_i2c *i2c = adap->algo_data; 1147 1146 1147 + if (i2c->reset_before_xfer) { 1148 + i2c_pxa_reset(i2c); 1149 + i2c->reset_before_xfer = false; 1150 + } 1151 + 1148 1152 return i2c_pxa_internal_xfer(i2c, msgs, num, i2c_pxa_do_xfer); 1149 1153 } 1150 1154 ··· 1527 1521 } 1528 1522 } 1529 1523 1530 - i2c_pxa_reset(i2c); 1524 + /* 1525 + * Skip reset on Armada 3700 when recovery is used to avoid 1526 + * controller hang due to the pinctrl state changes done by 1527 + * the generic recovery initialization code. The reset will 1528 + * be performed later, prior to the first transfer. 1529 + */ 1530 + if (i2c_type == REGS_A3700 && i2c->adap.bus_recovery_info) 1531 + i2c->reset_before_xfer = true; 1532 + else 1533 + i2c_pxa_reset(i2c); 1531 1534 1532 1535 ret = i2c_add_numbered_adapter(&i2c->adap); 1533 1536 if (ret < 0)
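The `reset_before_xfer` flag implements a one-shot deferred action: the reset that can no longer happen safely at probe time on this controller variant is postponed to the first transfer. A stripped-down sketch of the pattern, with hypothetical `demo_*` names:

```c
#include <stdbool.h>
#include <stdio.h>

struct demo_ctrl {
	bool reset_before_xfer; /* armed at probe, consumed on first use */
};

static void demo_reset(struct demo_ctrl *c)
{
	(void)c;
	puts("controller reset");
}

/* The deferred one-shot: the skipped probe-time reset runs exactly
 * once, immediately before the first transfer. */
static void demo_xfer(struct demo_ctrl *c)
{
	if (c->reset_before_xfer) {
		demo_reset(c);
		c->reset_before_xfer = false;
	}
	/* ...perform the transfer... */
}

int main(void)
{
	struct demo_ctrl c = { .reset_before_xfer = true };

	demo_xfer(&c); /* resets, then transfers */
	demo_xfer(&c); /* transfers only */
	return 0;
}
```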
+4 -1
drivers/i2c/busses/i2c-tegra.c
··· 2047 2047 * 2048 2048 * VI I2C device shouldn't be marked as IRQ-safe because VI I2C won't 2049 2049 * be used for atomic transfers. ACPI device is not IRQ safe also. 2050 + * 2051 + * Devices with pinctrl states cannot be marked IRQ-safe as the pinctrl 2052 + * state transitions during runtime PM require mutexes. 2050 2053 */ 2051 - if (!IS_VI(i2c_dev) && !has_acpi_companion(i2c_dev->dev)) 2054 + if (!IS_VI(i2c_dev) && !has_acpi_companion(i2c_dev->dev) && !i2c_dev->dev->pins) 2052 2055 pm_runtime_irq_safe(i2c_dev->dev); 2053 2056 2054 2057 pm_runtime_enable(i2c_dev->dev);
+12
drivers/i3c/Kconfig
··· 22 22 if I3C 23 23 source "drivers/i3c/master/Kconfig" 24 24 endif # I3C 25 + 26 + config I3C_OR_I2C 27 + tristate 28 + default m if I3C=m 29 + default I2C 30 + help 31 + Device drivers using module_i3c_i2c_driver() can use either 32 + I2C or I3C hosts, but cannot be built into the kernel when 33 + CONFIG_I3C=m. 34 + 35 + Add 'depends on I3C_OR_I2C' in Kconfig for those drivers to 36 + get the correct dependencies.
+4 -2
drivers/i3c/master/dw-i3c-master.c
··· 1024 1024 master->free_pos &= ~BIT(pos); 1025 1025 } 1026 1026 1027 - writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), 1027 + writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr) | DEV_ADDR_TABLE_SIR_REJECT, 1028 1028 master->regs + 1029 1029 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1030 1030 ··· 1053 1053 master->free_pos &= ~BIT(pos); 1054 1054 i3c_dev_set_master_data(dev, data); 1055 1055 1056 - writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr), 1056 + writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr) | DEV_ADDR_TABLE_SIR_REJECT, 1057 1057 master->regs + 1058 1058 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 1059 1059 ··· 1659 1659 pm_runtime_get_noresume(&pdev->dev); 1660 1660 1661 1661 INIT_WORK(&master->hj_work, dw_i3c_hj_work); 1662 + 1663 + device_set_of_node_from_dev(&master->base.i2c.dev, &pdev->dev); 1662 1664 ret = i3c_master_register(&master->base, &pdev->dev, 1663 1665 &dw_mipi_i3c_ops, false); 1664 1666 if (ret)
+1
drivers/i3c/master/mipi-i3c-hci/cmd.h
··· 17 17 #define CMD_0_TOC W0_BIT_(31) 18 18 #define CMD_0_ROC W0_BIT_(30) 19 19 #define CMD_0_ATTR W0_MASK(2, 0) 20 + #define CMD_0_TID W0_MASK(6, 3) 20 21 21 22 /* 22 23 * Response Descriptor Structure
+3 -5
drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
··· 331 331 CMD_A0_ROC | CMD_A0_TOC; 332 332 xfer->cmd_desc[1] = 0; 333 333 xfer->completion = &done; 334 - hci->io->queue_xfer(hci, xfer, 1); 335 - if (!wait_for_completion_timeout(&done, HZ) && 336 - hci->io->dequeue_xfer(hci, xfer, 1)) { 337 - ret = -ETIME; 334 + xfer->timeout = HZ; 335 + ret = i3c_hci_process_xfer(hci, xfer, 1); 336 + if (ret) 338 337 break; 339 - } 340 338 if ((RESP_STATUS(xfer->response) == RESP_ERR_ADDR_HEADER || 341 339 RESP_STATUS(xfer->response) == RESP_ERR_NACK) && 342 340 RESP_DATA_LENGTH(xfer->response) == 1) {
+3 -5
drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
··· 253 253 xfer[0].rnw = true; 254 254 xfer[0].cmd_desc[1] = CMD_A1_DATA_LENGTH(8); 255 255 xfer[1].completion = &done; 256 + xfer[1].timeout = HZ; 256 257 257 258 for (;;) { 258 259 ret = i3c_master_get_free_addr(&hci->master, next_addr); ··· 273 272 CMD_A0_ASSIGN_ADDRESS(next_addr) | 274 273 CMD_A0_ROC | 275 274 CMD_A0_TOC; 276 - hci->io->queue_xfer(hci, xfer, 2); 277 - if (!wait_for_completion_timeout(&done, HZ) && 278 - hci->io->dequeue_xfer(hci, xfer, 2)) { 279 - ret = -ETIME; 275 + ret = i3c_hci_process_xfer(hci, xfer, 2); 276 + if (ret) 280 277 break; 281 - } 282 278 if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) { 283 279 ret = 0; /* no more devices to be assigned */ 284 280 break;
+90 -53
drivers/i3c/master/mipi-i3c-hci/core.c
··· 152 152 if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD) 153 153 amd_set_resp_buf_thld(hci); 154 154 155 - reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE); 155 + scoped_guard(spinlock_irqsave, &hci->lock) 156 + hci->irq_inactive = false; 157 + 158 + /* Enable bus with Hot-Join disabled */ 159 + reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL); 156 160 dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL)); 157 161 158 162 return 0; ··· 181 177 return ret; 182 178 } 183 179 180 + static int i3c_hci_software_reset(struct i3c_hci *hci) 181 + { 182 + u32 regval; 183 + int ret; 184 + 185 + /* 186 + * SOFT_RST must be clear before we write to it. 187 + * Then we must wait until it clears again. 188 + */ 189 + ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval, 190 + !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC); 191 + if (ret) { 192 + dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__); 193 + return ret; 194 + } 195 + 196 + reg_write(RESET_CONTROL, SOFT_RST); 197 + 198 + ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval, 199 + !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC); 200 + if (ret) { 201 + dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__); 202 + return ret; 203 + } 204 + 205 + return 0; 206 + } 207 + 184 208 void i3c_hci_sync_irq_inactive(struct i3c_hci *hci) 185 209 { 186 210 struct platform_device *pdev = to_platform_device(hci->master.dev.parent); 187 211 int irq = platform_get_irq(pdev, 0); 188 212 189 213 reg_write(INTR_SIGNAL_ENABLE, 0x0); 190 - hci->irq_inactive = true; 191 214 synchronize_irq(irq); 215 + scoped_guard(spinlock_irqsave, &hci->lock) 216 + hci->irq_inactive = true; 192 217 } 193 218 194 219 static void i3c_hci_bus_cleanup(struct i3c_master_controller *m) 195 220 { 196 221 struct i3c_hci *hci = to_i3c_hci(m); 197 222 198 - i3c_hci_bus_disable(hci); 223 + if (i3c_hci_bus_disable(hci)) 224 + i3c_hci_software_reset(hci); 199 225 hci->io->cleanup(hci); 200 226 } 201 227 ··· 244 210 void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci) 245 211 { 246 212 reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0)); 213 + } 214 + 215 + int i3c_hci_process_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n) 216 + { 217 + struct completion *done = xfer[n - 1].completion; 218 + unsigned long timeout = xfer[n - 1].timeout; 219 + int ret; 220 + 221 + ret = hci->io->queue_xfer(hci, xfer, n); 222 + if (ret) 223 + return ret; 224 + 225 + if (!wait_for_completion_timeout(done, timeout)) { 226 + if (hci->io->dequeue_xfer(hci, xfer, n)) { 227 + dev_err(&hci->master.dev, "%s: timeout error\n", __func__); 228 + return -ETIMEDOUT; 229 + } 230 + return 0; 231 + } 232 + 233 + if (hci->io->handle_error) { 234 + bool error = false; 235 + 236 + for (int i = 0; i < n && !error; i++) 237 + error = RESP_STATUS(xfer[i].response); 238 + if (error) 239 + return hci->io->handle_error(hci, xfer, n); 240 + } 241 + 242 + return 0; 247 243 } 248 244 249 245 static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m, ··· 316 252 last = i - 1; 317 253 xfer[last].cmd_desc[0] |= CMD_0_TOC; 318 254 xfer[last].completion = &done; 255 + xfer[last].timeout = HZ; 319 256 320 257 if (prefixed) 321 258 xfer--; 322 259 323 - ret = hci->io->queue_xfer(hci, xfer, nxfers); 260 + ret = i3c_hci_process_xfer(hci, xfer, nxfers); 324 261 if (ret) 325 262 goto out; 326 - if (!wait_for_completion_timeout(&done, HZ) && 327 - hci->io->dequeue_xfer(hci, xfer, nxfers)) { 328 - ret = -ETIME; 329 - goto out; 330 - } 331 263 for (i = prefixed; i < nxfers; i++) { 
332 264 if (ccc->rnw) 333 265 ccc->dests[i - prefixed].payload.len = ··· 394 334 last = i - 1; 395 335 xfer[last].cmd_desc[0] |= CMD_0_TOC; 396 336 xfer[last].completion = &done; 337 + xfer[last].timeout = HZ; 397 338 398 - ret = hci->io->queue_xfer(hci, xfer, nxfers); 339 + ret = i3c_hci_process_xfer(hci, xfer, nxfers); 399 340 if (ret) 400 341 goto out; 401 - if (!wait_for_completion_timeout(&done, HZ) && 402 - hci->io->dequeue_xfer(hci, xfer, nxfers)) { 403 - ret = -ETIME; 404 - goto out; 405 - } 406 342 for (i = 0; i < nxfers; i++) { 407 343 if (i3c_xfers[i].rnw) 408 344 i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response); ··· 438 382 last = i - 1; 439 383 xfer[last].cmd_desc[0] |= CMD_0_TOC; 440 384 xfer[last].completion = &done; 385 + xfer[last].timeout = m->i2c.timeout; 441 386 442 - ret = hci->io->queue_xfer(hci, xfer, nxfers); 387 + ret = i3c_hci_process_xfer(hci, xfer, nxfers); 443 388 if (ret) 444 389 goto out; 445 - if (!wait_for_completion_timeout(&done, m->i2c.timeout) && 446 - hci->io->dequeue_xfer(hci, xfer, nxfers)) { 447 - ret = -ETIME; 448 - goto out; 449 - } 450 390 for (i = 0; i < nxfers; i++) { 451 391 if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) { 452 392 ret = -EIO; ··· 618 566 irqreturn_t result = IRQ_NONE; 619 567 u32 val; 620 568 569 + guard(spinlock)(&hci->lock); 570 + 621 571 /* 622 572 * The IRQ can be shared, so the handler may be called when the IRQ is 623 573 * due to a different device. That could happen when runtime suspended, ··· 653 599 result = IRQ_HANDLED; 654 600 655 601 return result; 656 - } 657 - 658 - static int i3c_hci_software_reset(struct i3c_hci *hci) 659 - { 660 - u32 regval; 661 - int ret; 662 - 663 - /* 664 - * SOFT_RST must be clear before we write to it. 665 - * Then we must wait until it clears again. 666 - */ 667 - ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval, 668 - !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC); 669 - if (ret) { 670 - dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__); 671 - return ret; 672 - } 673 - 674 - reg_write(RESET_CONTROL, SOFT_RST); 675 - 676 - ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval, 677 - !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC); 678 - if (ret) { 679 - dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__); 680 - return ret; 681 - } 682 - 683 - return 0; 684 602 } 685 603 686 604 static inline bool is_version_1_1_or_newer(struct i3c_hci *hci) ··· 765 739 int ret; 766 740 767 741 ret = i3c_hci_bus_disable(hci); 768 - if (ret) 742 + if (ret) { 743 + /* Fall back to software reset to disable the bus */ 744 + ret = i3c_hci_software_reset(hci); 745 + i3c_hci_sync_irq_inactive(hci); 769 746 return ret; 747 + } 770 748 771 749 hci->io->suspend(hci); 772 750 ··· 790 760 791 761 mipi_i3c_hci_dat_v1.restore(hci); 792 762 793 - hci->irq_inactive = false; 794 - 795 763 hci->io->resume(hci); 796 764 797 - reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE); 765 + scoped_guard(spinlock_irqsave, &hci->lock) 766 + hci->irq_inactive = false; 767 + 768 + /* Enable bus with Hot-Join disabled */ 769 + reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL); 798 770 799 771 return 0; 800 772 } ··· 956 924 if (!hci) 957 925 return -ENOMEM; 958 926 927 + spin_lock_init(&hci->lock); 928 + mutex_init(&hci->control_mutex); 929 + 959 930 /* 960 931 * Multi-bus instances share the same MMIO address range, but not 961 932 * necessarily in separate contiguous sub-ranges. 
To avoid overlapping ··· 984 949 ret = i3c_hci_init(hci); 985 950 if (ret) 986 951 return ret; 952 + 953 + hci->irq_inactive = true; 987 954 988 955 irq = platform_get_irq(pdev, 0); 989 956 ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
+83 -73
drivers/i3c/master/mipi-i3c-hci/dma.c
··· 129 129 dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma; 130 130 unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total; 131 131 unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz; 132 - unsigned int done_ptr, ibi_chunk_ptr; 132 + unsigned int done_ptr, ibi_chunk_ptr, xfer_space; 133 133 struct hci_xfer **src_xfers; 134 - spinlock_t lock; 135 134 struct completion op_done; 136 135 }; 137 136 ··· 260 261 261 262 rh->done_ptr = 0; 262 263 rh->ibi_chunk_ptr = 0; 264 + rh->xfer_space = rh->xfer_entries; 263 265 } 264 266 265 267 static void hci_dma_init_rings(struct i3c_hci *hci) ··· 344 344 goto err_out; 345 345 rh = &rings->headers[i]; 346 346 rh->regs = hci->base_regs + offset; 347 - spin_lock_init(&rh->lock); 348 347 init_completion(&rh->op_done); 349 348 350 349 rh->xfer_entries = XFER_RING_ENTRIES; ··· 438 439 } 439 440 } 440 441 442 + static struct i3c_dma *hci_dma_map_xfer(struct device *dev, struct hci_xfer *xfer) 443 + { 444 + enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 445 + bool need_bounce = device_iommu_mapped(dev) && xfer->rnw && (xfer->data_len & 3); 446 + 447 + return i3c_master_dma_map_single(dev, xfer->data, xfer->data_len, need_bounce, dir); 448 + } 449 + 450 + static int hci_dma_map_xfer_list(struct i3c_hci *hci, struct device *dev, 451 + struct hci_xfer *xfer_list, int n) 452 + { 453 + for (int i = 0; i < n; i++) { 454 + struct hci_xfer *xfer = xfer_list + i; 455 + 456 + if (!xfer->data) 457 + continue; 458 + 459 + xfer->dma = hci_dma_map_xfer(dev, xfer); 460 + if (!xfer->dma) { 461 + hci_dma_unmap_xfer(hci, xfer_list, i); 462 + return -ENOMEM; 463 + } 464 + } 465 + 466 + return 0; 467 + } 468 + 441 469 static int hci_dma_queue_xfer(struct i3c_hci *hci, 442 470 struct hci_xfer *xfer_list, int n) 443 471 { 444 472 struct hci_rings_data *rings = hci->io_data; 445 473 struct hci_rh_data *rh; 446 474 unsigned int i, ring, enqueue_ptr; 447 - u32 op1_val, op2_val; 475 + u32 op1_val; 476 + int ret; 477 + 478 + ret = hci_dma_map_xfer_list(hci, rings->sysdev, xfer_list, n); 479 + if (ret) 480 + return ret; 448 481 449 482 /* For now we only use ring 0 */ 450 483 ring = 0; 451 484 rh = &rings->headers[ring]; 485 + 486 + spin_lock_irq(&hci->lock); 487 + 488 + if (n > rh->xfer_space) { 489 + spin_unlock_irq(&hci->lock); 490 + hci_dma_unmap_xfer(hci, xfer_list, n); 491 + return -EBUSY; 492 + } 452 493 453 494 op1_val = rh_reg_read(RING_OPERATION1); 454 495 enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val); 455 496 for (i = 0; i < n; i++) { 456 497 struct hci_xfer *xfer = xfer_list + i; 457 498 u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr; 458 - enum dma_data_direction dir = xfer->rnw ? 
DMA_FROM_DEVICE : 459 - DMA_TO_DEVICE; 460 - bool need_bounce; 461 499 462 500 /* store cmd descriptor */ 463 501 *ring_data++ = xfer->cmd_desc[0]; ··· 513 477 514 478 /* 2nd and 3rd words of Data Buffer Descriptor Structure */ 515 479 if (xfer->data) { 516 - need_bounce = device_iommu_mapped(rings->sysdev) && 517 - xfer->rnw && 518 - xfer->data_len != ALIGN(xfer->data_len, 4); 519 - xfer->dma = i3c_master_dma_map_single(rings->sysdev, 520 - xfer->data, 521 - xfer->data_len, 522 - need_bounce, 523 - dir); 524 - if (!xfer->dma) { 525 - hci_dma_unmap_xfer(hci, xfer_list, i); 526 - return -ENOMEM; 527 - } 528 480 *ring_data++ = lower_32_bits(xfer->dma->addr); 529 481 *ring_data++ = upper_32_bits(xfer->dma->addr); 530 482 } else { ··· 527 503 xfer->ring_entry = enqueue_ptr; 528 504 529 505 enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries; 530 - 531 - /* 532 - * We may update the hardware view of the enqueue pointer 533 - * only if we didn't reach its dequeue pointer. 534 - */ 535 - op2_val = rh_reg_read(RING_OPERATION2); 536 - if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) { 537 - /* the ring is full */ 538 - hci_dma_unmap_xfer(hci, xfer_list, i + 1); 539 - return -EBUSY; 540 - } 541 506 } 542 507 543 - /* take care to update the hardware enqueue pointer atomically */ 544 - spin_lock_irq(&rh->lock); 545 - op1_val = rh_reg_read(RING_OPERATION1); 508 + rh->xfer_space -= n; 509 + 546 510 op1_val &= ~RING_OP1_CR_ENQ_PTR; 547 511 op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr); 548 512 rh_reg_write(RING_OPERATION1, op1_val); 549 - spin_unlock_irq(&rh->lock); 513 + spin_unlock_irq(&hci->lock); 550 514 551 515 return 0; 552 516 } ··· 546 534 struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number]; 547 535 unsigned int i; 548 536 bool did_unqueue = false; 537 + u32 ring_status; 549 538 550 - /* stop the ring */ 551 - rh_reg_write(RING_CONTROL, RING_CTRL_ABORT); 552 - if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) { 553 - /* 554 - * We're deep in it if ever this condition is ever met. 555 - * Hardware might still be writing to memory, etc. 556 - */ 557 - dev_crit(&hci->master.dev, "unable to abort the ring\n"); 558 - WARN_ON(1); 539 + guard(mutex)(&hci->control_mutex); 540 + 541 + ring_status = rh_reg_read(RING_STATUS); 542 + if (ring_status & RING_STATUS_RUNNING) { 543 + /* stop the ring */ 544 + reinit_completion(&rh->op_done); 545 + rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_ABORT); 546 + wait_for_completion_timeout(&rh->op_done, HZ); 547 + ring_status = rh_reg_read(RING_STATUS); 548 + if (ring_status & RING_STATUS_RUNNING) { 549 + /* 550 + * We're deep in it if this condition is ever met. 551 + * Hardware might still be writing to memory, etc. 
552 + */ 553 + dev_crit(&hci->master.dev, "unable to abort the ring\n"); 554 + WARN_ON(1); 555 + } 559 556 } 557 + 558 + spin_lock_irq(&hci->lock); 560 559 561 560 for (i = 0; i < n; i++) { 562 561 struct hci_xfer *xfer = xfer_list + i; ··· 582 559 u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx; 583 560 584 561 /* store no-op cmd descriptor */ 585 - *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7); 562 + *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7) | FIELD_PREP(CMD_0_TID, xfer->cmd_tid); 586 563 *ring_data++ = 0; 587 564 if (hci->cmd == &mipi_i3c_hci_cmd_v2) { 588 565 *ring_data++ = 0; ··· 600 577 } 601 578 602 579 /* restart the ring */ 580 + mipi_i3c_hci_resume(hci); 603 581 rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE); 582 + rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_RUN_STOP); 583 + 584 + spin_unlock_irq(&hci->lock); 604 585 605 586 return did_unqueue; 587 + } 588 + 589 + static int hci_dma_handle_error(struct i3c_hci *hci, struct hci_xfer *xfer_list, int n) 590 + { 591 + return hci_dma_dequeue_xfer(hci, xfer_list, n) ? -EIO : 0; 606 592 } 607 593 608 594 static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh) 609 595 { 610 596 u32 op1_val, op2_val, resp, *ring_resp; 611 597 unsigned int tid, done_ptr = rh->done_ptr; 598 + unsigned int done_cnt = 0; 612 599 struct hci_xfer *xfer; 613 600 614 601 for (;;) { ··· 636 603 dev_dbg(&hci->master.dev, "orphaned ring entry"); 637 604 } else { 638 605 hci_dma_unmap_xfer(hci, xfer, 1); 606 + rh->src_xfers[done_ptr] = NULL; 639 607 xfer->ring_entry = -1; 640 608 xfer->response = resp; 641 609 if (tid != xfer->cmd_tid) { ··· 651 617 652 618 done_ptr = (done_ptr + 1) % rh->xfer_entries; 653 619 rh->done_ptr = done_ptr; 620 + done_cnt += 1; 654 621 } 655 622 656 - /* take care to update the software dequeue pointer atomically */ 657 - spin_lock(&rh->lock); 623 + rh->xfer_space += done_cnt; 658 624 op1_val = rh_reg_read(RING_OPERATION1); 659 625 op1_val &= ~RING_OP1_CR_SW_DEQ_PTR; 660 626 op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr); 661 627 rh_reg_write(RING_OPERATION1, op1_val); 662 - spin_unlock(&rh->lock); 663 628 } 664 629 665 630 static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev, ··· 838 805 i3c_master_queue_ibi(dev, slot); 839 806 840 807 done: 841 - /* take care to update the ibi dequeue pointer atomically */ 842 - spin_lock(&rh->lock); 843 808 op1_val = rh_reg_read(RING_OPERATION1); 844 809 op1_val &= ~RING_OP1_IBI_DEQ_PTR; 845 810 op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr); 846 811 rh_reg_write(RING_OPERATION1, op1_val); 847 - spin_unlock(&rh->lock); 848 812 849 813 /* update the chunk pointer */ 850 814 rh->ibi_chunk_ptr += ibi_chunks; ··· 875 845 hci_dma_xfer_done(hci, rh); 876 846 if (status & INTR_RING_OP) 877 847 complete(&rh->op_done); 878 - 879 - if (status & INTR_TRANSFER_ABORT) { 880 - u32 ring_status; 881 - 882 - dev_notice_ratelimited(&hci->master.dev, 883 - "Ring %d: Transfer Aborted\n", i); 884 - mipi_i3c_hci_resume(hci); 885 - ring_status = rh_reg_read(RING_STATUS); 886 - if (!(ring_status & RING_STATUS_RUNNING) && 887 - status & INTR_TRANSFER_COMPLETION && 888 - status & INTR_TRANSFER_ERR) { 889 - /* 890 - * Ring stop followed by run is an Intel 891 - * specific required quirk after resuming the 892 - * halted controller. Do it only when the ring 893 - * is not in running state after a transfer 894 - * error. 
895 - */ 896 - rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE); 897 - rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | 898 - RING_CTRL_RUN_STOP); 899 - } 900 - } 848 + if (status & INTR_TRANSFER_ABORT) 849 + dev_dbg(&hci->master.dev, "Ring %d: Transfer Aborted\n", i); 901 850 if (status & INTR_IBI_RING_FULL) 902 851 dev_err_ratelimited(&hci->master.dev, 903 852 "Ring %d: IBI Ring Full Condition\n", i); ··· 892 883 .cleanup = hci_dma_cleanup, 893 884 .queue_xfer = hci_dma_queue_xfer, 894 885 .dequeue_xfer = hci_dma_dequeue_xfer, 886 + .handle_error = hci_dma_handle_error, 895 887 .irq_handler = hci_dma_irq_handler, 896 888 .request_ibi = hci_dma_request_ibi, 897 889 .free_ibi = hci_dma_free_ibi,
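The dma.c rework above drops the per-ring spinlock and the enqueue-time dequeue-pointer probe in favour of a software xfer_space counter under the controller-wide hci->lock: the full-ring check now happens before any descriptor is written, so the old mid-loop unwinding path disappears, and the completion handler credits entries back. A minimal user-space sketch of that accounting pattern (ring_reserve and ring_complete are illustrative names, not the driver's code):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_ENTRIES 8

    struct ring {
        unsigned int enqueue;   /* producer index */
        unsigned int done;      /* consumer index */
        unsigned int space;     /* free entries, maintained in software */
    };

    /* Reserve n entries; the caller is assumed to hold the ring lock. */
    static bool ring_reserve(struct ring *r, unsigned int n)
    {
        if (n > r->space)
            return false;       /* ring full: fail before touching HW */
        r->space -= n;
        r->enqueue = (r->enqueue + n) % RING_ENTRIES;
        return true;
    }

    /* Credit back entries retired by the completion handler. */
    static void ring_complete(struct ring *r, unsigned int n)
    {
        r->space += n;
        r->done = (r->done + n) % RING_ENTRIES;
    }

    int main(void)
    {
        struct ring r = { .space = RING_ENTRIES };

        printf("reserve 6: %d\n", ring_reserve(&r, 6)); /* 1 */
        printf("reserve 4: %d\n", ring_reserve(&r, 4)); /* 0: only 2 left */
        ring_complete(&r, 6);
        printf("reserve 4: %d\n", ring_reserve(&r, 4)); /* 1 */
        return 0;
    }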
+5
drivers/i3c/master/mipi-i3c-hci/hci.h
··· 50 50 const struct hci_io_ops *io; 51 51 void *io_data; 52 52 const struct hci_cmd_ops *cmd; 53 + spinlock_t lock; 54 + struct mutex control_mutex; 53 55 atomic_t next_cmd_tid; 54 56 bool irq_inactive; 55 57 u32 caps; ··· 89 87 unsigned int data_len; 90 88 unsigned int cmd_tid; 91 89 struct completion *completion; 90 + unsigned long timeout; 92 91 union { 93 92 struct { 94 93 /* PIO specific */ ··· 123 120 bool (*irq_handler)(struct i3c_hci *hci); 124 121 int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n); 125 122 bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n); 123 + int (*handle_error)(struct i3c_hci *hci, struct hci_xfer *xfer, int n); 126 124 int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev, 127 125 const struct i3c_ibi_setup *req); 128 126 void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev); ··· 158 154 void amd_set_od_pp_timing(struct i3c_hci *hci); 159 155 void amd_set_resp_buf_thld(struct i3c_hci *hci); 160 156 void i3c_hci_sync_irq_inactive(struct i3c_hci *hci); 157 + int i3c_hci_process_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n); 161 158 162 159 #endif
+5 -11
drivers/i3c/master/mipi-i3c-hci/pio.c
··· 123 123 }; 124 124 125 125 struct hci_pio_data { 126 - spinlock_t lock; 127 126 struct hci_xfer *curr_xfer, *xfer_queue; 128 127 struct hci_xfer *curr_rx, *rx_queue; 129 128 struct hci_xfer *curr_tx, *tx_queue; ··· 211 212 return -ENOMEM; 212 213 213 214 hci->io_data = pio; 214 - spin_lock_init(&pio->lock); 215 215 216 216 __hci_pio_init(hci, &size_val); 217 217 ··· 629 631 xfer[i].data_left = xfer[i].data_len; 630 632 } 631 633 632 - spin_lock_irq(&pio->lock); 634 + spin_lock_irq(&hci->lock); 633 635 prev_queue_tail = pio->xfer_queue; 634 636 pio->xfer_queue = &xfer[n - 1]; 635 637 if (pio->curr_xfer) { ··· 643 645 pio_reg_read(INTR_STATUS), 644 646 pio_reg_read(INTR_SIGNAL_ENABLE)); 645 647 } 646 - spin_unlock_irq(&pio->lock); 648 + spin_unlock_irq(&hci->lock); 647 649 return 0; 648 650 } 649 651 ··· 714 716 struct hci_pio_data *pio = hci->io_data; 715 717 int ret; 716 718 717 - spin_lock_irq(&pio->lock); 719 + spin_lock_irq(&hci->lock); 718 720 dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n, 719 721 pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE)); 720 722 dev_dbg(&hci->master.dev, "main_status = %#x/%#x", 721 723 readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28)); 722 724 723 725 ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n); 724 - spin_unlock_irq(&pio->lock); 726 + spin_unlock_irq(&hci->lock); 725 727 return ret; 726 728 } 727 729 ··· 1014 1016 struct hci_pio_data *pio = hci->io_data; 1015 1017 u32 status; 1016 1018 1017 - spin_lock(&pio->lock); 1018 1019 status = pio_reg_read(INTR_STATUS); 1019 1020 dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x", 1020 1021 status, pio->enabled_irqs); 1021 1022 status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS; 1022 - if (!status) { 1023 - spin_unlock(&pio->lock); 1023 + if (!status) 1024 1024 return false; 1025 - } 1026 1025 1027 1026 if (status & STAT_IBI_STATUS_THLD) 1028 1027 hci_pio_process_ibi(hci, pio); ··· 1053 1058 pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs); 1054 1059 dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x", 1055 1060 pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE)); 1056 - spin_unlock(&pio->lock); 1057 1061 return true; 1058 1062 } 1059 1063
+11 -2
drivers/iio/adc/ad7768-1.c
··· 531 531 return ret; 532 532 } 533 533 534 - static void ad7768_fill_scale_tbl(struct iio_dev *dev) 534 + static int ad7768_fill_scale_tbl(struct iio_dev *dev) 535 535 { 536 536 struct ad7768_state *st = iio_priv(dev); 537 537 const struct iio_scan_type *scan_type; ··· 541 541 u64 tmp2; 542 542 543 543 scan_type = iio_get_current_scan_type(dev, &dev->channels[0]); 544 + if (IS_ERR(scan_type)) { 545 + dev_err(&st->spi->dev, "Failed to get scan type.\n"); 546 + return PTR_ERR(scan_type); 547 + } 548 + 544 549 if (scan_type->sign == 's') 545 550 val2 = scan_type->realbits - 1; 546 551 else ··· 570 565 st->scale_tbl[i][0] = tmp0; /* Integer part */ 571 566 st->scale_tbl[i][1] = abs(tmp1); /* Fractional part */ 572 567 } 568 + 569 + return 0; 573 570 } 574 571 575 572 static int ad7768_set_sinc3_dec_rate(struct ad7768_state *st, ··· 676 669 } 677 670 678 671 /* Update scale table: scale values vary according to the precision */ 679 - ad7768_fill_scale_tbl(dev); 672 + ret = ad7768_fill_scale_tbl(dev); 673 + if (ret) 674 + return ret; 680 675 681 676 ad7768_fill_samp_freq_tbl(st); 682 677
+1 -1
drivers/iio/chemical/bme680_core.c
··· 613 613 * + heater duration 614 614 */ 615 615 int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press + 616 - data->oversampling_humid) * 1936) + (477 * 4) + 616 + data->oversampling_humid) * 1963) + (477 * 4) + 617 617 (477 * 5) + 1000 + (data->heater_dur * 1000); 618 618 619 619 fsleep(wait_eoc_us);
+1 -1
drivers/iio/chemical/sps30_i2c.c
··· 171 171 if (!sps30_i2c_meas_ready(state)) 172 172 return -ETIMEDOUT; 173 173 174 - return sps30_i2c_command(state, SPS30_I2C_READ_MEAS, NULL, 0, meas, sizeof(num) * num); 174 + return sps30_i2c_command(state, SPS30_I2C_READ_MEAS, NULL, 0, meas, sizeof(*meas) * num); 175 175 } 176 176 177 177 static int sps30_i2c_clean_fan(struct sps30_state *state)
+1 -1
drivers/iio/chemical/sps30_serial.c
··· 303 303 if (msleep_interruptible(1000)) 304 304 return -EINTR; 305 305 306 - ret = sps30_serial_command(state, SPS30_SERIAL_READ_MEAS, NULL, 0, meas, num * sizeof(num)); 306 + ret = sps30_serial_command(state, SPS30_SERIAL_READ_MEAS, NULL, 0, meas, num * sizeof(*meas)); 307 307 if (ret < 0) 308 308 return ret; 309 309 /* if measurements aren't ready sensor returns empty frame */
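Both sps30 hunks fix the same slip: sizeof applied to the count variable instead of to the element being copied, so the byte count scales with sizeof(int) rather than with the element size. A standalone illustration (16-bit elements chosen only to make the divergence visible):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t meas[8] = { 0 };   /* 16 bytes of destination buffer */
        int num = 8;                /* element count */

        /* Wrong: sizeof(num) is sizeof(int) == 4 on typical ABIs, so
         * this asks for 32 bytes and overruns the 16-byte buffer. */
        size_t bad = sizeof(num) * num;

        /* Right: sizeof(*meas) is the element size, 2 bytes. */
        size_t good = sizeof(*meas) * num;

        printf("bad=%zu good=%zu actual=%zu\n", bad, good, sizeof(meas));
        return 0;
    }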
+1 -1
drivers/iio/dac/ds4424.c
··· 140 140 141 141 switch (mask) { 142 142 case IIO_CHAN_INFO_RAW: 143 - if (val < S8_MIN || val > S8_MAX) 143 + if (val <= S8_MIN || val > S8_MAX) 144 144 return -EINVAL; 145 145 146 146 if (val > 0) {
+1 -1
drivers/iio/frequency/adf4377.c
··· 508 508 return ret; 509 509 510 510 return regmap_read_poll_timeout(st->regmap, 0x0, read_val, 511 - !(read_val & (ADF4377_0000_SOFT_RESET_R_MSK | 511 + !(read_val & (ADF4377_0000_SOFT_RESET_MSK | 512 512 ADF4377_0000_SOFT_RESET_R_MSK)), 200, 200 * 100); 513 513 } 514 514
+13 -5
drivers/iio/gyro/mpu3050-core.c
··· 322 322 } 323 323 case IIO_CHAN_INFO_RAW: 324 324 /* Resume device */ 325 - pm_runtime_get_sync(mpu3050->dev); 325 + ret = pm_runtime_resume_and_get(mpu3050->dev); 326 + if (ret) 327 + return ret; 326 328 mutex_lock(&mpu3050->lock); 327 329 328 330 ret = mpu3050_set_8khz_samplerate(mpu3050); ··· 649 647 static int mpu3050_buffer_preenable(struct iio_dev *indio_dev) 650 648 { 651 649 struct mpu3050 *mpu3050 = iio_priv(indio_dev); 650 + int ret; 652 651 653 - pm_runtime_get_sync(mpu3050->dev); 652 + ret = pm_runtime_resume_and_get(mpu3050->dev); 653 + if (ret) 654 + return ret; 654 655 655 656 /* Unless we have OUR trigger active, run at full speed */ 656 - if (!mpu3050->hw_irq_trigger) 657 - return mpu3050_set_8khz_samplerate(mpu3050); 657 + if (!mpu3050->hw_irq_trigger) { 658 + ret = mpu3050_set_8khz_samplerate(mpu3050); 659 + if (ret) 660 + pm_runtime_put_autosuspend(mpu3050->dev); 661 + } 658 662 659 - return 0; 663 + return ret; 660 664 } 661 665 662 666 static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev)
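The mpu3050 hunks replace pm_runtime_get_sync(), which leaves the usage count raised even when resume fails, with pm_runtime_resume_and_get(), which drops it on failure, and add the missing pm_runtime_put_autosuspend() when a later step fails. The invariant being restored, every successful get paired with exactly one put on every exit path, in a standalone sketch with mock helpers (resume_and_get and put_autosuspend here are stand-ins, not the kernel API):

    #include <stdio.h>

    static int usage_count;

    static int resume_and_get(void)
    {
        /* Contract: on failure this helper drops its own reference,
         * so the caller has nothing to undo. This mock never fails. */
        usage_count++;
        return 0;
    }

    static void put_autosuspend(void)
    {
        usage_count--;
    }

    static int do_configure(void) { return -1; } /* pretend this fails */

    static int preenable(void)
    {
        int ret = resume_and_get();
        if (ret)
            return ret;          /* nothing to undo */

        ret = do_configure();
        if (ret)
            put_autosuspend();   /* undo the get on this error path */
        return ret;
    }

    int main(void)
    {
        preenable();
        printf("usage_count=%d (must be 0)\n", usage_count);
        return 0;
    }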
+1 -2
drivers/iio/gyro/mpu3050-i2c.c
··· 19 19 struct mpu3050 *mpu3050 = i2c_mux_priv(mux); 20 20 21 21 /* Just power up the device, that is all that is needed */ 22 - pm_runtime_get_sync(mpu3050->dev); 23 - return 0; 22 + return pm_runtime_resume_and_get(mpu3050->dev); 24 23 } 25 24 26 25 static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
+1 -1
drivers/iio/imu/adis.c
··· 526 526 527 527 adis->spi = spi; 528 528 adis->data = data; 529 - if (!adis->ops->write && !adis->ops->read && !adis->ops->reset) 529 + if (!adis->ops) 530 530 adis->ops = &adis_default_ops; 531 531 else if (!adis->ops->write || !adis->ops->read || !adis->ops->reset) 532 532 return -EINVAL;
+2
drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
··· 651 651 return -EINVAL; 652 652 653 653 conf.odr = inv_icm42600_accel_odr_conv[idx / 2]; 654 + if (conf.odr == st->conf.accel.odr) 655 + return 0; 654 656 655 657 pm_runtime_get_sync(dev); 656 658 mutex_lock(&st->lock);
+4
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
··· 371 371 static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev) 372 372 { 373 373 struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); 374 + struct inv_icm42600_sensor_state *sensor_st = iio_priv(indio_dev); 375 + struct inv_sensors_timestamp *ts = &sensor_st->ts; 374 376 struct device *dev = regmap_get_device(st->map); 375 377 unsigned int sensor; 376 378 unsigned int *watermark; ··· 393 391 } 394 392 395 393 mutex_lock(&st->lock); 394 + 395 + inv_sensors_timestamp_apply_odr(ts, 0, 0, 0); 396 396 397 397 ret = inv_icm42600_buffer_set_fifo_en(st, st->fifo.en & ~sensor); 398 398 if (ret)
+2
drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
··· 358 358 return -EINVAL; 359 359 360 360 conf.odr = inv_icm42600_gyro_odr_conv[idx / 2]; 361 + if (conf.odr == st->conf.gyro.odr) 362 + return 0; 361 363 362 364 pm_runtime_get_sync(dev); 363 365 mutex_lock(&st->lock);
+1 -1
drivers/iio/imu/inv_icm45600/inv_icm45600.h
··· 205 205 #define INV_ICM45600_SPI_SLEW_RATE_38NS 0 206 206 207 207 #define INV_ICM45600_REG_INT1_CONFIG2 0x0018 208 - #define INV_ICM45600_INT1_CONFIG2_PUSH_PULL BIT(2) 208 + #define INV_ICM45600_INT1_CONFIG2_OPEN_DRAIN BIT(2) 209 209 #define INV_ICM45600_INT1_CONFIG2_LATCHED BIT(1) 210 210 #define INV_ICM45600_INT1_CONFIG2_ACTIVE_HIGH BIT(0) 211 211 #define INV_ICM45600_INT1_CONFIG2_ACTIVE_LOW 0x00
+8 -3
drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
··· 637 637 break; 638 638 } 639 639 640 - if (!open_drain) 641 - val |= INV_ICM45600_INT1_CONFIG2_PUSH_PULL; 640 + if (open_drain) 641 + val |= INV_ICM45600_INT1_CONFIG2_OPEN_DRAIN; 642 642 643 643 ret = regmap_write(st->map, INV_ICM45600_REG_INT1_CONFIG2, val); 644 644 if (ret) ··· 744 744 */ 745 745 fsleep(5 * USEC_PER_MSEC); 746 746 747 + /* set pm_runtime active early for disable vddio resource cleanup */ 748 + ret = pm_runtime_set_active(dev); 749 + if (ret) 750 + return ret; 751 + 747 752 ret = inv_icm45600_enable_regulator_vddio(st); 748 753 if (ret) 749 754 return ret; ··· 781 776 if (ret) 782 777 return ret; 783 778 784 - ret = devm_pm_runtime_set_active_enabled(dev); 779 + ret = devm_pm_runtime_enable(dev); 785 780 if (ret) 786 781 return ret; 787 782
+8
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
··· 1943 1943 irq_type); 1944 1944 return -EINVAL; 1945 1945 } 1946 + 1947 + /* 1948 + * Acking interrupts by status register does not work reliably 1949 + * but seem to work when this bit is set. 1950 + */ 1951 + if (st->chip_type == INV_MPU9150) 1952 + st->irq_mask |= INV_MPU6050_INT_RD_CLEAR; 1953 + 1946 1954 device_set_wakeup_capable(dev, true); 1947 1955 1948 1956 st->vdd_supply = devm_regulator_get(dev, "vdd");
+2
drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
··· 390 390 /* enable level triggering */ 391 391 #define INV_MPU6050_LATCH_INT_EN 0x20 392 392 #define INV_MPU6050_BIT_BYPASS_EN 0x2 393 + /* allow acking interrupts by any register read */ 394 + #define INV_MPU6050_INT_RD_CLEAR 0x10 393 395 394 396 /* Allowed timestamp period jitter in percent */ 395 397 #define INV_MPU6050_TS_PERIOD_JITTER 4
+4 -1
drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
··· 248 248 switch (st->chip_type) { 249 249 case INV_MPU6000: 250 250 case INV_MPU6050: 251 - case INV_MPU9150: 252 251 /* 253 252 * WoM is not supported and interrupt status read seems to be broken for 254 253 * some chips. Since data ready is the only interrupt, bypass interrupt ··· 256 257 wom_bits = 0; 257 258 int_status = INV_MPU6050_BIT_RAW_DATA_RDY_INT; 258 259 goto data_ready_interrupt; 260 + case INV_MPU9150: 261 + /* IRQ needs to be acked */ 262 + wom_bits = 0; 263 + break; 259 264 case INV_MPU6500: 260 265 case INV_MPU6515: 261 266 case INV_MPU6880:
+4 -2
drivers/iio/industrialio-buffer.c
··· 228 228 written = 0; 229 229 add_wait_queue(&rb->pollq, &wait); 230 230 do { 231 - if (!indio_dev->info) 232 - return -ENODEV; 231 + if (!indio_dev->info) { 232 + ret = -ENODEV; 233 + break; 234 + } 233 235 234 236 if (!iio_buffer_space_available(rb)) { 235 237 if (signal_pending(current)) {
+1 -1
drivers/iio/light/bh1780.c
··· 109 109 case IIO_LIGHT: 110 110 pm_runtime_get_sync(&bh1780->client->dev); 111 111 value = bh1780_read_word(bh1780, BH1780_REG_DLOW); 112 + pm_runtime_put_autosuspend(&bh1780->client->dev); 112 113 if (value < 0) 113 114 return value; 114 - pm_runtime_put_autosuspend(&bh1780->client->dev); 115 115 *val = value; 116 116 117 117 return IIO_VAL_INT;
+1 -2
drivers/iio/magnetometer/Kconfig
··· 143 143 tristate "MEMSIC MMC5633 3-axis magnetic sensor" 144 144 select REGMAP_I2C 145 145 select REGMAP_I3C if I3C 146 - depends on I2C 147 - depends on I3C || !I3C 146 + depends on I3C_OR_I2C 148 147 help 149 148 Say yes here to build support for the MEMSIC MMC5633 3-axis 150 149 magnetic sensor.
+1 -1
drivers/iio/magnetometer/tlv493d.c
··· 171 171 switch (ch) { 172 172 case TLV493D_AXIS_X: 173 173 val = FIELD_GET(TLV493D_BX_MAG_X_AXIS_MSB, b[TLV493D_RD_REG_BX]) << 4 | 174 - FIELD_GET(TLV493D_BX2_MAG_X_AXIS_LSB, b[TLV493D_RD_REG_BX2]) >> 4; 174 + FIELD_GET(TLV493D_BX2_MAG_X_AXIS_LSB, b[TLV493D_RD_REG_BX2]); 175 175 break; 176 176 case TLV493D_AXIS_Y: 177 177 val = FIELD_GET(TLV493D_BY_MAG_Y_AXIS_MSB, b[TLV493D_RD_REG_BY]) << 4 |
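The tlv493d fix removes a stray ">> 4": FIELD_GET() already shifts the extracted field down by the mask's trailing-zero count, so shifting again discards the low nibble of the X axis. A standalone sketch with a simplified FIELD_GET (not the kernel's bitfield.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified FIELD_GET: mask, then shift by the mask's lowest set bit. */
    #define FIELD_GET(mask, val) (((val) & (mask)) >> __builtin_ctz(mask))

    #define MSB_MASK 0xFFu      /* bits 7:0 hold the high 8 bits */
    #define LSB_MASK 0xF0u      /* bits 7:4 hold the low 4 bits */

    int main(void)
    {
        uint8_t hi = 0xAB, lo = 0xC0;

        /* Correct 12-bit assembly: FIELD_GET already normalised lo to 0xC. */
        unsigned int good = FIELD_GET(MSB_MASK, hi) << 4 | FIELD_GET(LSB_MASK, lo);
        /* An extra shift throws the low nibble away entirely. */
        unsigned int bad  = FIELD_GET(MSB_MASK, hi) << 4 | FIELD_GET(LSB_MASK, lo) >> 4;

        printf("good=0x%x bad=0x%x\n", good, bad);  /* 0xabc vs 0xab0 */
        return 0;
    }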
+1 -1
drivers/iio/potentiometer/mcp4131.c
··· 221 221 222 222 mutex_lock(&data->lock); 223 223 224 - data->buf[0] = address << MCP4131_WIPER_SHIFT; 224 + data->buf[0] = address; 225 225 data->buf[0] |= MCP4131_WRITE | (val >> 8); 226 226 data->buf[1] = val & 0xFF; /* 8 bits here */ 227 227
+4 -2
drivers/iio/proximity/hx9023s.c
··· 719 719 struct device *dev = regmap_get_device(data->regmap); 720 720 unsigned int i, period_ms; 721 721 722 + if (!val && !val2) 723 + return -EINVAL; 724 + 722 725 period_ms = div_u64(NANO, (val * MEGA + val2)); 723 726 724 727 for (i = 0; i < ARRAY_SIZE(hx9023s_samp_freq_table); i++) { ··· 1037 1034 if (!bin) 1038 1035 return -ENOMEM; 1039 1036 1040 - memcpy(bin->data, fw->data, fw->size); 1041 - 1042 1037 bin->fw_size = fw->size; 1038 + memcpy(bin->data, fw->data, bin->fw_size); 1043 1039 bin->fw_ver = bin->data[FW_VER_OFFSET]; 1044 1040 bin->reg_count = get_unaligned_le16(bin->data + FW_REG_CNT_OFFSET); 1045 1041
+14 -1
drivers/iommu/amd/iommu.c
··· 2909 2909 2910 2910 static struct protection_domain identity_domain; 2911 2911 2912 + static int amd_iommu_identity_attach(struct iommu_domain *dom, struct device *dev, 2913 + struct iommu_domain *old) 2914 + { 2915 + /* 2916 + * Don't allow attaching a device to the identity domain if SNP is 2917 + * enabled. 2918 + */ 2919 + if (amd_iommu_snp_en) 2920 + return -EINVAL; 2921 + 2922 + return amd_iommu_attach_device(dom, dev, old); 2923 + } 2924 + 2912 2925 static const struct iommu_domain_ops identity_domain_ops = { 2913 - .attach_dev = amd_iommu_attach_device, 2926 + .attach_dev = amd_iommu_identity_attach, 2914 2927 }; 2915 2928 2916 2929 void amd_iommu_init_identity_domain(void)
+1 -2
drivers/iommu/intel/dmar.c
··· 1314 1314 if (fault & DMA_FSTS_ITE) { 1315 1315 head = readl(iommu->reg + DMAR_IQH_REG); 1316 1316 head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH; 1317 - head |= 1; 1318 1317 tail = readl(iommu->reg + DMAR_IQT_REG); 1319 1318 tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH; 1320 1319 ··· 1330 1331 do { 1331 1332 if (qi->desc_status[head] == QI_IN_USE) 1332 1333 qi->desc_status[head] = QI_ABORT; 1333 - head = (head - 2 + QI_LENGTH) % QI_LENGTH; 1334 + head = (head - 1 + QI_LENGTH) % QI_LENGTH; 1334 1335 } while (head != tail); 1335 1336 1336 1337 /*
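The dmar hunk's functional change is the step size: the recovery loop now walks back one descriptor at a time instead of two, so no queue entry in the aborted range is skipped. The wrap idiom it relies on, (i - 1 + LEN) % LEN, is worth noting on its own, since C's % operator is not a true modulo and the naive form goes negative for signed indices:

    #include <stdio.h>

    #define QLEN 12   /* any length, not necessarily a power of two */

    static int dec_bad(int i)  { return (i - 1) % QLEN; }
    static int dec_good(int i) { return (i - 1 + QLEN) % QLEN; }

    int main(void)
    {
        /* C's % truncates toward zero, so the naive form goes negative. */
        printf("bad(0)=%d good(0)=%d\n", dec_bad(0), dec_good(0)); /* -1 vs 11 */
        printf("bad(5)=%d good(5)=%d\n", dec_bad(5), dec_good(5)); /*  4 vs  4 */
        return 0;
    }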
+8 -4
drivers/iommu/intel/svm.c
··· 164 164 if (IS_ERR(dev_pasid)) 165 165 return PTR_ERR(dev_pasid); 166 166 167 - ret = iopf_for_domain_replace(domain, old, dev); 168 - if (ret) 169 - goto out_remove_dev_pasid; 167 + /* SVA with non-IOMMU/PRI IOPF handling is allowed. */ 168 + if (info->pri_supported) { 169 + ret = iopf_for_domain_replace(domain, old, dev); 170 + if (ret) 171 + goto out_remove_dev_pasid; 172 + } 170 173 171 174 /* Setup the pasid table: */ 172 175 sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0; ··· 184 181 185 182 return 0; 186 183 out_unwind_iopf: 187 - iopf_for_domain_replace(old, domain, dev); 184 + if (info->pri_supported) 185 + iopf_for_domain_replace(old, domain, dev); 188 186 out_remove_dev_pasid: 189 187 domain_remove_dev_pasid(domain, dev, pasid); 190 188 return ret;
+6 -6
drivers/iommu/iommu-sva.c
··· 182 182 iommu_detach_device_pasid(domain, dev, iommu_mm->pasid); 183 183 if (--domain->users == 0) { 184 184 list_del(&domain->next); 185 - iommu_domain_free(domain); 186 - } 185 + if (list_empty(&iommu_mm->sva_domains)) { 186 + list_del(&iommu_mm->mm_list_elm); 187 + if (list_empty(&iommu_sva_mms)) 188 + iommu_sva_present = false; 189 + } 187 190 188 - if (list_empty(&iommu_mm->sva_domains)) { 189 - list_del(&iommu_mm->mm_list_elm); 190 - if (list_empty(&iommu_sva_mms)) 191 - iommu_sva_present = false; 191 + iommu_domain_free(domain); 192 192 } 193 193 194 194 mutex_unlock(&iommu_sva_lock);
+5 -1
drivers/iommu/iommu.c
··· 1213 1213 if (addr == end) 1214 1214 goto map_end; 1215 1215 1216 - phys_addr = iommu_iova_to_phys(domain, addr); 1216 + /* 1217 + * Return address by iommu_iova_to_phys for 0 is 1218 + * ambiguous. Offset to address 1 if addr is 0. 1219 + */ 1220 + phys_addr = iommu_iova_to_phys(domain, addr ? addr : 1); 1217 1221 if (!phys_addr) { 1218 1222 map_size += pg_size; 1219 1223 continue;
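The iommu.c comment states the problem precisely: iommu_iova_to_phys() returns 0 both for an unmapped IOVA and for a mapping whose physical address happens to be 0, so the walker probes offset 1 within the same page instead. The same disambiguation trick on a toy lookup (toy_iova_to_phys is hypothetical, not the IOMMU API):

    #include <stdio.h>

    #define PAGE_SIZE 4096ul

    /* Toy translation: identity-map the first two pages, nothing else.
     * Returns 0 for "unmapped", which collides with phys address 0. */
    static unsigned long toy_iova_to_phys(unsigned long iova)
    {
        return iova < 2 * PAGE_SIZE ? iova : 0;
    }

    int main(void)
    {
        unsigned long iova = 0;

        /* A naive probe cannot tell phys 0 from a hole. */
        printf("probe(0)      = %lu (ambiguous)\n", toy_iova_to_phys(iova));

        /* Probing iova+1 stays within the same page, so a nonzero result
         * proves the page is mapped even when its phys address is 0. */
        printf("probe(0 or 1) = %lu (mapped)\n",
               toy_iova_to_phys(iova ? iova : 1));
        return 0;
    }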
+17 -4
drivers/irqchip/irq-riscv-aplic-main.c
··· 116 116 .ops = &aplic_syscore_ops, 117 117 }; 118 118 119 + static bool aplic_syscore_registered __ro_after_init; 120 + 121 + static void aplic_syscore_init(void) 122 + { 123 + if (!aplic_syscore_registered) { 124 + register_syscore(&aplic_syscore); 125 + aplic_syscore_registered = true; 126 + } 127 + } 128 + 119 129 static int aplic_pm_notifier(struct notifier_block *nb, unsigned long action, void *data) 120 130 { 121 131 struct aplic_priv *priv = container_of(nb, struct aplic_priv, genpd_nb); ··· 382 372 rc = aplic_msi_setup(dev, regs); 383 373 else 384 374 rc = aplic_direct_setup(dev, regs); 385 - if (rc) 375 + 376 + if (rc) { 386 377 dev_err_probe(dev, rc, "failed to setup APLIC in %s mode\n", 387 378 msi_mode ? "MSI" : "direct"); 388 - else 389 - register_syscore(&aplic_syscore); 379 + return rc; 380 + } 381 + 382 + aplic_syscore_init(); 390 383 391 384 #ifdef CONFIG_ACPI 392 385 if (!acpi_disabled) 393 386 acpi_dev_clear_dependencies(ACPI_COMPANION(dev)); 394 387 #endif 395 388 396 - return rc; 389 + return 0; 397 390 } 398 391 399 392 static const struct of_device_id aplic_match[] = {
+1
drivers/irqchip/irq-riscv-rpmi-sysmsi.c
··· 250 250 rc = riscv_acpi_get_gsi_info(fwnode, &priv->gsi_base, &id, 251 251 &nr_irqs, NULL); 252 252 if (rc) { 253 + mbox_free_channel(priv->chan); 253 254 dev_err(dev, "failed to find GSI mapping\n"); 254 255 return rc; 255 256 }
+1 -2
drivers/misc/amd-sbi/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 config AMD_SBRMI_I2C 3 3 tristate "AMD side band RMI support" 4 - depends on I2C 4 + depends on I3C_OR_I2C 5 5 depends on ARM || ARM64 || COMPILE_TEST 6 6 select REGMAP_I2C 7 - depends on I3C || !I3C 8 7 select REGMAP_I3C if I3C 9 8 help 10 9 Side band RMI over I2C/I3C support for AMD out of band management.
+9
drivers/mmc/host/sdhci-pci-gli.c
··· 68 68 #define GLI_9750_MISC_TX1_DLY_VALUE 0x5 69 69 #define SDHCI_GLI_9750_MISC_SSC_OFF BIT(26) 70 70 71 + #define SDHCI_GLI_9750_GM_BURST_SIZE 0x510 72 + #define SDHCI_GLI_9750_GM_BURST_SIZE_R_OSRC_LMT GENMASK(17, 16) 73 + 71 74 #define SDHCI_GLI_9750_TUNING_CONTROL 0x540 72 75 #define SDHCI_GLI_9750_TUNING_CONTROL_EN BIT(4) 73 76 #define GLI_9750_TUNING_CONTROL_EN_ON 0x1 ··· 348 345 u32 misc_value; 349 346 u32 parameter_value; 350 347 u32 control_value; 348 + u32 burst_value; 351 349 u16 ctrl2; 352 350 353 351 gl9750_wt_on(host); 352 + 353 + /* clear R_OSRC_Lmt to avoid DMA write corruption */ 354 + burst_value = sdhci_readl(host, SDHCI_GLI_9750_GM_BURST_SIZE); 355 + burst_value &= ~SDHCI_GLI_9750_GM_BURST_SIZE_R_OSRC_LMT; 356 + sdhci_writel(host, burst_value, SDHCI_GLI_9750_GM_BURST_SIZE); 354 357 355 358 driving_value = sdhci_readl(host, SDHCI_GLI_9750_DRIVING); 356 359 pll_value = sdhci_readl(host, SDHCI_GLI_9750_PLL);
+8 -1
drivers/mmc/host/sdhci.c
··· 4532 4532 * their platform code before calling sdhci_add_host(), and we 4533 4533 * won't assume 8-bit width for hosts without that CAP. 4534 4534 */ 4535 - if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 4535 + if (host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA) { 4536 + host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50); 4537 + if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400) 4538 + host->caps1 &= ~SDHCI_SUPPORT_HS400; 4539 + mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400 | MMC_CAP2_HS400_ES); 4540 + mmc->caps &= ~(MMC_CAP_DDR | MMC_CAP_UHS); 4541 + } else { 4536 4542 mmc->caps |= MMC_CAP_4_BIT_DATA; 4543 + } 4537 4544 4538 4545 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 4539 4546 mmc->caps &= ~MMC_CAP_CMD23;
+2 -4
drivers/mtd/nand/raw/brcmnand/brcmnand.c
··· 2350 2350 for (i = 0; i < ctrl->max_oob; i += 4) 2351 2351 oob_reg_write(ctrl, i, 0xffffffff); 2352 2352 2353 - if (mtd->oops_panic_write) 2353 + if (mtd->oops_panic_write) { 2354 2354 /* switch to interrupt polling and PIO mode */ 2355 2355 disable_ctrl_irqs(ctrl); 2356 - 2357 - if (use_dma(ctrl) && (has_edu(ctrl) || !oob) && flash_dma_buf_ok(buf)) { 2356 + } else if (use_dma(ctrl) && (has_edu(ctrl) || !oob) && flash_dma_buf_ok(buf)) { 2358 2357 if (ctrl->dma_trans(host, addr, (u32 *)buf, oob, mtd->writesize, 2359 2358 CMD_PROGRAM_PAGE)) 2360 - 2361 2359 ret = -EIO; 2362 2360 2363 2361 goto out;
+1 -1
drivers/mtd/nand/raw/cadence-nand-controller.c
··· 3133 3133 sizeof(*cdns_ctrl->cdma_desc), 3134 3134 &cdns_ctrl->dma_cdma_desc, 3135 3135 GFP_KERNEL); 3136 - if (!cdns_ctrl->dma_cdma_desc) 3136 + if (!cdns_ctrl->cdma_desc) 3137 3137 return -ENOMEM; 3138 3138 3139 3139 cdns_ctrl->buf_size = SZ_16K;
+12 -2
drivers/mtd/nand/raw/nand_base.c
··· 4737 4737 static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 4738 4738 { 4739 4739 struct nand_chip *chip = mtd_to_nand(mtd); 4740 + int ret; 4740 4741 4741 4742 if (!chip->ops.lock_area) 4742 4743 return -ENOTSUPP; 4743 4744 4744 - return chip->ops.lock_area(chip, ofs, len); 4745 + nand_get_device(chip); 4746 + ret = chip->ops.lock_area(chip, ofs, len); 4747 + nand_release_device(chip); 4748 + 4749 + return ret; 4745 4750 } 4746 4751 4747 4752 /** ··· 4758 4753 static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 4759 4754 { 4760 4755 struct nand_chip *chip = mtd_to_nand(mtd); 4756 + int ret; 4761 4757 4762 4758 if (!chip->ops.unlock_area) 4763 4759 return -ENOTSUPP; 4764 4760 4765 - return chip->ops.unlock_area(chip, ofs, len); 4761 + nand_get_device(chip); 4762 + ret = chip->ops.unlock_area(chip, ofs, len); 4763 + nand_release_device(chip); 4764 + 4765 + return ret; 4766 4766 } 4767 4767 4768 4768 /* Set default functions */
+3
drivers/mtd/nand/raw/pl35x-nand-controller.c
··· 862 862 PL35X_SMC_NAND_TAR_CYCLES(tmgs.t_ar) | 863 863 PL35X_SMC_NAND_TRR_CYCLES(tmgs.t_rr); 864 864 865 + writel(plnand->timings, nfc->conf_regs + PL35X_SMC_CYCLES); 866 + pl35x_smc_update_regs(nfc); 867 + 865 868 return 0; 866 869 } 867 870
+3 -3
drivers/mtd/parsers/redboot.c
··· 270 270 271 271 strcpy(names, fl->img->name); 272 272 #ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY 273 - if (!memcmp(names, "RedBoot", 8) || 274 - !memcmp(names, "RedBoot config", 15) || 275 - !memcmp(names, "FIS directory", 14)) { 273 + if (!strcmp(names, "RedBoot") || 274 + !strcmp(names, "RedBoot config") || 275 + !strcmp(names, "FIS directory")) { 276 276 parts[i].mask_flags = MTD_WRITEABLE; 277 277 } 278 278 #endif
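The redboot change swaps fixed-length memcmp() for strcmp(). For NUL-terminated names the hardcoded lengths were fragile twice over: memcmp() keeps reading past a shorter name's terminator, and a hand-counted length that is off by one silently changes the match. A standalone illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char name[16] = "RedBoot";  /* 7 chars + NUL, rest zeroed */

        /* strcmp() stops at the first difference or NUL. */
        printf("strcmp:   %d\n", strcmp(name, "RedBoot") == 0);            /* 1 */

        /* memcmp() with a fixed length keeps reading after the NUL;
         * against a shorter buffer that is an out-of-bounds read
         * waiting to happen, and a miscounted length flips the result. */
        printf("memcmp8:  %d\n", memcmp(name, "RedBoot", 8) == 0);         /* 1 */
        printf("memcmp15: %d\n", memcmp(name, "RedBoot config", 15) == 0); /* 0 */
        return 0;
    }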
+7 -7
drivers/mtd/spi-nor/core.c
··· 2345 2345 } 2346 2346 2347 2347 /** 2348 - * spi_nor_spimem_check_op - check if the operation is supported 2349 - * by controller 2348 + * spi_nor_spimem_check_read_pp_op - check if a read or a page program operation is 2349 + * supported by controller 2350 2350 *@nor: pointer to a 'struct spi_nor' 2351 2351 *@op: pointer to op template to be checked 2352 2352 * 2353 2353 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise. 2354 2354 */ 2355 - static int spi_nor_spimem_check_op(struct spi_nor *nor, 2356 - struct spi_mem_op *op) 2355 + static int spi_nor_spimem_check_read_pp_op(struct spi_nor *nor, 2356 + struct spi_mem_op *op) 2357 2357 { 2358 2358 /* 2359 2359 * First test with 4 address bytes. The opcode itself might ··· 2396 2396 if (spi_nor_protocol_is_dtr(nor->read_proto)) 2397 2397 op.dummy.nbytes *= 2; 2398 2398 2399 - return spi_nor_spimem_check_op(nor, &op); 2399 + return spi_nor_spimem_check_read_pp_op(nor, &op); 2400 2400 } 2401 2401 2402 2402 /** ··· 2414 2414 2415 2415 spi_nor_spimem_setup_op(nor, &op, pp->proto); 2416 2416 2417 - return spi_nor_spimem_check_op(nor, &op); 2417 + return spi_nor_spimem_check_read_pp_op(nor, &op); 2418 2418 } 2419 2419 2420 2420 /** ··· 2466 2466 2467 2467 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 2468 2468 2469 - if (spi_nor_spimem_check_op(nor, &op)) 2469 + if (!spi_mem_supports_op(nor->spimem, &op)) 2470 2470 nor->flags |= SNOR_F_NO_READ_CR; 2471 2471 } 2472 2472 }
+11 -5
drivers/net/bonding/bond_debugfs.c
··· 34 34 for (; hash_index != RLB_NULL_INDEX; 35 35 hash_index = client_info->used_next) { 36 36 client_info = &(bond_info->rx_hashtbl[hash_index]); 37 - seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n", 38 - &client_info->ip_src, 39 - &client_info->ip_dst, 40 - &client_info->mac_dst, 41 - client_info->slave->dev->name); 37 + if (client_info->slave) 38 + seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n", 39 + &client_info->ip_src, 40 + &client_info->ip_dst, 41 + &client_info->mac_dst, 42 + client_info->slave->dev->name); 43 + else 44 + seq_printf(m, "%-15pI4 %-15pI4 %-17pM (none)\n", 45 + &client_info->ip_src, 46 + &client_info->ip_dst, 47 + &client_info->mac_dst); 42 48 } 43 49 44 50 spin_unlock_bh(&bond->mode_lock);
+65 -7
drivers/net/bonding/bond_main.c
··· 1509 1509 return features; 1510 1510 } 1511 1511 1512 + static int bond_header_create(struct sk_buff *skb, struct net_device *bond_dev, 1513 + unsigned short type, const void *daddr, 1514 + const void *saddr, unsigned int len) 1515 + { 1516 + struct bonding *bond = netdev_priv(bond_dev); 1517 + const struct header_ops *slave_ops; 1518 + struct slave *slave; 1519 + int ret = 0; 1520 + 1521 + rcu_read_lock(); 1522 + slave = rcu_dereference(bond->curr_active_slave); 1523 + if (slave) { 1524 + slave_ops = READ_ONCE(slave->dev->header_ops); 1525 + if (slave_ops && slave_ops->create) 1526 + ret = slave_ops->create(skb, slave->dev, 1527 + type, daddr, saddr, len); 1528 + } 1529 + rcu_read_unlock(); 1530 + return ret; 1531 + } 1532 + 1533 + static int bond_header_parse(const struct sk_buff *skb, 1534 + const struct net_device *dev, 1535 + unsigned char *haddr) 1536 + { 1537 + struct bonding *bond = netdev_priv(dev); 1538 + const struct header_ops *slave_ops; 1539 + struct slave *slave; 1540 + int ret = 0; 1541 + 1542 + rcu_read_lock(); 1543 + slave = rcu_dereference(bond->curr_active_slave); 1544 + if (slave) { 1545 + slave_ops = READ_ONCE(slave->dev->header_ops); 1546 + if (slave_ops && slave_ops->parse) 1547 + ret = slave_ops->parse(skb, slave->dev, haddr); 1548 + } 1549 + rcu_read_unlock(); 1550 + return ret; 1551 + } 1552 + 1553 + static const struct header_ops bond_header_ops = { 1554 + .create = bond_header_create, 1555 + .parse = bond_header_parse, 1556 + }; 1557 + 1512 1558 static void bond_setup_by_slave(struct net_device *bond_dev, 1513 1559 struct net_device *slave_dev) 1514 1560 { ··· 1562 1516 1563 1517 dev_close(bond_dev); 1564 1518 1565 - bond_dev->header_ops = slave_dev->header_ops; 1519 + bond_dev->header_ops = slave_dev->header_ops ? 1520 + &bond_header_ops : NULL; 1566 1521 1567 1522 bond_dev->type = slave_dev->type; 1568 1523 bond_dev->hard_header_len = slave_dev->hard_header_len; ··· 2848 2801 2849 2802 continue; 2850 2803 2804 + case BOND_LINK_FAIL: 2805 + case BOND_LINK_BACK: 2806 + slave_dbg(bond->dev, slave->dev, "link_new_state %d on slave\n", 2807 + slave->link_new_state); 2808 + continue; 2809 + 2851 2810 default: 2852 - slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n", 2811 + slave_err(bond->dev, slave->dev, "invalid link_new_state %d on slave\n", 2853 2812 slave->link_new_state); 2854 2813 bond_propose_link_state(slave, BOND_LINK_NOCHANGE); 2855 2814 ··· 3430 3377 } else if (is_arp) { 3431 3378 return bond_arp_rcv(skb, bond, slave); 3432 3379 #if IS_ENABLED(CONFIG_IPV6) 3433 - } else if (is_ipv6) { 3380 + } else if (is_ipv6 && likely(ipv6_mod_enabled())) { 3434 3381 return bond_na_rcv(skb, bond, slave); 3435 3382 #endif 3436 3383 } else { ··· 5122 5069 { 5123 5070 struct bond_up_slave *usable, *all; 5124 5071 5125 - usable = rtnl_dereference(bond->usable_slaves); 5126 - rcu_assign_pointer(bond->usable_slaves, usable_slaves); 5127 - kfree_rcu(usable, rcu); 5128 - 5129 5072 all = rtnl_dereference(bond->all_slaves); 5130 5073 rcu_assign_pointer(bond->all_slaves, all_slaves); 5131 5074 kfree_rcu(all, rcu); 5075 + 5076 + if (BOND_MODE(bond) == BOND_MODE_BROADCAST) { 5077 + kfree_rcu(usable_slaves, rcu); 5078 + return; 5079 + } 5080 + 5081 + usable = rtnl_dereference(bond->usable_slaves); 5082 + rcu_assign_pointer(bond->usable_slaves, usable_slaves); 5083 + kfree_rcu(usable, rcu); 5132 5084 } 5133 5085 5134 5086 static void bond_reset_slave_arr(struct bonding *bond)
+3
drivers/net/caif/caif_serial.c
··· 297 297 dev_close(ser->dev); 298 298 unregister_netdevice(ser->dev); 299 299 debugfs_deinit(ser); 300 + tty_kref_put(tty->link); 300 301 tty_kref_put(tty); 301 302 } 302 303 rtnl_unlock(); ··· 332 331 333 332 ser = netdev_priv(dev); 334 333 ser->tty = tty_kref_get(tty); 334 + tty_kref_get(tty->link); 335 335 ser->dev = dev; 336 336 debugfs_init(ser, tty); 337 337 tty->receive_room = 4096; ··· 341 339 rtnl_lock(); 342 340 result = register_netdevice(dev); 343 341 if (result) { 342 + tty_kref_put(tty->link); 344 343 tty_kref_put(tty); 345 344 rtnl_unlock(); 346 345 free_netdev(dev);
+1 -1
drivers/net/can/dev/calc_bittiming.c
··· 8 8 #include <linux/units.h> 9 9 #include <linux/can/dev.h> 10 10 11 - #define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ 11 + #define CAN_CALC_MAX_ERROR 500 /* max error 5% */ 12 12 13 13 /* CiA recommended sample points for Non Return to Zero encoding. */ 14 14 static int can_calc_sample_point_nrz(const struct can_bittiming *bt)
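The calc_bittiming constant keeps the same 5% ceiling but at what reads as a ten-times-finer unit: 50 in tenths of a percent becomes 500 in hundredths of a percent. Computing a rate error in that kind of integer fixed point, as bittiming code must (rate_error is an illustrative helper, not the driver's):

    #include <stdio.h>

    /* Relative error in hundredths of a percent (so 500 == 5%),
     * computed in integer arithmetic only. */
    static unsigned int rate_error(unsigned int want, unsigned int got)
    {
        unsigned int diff = want > got ? want - got : got - want;

        return (unsigned int)((unsigned long long)diff * 10000 / want);
    }

    int main(void)
    {
        /* 500 kbit/s requested, 487.5 kbit/s achievable: 2.5% off. */
        printf("error = %u (limit 500)\n", rate_error(500000, 487500));
        return 0;
    }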
+4 -1
drivers/net/can/spi/hi311x.c
··· 755 755 return ret; 756 756 757 757 mutex_lock(&priv->hi3110_lock); 758 - hi3110_power_enable(priv->transceiver, 1); 758 + ret = hi3110_power_enable(priv->transceiver, 1); 759 + if (ret) 760 + goto out_close_candev; 759 761 760 762 priv->force_quit = 0; 761 763 priv->tx_skb = NULL; ··· 792 790 hi3110_hw_sleep(spi); 793 791 out_close: 794 792 hi3110_power_enable(priv->transceiver, 0); 793 + out_close_candev: 795 794 close_candev(net); 796 795 mutex_unlock(&priv->hi3110_lock); 797 796 return ret;
+6 -2
drivers/net/dsa/bcm_sf2.c
··· 980 980 ret = bcm_sf2_sw_rst(priv); 981 981 if (ret) { 982 982 pr_err("%s: failed to software reset switch\n", __func__); 983 + if (!priv->wol_ports_mask) 984 + clk_disable_unprepare(priv->clk); 983 985 return ret; 984 986 } 985 987 986 988 bcm_sf2_crossbar_setup(priv); 987 989 988 990 ret = bcm_sf2_cfp_resume(ds); 989 - if (ret) 991 + if (ret) { 992 + if (!priv->wol_ports_mask) 993 + clk_disable_unprepare(priv->clk); 990 994 return ret; 991 - 995 + } 992 996 if (priv->hw_params.num_gphy == 1) 993 997 bcm_sf2_gphy_enable_set(ds, true); 994 998
+8 -3
drivers/net/dsa/microchip/ksz_ptp.c
··· 1108 1108 const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops; 1109 1109 struct ksz_irq *ptpirq = &port->ptpirq; 1110 1110 struct ksz_ptp_irq *ptpmsg_irq; 1111 + int ret; 1111 1112 1112 1113 ptpmsg_irq = &port->ptpmsg_irq[n]; 1113 1114 ptpmsg_irq->num = irq_create_mapping(ptpirq->domain, n); ··· 1120 1119 1121 1120 strscpy(ptpmsg_irq->name, name[n]); 1122 1121 1123 - return request_threaded_irq(ptpmsg_irq->num, NULL, 1124 - ksz_ptp_msg_thread_fn, IRQF_ONESHOT, 1125 - ptpmsg_irq->name, ptpmsg_irq); 1122 + ret = request_threaded_irq(ptpmsg_irq->num, NULL, 1123 + ksz_ptp_msg_thread_fn, IRQF_ONESHOT, 1124 + ptpmsg_irq->name, ptpmsg_irq); 1125 + if (ret) 1126 + irq_dispose_mapping(ptpmsg_irq->num); 1127 + 1128 + return ret; 1126 1129 } 1127 1130 1128 1131 int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
-1
drivers/net/dsa/mxl862xx/mxl862xx.c
··· 149 149 return -ENOMEM; 150 150 151 151 bus->priv = priv; 152 - ds->user_mii_bus = bus; 153 152 bus->name = KBUILD_MODNAME "-mii"; 154 153 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev)); 155 154 bus->read_c45 = mxl862xx_phy_read_c45_mii_bus;
+1 -2
drivers/net/dsa/realtek/rtl8365mb.c
··· 1480 1480 1481 1481 stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] + 1482 1482 cnt[RTL8365MB_MIB_ifInMulticastPkts] + 1483 - cnt[RTL8365MB_MIB_ifInBroadcastPkts] - 1484 - cnt[RTL8365MB_MIB_ifOutDiscards]; 1483 + cnt[RTL8365MB_MIB_ifInBroadcastPkts]; 1485 1484 1486 1485 stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] + 1487 1486 cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+3 -3
drivers/net/dsa/realtek/rtl8366rb-leds.c
··· 12 12 case 0: 13 13 return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); 14 14 case 1: 15 - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); 15 + return FIELD_PREP(RTL8366RB_LED_X_1_CTRL_MASK, BIT(port)); 16 16 case 2: 17 - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); 17 + return FIELD_PREP(RTL8366RB_LED_2_X_CTRL_MASK, BIT(port)); 18 18 case 3: 19 - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); 19 + return FIELD_PREP(RTL8366RB_LED_X_3_CTRL_MASK, BIT(port)); 20 20 default: 21 21 return 0; 22 22 }
+3 -4
drivers/net/dsa/sja1105/sja1105_main.c
··· 2339 2339 goto out; 2340 2340 } 2341 2341 2342 + rc = sja1105_reload_cbs(priv); 2343 + 2344 + out: 2342 2345 dsa_switch_for_each_available_port(dp, ds) 2343 2346 if (dp->pl) 2344 2347 phylink_replay_link_end(dp->pl); 2345 2348 2346 - rc = sja1105_reload_cbs(priv); 2347 - if (rc < 0) 2348 - goto out; 2349 - out: 2350 2349 mutex_unlock(&priv->mgmt_lock); 2351 2350 mutex_unlock(&priv->fdb_lock); 2352 2351
-1
drivers/net/ethernet/airoha/airoha_eth.c
··· 3083 3083 if (!port) 3084 3084 continue; 3085 3085 3086 - airoha_dev_stop(port->dev); 3087 3086 unregister_netdev(port->dev); 3088 3087 airoha_metadata_dst_free(port); 3089 3088 }
+10 -9
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
··· 1271 1271 if (ret) 1272 1272 goto err_napi; 1273 1273 1274 + /* Reset the phy settings */ 1275 + ret = xgbe_phy_reset(pdata); 1276 + if (ret) 1277 + goto err_irqs; 1278 + 1279 + /* Start the phy */ 1274 1280 ret = phy_if->phy_start(pdata); 1275 1281 if (ret) 1276 1282 goto err_irqs; 1277 1283 1278 1284 hw_if->enable_tx(pdata); 1279 1285 hw_if->enable_rx(pdata); 1286 + /* Synchronize flag with hardware state after enabling TX/RX. 1287 + * This prevents stale state after device restart cycles. 1288 + */ 1289 + pdata->data_path_stopped = false; 1280 1290 1281 1291 udp_tunnel_nic_reset_ntf(netdev); 1282 - 1283 - /* Reset the phy settings */ 1284 - ret = xgbe_phy_reset(pdata); 1285 - if (ret) 1286 - goto err_txrx; 1287 1292 1288 1293 netif_tx_start_all_queues(netdev); 1289 1294 ··· 1298 1293 clear_bit(XGBE_STOPPED, &pdata->dev_state); 1299 1294 1300 1295 return 0; 1301 - 1302 - err_txrx: 1303 - hw_if->disable_rx(pdata); 1304 - hw_if->disable_tx(pdata); 1305 1296 1306 1297 err_irqs: 1307 1298 xgbe_free_irqs(pdata);
+75 -7
drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
··· 1942 1942 static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata) 1943 1943 { 1944 1944 struct xgbe_phy_data *phy_data = pdata->phy_data; 1945 - unsigned int reg; 1945 + int reg; 1946 1946 1947 1947 /* step 2: force PCS to send RX_ADAPT Req to PHY */ 1948 1948 XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4, ··· 1964 1964 1965 1965 /* Step 4: Check for Block lock */ 1966 1966 1967 - /* Link status is latched low, so read once to clear 1968 - * and then read again to get current state 1967 + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); 1968 + if (reg < 0) 1969 + goto set_mode; 1970 + 1971 + /* Link status is latched low so that momentary link drops 1972 + * can be detected. If link was already down read again 1973 + * to get the latest state. 1969 1974 */ 1970 - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); 1971 - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); 1975 + if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) { 1976 + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); 1977 + if (reg < 0) 1978 + goto set_mode; 1979 + } 1980 + 1972 1981 if (reg & MDIO_STAT1_LSTATUS) { 1973 1982 /* If the block lock is found, update the helpers 1974 1983 * and declare the link up ··· 2015 2006 2016 2007 /* perform rx adaptation */ 2017 2008 xgbe_rx_adaptation(pdata); 2009 + } 2010 + 2011 + /* 2012 + * xgbe_phy_stop_data_path - Stop TX/RX to prevent packet corruption 2013 + * @pdata: driver private data 2014 + * 2015 + * This function stops the data path (TX and RX) to prevent packet 2016 + * corruption during critical PHY operations like RX adaptation. 2017 + * Must be called before initiating RX adaptation when link goes down. 2018 + */ 2019 + static void xgbe_phy_stop_data_path(struct xgbe_prv_data *pdata) 2020 + { 2021 + if (pdata->data_path_stopped) 2022 + return; 2023 + 2024 + /* Stop TX/RX to prevent packet corruption during RX adaptation */ 2025 + pdata->hw_if.disable_tx(pdata); 2026 + pdata->hw_if.disable_rx(pdata); 2027 + pdata->data_path_stopped = true; 2028 + 2029 + netif_dbg(pdata, link, pdata->netdev, 2030 + "stopping data path for RX adaptation\n"); 2031 + } 2032 + 2033 + /* 2034 + * xgbe_phy_start_data_path - Re-enable TX/RX after RX adaptation 2035 + * @pdata: driver private data 2036 + * 2037 + * This function re-enables the data path (TX and RX) after RX adaptation 2038 + * has completed successfully. Only called when link is confirmed up. 2039 + */ 2040 + static void xgbe_phy_start_data_path(struct xgbe_prv_data *pdata) 2041 + { 2042 + if (!pdata->data_path_stopped) 2043 + return; 2044 + 2045 + pdata->hw_if.enable_rx(pdata); 2046 + pdata->hw_if.enable_tx(pdata); 2047 + pdata->data_path_stopped = false; 2048 + 2049 + netif_dbg(pdata, link, pdata->netdev, 2050 + "restarting data path after RX adaptation\n"); 2018 2051 } 2019 2052 2020 2053 static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata) ··· 2852 2801 if (pdata->en_rx_adap) { 2853 2802 /* if the link is available and adaptation is done, 2854 2803 * declare link up 2804 + * 2805 + * Note: When link is up and adaptation is done, we can 2806 + * safely re-enable the data path if it was stopped 2807 + * for adaptation. 2855 2808 */ 2856 - if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done) 2809 + if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done) { 2810 + xgbe_phy_start_data_path(pdata); 2857 2811 return 1; 2812 + } 2858 2813 /* If either link is not available or adaptation is not done, 2859 2814 * retrigger the adaptation logic. 
(if the mode is not set, 2860 2815 * then issue mailbox command first) 2861 2816 */ 2817 + 2818 + /* CRITICAL: Stop data path BEFORE triggering RX adaptation 2819 + * to prevent CRC errors from packets corrupted during 2820 + * the adaptation process. This is especially important 2821 + * when AN is OFF in 10G KR mode. 2822 + */ 2823 + xgbe_phy_stop_data_path(pdata); 2824 + 2862 2825 if (pdata->mode_set) { 2863 2826 xgbe_phy_rx_adaptation(pdata); 2864 2827 } else { ··· 2880 2815 xgbe_phy_set_mode(pdata, phy_data->cur_mode); 2881 2816 } 2882 2817 2883 - if (pdata->rx_adapt_done) 2818 + if (pdata->rx_adapt_done) { 2819 + /* Adaptation complete, safe to re-enable data path */ 2820 + xgbe_phy_start_data_path(pdata); 2884 2821 return 1; 2822 + } 2885 2823 } else if (reg & MDIO_STAT1_LSTATUS) 2886 2824 return 1; 2887 2825
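The xgbe-phy-v2 additions wrap the TX/RX disable/enable pair in a data_path_stopped flag so the link-status poll can invoke them unconditionally on every pass without double-disabling or spuriously re-enabling. The idempotent shape, reduced to a standalone sketch (puts() standing in for the hardware ops):

    #include <stdbool.h>
    #include <stdio.h>

    static bool stopped;

    /* Idempotent stop/start: the flag makes repeated calls harmless. */
    static void stop_data_path(void)
    {
        if (stopped)
            return;
        puts("disable tx/rx");
        stopped = true;
    }

    static void start_data_path(void)
    {
        if (!stopped)
            return;
        puts("enable rx/tx");
        stopped = false;
    }

    int main(void)
    {
        stop_data_path();
        stop_data_path();    /* no-op: already stopped */
        start_data_path();
        start_data_path();   /* no-op: already running */
        return 0;
    }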
+4
drivers/net/ethernet/amd/xgbe/xgbe.h
··· 1243 1243 bool en_rx_adap; 1244 1244 int rx_adapt_retries; 1245 1245 bool rx_adapt_done; 1246 + /* Flag to track if data path (TX/RX) was stopped for RX adaptation. 1247 + * This prevents packet corruption during the adaptation window. 1248 + */ 1249 + bool data_path_stopped; 1246 1250 bool mode_set; 1247 1251 bool sph; 1248 1252 };
+11
drivers/net/ethernet/arc/emac_main.c
··· 934 934 /* Set poll rate so that it polls every 1 ms */ 935 935 arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000); 936 936 937 + /* 938 + * Put the device into a known quiescent state before requesting 939 + * the IRQ. Clear only EMAC interrupt status bits here; leave the 940 + * MDIO completion bit alone and avoid writing TXPL_MASK, which is 941 + * used to force TX polling rather than acknowledge interrupts. 942 + */ 943 + arc_reg_set(priv, R_ENABLE, 0); 944 + arc_reg_set(priv, R_STATUS, RXINT_MASK | TXINT_MASK | ERR_MASK | 945 + TXCH_MASK | MSER_MASK | RXCR_MASK | 946 + RXFR_MASK | RXFL_MASK); 947 + 937 948 ndev->irq = irq; 938 949 dev_info(dev, "IRQ is %d\n", ndev->irq); 939 950
+2
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 2929 2929 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1); 2930 2930 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2); 2931 2931 2932 + if (type >= ARRAY_SIZE(bp->bs_trace)) 2933 + goto async_event_process_exit; 2932 2934 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset); 2933 2935 goto async_event_process_exit; 2934 2936 }
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 2146 2146 }; 2147 2147 2148 2148 #define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc) 2149 - #define BNXT_TRACE_MAX 11 2149 + #define BNXT_TRACE_MAX (DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE + 1) 2150 2150 2151 2151 struct bnxt_bs_trace_info { 2152 2152 u8 *magic_byte;
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 979 979 980 980 if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != 981 981 bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && 982 - netif_is_rxfh_configured(dev)) { 983 - netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n"); 982 + (netif_is_rxfh_configured(dev) || bp->num_rss_ctx)) { 983 + netdev_warn(dev, "RSS table size change required, RSS table entries must be default (with no additional RSS contexts present) to proceed\n"); 984 984 return -EINVAL; 985 985 } 986 986
+12 -19
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 1342 1342 } 1343 1343 } 1344 1344 1345 - void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, 1346 - bool tx_lpi_enabled) 1345 + void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) 1347 1346 { 1348 1347 struct bcmgenet_priv *priv = netdev_priv(dev); 1349 1348 u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; ··· 1362 1363 1363 1364 /* Enable EEE and switch to a 27Mhz clock automatically */ 1364 1365 reg = bcmgenet_readl(priv->base + off); 1365 - if (tx_lpi_enabled) 1366 + if (enable) 1366 1367 reg |= TBUF_EEE_EN | TBUF_PM_EN; 1367 1368 else 1368 1369 reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); ··· 1381 1382 priv->clk_eee_enabled = false; 1382 1383 } 1383 1384 1384 - priv->eee.eee_enabled = enable; 1385 - priv->eee.tx_lpi_enabled = tx_lpi_enabled; 1386 1385 } 1387 1386 1388 1387 static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e) 1389 1388 { 1390 1389 struct bcmgenet_priv *priv = netdev_priv(dev); 1391 - struct ethtool_keee *p = &priv->eee; 1390 + int ret; 1392 1391 1393 1392 if (GENET_IS_V1(priv)) 1394 1393 return -EOPNOTSUPP; ··· 1394 1397 if (!dev->phydev) 1395 1398 return -ENODEV; 1396 1399 1397 - e->tx_lpi_enabled = p->tx_lpi_enabled; 1400 + ret = phy_ethtool_get_eee(dev->phydev, e); 1401 + if (ret) 1402 + return ret; 1403 + 1404 + /* tx_lpi_timer is maintained by the MAC hardware register; the 1405 + * PHY-level eee_cfg timer is not set for GENET. 1406 + */ 1398 1407 e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); 1399 1408 1400 - return phy_ethtool_get_eee(dev->phydev, e); 1409 + return 0; 1401 1410 } 1402 1411 1403 1412 static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e) 1404 1413 { 1405 1414 struct bcmgenet_priv *priv = netdev_priv(dev); 1406 - struct ethtool_keee *p = &priv->eee; 1407 - bool active; 1408 1415 1409 1416 if (GENET_IS_V1(priv)) 1410 1417 return -EOPNOTSUPP; ··· 1416 1415 if (!dev->phydev) 1417 1416 return -ENODEV; 1418 1417 1419 - p->eee_enabled = e->eee_enabled; 1420 - 1421 - if (!p->eee_enabled) { 1422 - bcmgenet_eee_enable_set(dev, false, false); 1423 - } else { 1424 - active = phy_init_eee(dev->phydev, false) >= 0; 1425 - bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); 1426 - bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled); 1427 - } 1418 + bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); 1428 1419 1429 1420 return phy_ethtool_set_eee(dev->phydev, e); 1430 1421 }
+1 -4
drivers/net/ethernet/broadcom/genet/bcmgenet.h
··· 665 665 u8 sopass[SOPASS_MAX]; 666 666 667 667 struct bcmgenet_mib_counters mib; 668 - 669 - struct ethtool_keee eee; 670 668 }; 671 669 672 670 static inline bool bcmgenet_has_40bits(struct bcmgenet_priv *priv) ··· 747 749 int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, 748 750 enum bcmgenet_power_mode mode); 749 751 750 - void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, 751 - bool tx_lpi_enabled); 752 + void bcmgenet_eee_enable_set(struct net_device *dev, bool enable); 752 753 753 754 #endif /* __BCMGENET_H__ */
+1 -1
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
··· 123 123 while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS) 124 124 & RBUF_STATUS_WOL)) { 125 125 retries++; 126 - if (retries > 5) { 126 + if (retries > 50) { 127 127 netdev_crit(dev, "polling wol mode timeout\n"); 128 128 return -ETIMEDOUT; 129 129 }
+5 -5
drivers/net/ethernet/broadcom/genet/bcmmii.c
··· 29 29 struct bcmgenet_priv *priv = netdev_priv(dev); 30 30 struct phy_device *phydev = dev->phydev; 31 31 u32 reg, cmd_bits = 0; 32 - bool active; 33 32 34 33 /* speed */ 35 34 if (phydev->speed == SPEED_1000) ··· 89 90 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 90 91 spin_unlock_bh(&priv->reg_lock); 91 92 92 - active = phy_init_eee(phydev, 0) >= 0; 93 - bcmgenet_eee_enable_set(dev, 94 - priv->eee.eee_enabled && active, 95 - priv->eee.tx_lpi_enabled); 96 93 } 97 94 98 95 /* setup netdev link state when PHY link status change and ··· 107 112 reg &= ~RGMII_LINK; 108 113 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); 109 114 } 115 + 116 + bcmgenet_eee_enable_set(dev, phydev->enable_tx_lpi); 110 117 111 118 phy_print_status(phydev); 112 119 } ··· 408 411 409 412 /* Indicate that the MAC is responsible for PHY PM */ 410 413 dev->phydev->mac_managed_pm = true; 414 + 415 + if (!GENET_IS_V1(priv)) 416 + phy_support_eee(dev->phydev); 411 417 412 418 return 0; 413 419 }
+11
drivers/net/ethernet/broadcom/tg3.c
··· 17029 17029 return err; 17030 17030 } 17031 17031 17032 + static int tg3_is_default_mac_address(u8 *addr) 17033 + { 17034 + static const u8 default_mac_address[ETH_ALEN] = { 0x00, 0x10, 0x18, 0x00, 0x00, 0x00 }; 17035 + 17036 + return ether_addr_equal(default_mac_address, addr); 17037 + } 17038 + 17032 17039 static int tg3_get_device_address(struct tg3 *tp, u8 *addr) 17033 17040 { 17034 17041 u32 hi, lo, mac_offset; ··· 17109 17102 17110 17103 if (!is_valid_ether_addr(addr)) 17111 17104 return -EINVAL; 17105 + 17106 + if (tg3_is_default_mac_address(addr)) 17107 + return device_get_mac_address(&tp->pdev->dev, addr); 17108 + 17112 17109 return 0; 17113 17110 } 17114 17111
+117 -7
drivers/net/ethernet/cadence/macb_main.c
··· 36 36 #include <linux/tcp.h> 37 37 #include <linux/types.h> 38 38 #include <linux/udp.h> 39 + #include <linux/gcd.h> 39 40 #include <net/pkt_sched.h> 40 41 #include "macb.h" 41 42 ··· 669 668 netif_tx_stop_all_queues(ndev); 670 669 } 671 670 671 + /* Use juggling algorithm to left rotate tx ring and tx skb array */ 672 + static void gem_shuffle_tx_one_ring(struct macb_queue *queue) 673 + { 674 + unsigned int head, tail, count, ring_size, desc_size; 675 + struct macb_tx_skb tx_skb, *skb_curr, *skb_next; 676 + struct macb_dma_desc *desc_curr, *desc_next; 677 + unsigned int i, cycles, shift, curr, next; 678 + struct macb *bp = queue->bp; 679 + unsigned char desc[24]; 680 + unsigned long flags; 681 + 682 + desc_size = macb_dma_desc_get_size(bp); 683 + 684 + if (WARN_ON_ONCE(desc_size > ARRAY_SIZE(desc))) 685 + return; 686 + 687 + spin_lock_irqsave(&queue->tx_ptr_lock, flags); 688 + head = queue->tx_head; 689 + tail = queue->tx_tail; 690 + ring_size = bp->tx_ring_size; 691 + count = CIRC_CNT(head, tail, ring_size); 692 + 693 + if (!(tail % ring_size)) 694 + goto unlock; 695 + 696 + if (!count) { 697 + queue->tx_head = 0; 698 + queue->tx_tail = 0; 699 + goto unlock; 700 + } 701 + 702 + shift = tail % ring_size; 703 + cycles = gcd(ring_size, shift); 704 + 705 + for (i = 0; i < cycles; i++) { 706 + memcpy(&desc, macb_tx_desc(queue, i), desc_size); 707 + memcpy(&tx_skb, macb_tx_skb(queue, i), 708 + sizeof(struct macb_tx_skb)); 709 + 710 + curr = i; 711 + next = (curr + shift) % ring_size; 712 + 713 + while (next != i) { 714 + desc_curr = macb_tx_desc(queue, curr); 715 + desc_next = macb_tx_desc(queue, next); 716 + 717 + memcpy(desc_curr, desc_next, desc_size); 718 + 719 + if (next == ring_size - 1) 720 + desc_curr->ctrl &= ~MACB_BIT(TX_WRAP); 721 + if (curr == ring_size - 1) 722 + desc_curr->ctrl |= MACB_BIT(TX_WRAP); 723 + 724 + skb_curr = macb_tx_skb(queue, curr); 725 + skb_next = macb_tx_skb(queue, next); 726 + memcpy(skb_curr, skb_next, sizeof(struct macb_tx_skb)); 727 + 728 + curr = next; 729 + next = (curr + shift) % ring_size; 730 + } 731 + 732 + desc_curr = macb_tx_desc(queue, curr); 733 + memcpy(desc_curr, &desc, desc_size); 734 + if (i == ring_size - 1) 735 + desc_curr->ctrl &= ~MACB_BIT(TX_WRAP); 736 + if (curr == ring_size - 1) 737 + desc_curr->ctrl |= MACB_BIT(TX_WRAP); 738 + memcpy(macb_tx_skb(queue, curr), &tx_skb, 739 + sizeof(struct macb_tx_skb)); 740 + } 741 + 742 + queue->tx_head = count; 743 + queue->tx_tail = 0; 744 + 745 + /* Make descriptor updates visible to hardware */ 746 + wmb(); 747 + 748 + unlock: 749 + spin_unlock_irqrestore(&queue->tx_ptr_lock, flags); 750 + } 751 + 752 + /* Rotate the queue so that the tail is at index 0 */ 753 + static void gem_shuffle_tx_rings(struct macb *bp) 754 + { 755 + struct macb_queue *queue; 756 + int q; 757 + 758 + for (q = 0, queue = bp->queues; q < bp->num_queues; q++, queue++) 759 + gem_shuffle_tx_one_ring(queue); 760 + } 761 + 672 762 static void macb_mac_link_up(struct phylink_config *config, 673 763 struct phy_device *phy, 674 764 unsigned int mode, phy_interface_t interface, ··· 798 706 ctrl |= MACB_BIT(PAE); 799 707 800 708 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 801 - queue->tx_head = 0; 802 - queue->tx_tail = 0; 803 709 queue_writel(queue, IER, 804 710 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); 805 711 } ··· 811 721 812 722 spin_unlock_irqrestore(&bp->lock, flags); 813 723 814 - if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) 724 + if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { 815 725 
macb_set_tx_clk(bp, speed); 726 + gem_shuffle_tx_rings(bp); 727 + } 816 728 817 729 /* Enable Rx and Tx; Enable PTP unicast */ 818 730 ctrl = macb_readl(bp, NCR); ··· 2669 2577 desc->ctrl = 0; 2670 2578 } 2671 2579 2580 + static void gem_init_rx_ring(struct macb_queue *queue) 2581 + { 2582 + queue->rx_tail = 0; 2583 + queue->rx_prepared_head = 0; 2584 + 2585 + gem_rx_refill(queue); 2586 + } 2587 + 2672 2588 static void gem_init_rings(struct macb *bp) 2673 2589 { 2674 2590 struct macb_queue *queue; ··· 2694 2594 queue->tx_head = 0; 2695 2595 queue->tx_tail = 0; 2696 2596 2697 - queue->rx_tail = 0; 2698 - queue->rx_prepared_head = 0; 2699 - 2700 - gem_rx_refill(queue); 2597 + gem_init_rx_ring(queue); 2701 2598 } 2702 2599 2703 2600 macb_init_tieoff(bp); ··· 3982 3885 { 3983 3886 struct macb *bp = netdev_priv(netdev); 3984 3887 int ret; 3888 + 3889 + if (!(netdev->hw_features & NETIF_F_NTUPLE)) 3890 + return -EOPNOTSUPP; 3985 3891 3986 3892 switch (cmd->cmd) { 3987 3893 case ETHTOOL_SRXCLSRLINS: ··· 5955 5855 rtnl_unlock(); 5956 5856 } 5957 5857 5858 + if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) 5859 + macb_init_buffers(bp); 5860 + 5958 5861 for (q = 0, queue = bp->queues; q < bp->num_queues; 5959 5862 ++q, ++queue) { 5863 + if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { 5864 + if (macb_is_gem(bp)) 5865 + gem_init_rx_ring(queue); 5866 + else 5867 + macb_init_rx_ring(queue); 5868 + } 5869 + 5960 5870 napi_enable(&queue->napi_rx); 5961 5871 napi_enable(&queue->napi_tx); 5962 5872 }
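The macb hunk rotates the TX ring in place with the classic juggling algorithm: gcd(ring_size, shift) independent cycles, each element moved exactly once, so the queue tail lands at index 0 before the hardware restarts (the driver additionally fixes up the TX_WRAP bit on whichever descriptor becomes last). The core rotation on a plain array, stripped of the descriptor bookkeeping:

    #include <stdio.h>

    static unsigned int gcd(unsigned int a, unsigned int b)
    {
        while (b) {
            unsigned int t = a % b;
            a = b;
            b = t;
        }
        return a;
    }

    /* Left-rotate buf[0..n-1] by k using gcd(n, k) juggling cycles. */
    static void rotate_left(int *buf, unsigned int n, unsigned int k)
    {
        unsigned int cycles, i, curr, next;
        int tmp;

        k %= n;
        if (!k)
            return;

        cycles = gcd(n, k);
        for (i = 0; i < cycles; i++) {
            tmp = buf[i];
            curr = i;
            next = (curr + k) % n;
            while (next != i) {
                buf[curr] = buf[next];  /* each slot written exactly once */
                curr = next;
                next = (curr + k) % n;
            }
            buf[curr] = tmp;            /* close the cycle */
        }
    }

    int main(void)
    {
        int ring[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
        unsigned int i;

        rotate_left(ring, 8, 3);    /* old index 3 lands at index 0 */
        for (i = 0; i < 8; i++)
            printf("%d ", ring[i]); /* 3 4 5 6 7 0 1 2 */
        printf("\n");
        return 0;
    }

With n = 8 and shift = 3 the gcd is 1, so a single cycle visits all eight slots.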
+3 -1
drivers/net/ethernet/cadence/macb_ptp.c
··· 357 357 { 358 358 struct macb *bp = netdev_priv(ndev); 359 359 360 - if (bp->ptp_clock) 360 + if (bp->ptp_clock) { 361 361 ptp_clock_unregister(bp->ptp_clock); 362 + bp->ptp_clock = NULL; 363 + } 362 364 363 365 gem_ptp_clear_timer(bp); 364 366
+10 -14
drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
··· 333 333 334 334 mdio_node = of_get_child_by_name(np, "mdio"); 335 335 if (!mdio_node) 336 - return 0; 336 + return -ENODEV; 337 337 338 338 phy_node = of_get_next_child(mdio_node, NULL); 339 - if (!phy_node) 339 + if (!phy_node) { 340 + err = -ENODEV; 340 341 goto of_put_mdio_node; 342 + } 341 343 342 344 err = of_property_read_u32(phy_node, "reg", &addr); 343 345 if (err) ··· 425 423 426 424 addr = netc_get_phy_addr(gchild); 427 425 if (addr < 0) { 426 + if (addr == -ENODEV) 427 + continue; 428 + 428 429 dev_err(dev, "Failed to get PHY address\n"); 429 430 return addr; 430 431 } ··· 437 432 "Find same PHY address in EMDIO and ENETC node\n"); 438 433 return -EINVAL; 439 434 } 440 - 441 - /* The default value of LaBCR[MDIO_PHYAD_PRTAD ] is 442 - * 0, so no need to set the register. 443 - */ 444 - if (!addr) 445 - continue; 446 435 447 436 switch (bus_devfn) { 448 437 case IMX95_ENETC0_BUS_DEVFN: ··· 577 578 578 579 addr = netc_get_phy_addr(np); 579 580 if (addr < 0) { 581 + if (addr == -ENODEV) 582 + return 0; 583 + 580 584 dev_err(dev, "Failed to get PHY address\n"); 581 585 return addr; 582 586 } 583 - 584 - /* The default value of LaBCR[MDIO_PHYAD_PRTAD] is 0, 585 - * so no need to set the register. 586 - */ 587 - if (!addr) 588 - return 0; 589 587 590 588 if (phy_mask & BIT(addr)) { 591 589 dev_err(dev,
-2
drivers/net/ethernet/intel/e1000/e1000_main.c
··· 2952 2952 dma_error: 2953 2953 dev_err(&pdev->dev, "TX DMA map failed\n"); 2954 2954 buffer_info->dma = 0; 2955 - if (count) 2956 - count--; 2957 2955 2958 2956 while (count--) { 2959 2957 if (i == 0)
-2
drivers/net/ethernet/intel/e1000e/netdev.c
··· 5652 5652 dma_error: 5653 5653 dev_err(&pdev->dev, "Tx DMA map failed\n"); 5654 5654 buffer_info->dma = 0; 5655 - if (count) 5656 - count--; 5657 5655 5658 5656 while (count--) { 5659 5657 if (i == 0)
+7 -7
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
··· 3833 3833 cfilter.n_proto = ETH_P_IP; 3834 3834 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3835 3835 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip, 3836 - ARRAY_SIZE(tcf.dst_ip)); 3837 - else if (mask.src_ip[0] & tcf.dst_ip[0]) 3836 + sizeof(cfilter.ip.v4.dst_ip)); 3837 + else if (mask.src_ip[0] & tcf.src_ip[0]) 3838 3838 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip, 3839 - ARRAY_SIZE(tcf.dst_ip)); 3839 + sizeof(cfilter.ip.v4.src_ip)); 3840 3840 break; 3841 3841 case VIRTCHNL_TCP_V6_FLOW: 3842 3842 cfilter.n_proto = ETH_P_IPV6; ··· 3891 3891 /* for ipv6, mask is set for all sixteen bytes (4 words) */ 3892 3892 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3]) 3893 3893 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6, 3894 - sizeof(cfilter.ip.v6.src_ip6))) 3894 + sizeof(cfilter.ip.v6.dst_ip6))) 3895 3895 continue; 3896 3896 if (mask.vlan_id) 3897 3897 if (cfilter.vlan_id != cf->vlan_id) ··· 3979 3979 cfilter->n_proto = ETH_P_IP; 3980 3980 if (mask.dst_ip[0] & tcf.dst_ip[0]) 3981 3981 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip, 3982 - ARRAY_SIZE(tcf.dst_ip)); 3983 - else if (mask.src_ip[0] & tcf.dst_ip[0]) 3982 + sizeof(cfilter->ip.v4.dst_ip)); 3983 + else if (mask.src_ip[0] & tcf.src_ip[0]) 3984 3984 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip, 3985 - ARRAY_SIZE(tcf.dst_ip)); 3985 + sizeof(cfilter->ip.v4.src_ip)); 3986 3986 break; 3987 3987 case VIRTCHNL_TCP_V6_FLOW: 3988 3988 cfilter->n_proto = ETH_P_IPV6;
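The i40e hunk above fixes two paired slips: the memcpy length used ARRAY_SIZE() (an element count, 4 here) where a byte count (16) was needed, and the src_ip branch tested mask.src_ip against tcf.dst_ip instead of tcf.src_ip (plus a copy-pasted src_ip6 inside a v6 dst comparison). A standalone sketch of the ARRAY_SIZE pitfall, using a hypothetical struct rather than the virtchnl layout:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct filter { /* hypothetical, not the virtchnl layout */
        uint32_t dst_ip[4];
};

int main(void)
{
        struct filter src = { .dst_ip = { 0x11111111, 0x22222222,
                                          0x33333333, 0x44444444 } };
        struct filter dst = { 0 };

        /* Buggy: ARRAY_SIZE is 4 *elements*, so only 4 bytes move. */
        memcpy(dst.dst_ip, src.dst_ip, ARRAY_SIZE(src.dst_ip));
        assert(dst.dst_ip[1] == 0); /* words 1..3 were never copied */

        /* Fixed: sizeof gives the full 16-byte length. */
        memcpy(dst.dst_ip, src.dst_ip, sizeof(dst.dst_ip));
        assert(dst.dst_ip[3] == 0x44444444);
        return 0;
}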
+1 -2
drivers/net/ethernet/intel/iavf/iavf.h
··· 260 260 struct work_struct adminq_task; 261 261 struct work_struct finish_config; 262 262 wait_queue_head_t down_waitqueue; 263 - wait_queue_head_t reset_waitqueue; 264 263 wait_queue_head_t vc_waitqueue; 265 264 struct iavf_q_vector *q_vectors; 266 265 struct list_head vlan_filter_list; ··· 625 626 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter); 626 627 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, 627 628 const u8 *macaddr); 628 - int iavf_wait_for_reset(struct iavf_adapter *adapter); 629 + void iavf_reset_step(struct iavf_adapter *adapter); 629 630 #endif /* _IAVF_H_ */
+6 -13
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
··· 492 492 { 493 493 struct iavf_adapter *adapter = netdev_priv(netdev); 494 494 u32 new_rx_count, new_tx_count; 495 - int ret = 0; 496 495 497 496 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 498 497 return -EINVAL; ··· 536 537 } 537 538 538 539 if (netif_running(netdev)) { 539 - iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); 540 - ret = iavf_wait_for_reset(adapter); 541 - if (ret) 542 - netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset"); 540 + adapter->flags |= IAVF_FLAG_RESET_NEEDED; 541 + iavf_reset_step(adapter); 543 542 } 544 543 545 - return ret; 544 + return 0; 546 545 } 547 546 548 547 /** ··· 1720 1723 { 1721 1724 struct iavf_adapter *adapter = netdev_priv(netdev); 1722 1725 u32 num_req = ch->combined_count; 1723 - int ret = 0; 1724 1726 1725 1727 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) && 1726 1728 adapter->num_tc) { ··· 1741 1745 1742 1746 adapter->num_req_queues = num_req; 1743 1747 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED; 1744 - iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); 1748 + adapter->flags |= IAVF_FLAG_RESET_NEEDED; 1749 + iavf_reset_step(adapter); 1745 1750 1746 - ret = iavf_wait_for_reset(adapter); 1747 - if (ret) 1748 - netdev_warn(netdev, "Changing channel count timeout or interrupted waiting for reset"); 1749 - 1750 - return ret; 1751 + return 0; 1751 1752 } 1752 1753 1753 1754 /**
+34 -56
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 186 186 } 187 187 188 188 /** 189 - * iavf_wait_for_reset - Wait for reset to finish. 190 - * @adapter: board private structure 191 - * 192 - * Returns 0 if reset finished successfully, negative on timeout or interrupt. 193 - */ 194 - int iavf_wait_for_reset(struct iavf_adapter *adapter) 195 - { 196 - int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue, 197 - !iavf_is_reset_in_progress(adapter), 198 - msecs_to_jiffies(5000)); 199 - 200 - /* If ret < 0 then it means wait was interrupted. 201 - * If ret == 0 then it means we got a timeout while waiting 202 - * for reset to finish. 203 - * If ret > 0 it means reset has finished. 204 - */ 205 - if (ret > 0) 206 - return 0; 207 - else if (ret < 0) 208 - return -EINTR; 209 - else 210 - return -EBUSY; 211 - } 212 - 213 - /** 214 189 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code 215 190 * @hw: pointer to the HW structure 216 191 * @mem: ptr to mem struct to fill out ··· 757 782 adapter->num_vlan_filters++; 758 783 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER); 759 784 } else if (f->state == IAVF_VLAN_REMOVE) { 760 - /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed. 761 - * We can safely only change the state here. 785 + /* Re-add the filter since we cannot tell whether the 786 + * pending delete has already been processed by the PF. 787 + * A duplicate add is harmless. 762 788 */ 763 - f->state = IAVF_VLAN_ACTIVE; 789 + f->state = IAVF_VLAN_ADD; 790 + iavf_schedule_aq_request(adapter, 791 + IAVF_FLAG_AQ_ADD_VLAN_FILTER); 764 792 } 765 793 766 794 clearout: ··· 3014 3036 3015 3037 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; 3016 3038 3039 + iavf_ptp_release(adapter); 3040 + 3017 3041 /* We don't use netif_running() because it may be true prior to 3018 3042 * ndo_open() returning, so we can't assume it means all our open 3019 3043 * tasks have finished, since we're not holding the rtnl_lock here. ··· 3091 3111 } 3092 3112 3093 3113 /** 3094 - * iavf_reset_task - Call-back task to handle hardware reset 3095 - * @work: pointer to work_struct 3114 + * iavf_reset_step - Perform the VF reset sequence 3115 + * @adapter: board private structure 3096 3116 * 3097 - * During reset we need to shut down and reinitialize the admin queue 3098 - * before we can use it to communicate with the PF again. We also clear 3099 - * and reinit the rings because that context is lost as well. 3100 - **/ 3101 - static void iavf_reset_task(struct work_struct *work) 3117 + * Requests a reset from PF, polls for completion, and reconfigures 3118 + * the driver. Caller must hold the netdev instance lock. 3119 + * 3120 + * This can sleep for several seconds while polling HW registers. 3121 + */ 3122 + void iavf_reset_step(struct iavf_adapter *adapter) 3102 3123 { 3103 - struct iavf_adapter *adapter = container_of(work, 3104 - struct iavf_adapter, 3105 - reset_task); 3106 3124 struct virtchnl_vf_resource *vfres = adapter->vf_res; 3107 3125 struct net_device *netdev = adapter->netdev; 3108 3126 struct iavf_hw *hw = &adapter->hw; ··· 3111 3133 int i = 0, err; 3112 3134 bool running; 3113 3135 3114 - netdev_lock(netdev); 3136 + netdev_assert_locked(netdev); 3115 3137 3116 3138 iavf_misc_irq_disable(adapter); 3117 3139 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) { ··· 3156 3178 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", 3157 3179 reg_val); 3158 3180 iavf_disable_vf(adapter); 3159 - netdev_unlock(netdev); 3160 3181 return; /* Do not attempt to reinit. It's dead, Jim. 
*/ 3161 3182 } 3162 3183 ··· 3167 3190 iavf_startup(adapter); 3168 3191 queue_delayed_work(adapter->wq, &adapter->watchdog_task, 3169 3192 msecs_to_jiffies(30)); 3170 - netdev_unlock(netdev); 3171 3193 return; 3172 3194 } 3173 3195 ··· 3186 3210 3187 3211 iavf_change_state(adapter, __IAVF_RESETTING); 3188 3212 adapter->flags &= ~IAVF_FLAG_RESET_PENDING; 3213 + 3214 + iavf_ptp_release(adapter); 3189 3215 3190 3216 /* free the Tx/Rx rings and descriptors, might be better to just 3191 3217 * re-use them sometime in the future ··· 3309 3331 3310 3332 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; 3311 3333 3312 - wake_up(&adapter->reset_waitqueue); 3313 - netdev_unlock(netdev); 3314 - 3315 3334 return; 3316 3335 reset_err: 3317 3336 if (running) { ··· 3317 3342 } 3318 3343 iavf_disable_vf(adapter); 3319 3344 3320 - netdev_unlock(netdev); 3321 3345 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 3346 + } 3347 + 3348 + static void iavf_reset_task(struct work_struct *work) 3349 + { 3350 + struct iavf_adapter *adapter = container_of(work, 3351 + struct iavf_adapter, 3352 + reset_task); 3353 + struct net_device *netdev = adapter->netdev; 3354 + 3355 + netdev_lock(netdev); 3356 + iavf_reset_step(adapter); 3357 + netdev_unlock(netdev); 3322 3358 } 3323 3359 3324 3360 /** ··· 4597 4611 static int iavf_change_mtu(struct net_device *netdev, int new_mtu) 4598 4612 { 4599 4613 struct iavf_adapter *adapter = netdev_priv(netdev); 4600 - int ret = 0; 4601 4614 4602 4615 netdev_dbg(netdev, "changing MTU from %d to %d\n", 4603 4616 netdev->mtu, new_mtu); 4604 4617 WRITE_ONCE(netdev->mtu, new_mtu); 4605 4618 4606 4619 if (netif_running(netdev)) { 4607 - iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); 4608 - ret = iavf_wait_for_reset(adapter); 4609 - if (ret < 0) 4610 - netdev_warn(netdev, "MTU change interrupted waiting for reset"); 4611 - else if (ret) 4612 - netdev_warn(netdev, "MTU change timed out waiting for reset"); 4620 + adapter->flags |= IAVF_FLAG_RESET_NEEDED; 4621 + iavf_reset_step(adapter); 4613 4622 } 4614 4623 4615 - return ret; 4624 + return 0; 4616 4625 } 4617 4626 4618 4627 /** ··· 5411 5430 5412 5431 /* Setup the wait queue for indicating transition to down status */ 5413 5432 init_waitqueue_head(&adapter->down_waitqueue); 5414 - 5415 - /* Setup the wait queue for indicating transition to running state */ 5416 - init_waitqueue_head(&adapter->reset_waitqueue); 5417 5433 5418 5434 /* Setup the wait queue for indicating virtchannel events */ 5419 5435 init_waitqueue_head(&adapter->vc_waitqueue);
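The iavf rework above retires the reset waitqueue: callers that already hold the netdev instance lock (ring, channel, and MTU changes) now run iavf_reset_step() inline instead of scheduling reset_task and sleeping up to five seconds for it, and reset_task itself shrinks to a lock-then-call wrapper. A rough pthread sketch of that shape (hypothetical names, not iavf code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

/* Must be entered with dev_lock held; may sleep polling hardware. */
static void reset_step(void)
{
        printf("resetting under lock\n");
}

/* ethtool/MTU path: already holds the lock, so call the step directly
 * instead of queueing work and waiting on a waitqueue. */
static void change_config(void)
{
        pthread_mutex_lock(&dev_lock);
        reset_step(); /* no waitqueue, no timeout handling */
        pthread_mutex_unlock(&dev_lock);
}

/* Async path (watchdog etc.): the work item takes the lock itself. */
static void *reset_task(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&dev_lock);
        reset_step();
        pthread_mutex_unlock(&dev_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        change_config();
        pthread_create(&t, NULL, reset_task, NULL);
        pthread_join(t, NULL);
        return 0;
}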
-1
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
··· 2736 2736 case VIRTCHNL_OP_ENABLE_QUEUES: 2737 2737 /* enable transmits */ 2738 2738 iavf_irq_enable(adapter, true); 2739 - wake_up(&adapter->reset_waitqueue); 2740 2739 adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED; 2741 2740 break; 2742 2741 case VIRTCHNL_OP_DISABLE_QUEUES:
+2 -2
drivers/net/ethernet/intel/ice/devlink/devlink.c
··· 1360 1360 1361 1361 cdev = pf->cdev_info; 1362 1362 if (!cdev) 1363 - return -ENODEV; 1363 + return -EOPNOTSUPP; 1364 1364 1365 1365 ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2); 1366 1366 ··· 1427 1427 1428 1428 cdev = pf->cdev_info; 1429 1429 if (!cdev) 1430 - return -ENODEV; 1430 + return -EOPNOTSUPP; 1431 1431 1432 1432 ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP); 1433 1433
+2
drivers/net/ethernet/intel/igc/igc.h
··· 781 781 struct kernel_hwtstamp_config *config, 782 782 struct netlink_ext_ack *extack); 783 783 void igc_ptp_tx_hang(struct igc_adapter *adapter); 784 + void igc_ptp_clear_xsk_tx_tstamp_queue(struct igc_adapter *adapter, 785 + u16 queue_id); 784 786 void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); 785 787 void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter); 786 788
+9 -5
drivers/net/ethernet/intel/igc/igc_main.c
··· 264 264 /* reset next_to_use and next_to_clean */ 265 265 tx_ring->next_to_use = 0; 266 266 tx_ring->next_to_clean = 0; 267 + 268 + /* Clear any lingering XSK TX timestamp requests */ 269 + if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) { 270 + struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); 271 + 272 + igc_ptp_clear_xsk_tx_tstamp_queue(adapter, tx_ring->queue_index); 273 + } 267 274 } 268 275 269 276 /** ··· 1737 1730 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb 1738 1731 * in order to meet this minimum size requirement. 1739 1732 */ 1740 - if (skb->len < 17) { 1741 - if (skb_padto(skb, 17)) 1742 - return NETDEV_TX_OK; 1743 - skb->len = 17; 1744 - } 1733 + if (skb_put_padto(skb, 17)) 1734 + return NETDEV_TX_OK; 1745 1735 1746 1736 return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); 1747 1737 }
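The igc transmit fix above replaces an open-coded minimum-length pad, skb_padto() followed by a bare skb->len = 17 assignment, with skb_put_padto(), which zero-fills the tail and updates the length bookkeeping in one step. A userspace analogue of that helper, on a hypothetical buffer type:

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct buf { /* hypothetical stand-in for an sk_buff */
        unsigned char data[64];
        size_t len;
};

/* Pad to min_len with zeroes and update the recorded length together,
 * the way skb_put_padto() does; the replaced code patched the length
 * field by hand after skb_padto(). */
static int put_padto(struct buf *b, size_t min_len)
{
        if (min_len > sizeof(b->data))
                return -1; /* will not fit; caller drops the frame */
        if (b->len < min_len) {
                memset(b->data + b->len, 0, min_len - b->len);
                b->len = min_len; /* length and payload stay in sync */
        }
        return 0;
}

int main(void)
{
        struct buf b = { .len = 10 };

        memset(b.data, 0xab, b.len);
        assert(put_padto(&b, 17) == 0);
        assert(b.len == 17 && b.data[16] == 0);
        return 0;
}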
+33
drivers/net/ethernet/intel/igc/igc_ptp.c
··· 577 577 spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); 578 578 } 579 579 580 + /** 581 + * igc_ptp_clear_xsk_tx_tstamp_queue - Clear pending XSK TX timestamps for a queue 582 + * @adapter: Board private structure 583 + * @queue_id: TX queue index to clear timestamps for 584 + * 585 + * Iterates over all TX timestamp registers and releases any pending 586 + * timestamp requests associated with the given TX queue. This is 587 + * called when an XDP pool is being disabled to ensure no stale 588 + * timestamp references remain. 589 + */ 590 + void igc_ptp_clear_xsk_tx_tstamp_queue(struct igc_adapter *adapter, u16 queue_id) 591 + { 592 + unsigned long flags; 593 + int i; 594 + 595 + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); 596 + 597 + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { 598 + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; 599 + 600 + if (tstamp->buffer_type != IGC_TX_BUFFER_TYPE_XSK) 601 + continue; 602 + if (tstamp->xsk_queue_index != queue_id) 603 + continue; 604 + if (!tstamp->xsk_tx_buffer) 605 + continue; 606 + 607 + igc_ptp_free_tx_buffer(adapter, tstamp); 608 + } 609 + 610 + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); 611 + } 612 + 580 613 static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) 581 614 { 582 615 struct igc_hw *hw = &adapter->hw;
+36 -13
drivers/net/ethernet/intel/libie/fwlog.c
··· 433 433 module = libie_find_module_by_dentry(fwlog->debugfs_modules, dentry); 434 434 if (module < 0) { 435 435 dev_info(dev, "unknown module\n"); 436 - return -EINVAL; 436 + count = -EINVAL; 437 + goto free_cmd_buf; 437 438 } 438 439 439 440 cnt = sscanf(cmd_buf, "%s", user_val); 440 - if (cnt != 1) 441 - return -EINVAL; 441 + if (cnt != 1) { 442 + count = -EINVAL; 443 + goto free_cmd_buf; 444 + } 442 445 443 446 log_level = sysfs_match_string(libie_fwlog_level_string, user_val); 444 447 if (log_level < 0) { 445 448 dev_info(dev, "unknown log level '%s'\n", user_val); 446 - return -EINVAL; 449 + count = -EINVAL; 450 + goto free_cmd_buf; 447 451 } 448 452 449 453 if (module != LIBIE_AQC_FW_LOG_ID_MAX) { ··· 461 457 for (i = 0; i < LIBIE_AQC_FW_LOG_ID_MAX; i++) 462 458 fwlog->cfg.module_entries[i].log_level = log_level; 463 459 } 460 + 461 + free_cmd_buf: 462 + kfree(cmd_buf); 464 463 465 464 return count; 466 465 } ··· 522 515 return PTR_ERR(cmd_buf); 523 516 524 517 ret = sscanf(cmd_buf, "%s", user_val); 525 - if (ret != 1) 526 - return -EINVAL; 518 + if (ret != 1) { 519 + count = -EINVAL; 520 + goto free_cmd_buf; 521 + } 527 522 528 523 ret = kstrtos16(user_val, 0, &nr_messages); 529 - if (ret) 530 - return ret; 524 + if (ret) { 525 + count = ret; 526 + goto free_cmd_buf; 527 + } 531 528 532 529 if (nr_messages < LIBIE_AQC_FW_LOG_MIN_RESOLUTION || 533 530 nr_messages > LIBIE_AQC_FW_LOG_MAX_RESOLUTION) { 534 531 dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n", 535 532 nr_messages, LIBIE_AQC_FW_LOG_MIN_RESOLUTION, 536 533 LIBIE_AQC_FW_LOG_MAX_RESOLUTION); 537 - return -EINVAL; 534 + count = -EINVAL; 535 + goto free_cmd_buf; 538 536 } 539 537 540 538 fwlog->cfg.log_resolution = nr_messages; 539 + 540 + free_cmd_buf: 541 + kfree(cmd_buf); 541 542 542 543 return count; 543 544 } ··· 603 588 return PTR_ERR(cmd_buf); 604 589 605 590 ret = sscanf(cmd_buf, "%s", user_val); 606 - if (ret != 1) 607 - return -EINVAL; 591 + if (ret != 1) { 592 + ret = -EINVAL; 593 + goto free_cmd_buf; 594 + } 608 595 609 596 ret = kstrtobool(user_val, &enable); 610 597 if (ret) ··· 641 624 */ 642 625 if (WARN_ON(ret != (ssize_t)count && ret >= 0)) 643 626 ret = -EIO; 627 + free_cmd_buf: 628 + kfree(cmd_buf); 644 629 645 630 return ret; 646 631 } ··· 701 682 return PTR_ERR(cmd_buf); 702 683 703 684 ret = sscanf(cmd_buf, "%s", user_val); 704 - if (ret != 1) 705 - return -EINVAL; 685 + if (ret != 1) { 686 + ret = -EINVAL; 687 + goto free_cmd_buf; 688 + } 706 689 707 690 index = sysfs_match_string(libie_fwlog_log_size, user_val); 708 691 if (index < 0) { ··· 733 712 */ 734 713 if (WARN_ON(ret != (ssize_t)count && ret >= 0)) 735 714 ret = -EIO; 715 + free_cmd_buf: 716 + kfree(cmd_buf); 736 717 737 718 return ret; 738 719 }
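Each of the four fwlog debugfs write handlers above leaked cmd_buf on its early-error returns; the fix funnels every post-allocation failure through a single free label. The idiom in isolation, as a hypothetical parser with strdup() standing in for the kernel-side copy from userspace:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long parse_level(const char *user_input)
{
        char word[32];
        char *cmd_buf;
        long ret;

        cmd_buf = strdup(user_input); /* stands in for memdup_user() */
        if (!cmd_buf)
                return -1;

        if (sscanf(cmd_buf, "%31s", word) != 1) {
                ret = -1;
                goto free_cmd_buf; /* the error path frees too */
        }

        ret = strtol(word, NULL, 0);

free_cmd_buf:
        free(cmd_buf);
        return ret;
}

int main(void)
{
        printf("%ld\n", parse_level("42 extra")); /* 42 */
        printf("%ld\n", parse_level(""));         /* -1 */
        return 0;
}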
+2 -2
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 5016 5016 if (priv->percpu_pools) 5017 5017 numbufs = port->nrxqs * 2; 5018 5018 5019 - if (change_percpu) 5019 + if (change_percpu && priv->global_tx_fc) 5020 5020 mvpp2_bm_pool_update_priv_fc(priv, false); 5021 5021 5022 5022 for (i = 0; i < numbufs; i++) ··· 5041 5041 mvpp2_open(port->dev); 5042 5042 } 5043 5043 5044 - if (change_percpu) 5044 + if (change_percpu && priv->global_tx_fc) 5045 5045 mvpp2_bm_pool_update_priv_fc(priv, true); 5046 5046 5047 5047 return 0;
+3 -3
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
··· 327 327 rvu_report_pair_end(fmsg); 328 328 break; 329 329 case NIX_AF_RVU_RAS: 330 - intr_val = nix_event_context->nix_af_rvu_err; 330 + intr_val = nix_event_context->nix_af_rvu_ras; 331 331 rvu_report_pair_start(fmsg, "NIX_AF_RAS"); 332 332 devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ", 333 - nix_event_context->nix_af_rvu_err); 333 + nix_event_context->nix_af_rvu_ras); 334 334 devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:"); 335 335 if (intr_val & BIT_ULL(34)) 336 336 devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S"); ··· 475 475 if (blkaddr < 0) 476 476 return blkaddr; 477 477 478 - if (nix_event_ctx->nix_af_rvu_int) 478 + if (nix_event_ctx->nix_af_rvu_ras) 479 479 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL); 480 480 481 481 return 0;
-1
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
··· 47 47 "SQ 0x%x: cc (0x%x) != pc (0x%x)\n", 48 48 sq->sqn, sq->cc, sq->pc); 49 49 sq->cc = 0; 50 - sq->dma_fifo_cc = 0; 51 50 sq->pc = 0; 52 51 } 53 52
+1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
··· 287 287 struct mlx5e_ipsec_dwork *dwork; 288 288 struct mlx5e_ipsec_limits limits; 289 289 u32 rx_mapped_id; 290 + u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)]; 290 291 }; 291 292 292 293 struct mlx5_accel_pol_xfrm_attrs {
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
··· 2912 2912 goto out; 2913 2913 2914 2914 peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp); 2915 - if (peer_priv) 2915 + if (peer_priv && peer_priv->ipsec) 2916 2916 complete_all(&peer_priv->ipsec->comp); 2917 2917 2918 2918 mlx5_devcom_for_each_peer_end(priv->devcom);
+23 -29
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
··· 310 310 mlx5e_ipsec_aso_query(sa_entry, data); 311 311 } 312 312 313 - static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry, 314 - u32 mode_param) 313 + static void 314 + mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry, 315 + u32 mode_param, 316 + struct mlx5_accel_esp_xfrm_attrs *attrs) 315 317 { 316 - struct mlx5_accel_esp_xfrm_attrs attrs = {}; 317 318 struct mlx5_wqe_aso_ctrl_seg data = {}; 318 319 319 320 if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) { ··· 324 323 sa_entry->esn_state.overlap = 1; 325 324 } 326 325 327 - mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); 328 - 329 - /* It is safe to execute the modify below unlocked since the only flows 330 - * that could affect this HW object, are create, destroy and this work. 331 - * 332 - * Creation flow can't co-exist with this modify work, the destruction 333 - * flow would cancel this work, and this work is a single entity that 334 - * can't conflict with it self. 335 - */ 336 - spin_unlock_bh(&sa_entry->x->lock); 337 - mlx5_accel_esp_modify_xfrm(sa_entry, &attrs); 338 - spin_lock_bh(&sa_entry->x->lock); 326 + mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, attrs); 339 327 340 328 data.data_offset_condition_operand = 341 329 MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET; ··· 360 370 static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry) 361 371 { 362 372 struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; 363 - struct mlx5e_ipsec *ipsec = sa_entry->ipsec; 364 - struct mlx5e_ipsec_aso *aso = ipsec->aso; 365 373 bool soft_arm, hard_arm; 366 374 u64 hard_cnt; 367 375 368 376 lockdep_assert_held(&sa_entry->x->lock); 369 377 370 - soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm); 371 - hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm); 378 + soft_arm = !MLX5_GET(ipsec_aso, sa_entry->ctx, soft_lft_arm); 379 + hard_arm = !MLX5_GET(ipsec_aso, sa_entry->ctx, hard_lft_arm); 372 380 if (!soft_arm && !hard_arm) 373 381 /* It is not lifetime event */ 374 382 return; 375 383 376 - hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt); 384 + hard_cnt = MLX5_GET(ipsec_aso, sa_entry->ctx, remove_flow_pkt_cnt); 377 385 if (!hard_cnt || hard_arm) { 378 386 /* It is possible to see packet counter equal to zero without 379 387 * hard limit event armed. 
Such situation can be if packet ··· 441 453 struct mlx5e_ipsec_work *work = 442 454 container_of(_work, struct mlx5e_ipsec_work, work); 443 455 struct mlx5e_ipsec_sa_entry *sa_entry = work->data; 456 + struct mlx5_accel_esp_xfrm_attrs tmp = {}; 444 457 struct mlx5_accel_esp_xfrm_attrs *attrs; 445 - struct mlx5e_ipsec_aso *aso; 458 + bool need_modify = false; 446 459 int ret; 447 460 448 - aso = sa_entry->ipsec->aso; 449 461 attrs = &sa_entry->attrs; 450 462 451 463 spin_lock_bh(&sa_entry->x->lock); ··· 453 465 if (ret) 454 466 goto unlock; 455 467 456 - if (attrs->replay_esn.trigger && 457 - !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) { 458 - u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter); 459 - 460 - mlx5e_ipsec_update_esn_state(sa_entry, mode_param); 461 - } 462 - 463 468 if (attrs->lft.soft_packet_limit != XFRM_INF) 464 469 mlx5e_ipsec_handle_limits(sa_entry); 465 470 471 + if (attrs->replay_esn.trigger && 472 + !MLX5_GET(ipsec_aso, sa_entry->ctx, esn_event_arm)) { 473 + u32 mode_param = MLX5_GET(ipsec_aso, sa_entry->ctx, 474 + mode_parameter); 475 + 476 + mlx5e_ipsec_update_esn_state(sa_entry, mode_param, &tmp); 477 + need_modify = true; 478 + } 479 + 466 480 unlock: 467 481 spin_unlock_bh(&sa_entry->x->lock); 482 + if (need_modify) 483 + mlx5_accel_esp_modify_xfrm(sa_entry, &tmp); 468 484 kfree(work); 469 485 } 470 486 ··· 621 629 /* We are in atomic context */ 622 630 udelay(10); 623 631 } while (ret && time_is_after_jiffies(expires)); 632 + if (!ret) 633 + memcpy(sa_entry->ctx, aso->ctx, MLX5_ST_SZ_BYTES(ipsec_aso)); 624 634 spin_unlock_bh(&aso->lock); 625 635 return ret; 626 636 }
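The ipsec_offload rework above closes an unlock/relock window: rather than dropping the xfrm state lock mid-update to call the sleeping mlx5_accel_esp_modify_xfrm(), the handler now builds the new attributes under the lock, sets a flag, and issues the firmware modify only after unlocking; for the same reason each SA entry now keeps its own snapshot of the ASO context, taken while the query lock is held. A pthread sketch of that shape (hypothetical state, not the mlx5 structures):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int esn, overlap;

/* May sleep; must not run under state_lock. */
static void slow_firmware_modify(unsigned int snap_esn,
                                 unsigned int snap_ovl)
{
        printf("modify: esn=%u overlap=%u\n", snap_esn, snap_ovl);
}

static void handle_event(bool esn_wrapped)
{
        unsigned int snap_esn = 0, snap_ovl = 0;
        bool need_modify = false;

        pthread_mutex_lock(&state_lock);
        if (esn_wrapped) {
                esn++;              /* update state under the lock */
                overlap = 0;
                snap_esn = esn;     /* snapshot for use after unlock */
                snap_ovl = overlap;
                need_modify = true;
        }
        pthread_mutex_unlock(&state_lock);

        if (need_modify) /* slow call happens outside the lock */
                slow_firmware_modify(snap_esn, snap_ovl);
}

int main(void)
{
        handle_event(true);
        handle_event(false);
        return 0;
}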
+9 -14
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 1589 1589 struct skb_shared_info *sinfo; 1590 1590 u32 frag_consumed_bytes; 1591 1591 struct bpf_prog *prog; 1592 + u8 nr_frags_free = 0; 1592 1593 struct sk_buff *skb; 1593 1594 dma_addr_t addr; 1594 1595 u32 truesize; ··· 1632 1631 1633 1632 prog = rcu_dereference(rq->xdp_prog); 1634 1633 if (prog) { 1635 - u8 nr_frags_free, old_nr_frags = sinfo->nr_frags; 1634 + u8 old_nr_frags = sinfo->nr_frags; 1636 1635 1637 1636 if (mlx5e_xdp_handle(rq, prog, mxbuf)) { 1638 1637 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, 1639 1638 rq->flags)) { 1640 1639 struct mlx5e_wqe_frag_info *pwi; 1641 - 1642 - wi -= old_nr_frags - sinfo->nr_frags; 1643 1640 1644 1641 for (pwi = head_wi; pwi < wi; pwi++) 1645 1642 pwi->frag_page->frags++; ··· 1646 1647 } 1647 1648 1648 1649 nr_frags_free = old_nr_frags - sinfo->nr_frags; 1649 - if (unlikely(nr_frags_free)) { 1650 - wi -= nr_frags_free; 1650 + if (unlikely(nr_frags_free)) 1651 1651 truesize -= nr_frags_free * frag_info->frag_stride; 1652 - } 1653 1652 } 1654 1653 1655 1654 skb = mlx5e_build_linear_skb( ··· 1663 1666 1664 1667 if (xdp_buff_has_frags(&mxbuf->xdp)) { 1665 1668 /* sinfo->nr_frags is reset by build_skb, calculate again. */ 1666 - xdp_update_skb_frags_info(skb, wi - head_wi - 1, 1669 + xdp_update_skb_frags_info(skb, wi - head_wi - nr_frags_free - 1, 1667 1670 sinfo->xdp_frags_size, truesize, 1668 1671 xdp_buff_get_skb_flags(&mxbuf->xdp)); 1669 1672 ··· 1954 1957 1955 1958 if (prog) { 1956 1959 u8 nr_frags_free, old_nr_frags = sinfo->nr_frags; 1960 + u8 new_nr_frags; 1957 1961 u32 len; 1958 1962 1959 1963 if (mlx5e_xdp_handle(rq, prog, mxbuf)) { 1960 1964 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { 1961 1965 struct mlx5e_frag_page *pfp; 1962 - 1963 - frag_page -= old_nr_frags - sinfo->nr_frags; 1964 1966 1965 1967 for (pfp = head_page; pfp < frag_page; pfp++) 1966 1968 pfp->frags++; ··· 1971 1975 return NULL; /* page/packet was consumed by XDP */ 1972 1976 } 1973 1977 1974 - nr_frags_free = old_nr_frags - sinfo->nr_frags; 1975 - if (unlikely(nr_frags_free)) { 1976 - frag_page -= nr_frags_free; 1978 + new_nr_frags = sinfo->nr_frags; 1979 + nr_frags_free = old_nr_frags - new_nr_frags; 1980 + if (unlikely(nr_frags_free)) 1977 1981 truesize -= (nr_frags_free - 1) * PAGE_SIZE + 1978 1982 ALIGN(pg_consumed_bytes, 1979 1983 BIT(rq->mpwqe.log_stride_sz)); 1980 - } 1981 1984 1982 1985 len = mxbuf->xdp.data_end - mxbuf->xdp.data; 1983 1986 ··· 1998 2003 struct mlx5e_frag_page *pagep; 1999 2004 2000 2005 /* sinfo->nr_frags is reset by build_skb, calculate again. */ 2001 - xdp_update_skb_frags_info(skb, frag_page - head_page, 2006 + xdp_update_skb_frags_info(skb, new_nr_frags, 2002 2007 sinfo->xdp_frags_size, 2003 2008 truesize, 2004 2009 xdp_buff_get_skb_flags(&mxbuf->xdp));
+9 -14
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
··· 1489 1489 return err; 1490 1490 } 1491 1491 1492 - static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) 1492 + static u32 mlx5_esw_qos_lag_link_speed_get(struct mlx5_core_dev *mdev, 1493 + bool take_rtnl) 1493 1494 { 1494 1495 struct ethtool_link_ksettings lksettings; 1495 1496 struct net_device *slave, *master; 1496 1497 u32 speed = SPEED_UNKNOWN; 1497 1498 1498 - /* Lock ensures a stable reference to master and slave netdevice 1499 - * while port speed of master is queried. 1500 - */ 1501 - ASSERT_RTNL(); 1502 - 1503 1499 slave = mlx5_uplink_netdev_get(mdev); 1504 1500 if (!slave) 1505 1501 goto out; 1506 1502 1503 + if (take_rtnl) 1504 + rtnl_lock(); 1507 1505 master = netdev_master_upper_dev_get(slave); 1508 1506 if (master && !__ethtool_get_link_ksettings(master, &lksettings)) 1509 1507 speed = lksettings.base.speed; 1508 + if (take_rtnl) 1509 + rtnl_unlock(); 1510 1510 1511 1511 out: 1512 1512 mlx5_uplink_netdev_put(mdev, slave); ··· 1514 1514 } 1515 1515 1516 1516 static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max, 1517 - bool hold_rtnl_lock, struct netlink_ext_ack *extack) 1517 + bool take_rtnl, 1518 + struct netlink_ext_ack *extack) 1518 1519 { 1519 1520 int err; 1520 1521 1521 1522 if (!mlx5_lag_is_active(mdev)) 1522 1523 goto skip_lag; 1523 1524 1524 - if (hold_rtnl_lock) 1525 - rtnl_lock(); 1526 - 1527 - *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev); 1528 - 1529 - if (hold_rtnl_lock) 1530 - rtnl_unlock(); 1525 + *link_speed_max = mlx5_esw_qos_lag_link_speed_get(mdev, take_rtnl); 1531 1526 1532 1527 if (*link_speed_max != (u32)SPEED_UNKNOWN) 1533 1528 return 0;
+4 -3
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1072 1072 1073 1073 static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw) 1074 1074 { 1075 - if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) 1075 + if (esw->mode == MLX5_ESWITCH_OFFLOADS && 1076 + mlx5_eswitch_is_funcs_handler(esw->dev)) { 1076 1077 mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb); 1077 - 1078 - flush_workqueue(esw->work_queue); 1078 + atomic_inc(&esw->esw_funcs.generation); 1079 + } 1079 1080 } 1080 1081 1081 1082 static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
+2
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 335 335 struct mlx5_host_work { 336 336 struct work_struct work; 337 337 struct mlx5_eswitch *esw; 338 + int work_gen; 338 339 }; 339 340 340 341 struct mlx5_esw_functions { 341 342 struct mlx5_nb nb; 343 + atomic_t generation; 342 344 bool host_funcs_disabled; 343 345 u16 num_vfs; 344 346 u16 num_ec_vfs;
+25 -20
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 1241 1241 flows[peer_vport->index] = flow; 1242 1242 } 1243 1243 1244 - if (mlx5_esw_host_functions_enabled(esw->dev)) { 1245 - mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, 1246 - mlx5_core_max_vfs(peer_dev)) { 1247 - esw_set_peer_miss_rule_source_port(esw, peer_esw, 1248 - spec, 1249 - peer_vport->vport); 1250 - 1251 - flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1252 - spec, &flow_act, &dest, 1); 1253 - if (IS_ERR(flow)) { 1254 - err = PTR_ERR(flow); 1255 - goto add_vf_flow_err; 1256 - } 1257 - flows[peer_vport->index] = flow; 1244 + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, 1245 + mlx5_core_max_vfs(peer_dev)) { 1246 + esw_set_peer_miss_rule_source_port(esw, peer_esw, spec, 1247 + peer_vport->vport); 1248 + flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1249 + spec, &flow_act, &dest, 1); 1250 + if (IS_ERR(flow)) { 1251 + err = PTR_ERR(flow); 1252 + goto add_vf_flow_err; 1258 1253 } 1254 + flows[peer_vport->index] = flow; 1259 1255 } 1260 1256 1261 1257 if (mlx5_core_ec_sriov_enabled(peer_dev)) { ··· 1343 1347 mlx5_del_flow_rules(flows[peer_vport->index]); 1344 1348 } 1345 1349 1346 - if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { 1350 + if (mlx5_core_is_ecpf_esw_manager(peer_dev) && 1351 + mlx5_esw_host_functions_enabled(peer_dev)) { 1347 1352 peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); 1348 1353 mlx5_del_flow_rules(flows[peer_vport->index]); 1349 1354 } ··· 3579 3582 } 3580 3583 3581 3584 static void 3582 - esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out) 3585 + esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, int work_gen, 3586 + const u32 *out) 3583 3587 { 3584 3588 struct devlink *devlink; 3585 3589 bool host_pf_disabled; 3586 3590 u16 new_num_vfs; 3591 + 3592 + devlink = priv_to_devlink(esw->dev); 3593 + devl_lock(devlink); 3594 + 3595 + /* Stale work from one or more mode changes ago. Bail out. */ 3596 + if (work_gen != atomic_read(&esw->esw_funcs.generation)) 3597 + goto unlock; 3587 3598 3588 3599 new_num_vfs = MLX5_GET(query_esw_functions_out, out, 3589 3600 host_params_context.host_num_of_vfs); ··· 3599 3594 host_params_context.host_pf_disabled); 3600 3595 3601 3596 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled) 3602 - return; 3597 + goto unlock; 3603 3598 3604 - devlink = priv_to_devlink(esw->dev); 3605 - devl_lock(devlink); 3606 3599 /* Number of VFs can only change from "0 to x" or "x to 0". */ 3607 3600 if (esw->esw_funcs.num_vfs > 0) { 3608 3601 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); ··· 3615 3612 } 3616 3613 } 3617 3614 esw->esw_funcs.num_vfs = new_num_vfs; 3615 + unlock: 3618 3616 devl_unlock(devlink); 3619 3617 } 3620 3618 ··· 3632 3628 if (IS_ERR(out)) 3633 3629 goto out; 3634 3630 3635 - esw_vfs_changed_event_handler(esw, out); 3631 + esw_vfs_changed_event_handler(esw, host_work->work_gen, out); 3636 3632 kvfree(out); 3637 3633 out: 3638 3634 kfree(host_work); ··· 3652 3648 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs); 3653 3649 3654 3650 host_work->esw = esw; 3651 + host_work->work_gen = atomic_read(&esw_funcs->generation); 3655 3652 3656 3653 INIT_WORK(&host_work->work, esw_functions_changed_event_handler); 3657 3654 queue_work(esw->work_queue, &host_work->work);
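The eswitch change above drops a flush_workqueue() call in favor of a generation counter: each queued work item is stamped with the generation current at queue time, unregistering bumps the counter, and the handler discards any item whose stamp no longer matches. A C11 sketch of the pattern, with hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int generation;

struct host_work {
        int work_gen; /* stamped when the work is queued */
};

static void queue_event(struct host_work *w)
{
        w->work_gen = atomic_load(&generation);
}

/* Unregister no longer flushes the queue; it just invalidates
 * everything already on it. */
static void unregister_handler(void)
{
        atomic_fetch_add(&generation, 1);
}

static void worker(struct host_work *w)
{
        if (w->work_gen != atomic_load(&generation)) {
                printf("stale work from an old mode, bailing out\n");
                return;
        }
        printf("processing gen %d\n", w->work_gen);
}

int main(void)
{
        struct host_work w1, w2;

        queue_event(&w1);
        worker(&w1);          /* current generation: processed */

        queue_event(&w2);
        unregister_handler(); /* mode change invalidates w2 */
        worker(&w2);          /* stale: discarded */
        return 0;
}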
+1
drivers/net/ethernet/microsoft/mana/gdma_main.c
··· 1934 1934 mana_gd_remove_irqs(pdev); 1935 1935 free_workqueue: 1936 1936 destroy_workqueue(gc->service_wq); 1937 + gc->service_wq = NULL; 1937 1938 dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err); 1938 1939 return err; 1939 1940 }
+3 -3
drivers/net/ethernet/microsoft/mana/hw_channel.c
··· 814 814 gc->max_num_cqs = 0; 815 815 } 816 816 817 - kfree(hwc->caller_ctx); 818 - hwc->caller_ctx = NULL; 819 - 820 817 if (hwc->txq) 821 818 mana_hwc_destroy_wq(hwc, hwc->txq); 822 819 ··· 822 825 823 826 if (hwc->cq) 824 827 mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq); 828 + 829 + kfree(hwc->caller_ctx); 830 + hwc->caller_ctx = NULL; 825 831 826 832 mana_gd_free_res_map(&hwc->inflight_msg_res); 827 833
+13 -6
drivers/net/ethernet/spacemit/k1_emac.c
··· 565 565 DMA_FROM_DEVICE); 566 566 if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) { 567 567 dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n"); 568 - goto err_free_skb; 568 + dev_kfree_skb_any(skb); 569 + rx_buf->skb = NULL; 570 + break; 569 571 } 570 572 571 573 rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i]; ··· 592 590 593 591 rx_ring->head = i; 594 592 return; 595 - 596 - err_free_skb: 597 - dev_kfree_skb_any(skb); 598 - rx_buf->skb = NULL; 599 593 } 600 594 601 595 /* Returns number of packets received */ ··· 733 735 struct emac_desc tx_desc, *tx_desc_addr; 734 736 struct device *dev = &priv->pdev->dev; 735 737 struct emac_tx_desc_buffer *tx_buf; 736 - u32 head, old_head, frag_num, f; 738 + u32 head, old_head, frag_num, f, i; 737 739 bool buf_idx; 738 740 739 741 frag_num = skb_shinfo(skb)->nr_frags; ··· 801 803 802 804 err_free_skb: 803 805 dev_dstats_tx_dropped(priv->ndev); 806 + 807 + i = old_head; 808 + while (i != head) { 809 + emac_free_tx_buf(priv, i); 810 + 811 + if (++i == tx_ring->total_cnt) 812 + i = 0; 813 + } 814 + 804 815 dev_kfree_skb_any(skb); 805 816 } 806 817
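The k1_emac transmit error path above gains an unwind loop: on a DMA-mapping failure partway through a multi-fragment frame, it walks from old_head to head, wrapping at total_cnt, and releases each slot it had already filled. The traversal in isolation (hypothetical ring; the driver unmaps buffers where free_slot() stands):

#include <stdio.h>

#define RING_SIZE 8

static void free_slot(unsigned int i)
{
        printf("undo slot %u\n", i);
}

/* Undo slots [old_head, head) with wraparound, exactly the shape of
 * the new error loop above. */
static void unwind(unsigned int old_head, unsigned int head)
{
        unsigned int i = old_head;

        while (i != head) {
                free_slot(i);
                if (++i == RING_SIZE)
                        i = 0;
        }
}

int main(void)
{
        unwind(6, 2); /* frees 6, 7, 0, 1 */
        return 0;
}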
+9 -7
drivers/net/ethernet/ti/am65-cpsw-nuss.c
··· 1351 1351 ndev_priv = netdev_priv(ndev); 1352 1352 am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark); 1353 1353 skb_put(skb, pkt_len); 1354 - if (port->rx_ts_enabled) 1354 + if (port->rx_ts_filter) 1355 1355 am65_cpts_rx_timestamp(common->cpts, skb); 1356 1356 skb_mark_for_recycle(skb); 1357 1357 skb->protocol = eth_type_trans(skb, ndev); ··· 1811 1811 1812 1812 switch (cfg->rx_filter) { 1813 1813 case HWTSTAMP_FILTER_NONE: 1814 - port->rx_ts_enabled = false; 1814 + port->rx_ts_filter = HWTSTAMP_FILTER_NONE; 1815 1815 break; 1816 1816 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 1817 1817 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 1818 1818 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 1819 + port->rx_ts_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 1820 + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 1821 + break; 1819 1822 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 1820 1823 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 1821 1824 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: ··· 1828 1825 case HWTSTAMP_FILTER_PTP_V2_EVENT: 1829 1826 case HWTSTAMP_FILTER_PTP_V2_SYNC: 1830 1827 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 1831 - port->rx_ts_enabled = true; 1832 - cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 1828 + port->rx_ts_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 1829 + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 1833 1830 break; 1834 1831 case HWTSTAMP_FILTER_ALL: 1835 1832 case HWTSTAMP_FILTER_SOME: ··· 1866 1863 ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN | 1867 1864 AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN; 1868 1865 1869 - if (port->rx_ts_enabled) 1866 + if (port->rx_ts_filter) 1870 1867 ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN | 1871 1868 AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN; 1872 1869 ··· 1891 1888 cfg->flags = 0; 1892 1889 cfg->tx_type = port->tx_ts_enabled ? 1893 1890 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; 1894 - cfg->rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT | 1895 - HWTSTAMP_FILTER_PTP_V1_L4_EVENT : HWTSTAMP_FILTER_NONE; 1891 + cfg->rx_filter = port->rx_ts_filter; 1896 1892 1897 1893 return 0; 1898 1894 }
+1 -1
drivers/net/ethernet/ti/am65-cpsw-nuss.h
··· 52 52 bool disabled; 53 53 struct am65_cpsw_slave_data slave; 54 54 bool tx_ts_enabled; 55 - bool rx_ts_enabled; 55 + enum hwtstamp_rx_filters rx_ts_filter; 56 56 struct am65_cpsw_qos qos; 57 57 struct devlink_port devlink_port; 58 58 struct bpf_prog *xdp_prog;
+5
drivers/net/ethernet/ti/icssg/icssg_common.c
··· 1075 1075 xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false); 1076 1076 1077 1077 *xdp_state = emac_run_xdp(emac, &xdp, &pkt_len); 1078 + if (*xdp_state == ICSSG_XDP_CONSUMED) { 1079 + page_pool_recycle_direct(pool, page); 1080 + goto requeue; 1081 + } 1082 + 1078 1083 if (*xdp_state != ICSSG_XDP_PASS) 1079 1084 goto requeue; 1080 1085 headroom = xdp.data - xdp.data_hard_start;
+1
drivers/net/mctp/mctp-i2c.c
··· 343 343 } else { 344 344 status = NET_RX_DROP; 345 345 spin_unlock_irqrestore(&midev->lock, flags); 346 + kfree_skb(skb); 346 347 } 347 348 348 349 if (status == NET_RX_SUCCESS) {
+1 -2
drivers/net/mctp/mctp-usb.c
··· 329 329 SET_NETDEV_DEV(netdev, &intf->dev); 330 330 dev = netdev_priv(netdev); 331 331 dev->netdev = netdev; 332 - dev->usbdev = usb_get_dev(interface_to_usbdev(intf)); 332 + dev->usbdev = interface_to_usbdev(intf); 333 333 dev->intf = intf; 334 334 usb_set_intfdata(intf, dev); 335 335 ··· 365 365 mctp_unregister_netdev(dev->netdev); 366 366 usb_free_urb(dev->tx_urb); 367 367 usb_free_urb(dev->rx_urb); 368 - usb_put_dev(dev->usbdev); 369 368 free_netdev(dev->netdev); 370 369 } 371 370
+4 -1
drivers/net/netdevsim/netdev.c
··· 109 109 int ret; 110 110 111 111 ret = __dev_forward_skb(rx_dev, skb); 112 - if (ret) 112 + if (ret) { 113 + if (psp_ext) 114 + __skb_ext_put(psp_ext); 113 115 return ret; 116 + } 114 117 115 118 nsim_psp_handle_ext(skb, psp_ext); 116 119
+7 -1
drivers/net/phy/sfp.c
··· 367 367 sfp->state_ignore_mask |= SFP_F_TX_FAULT; 368 368 } 369 369 370 + static void sfp_fixup_ignore_tx_fault_and_los(struct sfp *sfp) 371 + { 372 + sfp_fixup_ignore_tx_fault(sfp); 373 + sfp_fixup_ignore_los(sfp); 374 + } 375 + 370 376 static void sfp_fixup_ignore_hw(struct sfp *sfp, unsigned int mask) 371 377 { 372 378 sfp->state_hw_mask &= ~mask; ··· 536 530 // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in 537 531 // their EEPROM 538 532 SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex, 539 - sfp_fixup_ignore_tx_fault), 533 + sfp_fixup_ignore_tx_fault_and_los), 540 534 541 535 // Lantech 8330-262D-E and 8330-265D can operate at 2500base-X, but 542 536 // incorrectly report 2500MBd NRZ in their EEPROM.
+6 -6
drivers/net/usb/aqc111.c
··· 1395 1395 aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, 1396 1396 SFR_MEDIUM_STATUS_MODE, 2, &reg16); 1397 1397 1398 - aqc111_write_cmd(dev, AQ_WOL_CFG, 0, 0, 1399 - WOL_CFG_SIZE, &wol_cfg); 1400 - aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, 1401 - &aqc111_data->phy_cfg); 1398 + aqc111_write_cmd_nopm(dev, AQ_WOL_CFG, 0, 0, 1399 + WOL_CFG_SIZE, &wol_cfg); 1400 + aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0, 1401 + &aqc111_data->phy_cfg); 1402 1402 } else { 1403 1403 aqc111_data->phy_cfg |= AQ_LOW_POWER; 1404 - aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, 1405 - &aqc111_data->phy_cfg); 1404 + aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0, 1405 + &aqc111_data->phy_cfg); 1406 1406 1407 1407 /* Disable RX path */ 1408 1408 aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC,
+6 -4
drivers/net/usb/cdc_ncm.c
··· 1656 1656 struct usbnet *dev = netdev_priv(skb_in->dev); 1657 1657 struct usb_cdc_ncm_ndp16 *ndp16; 1658 1658 int ret = -EINVAL; 1659 + size_t ndp_len; 1659 1660 1660 1661 if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) { 1661 1662 netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n", ··· 1676 1675 sizeof(struct usb_cdc_ncm_dpe16)); 1677 1676 ret--; /* we process NDP entries except for the last one */ 1678 1677 1679 - if ((sizeof(struct usb_cdc_ncm_ndp16) + 1680 - ret * (sizeof(struct usb_cdc_ncm_dpe16))) > skb_in->len) { 1678 + ndp_len = struct_size_t(struct usb_cdc_ncm_ndp16, dpe16, ret); 1679 + if (ndpoffset + ndp_len > skb_in->len) { 1681 1680 netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret); 1682 1681 ret = -EINVAL; 1683 1682 } ··· 1693 1692 struct usbnet *dev = netdev_priv(skb_in->dev); 1694 1693 struct usb_cdc_ncm_ndp32 *ndp32; 1695 1694 int ret = -EINVAL; 1695 + size_t ndp_len; 1696 1696 1697 1697 if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp32)) > skb_in->len) { 1698 1698 netif_dbg(dev, rx_err, dev->net, "invalid NDP offset <%u>\n", ··· 1713 1711 sizeof(struct usb_cdc_ncm_dpe32)); 1714 1712 ret--; /* we process NDP entries except for the last one */ 1715 1713 1716 - if ((sizeof(struct usb_cdc_ncm_ndp32) + 1717 - ret * (sizeof(struct usb_cdc_ncm_dpe32))) > skb_in->len) { 1714 + ndp_len = struct_size_t(struct usb_cdc_ncm_ndp32, dpe32, ret); 1715 + if (ndpoffset + ndp_len > skb_in->len) { 1718 1716 netif_dbg(dev, rx_err, dev->net, "Invalid nframes = %d\n", ret); 1719 1717 ret = -EINVAL; 1720 1718 }
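The cdc_ncm fix above recomputes the NDP length with struct_size_t() and, crucially, validates it against the NDP's offset into the skb rather than from offset zero, so an NDP sitting near the end of the buffer can no longer pass the check while its entries run past it. A userspace sketch of the offset-aware check, approximating struct_size_t() with offsetof (the kernel macro additionally saturates on overflow):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dpe16 { /* hypothetical mirror of usb_cdc_ncm_dpe16 */
        uint16_t index;
        uint16_t len;
};

struct ndp16 { /* hypothetical mirror of usb_cdc_ncm_ndp16 */
        uint32_t signature;
        uint16_t length;
        uint16_t next_ndp_index;
        struct dpe16 dpe16[]; /* flexible array of entries */
};

static bool ndp_fits(size_t buf_len, size_t ndpoffset, size_t nframes)
{
        /* struct_size_t(struct ndp16, dpe16, nframes), sans the
         * overflow saturation the kernel macro adds */
        size_t ndp_len = offsetof(struct ndp16, dpe16) +
                         nframes * sizeof(struct dpe16);

        /* the pre-fix check was `ndp_len > buf_len`, ignoring where
         * in the buffer the NDP actually starts */
        return ndpoffset + ndp_len <= buf_len;
}

int main(void)
{
        printf("fits=%d\n", ndp_fits(100, 80, 2));  /* 80+16 <= 100 */
        printf("fits=%d\n", ndp_fits(100, 80, 10)); /* 80+48 > 100 */
        return 0;
}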
+8 -4
drivers/net/usb/lan78xx.c
··· 3119 3119 int ret; 3120 3120 u32 buf; 3121 3121 3122 + /* LAN7850 is USB 2.0 and does not support LTM */ 3123 + if (dev->chipid == ID_REV_CHIP_ID_7850_) 3124 + return 0; 3125 + 3122 3126 ret = lan78xx_read_reg(dev, USB_CFG1, &buf); 3123 3127 if (ret < 0) 3124 3128 goto init_ltm_failed; ··· 3833 3829 */ 3834 3830 if (!(dev->net->features & NETIF_F_RXCSUM) || 3835 3831 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) || 3832 + unlikely(rx_cmd_a & RX_CMD_A_CSE_MASK_) || 3836 3833 ((rx_cmd_a & RX_CMD_A_FVTG_) && 3837 3834 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) { 3838 3835 skb->ip_summed = CHECKSUM_NONE; ··· 3906 3901 return 0; 3907 3902 } 3908 3903 3909 - if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) { 3904 + if (unlikely(rx_cmd_a & RX_CMD_A_RED_) && 3905 + (rx_cmd_a & RX_CMD_A_RX_HARD_ERRS_MASK_)) { 3910 3906 netif_dbg(dev, rx_err, dev->net, 3911 3907 "Error rx_cmd_a=0x%08x", rx_cmd_a); 3912 3908 } else { ··· 4182 4176 } 4183 4177 4184 4178 tx_data += len; 4185 - entry->length += len; 4179 + entry->length += max_t(unsigned int, len, ETH_ZLEN); 4186 4180 entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1; 4187 4181 4188 4182 dev_kfree_skb_any(skb); ··· 4551 4545 phylink_stop(dev->phylink); 4552 4546 phylink_disconnect_phy(dev->phylink); 4553 4547 rtnl_unlock(); 4554 - 4555 - netif_napi_del(&dev->napi); 4556 4548 4557 4549 unregister_netdev(net); 4558 4550
+3
drivers/net/usb/lan78xx.h
··· 74 74 #define RX_CMD_A_ICSM_ (0x00004000) 75 75 #define RX_CMD_A_LEN_MASK_ (0x00003FFF) 76 76 77 + #define RX_CMD_A_RX_HARD_ERRS_MASK_ \ 78 + (RX_CMD_A_RX_ERRS_MASK_ & ~RX_CMD_A_CSE_MASK_) 79 + 77 80 /* Rx Command B */ 78 81 #define RX_CMD_B_CSUM_SHIFT_ (16) 79 82 #define RX_CMD_B_CSUM_MASK_ (0xFFFF0000)
+2 -2
drivers/net/usb/qmi_wwan.c
··· 928 928 929 929 static const struct driver_info qmi_wwan_info = { 930 930 .description = "WWAN/QMI device", 931 - .flags = FLAG_WWAN | FLAG_SEND_ZLP, 931 + .flags = FLAG_WWAN | FLAG_NOMAXMTU | FLAG_SEND_ZLP, 932 932 .bind = qmi_wwan_bind, 933 933 .unbind = qmi_wwan_unbind, 934 934 .manage_power = qmi_wwan_manage_power, ··· 937 937 938 938 static const struct driver_info qmi_wwan_info_quirk_dtr = { 939 939 .description = "WWAN/QMI device", 940 - .flags = FLAG_WWAN | FLAG_SEND_ZLP, 940 + .flags = FLAG_WWAN | FLAG_NOMAXMTU | FLAG_SEND_ZLP, 941 941 .bind = qmi_wwan_bind, 942 942 .unbind = qmi_wwan_unbind, 943 943 .manage_power = qmi_wwan_manage_power,
+4 -3
drivers/net/usb/usbnet.c
··· 1829 1829 if ((dev->driver_info->flags & FLAG_NOARP) != 0) 1830 1830 net->flags |= IFF_NOARP; 1831 1831 1832 - if (net->max_mtu > (dev->hard_mtu - net->hard_header_len)) 1832 + if ((dev->driver_info->flags & FLAG_NOMAXMTU) == 0 && 1833 + net->max_mtu > (dev->hard_mtu - net->hard_header_len)) 1833 1834 net->max_mtu = dev->hard_mtu - net->hard_header_len; 1834 1835 1835 - if (net->mtu > net->max_mtu) 1836 - net->mtu = net->max_mtu; 1836 + if (net->mtu > (dev->hard_mtu - net->hard_header_len)) 1837 + net->mtu = dev->hard_mtu - net->hard_header_len; 1837 1838 1838 1839 } else if (!info->in || !info->out) 1839 1840 status = usbnet_get_endpoints(dev, udev);
+2 -4
drivers/net/wireless/ath/ath9k/channel.c
··· 1006 1006 skb_set_queue_mapping(skb, IEEE80211_AC_VO); 1007 1007 1008 1008 if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, NULL)) 1009 - goto error; 1009 + return; 1010 1010 1011 1011 txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; 1012 1012 if (ath_tx_start(sc->hw, skb, &txctl)) ··· 1119 1119 1120 1120 skb->priority = 7; 1121 1121 skb_set_queue_mapping(skb, IEEE80211_AC_VO); 1122 - if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) { 1123 - dev_kfree_skb_any(skb); 1122 + if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) 1124 1123 return false; 1125 - } 1126 1124 break; 1127 1125 default: 1128 1126 return false;
+1 -3
drivers/net/wireless/mediatek/mt76/scan.c
··· 63 63 64 64 rcu_read_lock(); 65 65 66 - if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL)) { 67 - ieee80211_free_txskb(phy->hw, skb); 66 + if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL)) 68 67 goto out; 69 - } 70 68 71 69 info = IEEE80211_SKB_CB(skb); 72 70 if (req->no_cck)
+1 -1
drivers/net/wireless/ti/wlcore/tx.c
··· 210 210 if (skb_headroom(skb) < (total_len - skb->len) && 211 211 pskb_expand_head(skb, (total_len - skb->len), 0, GFP_ATOMIC)) { 212 212 wl1271_free_tx_id(wl, id); 213 - return -EAGAIN; 213 + return -ENOMEM; 214 214 } 215 215 desc = skb_push(skb, total_len - skb->len); 216 216
+1 -2
drivers/net/wireless/virtual/mac80211_hwsim.c
··· 3021 3021 hwsim->tmp_chan->band, 3022 3022 NULL)) { 3023 3023 rcu_read_unlock(); 3024 - kfree_skb(probe); 3025 3024 continue; 3026 3025 } 3027 3026 ··· 6488 6489 if (info->attrs[HWSIM_ATTR_PMSR_SUPPORT]) { 6489 6490 struct cfg80211_pmsr_capabilities *pmsr_capa; 6490 6491 6491 - pmsr_capa = kmalloc_obj(*pmsr_capa); 6492 + pmsr_capa = kzalloc_obj(*pmsr_capa); 6492 6493 if (!pmsr_capa) { 6493 6494 ret = -ENOMEM; 6494 6495 goto out_free;
+2 -2
drivers/nfc/nxp-nci/i2c.c
··· 47 47 { 48 48 struct nxp_nci_i2c_phy *phy = (struct nxp_nci_i2c_phy *) phy_id; 49 49 50 - gpiod_set_value(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0); 51 - gpiod_set_value(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0); 50 + gpiod_set_value_cansleep(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0); 51 + gpiod_set_value_cansleep(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0); 52 52 usleep_range(10000, 15000); 53 53 54 54 if (mode == NXP_NCI_MODE_COLD)
+3 -2
drivers/nvdimm/bus.c
··· 486 486 static void nd_async_device_register(void *d, async_cookie_t cookie) 487 487 { 488 488 struct device *dev = d; 489 + struct device *parent = dev->parent; 489 490 490 491 if (device_add(dev) != 0) { 491 492 dev_err(dev, "%s: failed\n", __func__); 492 493 put_device(dev); 493 494 } 494 495 put_device(dev); 495 - if (dev->parent) 496 - put_device(dev->parent); 496 + if (parent) 497 + put_device(parent); 497 498 } 498 499 499 500 static void nd_async_device_unregister(void *d, async_cookie_t cookie)
+1 -2
drivers/nvme/host/core.c
··· 4834 4834 int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, 4835 4835 const struct blk_mq_ops *ops, unsigned int cmd_size) 4836 4836 { 4837 - struct queue_limits lim = {}; 4838 4837 int ret; 4839 4838 4840 4839 memset(set, 0, sizeof(*set)); ··· 4860 4861 if (ctrl->admin_q) 4861 4862 blk_put_queue(ctrl->admin_q); 4862 4863 4863 - ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL); 4864 + ctrl->admin_q = blk_mq_alloc_queue(set, NULL, NULL); 4864 4865 if (IS_ERR(ctrl->admin_q)) { 4865 4866 ret = PTR_ERR(ctrl->admin_q); 4866 4867 goto out_free_tagset;
+5 -3
drivers/nvme/host/pci.c
··· 544 544 /* Free memory and continue on */ 545 545 nvme_dbbuf_dma_free(dev); 546 546 547 - for (i = 1; i <= dev->online_queues; i++) 547 + for (i = 1; i < dev->online_queues; i++) 548 548 nvme_dbbuf_free(&dev->queues[i]); 549 549 } 550 550 } ··· 1625 1625 static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) 1626 1626 { 1627 1627 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 1628 + int irq; 1628 1629 1629 1630 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); 1630 1631 1631 - disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1632 + irq = pci_irq_vector(pdev, nvmeq->cq_vector); 1633 + disable_irq(irq); 1632 1634 spin_lock(&nvmeq->cq_poll_lock); 1633 1635 nvme_poll_cq(nvmeq, NULL); 1634 1636 spin_unlock(&nvmeq->cq_poll_lock); 1635 - enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1637 + enable_irq(irq); 1636 1638 } 1637 1639 1638 1640 static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
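Two small hardenings in the nvme hunk above: the dbbuf teardown loop used an inclusive <= bound on online_queues, a count whose valid indices stop at online_queues - 1, and the poll path now reads pci_irq_vector() once instead of calling it around both disable_irq() and enable_irq(). The fencepost in miniature:

#include <assert.h>

#define NR_QUEUES 4

int main(void)
{
        int touched[NR_QUEUES] = { 0 };
        int online_queues = NR_QUEUES;

        /* Fixed loop: I/O queues occupy 1 .. online_queues - 1
         * (index 0 is the admin queue). */
        for (int i = 1; i < online_queues; i++)
                touched[i] = 1;

        /* The buggy `i <= online_queues` bound would also have
         * indexed touched[online_queues], one past the array. */
        assert(touched[0] == 0 && touched[NR_QUEUES - 1] == 1);
        return 0;
}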
+1 -1
drivers/nvme/target/admin-cmd.c
··· 1585 1585 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; 1586 1586 mutex_unlock(&ctrl->lock); 1587 1587 1588 - queue_work(nvmet_wq, &ctrl->async_event_work); 1588 + queue_work(nvmet_aen_wq, &ctrl->async_event_work); 1589 1589 } 1590 1590 1591 1591 void nvmet_execute_keep_alive(struct nvmet_req *req)
+12 -2
drivers/nvme/target/core.c
··· 27 27 28 28 struct workqueue_struct *nvmet_wq; 29 29 EXPORT_SYMBOL_GPL(nvmet_wq); 30 + struct workqueue_struct *nvmet_aen_wq; 31 + EXPORT_SYMBOL_GPL(nvmet_aen_wq); 30 32 31 33 /* 32 34 * This read/write semaphore is used to synchronize access to configuration ··· 208 206 list_add_tail(&aen->entry, &ctrl->async_events); 209 207 mutex_unlock(&ctrl->lock); 210 208 211 - queue_work(nvmet_wq, &ctrl->async_event_work); 209 + queue_work(nvmet_aen_wq, &ctrl->async_event_work); 212 210 } 213 211 214 212 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid) ··· 1958 1956 if (!nvmet_wq) 1959 1957 goto out_free_buffered_work_queue; 1960 1958 1959 + nvmet_aen_wq = alloc_workqueue("nvmet-aen-wq", 1960 + WQ_MEM_RECLAIM | WQ_UNBOUND, 0); 1961 + if (!nvmet_aen_wq) 1962 + goto out_free_nvmet_work_queue; 1963 + 1961 1964 error = nvmet_init_debugfs(); 1962 1965 if (error) 1963 - goto out_free_nvmet_work_queue; 1966 + goto out_free_nvmet_aen_work_queue; 1964 1967 1965 1968 error = nvmet_init_discovery(); 1966 1969 if (error) ··· 1981 1974 nvmet_exit_discovery(); 1982 1975 out_exit_debugfs: 1983 1976 nvmet_exit_debugfs(); 1977 + out_free_nvmet_aen_work_queue: 1978 + destroy_workqueue(nvmet_aen_wq); 1984 1979 out_free_nvmet_work_queue: 1985 1980 destroy_workqueue(nvmet_wq); 1986 1981 out_free_buffered_work_queue: ··· 2000 1991 nvmet_exit_discovery(); 2001 1992 nvmet_exit_debugfs(); 2002 1993 ida_destroy(&cntlid_ida); 1994 + destroy_workqueue(nvmet_aen_wq); 2003 1995 destroy_workqueue(nvmet_wq); 2004 1996 destroy_workqueue(buffered_io_wq); 2005 1997 destroy_workqueue(zbd_wq);
+1
drivers/nvme/target/nvmet.h
··· 501 501 extern struct workqueue_struct *buffered_io_wq; 502 502 extern struct workqueue_struct *zbd_wq; 503 503 extern struct workqueue_struct *nvmet_wq; 504 + extern struct workqueue_struct *nvmet_aen_wq; 504 505 505 506 static inline void nvmet_set_result(struct nvmet_req *req, u32 result) 506 507 {
+1
drivers/nvme/target/rdma.c
··· 2087 2087 mutex_unlock(&nvmet_rdma_queue_mutex); 2088 2088 2089 2089 flush_workqueue(nvmet_wq); 2090 + flush_workqueue(nvmet_aen_wq); 2090 2091 } 2091 2092 2092 2093 static struct ib_client nvmet_rdma_ib_client = {
+5
drivers/pci/endpoint/functions/pci-epf-test.c
··· 894 894 dev_err(&epf->dev, "pci_epc_set_bar() failed: %d\n", ret); 895 895 bar->submap = old_submap; 896 896 bar->num_submap = old_nsub; 897 + ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, bar); 898 + if (ret) 899 + dev_warn(&epf->dev, "Failed to restore the original BAR mapping: %d\n", 900 + ret); 901 + 897 902 kfree(submap); 898 903 goto err; 899 904 }
+41 -13
drivers/pci/pwrctrl/core.c
··· 268 268 } 269 269 EXPORT_SYMBOL_GPL(pci_pwrctrl_power_on_devices); 270 270 271 + /* 272 + * Check whether the pwrctrl device really needs to be created or not. The 273 + * pwrctrl device will only be created if the node satisfies the following requirements: 274 + * 275 + * 1. Presence of a compatible property with a "pci" prefix to match against the 276 + * pwrctrl driver (AND) 277 + * 2. At least one of the power supplies defined in the devicetree node of the 278 + * device (OR) in the remote endpoint's parent node to indicate a pwrctrl 279 + * requirement. 280 + */ 281 + static bool pci_pwrctrl_is_required(struct device_node *np) 282 + { 283 + struct device_node *endpoint; 284 + const char *compat; 285 + int ret; 286 + 287 + ret = of_property_read_string(np, "compatible", &compat); 288 + if (ret < 0) 289 + return false; 290 + 291 + if (!strstarts(compat, "pci")) 292 + return false; 293 + 294 + if (of_pci_supply_present(np)) 295 + return true; 296 + 297 + if (of_graph_is_present(np)) { 298 + for_each_endpoint_of_node(np, endpoint) { 299 + struct device_node *remote __free(device_node) = 300 + of_graph_get_remote_port_parent(endpoint); 301 + if (remote) { 302 + if (of_pci_supply_present(remote)) 303 + return true; 304 + } 305 + } 306 + } 307 + 308 + return false; 309 + } 310 + 311 + static int pci_pwrctrl_create_device(struct device_node *np, 272 312 struct device *parent) 273 313 { ··· 327 287 return 0; 328 288 } 329 289 330 - /* 331 - * Sanity check to make sure that the node has the compatible property 332 - * to allow driver binding. 333 - */ 334 - if (!of_property_present(np, "compatible")) 335 - return 0; 336 - 337 - /* 338 - * Check whether the pwrctrl device really needs to be created or not. 339 - * This is decided based on at least one of the power supplies defined 340 - * in the devicetree node of the device or the graph property. 341 - */ 342 - if (!of_pci_supply_present(np) && !of_graph_is_present(np)) { 290 + if (!pci_pwrctrl_is_required(np)) { 343 291 dev_dbg(parent, "Skipping OF node: %s\n", np->name); 344 292 return 0; 345 293 }
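strstarts(), which gates the new check above on the compatible string's "pci" prefix, is simply a strncmp() over the prefix length; a minimal stand-in for reference (the kernel helper lives in include/linux/string.h):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Equivalent of the kernel's strstarts(): true iff str begins with
 * prefix. */
static bool strstarts(const char *str, const char *prefix)
{
        return strncmp(str, prefix, strlen(prefix)) == 0;
}

int main(void)
{
        printf("%d\n", strstarts("pciclass,0604", "pci"));  /* 1 */
        printf("%d\n", strstarts("vendor,foo-pmu", "pci")); /* 0 */
        return 0;
}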
+4 -8
drivers/pmdomain/bcm/bcm2835-power.c
··· 9 9 #include <linux/clk.h> 10 10 #include <linux/delay.h> 11 11 #include <linux/io.h> 12 + #include <linux/iopoll.h> 12 13 #include <linux/mfd/bcm2835-pm.h> 13 14 #include <linux/module.h> 14 15 #include <linux/platform_device.h> ··· 154 153 static int bcm2835_asb_control(struct bcm2835_power *power, u32 reg, bool enable) 155 154 { 156 155 void __iomem *base = power->asb; 157 - u64 start; 158 156 u32 val; 159 157 160 158 switch (reg) { ··· 166 166 break; 167 167 } 168 168 169 - start = ktime_get_ns(); 170 - 171 169 /* Enable the module's async AXI bridges. */ 172 170 if (enable) { 173 171 val = readl(base + reg) & ~ASB_REQ_STOP; ··· 174 176 } 175 177 writel(PM_PASSWORD | val, base + reg); 176 178 177 - while (!!(readl(base + reg) & ASB_ACK) == enable) { 178 - cpu_relax(); 179 - if (ktime_get_ns() - start >= 1000) 180 - return -ETIMEDOUT; 181 - } 179 + if (readl_poll_timeout_atomic(base + reg, val, 180 + !!(val & ASB_ACK) != enable, 0, 5)) 181 + return -ETIMEDOUT; 182 182 183 183 return 0; 184 184 }
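The bcm2835 change above swaps an open-coded ktime polling loop, whose budget worked out to 1000 ns, for readl_poll_timeout_atomic() with a 5 µs timeout, folding the read, exit condition, delay, and deadline into one expression. A userspace equivalent built on clock_gettime(), where reg_read() is a hypothetical stand-in for readl():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_reg;

static uint32_t reg_read(void) /* hypothetical stand-in for readl() */
{
        return ++fake_reg; /* pretend hardware makes progress */
}

static uint64_t now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000ull + ts.tv_nsec / 1000;
}

/* Poll until (*val & mask) is set or timeout_us elapses, the contract
 * of readl_poll_timeout_atomic() with a zero delay between reads. */
static int poll_timeout_us(uint32_t *val, uint32_t mask,
                           uint64_t timeout_us)
{
        uint64_t start = now_us();

        for (;;) {
                *val = reg_read();
                if (*val & mask) /* exit condition, like ASB_ACK */
                        return 0;
                if (now_us() - start >= timeout_us)
                        return -ETIMEDOUT;
        }
}

int main(void)
{
        uint32_t val;
        int ret = poll_timeout_us(&val, 0x8, 5); /* 5 us, as in the fix */

        printf("ret=%d val=0x%x\n", ret, val);
        return 0;
}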
+1 -1
drivers/pmdomain/mediatek/mtk-pm-domains.c
··· 1203 1203 scpsys->soc_data = soc; 1204 1204 1205 1205 scpsys->pd_data.domains = scpsys->domains; 1206 - scpsys->pd_data.num_domains = soc->num_domains; 1206 + scpsys->pd_data.num_domains = num_domains; 1207 1207 1208 1208 parent = dev->parent; 1209 1209 if (!parent) {
+1 -1
drivers/power/sequencing/pwrseq-pcie-m2.c
··· 109 109 if (!ctx) 110 110 return -ENOMEM; 111 111 112 - ctx->of_node = of_node_get(dev->of_node); 112 + ctx->of_node = dev_of_node(dev); 113 113 ctx->pdata = device_get_match_data(dev); 114 114 if (!ctx->pdata) 115 115 return dev_err_probe(dev, -ENODEV,
+10 -4
drivers/regulator/pca9450-regulator.c
··· 1293 1293 struct regulator_dev *ldo5; 1294 1294 struct pca9450 *pca9450; 1295 1295 unsigned int device_id, i; 1296 + const char *type_name; 1296 1297 int ret; 1297 1298 1298 1299 pca9450 = devm_kzalloc(&i2c->dev, sizeof(struct pca9450), GFP_KERNEL); ··· 1304 1303 case PCA9450_TYPE_PCA9450A: 1305 1304 regulator_desc = pca9450a_regulators; 1306 1305 pca9450->rcnt = ARRAY_SIZE(pca9450a_regulators); 1306 + type_name = "pca9450a"; 1307 1307 break; 1308 1308 case PCA9450_TYPE_PCA9450BC: 1309 1309 regulator_desc = pca9450bc_regulators; 1310 1310 pca9450->rcnt = ARRAY_SIZE(pca9450bc_regulators); 1311 + type_name = "pca9450bc"; 1311 1312 break; 1312 1313 case PCA9450_TYPE_PCA9451A: 1314 + regulator_desc = pca9451a_regulators; 1315 + pca9450->rcnt = ARRAY_SIZE(pca9451a_regulators); 1316 + type_name = "pca9451a"; 1317 + break; 1313 1318 case PCA9450_TYPE_PCA9452: 1314 1319 regulator_desc = pca9451a_regulators; 1315 1320 pca9450->rcnt = ARRAY_SIZE(pca9451a_regulators); 1321 + type_name = "pca9452"; 1316 1322 break; 1317 1323 default: 1318 1324 dev_err(&i2c->dev, "Unknown device type"); ··· 1377 1369 if (pca9450->irq) { 1378 1370 ret = devm_request_threaded_irq(pca9450->dev, pca9450->irq, NULL, 1379 1371 pca9450_irq_handler, 1380 - (IRQF_TRIGGER_FALLING | IRQF_ONESHOT), 1372 + (IRQF_TRIGGER_LOW | IRQF_ONESHOT), 1381 1373 "pca9450-irq", pca9450); 1382 1374 if (ret != 0) 1383 1375 return dev_err_probe(pca9450->dev, ret, "Failed to request IRQ: %d\n", ··· 1421 1413 pca9450_i2c_restart_handler, pca9450)) 1422 1414 dev_warn(&i2c->dev, "Failed to register restart handler\n"); 1423 1415 1424 - dev_info(&i2c->dev, "%s probed.\n", 1425 - type == PCA9450_TYPE_PCA9450A ? "pca9450a" : 1426 - (type == PCA9450_TYPE_PCA9451A ? "pca9451a" : "pca9450bc")); 1416 + dev_info(&i2c->dev, "%s probed.\n", type_name); 1427 1417 1428 1418 return 0; 1429 1419 }
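Editor's note: two separate fixes sit in this hunk. The PCA9451A case previously fell through into the PCA9452 case, so the probe banner could not tell the variants apart (hence the per-case `type_name`), and the interrupt trigger changes from falling-edge to level-low, which matches a PMIC that holds its IRQ line asserted until the status registers are read. With a level trigger and no hard-IRQ handler, IRQF_ONESHOT is mandatory; a sketch of the idiom with illustrative names, assuming the standard devm_request_threaded_irq() signature:

    #include <linux/interrupt.h>

        /*
         * NULL primary handler + threaded handler on a level-low line:
         * IRQF_ONESHOT keeps the IRQ masked until the thread returns, so
         * a still-asserted line cannot retrigger in a storm.
         */
        ret = devm_request_threaded_irq(dev, irq, NULL,
                                        example_irq_thread,
                                        IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                        "example-pmic", priv);
        if (ret)
            return dev_err_probe(dev, ret, "Failed to request IRQ\n");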
+1 -1
drivers/remoteproc/imx_rproc.c
··· 617 617 618 618 err = of_reserved_mem_region_to_resource(np, i++, &res); 619 619 if (err) 620 - return 0; 620 + break; 621 621 622 622 /* 623 623 * Ignore the first memory region which will be used vdev buffer.
+39
drivers/remoteproc/mtk_scp.c
··· 1592 1592 }; 1593 1593 MODULE_DEVICE_TABLE(of, mtk_scp_of_match); 1594 1594 1595 + static int __maybe_unused scp_suspend(struct device *dev) 1596 + { 1597 + struct mtk_scp *scp = dev_get_drvdata(dev); 1598 + struct rproc *rproc = scp->rproc; 1599 + 1600 + /* 1601 + * Only unprepare if the SCP is running and holding the clock. 1602 + * 1603 + * Note: `scp_ops` doesn't implement .attach() callback, hence 1604 + * `rproc->state` can never be RPROC_ATTACHED. Otherwise, it 1605 + * should also be checked here. 1606 + */ 1607 + if (rproc->state == RPROC_RUNNING) 1608 + clk_unprepare(scp->clk); 1609 + return 0; 1610 + } 1611 + 1612 + static int __maybe_unused scp_resume(struct device *dev) 1613 + { 1614 + struct mtk_scp *scp = dev_get_drvdata(dev); 1615 + struct rproc *rproc = scp->rproc; 1616 + 1617 + /* 1618 + * Only prepare if the SCP was running and holding the clock. 1619 + * 1620 + * Note: `scp_ops` doesn't implement .attach() callback, hence 1621 + * `rproc->state` can never be RPROC_ATTACHED. Otherwise, it 1622 + * should also be checked here. 1623 + */ 1624 + if (rproc->state == RPROC_RUNNING) 1625 + return clk_prepare(scp->clk); 1626 + return 0; 1627 + } 1628 + 1629 + static const struct dev_pm_ops scp_pm_ops = { 1630 + SET_SYSTEM_SLEEP_PM_OPS(scp_suspend, scp_resume) 1631 + }; 1632 + 1595 1633 static struct platform_driver mtk_scp_driver = { 1596 1634 .probe = scp_probe, 1597 1635 .remove = scp_remove, 1598 1636 .driver = { 1599 1637 .name = "mtk-scp", 1600 1638 .of_match_table = mtk_scp_of_match, 1639 + .pm = &scp_pm_ops, 1601 1640 }, 1602 1641 }; 1603 1642
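Editor's note: the suspend/resume pair only moves the clock's prepare count across system sleep, gated on rproc->state, and the __maybe_unused + SET_SYSTEM_SLEEP_PM_OPS combination compiles to nothing when CONFIG_PM_SLEEP is off. The wiring, reduced to a generic driver sketch (all `example_*` names are illustrative):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/pm.h>

    struct example_priv {
        struct clk *clk;
    };

    static int __maybe_unused example_suspend(struct device *dev)
    {
        struct example_priv *priv = dev_get_drvdata(dev);

        clk_unprepare(priv->clk);   /* release the prepare count for sleep */
        return 0;
    }

    static int __maybe_unused example_resume(struct device *dev)
    {
        struct example_priv *priv = dev_get_drvdata(dev);

        return clk_prepare(priv->clk);  /* may fail, so propagate it */
    }

    static const struct dev_pm_ops example_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
    };
    /* then: .driver.pm = &example_pm_ops in the platform_driver */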
+1 -1
drivers/remoteproc/qcom_sysmon.c
··· 203 203 }; 204 204 205 205 struct ssctl_subsys_event_req { 206 - u8 subsys_name_len; 206 + u32 subsys_name_len; 207 207 char subsys_name[SSCTL_SUBSYS_NAME_LENGTH]; 208 208 u32 event; 209 209 u8 evt_driven_valid;
+1 -1
drivers/remoteproc/qcom_wcnss.c
··· 537 537 538 538 wcnss->mem_phys = wcnss->mem_reloc = res.start; 539 539 wcnss->mem_size = resource_size(&res); 540 - wcnss->mem_region = devm_ioremap_resource_wc(wcnss->dev, &res); 540 + wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size); 541 541 if (IS_ERR(wcnss->mem_region)) { 542 542 dev_err(wcnss->dev, "unable to map memory region: %pR\n", &res); 543 543 return PTR_ERR(wcnss->mem_region);
+2
drivers/resctrl/mpam_devices.c
··· 1428 1428 static int mpam_restore_mbwu_state(void *_ris) 1429 1429 { 1430 1430 int i; 1431 + u64 val; 1431 1432 struct mon_read mwbu_arg; 1432 1433 struct mpam_msc_ris *ris = _ris; 1433 1434 struct mpam_class *class = ris->vmsc->comp->class; ··· 1438 1437 mwbu_arg.ris = ris; 1439 1438 mwbu_arg.ctx = &ris->mbwu_state[i].cfg; 1440 1439 mwbu_arg.type = mpam_msmon_choose_counter(class); 1440 + mwbu_arg.val = &val; 1441 1441 1442 1442 __ris_msmon_read(&mwbu_arg); 1443 1443 }
+15 -7
drivers/resctrl/test_mpam_devices.c
··· 322 322 mutex_unlock(&mpam_list_lock); 323 323 } 324 324 325 + static void __test_mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) 326 + { 327 + /* Avoid warnings when running with CONFIG_DEBUG_PREEMPT */ 328 + guard(preempt)(); 329 + 330 + mpam_reset_msc_bitmap(msc, reg, wd); 331 + } 332 + 325 333 static void test_mpam_reset_msc_bitmap(struct kunit *test) 326 334 { 327 - char __iomem *buf = kunit_kzalloc(test, SZ_16K, GFP_KERNEL); 335 + char __iomem *buf = (__force char __iomem *)kunit_kzalloc(test, SZ_16K, GFP_KERNEL); 328 336 struct mpam_msc fake_msc = {}; 329 337 u32 *test_result; 330 338 ··· 347 339 mutex_init(&fake_msc.part_sel_lock); 348 340 mutex_lock(&fake_msc.part_sel_lock); 349 341 350 - test_result = (u32 *)(buf + MPAMCFG_CPBM); 342 + test_result = (__force u32 *)(buf + MPAMCFG_CPBM); 351 343 352 - mpam_reset_msc_bitmap(&fake_msc, MPAMCFG_CPBM, 0); 344 + __test_mpam_reset_msc_bitmap(&fake_msc, MPAMCFG_CPBM, 0); 353 345 KUNIT_EXPECT_EQ(test, test_result[0], 0); 354 346 KUNIT_EXPECT_EQ(test, test_result[1], 0); 355 347 test_result[0] = 0; 356 348 test_result[1] = 0; 357 349 358 - mpam_reset_msc_bitmap(&fake_msc, MPAMCFG_CPBM, 1); 350 + __test_mpam_reset_msc_bitmap(&fake_msc, MPAMCFG_CPBM, 1); 359 351 KUNIT_EXPECT_EQ(test, test_result[0], 1); 360 352 KUNIT_EXPECT_EQ(test, test_result[1], 0); 361 353 test_result[0] = 0; 362 354 test_result[1] = 0; 363 355 364 - mpam_reset_msc_bitmap(&fake_msc, MPAMCFG_CPBM, 16); 356 + __test_mpam_reset_msc_bitmap(&fake_msc, MPAMCFG_CPBM, 16); 365 357 KUNIT_EXPECT_EQ(test, test_result[0], 0xffff); 366 358 KUNIT_EXPECT_EQ(test, test_result[1], 0); 367 359 test_result[0] = 0; 368 360 test_result[1] = 0; 369 361 370 - mpam_reset_msc_bitmap(&fake_msc, MPAMCFG_CPBM, 32); 362 + __test_mpam_reset_msc_bitmap(&fake_msc, MPAMCFG_CPBM, 32); 371 363 KUNIT_EXPECT_EQ(test, test_result[0], 0xffffffff); 372 364 KUNIT_EXPECT_EQ(test, test_result[1], 0); 373 365 test_result[0] = 0; 374 366 test_result[1] = 0; 375 367 376 - mpam_reset_msc_bitmap(&fake_msc, MPAMCFG_CPBM, 33); 368 + __test_mpam_reset_msc_bitmap(&fake_msc, MPAMCFG_CPBM, 33); 377 369 KUNIT_EXPECT_EQ(test, test_result[0], 0xffffffff); 378 370 KUNIT_EXPECT_EQ(test, test_result[1], 1); 379 371 test_result[0] = 0;
+3
drivers/reset/reset-rzg2l-usbphy-ctrl.c
··· 136 136 { 137 137 u32 val = power_on ? 0 : 1; 138 138 139 + if (!pwrrdy) 140 + return 0; 141 + 139 142 /* The initialization path guarantees that the mask is 1 bit long. */ 140 143 return regmap_field_update_bits(pwrrdy, 1, val); 141 144 }
+16
drivers/s390/block/dasd_eckd.c
··· 6135 6135 static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid, 6136 6136 char *sec_busid) 6137 6137 { 6138 + struct dasd_eckd_private *prim_priv, *sec_priv; 6138 6139 struct dasd_device *primary, *secondary; 6139 6140 struct dasd_copy_relation *copy; 6140 6141 struct dasd_block *block; ··· 6155 6154 secondary = copy_relation_find_device(copy, sec_busid); 6156 6155 if (!secondary) 6157 6156 return DASD_COPYPAIRSWAP_SECONDARY; 6157 + 6158 + prim_priv = primary->private; 6159 + sec_priv = secondary->private; 6158 6160 6159 6161 /* 6160 6162 * usually the device should be quiesced for swap ··· 6185 6181 dev_name(&primary->cdev->dev), 6186 6182 dev_name(&secondary->cdev->dev), rc); 6187 6183 } 6184 + 6185 + if (primary->stopped & DASD_STOPPED_QUIESCE) { 6186 + dasd_device_set_stop_bits(secondary, DASD_STOPPED_QUIESCE); 6187 + dasd_device_remove_stop_bits(primary, DASD_STOPPED_QUIESCE); 6188 + } 6189 + 6190 + /* 6191 + * The secondary device never got through format detection, but since it 6192 + * is a copy of the primary device, the format is exactly the same; 6193 + * therefore, the detected layout can simply be copied. 6194 + */ 6195 + sec_priv->uses_cdl = prim_priv->uses_cdl; 6188 6196 6189 6197 /* re-enable device */ 6190 6198 dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
+7 -5
drivers/s390/crypto/zcrypt_ccamisc.c
··· 1639 1639 1640 1640 memset(ci, 0, sizeof(*ci)); 1641 1641 1642 - /* get first info from zcrypt device driver about this apqn */ 1643 - rc = zcrypt_device_status_ext(cardnr, domain, &devstat); 1644 - if (rc) 1645 - return rc; 1646 - ci->hwtype = devstat.hwtype; 1642 + /* if specific domain given, fetch status and hw info for this apqn */ 1643 + if (domain != AUTOSEL_DOM) { 1644 + rc = zcrypt_device_status_ext(cardnr, domain, &devstat); 1645 + if (rc) 1646 + return rc; 1647 + ci->hwtype = devstat.hwtype; 1648 + } 1647 1649 1648 1650 /* 1649 1651 * Prep memory for rule array and var array use.
+1 -2
drivers/s390/crypto/zcrypt_cex4.c
··· 85 85 86 86 memset(&ci, 0, sizeof(ci)); 87 87 88 - if (ap_domain_index >= 0) 89 - cca_get_info(ac->id, ap_domain_index, &ci, 0); 88 + cca_get_info(ac->id, AUTOSEL_DOM, &ci, 0); 90 89 91 90 return sysfs_emit(buf, "%s\n", ci.serial); 92 91 }
+1 -1
drivers/scsi/hisi_sas/hisi_sas_main.c
··· 2578 2578 shost->transportt = hisi_sas_stt; 2579 2579 shost->max_id = HISI_SAS_MAX_DEVICES; 2580 2580 shost->max_lun = ~0; 2581 - shost->max_channel = 1; 2581 + shost->max_channel = 0; 2582 2582 shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN; 2583 2583 if (hisi_hba->hw->slot_index_alloc) { 2584 2584 shost->can_queue = HISI_SAS_MAX_COMMANDS;
+1 -1
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
··· 4993 4993 shost->transportt = hisi_sas_stt; 4994 4994 shost->max_id = HISI_SAS_MAX_DEVICES; 4995 4995 shost->max_lun = ~0; 4996 - shost->max_channel = 1; 4996 + shost->max_channel = 0; 4997 4997 shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN; 4998 4998 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; 4999 4999 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
-2
drivers/scsi/qla2xxx/qla_iocb.c
··· 2751 2751 if (!elsio->u.els_logo.els_logo_pyld) { 2752 2752 /* ref: INIT */ 2753 2753 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2754 - qla2x00_free_fcport(fcport); 2755 2754 return QLA_FUNCTION_FAILED; 2756 2755 } 2757 2756 ··· 2775 2776 if (rval != QLA_SUCCESS) { 2776 2777 /* ref: INIT */ 2777 2778 kref_put(&sp->cmd_kref, qla2x00_sp_release); 2778 - qla2x00_free_fcport(fcport); 2779 2779 return QLA_FUNCTION_FAILED; 2780 2780 } 2781 2781
+2 -6
drivers/scsi/scsi_scan.c
··· 360 360 * default device queue depth to figure out sbitmap shift 361 361 * since we use this queue depth most of times. 362 362 */ 363 - if (scsi_realloc_sdev_budget_map(sdev, depth)) { 364 - kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags); 365 - put_device(&starget->dev); 366 - kfree(sdev); 367 - goto out; 368 - } 363 + if (scsi_realloc_sdev_budget_map(sdev, depth)) 364 + goto out_device_destroy; 369 365 370 366 scsi_change_queue_depth(sdev, depth); 371 367
+2 -4
drivers/slimbus/qcom-ngd-ctrl.c
··· 1535 1535 ngd->id = id; 1536 1536 ngd->pdev->dev.parent = parent; 1537 1537 1538 - ret = driver_set_override(&ngd->pdev->dev, 1539 - &ngd->pdev->driver_override, 1540 - QCOM_SLIM_NGD_DRV_NAME, 1541 - strlen(QCOM_SLIM_NGD_DRV_NAME)); 1538 + ret = device_set_driver_override(&ngd->pdev->dev, 1539 + QCOM_SLIM_NGD_DRV_NAME); 1542 1540 if (ret) { 1543 1541 platform_device_put(ngd->pdev); 1544 1542 kfree(ngd);
+22 -2
drivers/soc/fsl/qbman/qman.c
··· 1827 1827 1828 1828 void qman_destroy_fq(struct qman_fq *fq) 1829 1829 { 1830 + int leaked; 1831 + 1830 1832 /* 1831 1833 * We don't need to lock the FQ as it is a pre-condition that the FQ be 1832 1834 * quiesced. Instead, run some checks. ··· 1836 1834 switch (fq->state) { 1837 1835 case qman_fq_state_parked: 1838 1836 case qman_fq_state_oos: 1839 - if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) 1840 - qman_release_fqid(fq->fqid); 1837 + /* 1838 + * There's a race condition here between shutting down the FQ, 1839 + * setting the fq_table entry to NULL, and freeing the fqid. 1840 + * To prevent it, this order must be respected: 1841 + */ 1842 + if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) { 1843 + leaked = qman_shutdown_fq(fq->fqid); 1844 + if (leaked) 1845 + pr_debug("FQID %d leaked\n", fq->fqid); 1846 + } 1841 1847 1842 1848 DPAA_ASSERT(fq_table[fq->idx]); 1843 1849 fq_table[fq->idx] = NULL; 1850 + 1851 + if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID) && !leaked) { 1852 + /* 1853 + * fq_table[fq->idx] should be set to NULL before 1854 + * freeing fq->fqid, otherwise it could be allocated by 1855 + * qman_alloc_fqid() while still being !NULL 1856 + */ 1857 + smp_wmb(); 1858 + gen_pool_free(qm_fqalloc, fq->fqid | DPAA_GENALLOC_OFF, 1); 1859 + } 1844 1860 return; 1845 1861 default: 1846 1862 break;
+2 -2
drivers/soc/fsl/qe/qmc.c
··· 1790 1790 return -EINVAL; 1791 1791 qmc->dpram_offset = res->start - qe_muram_dma(qe_muram_addr(0)); 1792 1792 qmc->dpram = devm_ioremap_resource(qmc->dev, res); 1793 - if (IS_ERR(qmc->scc_pram)) 1794 - return PTR_ERR(qmc->scc_pram); 1793 + if (IS_ERR(qmc->dpram)) 1794 + return PTR_ERR(qmc->dpram); 1795 1795 1796 1796 return 0; 1797 1797 }
+9 -4
drivers/soc/microchip/mpfs-sys-controller.c
··· 142 142 143 143 sys_controller->flash = of_get_mtd_device_by_node(np); 144 144 of_node_put(np); 145 - if (IS_ERR(sys_controller->flash)) 146 - return dev_err_probe(dev, PTR_ERR(sys_controller->flash), "Failed to get flash\n"); 145 + if (IS_ERR(sys_controller->flash)) { 146 + ret = dev_err_probe(dev, PTR_ERR(sys_controller->flash), "Failed to get flash\n"); 147 + goto out_free; 148 + } 147 149 148 150 no_flash: 149 151 sys_controller->client.dev = dev; ··· 157 155 if (IS_ERR(sys_controller->chan)) { 158 156 ret = dev_err_probe(dev, PTR_ERR(sys_controller->chan), 159 157 "Failed to get mbox channel\n"); 160 - kfree(sys_controller); 161 - return ret; 158 + goto out_free; 162 159 } 163 160 164 161 init_completion(&sys_controller->c); ··· 175 174 dev_info(&pdev->dev, "Registered MPFS system controller\n"); 176 175 177 176 return 0; 177 + 178 + out_free: 179 + kfree(sys_controller); 180 + return ret; 178 181 } 179 182 180 183 static void mpfs_sys_controller_remove(struct platform_device *pdev)
+1
drivers/soc/rockchip/grf.c
··· 231 231 grf = syscon_node_to_regmap(np); 232 232 if (IS_ERR(grf)) { 233 233 pr_err("%s: could not get grf syscon\n", __func__); 234 + of_node_put(np); 234 235 return PTR_ERR(grf); 235 236 } 236 237
+9 -42
drivers/spi/spi-amlogic-spifc-a4.c
··· 411 411 ret = dma_mapping_error(sfc->dev, sfc->daddr); 412 412 if (ret) { 413 413 dev_err(sfc->dev, "DMA mapping error\n"); 414 - goto out_map_data; 414 + return ret; 415 415 } 416 416 417 417 cmd = CMD_DATA_ADDRL(sfc->daddr); ··· 429 429 ret = dma_mapping_error(sfc->dev, sfc->iaddr); 430 430 if (ret) { 431 431 dev_err(sfc->dev, "DMA mapping error\n"); 432 - dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir); 433 432 goto out_map_data; 434 433 } 435 434 ··· 447 448 return 0; 448 449 449 450 out_map_info: 450 - dma_unmap_single(sfc->dev, sfc->iaddr, datalen, dir); 451 + dma_unmap_single(sfc->dev, sfc->iaddr, infolen, dir); 451 452 out_map_data: 452 453 dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir); 453 454 ··· 1083 1084 return clk_set_rate(sfc->core_clk, SFC_BUS_DEFAULT_CLK); 1084 1085 } 1085 1086 1086 - static int aml_sfc_disable_clk(struct aml_sfc *sfc) 1087 - { 1088 - clk_disable_unprepare(sfc->core_clk); 1089 - clk_disable_unprepare(sfc->gate_clk); 1090 - 1091 - return 0; 1092 - } 1093 - 1094 1087 static int aml_sfc_probe(struct platform_device *pdev) 1095 1088 { 1096 1089 struct device_node *np = pdev->dev.of_node; ··· 1133 1142 1134 1143 /* Enable Amlogic flash controller spi mode */ 1135 1144 ret = regmap_write(sfc->regmap_base, SFC_SPI_CFG, SPI_MODE_EN); 1136 - if (ret) { 1137 - dev_err(dev, "failed to enable SPI mode\n"); 1138 - goto err_out; 1139 - } 1145 + if (ret) 1146 + return dev_err_probe(dev, ret, "failed to enable SPI mode\n"); 1140 1147 1141 1148 ret = dma_set_mask(sfc->dev, DMA_BIT_MASK(32)); 1142 - if (ret) { 1143 - dev_err(sfc->dev, "failed to set dma mask\n"); 1144 - goto err_out; 1145 - } 1149 + if (ret) 1150 + return dev_err_probe(sfc->dev, ret, "failed to set dma mask\n"); 1146 1151 1147 1152 sfc->ecc_eng.dev = &pdev->dev; 1148 1153 sfc->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED; ··· 1146 1159 sfc->ecc_eng.priv = sfc; 1147 1160 1148 1161 ret = nand_ecc_register_on_host_hw_engine(&sfc->ecc_eng); 1149 - if (ret) { 1150 - dev_err(&pdev->dev, "failed to register Aml host ecc engine.\n"); 1151 - goto err_out; 1152 - } 1162 + if (ret) 1163 + return dev_err_probe(&pdev->dev, ret, "failed to register Aml host ecc engine.\n"); 1153 1164 1154 1165 ret = of_property_read_u32(np, "amlogic,rx-adj", &val); 1155 1166 if (!ret) ··· 1163 1178 ctrl->min_speed_hz = SFC_MIN_FREQUENCY; 1164 1179 ctrl->num_chipselect = SFC_MAX_CS_NUM; 1165 1180 1166 - ret = devm_spi_register_controller(dev, ctrl); 1167 - if (ret) 1168 - goto err_out; 1169 - 1170 - return 0; 1171 - 1172 - err_out: 1173 - aml_sfc_disable_clk(sfc); 1174 - 1175 - return ret; 1176 - } 1177 - 1178 - static void aml_sfc_remove(struct platform_device *pdev) 1179 - { 1180 - struct spi_controller *ctlr = platform_get_drvdata(pdev); 1181 - struct aml_sfc *sfc = spi_controller_get_devdata(ctlr); 1182 - 1183 - aml_sfc_disable_clk(sfc); 1181 + return devm_spi_register_controller(dev, ctrl); 1184 1182 } 1185 1183 1186 1184 static const struct of_device_id aml_sfc_of_match[] = { ··· 1181 1213 .of_match_table = aml_sfc_of_match, 1182 1214 }, 1183 1215 .probe = aml_sfc_probe, 1184 - .remove = aml_sfc_remove, 1185 1216 }; 1186 1217 module_platform_driver(aml_sfc_driver); 1187 1218
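Editor's note: most of this hunk is a mechanical conversion to dev_err_probe(), which logs and returns the error in one statement (and stays quiet for -EPROBE_DEFER, recording the deferral reason instead), plus dropping the now-unneeded clock teardown. The conversion pattern, sketched:

        /* before:
         *      if (ret) {
         *              dev_err(dev, "failed to set dma mask\n");
         *              goto err_out;
         *      }
         * after: one statement, defer-aware logging included
         */
        if (ret)
            return dev_err_probe(dev, ret, "failed to set dma mask\n");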
+4 -8
drivers/spi/spi-amlogic-spisg.c
··· 729 729 }; 730 730 731 731 if (of_property_read_bool(dev->of_node, "spi-slave")) 732 - ctlr = spi_alloc_target(dev, sizeof(*spisg)); 732 + ctlr = devm_spi_alloc_target(dev, sizeof(*spisg)); 733 733 else 734 - ctlr = spi_alloc_host(dev, sizeof(*spisg)); 734 + ctlr = devm_spi_alloc_host(dev, sizeof(*spisg)); 735 735 if (!ctlr) 736 736 return -ENOMEM; 737 737 ··· 750 750 return dev_err_probe(dev, PTR_ERR(spisg->map), "regmap init failed\n"); 751 751 752 752 irq = platform_get_irq(pdev, 0); 753 - if (irq < 0) { 754 - ret = irq; 755 - goto out_controller; 756 - } 753 + if (irq < 0) 754 + return irq; 757 755 758 756 ret = device_reset_optional(dev); 759 757 if (ret) ··· 815 817 if (spisg->core) 816 818 clk_disable_unprepare(spisg->core); 817 819 clk_disable_unprepare(spisg->pclk); 818 - out_controller: 819 - spi_controller_put(ctlr); 820 820 821 821 return ret; 822 822 }
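Editor's note: switching to the devm_ allocators is what lets the error paths collapse: the controller's initial reference now belongs to the probing device, so a plain `return` replaces the old `out_controller: spi_controller_put(ctlr)` unwinding. Sketch of the resulting probe skeleton (names illustrative):

        struct spi_controller *ctlr;

        /* released automatically if probe fails or the device unbinds */
        ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(struct example_priv));
        if (!ctlr)
            return -ENOMEM;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
            return irq;     /* no spi_controller_put() needed */

        return devm_spi_register_controller(&pdev->dev, ctlr);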
+16 -22
drivers/spi/spi-atcspi200.c
··· 195 195 if (op->addr.buswidth > 1) 196 196 tc |= TRANS_ADDR_FMT; 197 197 if (op->data.nbytes) { 198 - tc |= TRANS_DUAL_QUAD(ffs(op->data.buswidth) - 1); 198 + unsigned int width_code; 199 + 200 + width_code = ffs(op->data.buswidth) - 1; 201 + if (unlikely(width_code > 3)) { 202 + WARN_ON_ONCE(1); 203 + width_code = 0; 204 + } 205 + tc |= TRANS_DUAL_QUAD(width_code); 206 + 199 207 if (op->data.dir == SPI_MEM_DATA_IN) { 200 208 if (op->dummy.nbytes) 201 209 tc |= TRANS_MODE_DMY_READ | ··· 505 497 506 498 static int atcspi_configure_dma(struct atcspi_dev *spi) 507 499 { 508 - struct dma_chan *dma_chan; 509 - int ret = 0; 500 + spi->host->dma_rx = devm_dma_request_chan(spi->dev, "rx"); 501 + if (IS_ERR(spi->host->dma_rx)) 502 + return PTR_ERR(spi->host->dma_rx); 510 503 511 - dma_chan = devm_dma_request_chan(spi->dev, "rx"); 512 - if (IS_ERR(dma_chan)) { 513 - ret = PTR_ERR(dma_chan); 514 - goto err_exit; 515 - } 516 - spi->host->dma_rx = dma_chan; 504 + spi->host->dma_tx = devm_dma_request_chan(spi->dev, "tx"); 505 + if (IS_ERR(spi->host->dma_tx)) 506 + return PTR_ERR(spi->host->dma_tx); 517 507 518 - dma_chan = devm_dma_request_chan(spi->dev, "tx"); 519 - if (IS_ERR(dma_chan)) { 520 - ret = PTR_ERR(dma_chan); 521 - goto free_rx; 522 - } 523 - spi->host->dma_tx = dma_chan; 524 508 init_completion(&spi->dma_completion); 525 509 526 - return ret; 527 - 528 - free_rx: 529 - dma_release_channel(spi->host->dma_rx); 530 - spi->host->dma_rx = NULL; 531 - err_exit: 532 - return ret; 510 + return 0; 533 511 } 534 512 535 513 static int atcspi_enable_clk(struct atcspi_dev *spi)
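Editor's note: TRANS_DUAL_QUAD() takes the bus width as a log2 code, and `ffs(buswidth) - 1` computes it because SPI bus widths are powers of two; the added clamp keeps a corrupted width from spilling into neighbouring register bits. The mapping is easy to sanity-check in ordinary userspace C:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
        /* 1/2/4/8-bit bus -> field code 0/1/2/3 */
        for (int w = 1; w <= 8; w <<= 1)
            printf("buswidth %d -> code %d\n", w, ffs(w) - 1);
        return 0;
    }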
+11 -20
drivers/spi/spi-axiado.c
··· 765 765 platform_set_drvdata(pdev, ctlr); 766 766 767 767 xspi->regs = devm_platform_ioremap_resource(pdev, 0); 768 - if (IS_ERR(xspi->regs)) { 769 - ret = PTR_ERR(xspi->regs); 770 - goto remove_ctlr; 771 - } 768 + if (IS_ERR(xspi->regs)) 769 + return PTR_ERR(xspi->regs); 772 770 773 771 xspi->pclk = devm_clk_get(&pdev->dev, "pclk"); 774 - if (IS_ERR(xspi->pclk)) { 775 - dev_err(&pdev->dev, "pclk clock not found.\n"); 776 - ret = PTR_ERR(xspi->pclk); 777 - goto remove_ctlr; 778 - } 772 + if (IS_ERR(xspi->pclk)) 773 + return dev_err_probe(&pdev->dev, PTR_ERR(xspi->pclk), 774 + "pclk clock not found.\n"); 779 775 780 776 xspi->ref_clk = devm_clk_get(&pdev->dev, "ref"); 781 - if (IS_ERR(xspi->ref_clk)) { 782 - dev_err(&pdev->dev, "ref clock not found.\n"); 783 - ret = PTR_ERR(xspi->ref_clk); 784 - goto remove_ctlr; 785 - } 777 + if (IS_ERR(xspi->ref_clk)) 778 + return dev_err_probe(&pdev->dev, PTR_ERR(xspi->ref_clk), 779 + "ref clock not found.\n"); 786 780 787 781 ret = clk_prepare_enable(xspi->pclk); 788 - if (ret) { 789 - dev_err(&pdev->dev, "Unable to enable APB clock.\n"); 790 - goto remove_ctlr; 791 - } 782 + if (ret) 783 + return dev_err_probe(&pdev->dev, ret, "Unable to enable APB clock.\n"); 792 784 793 785 ret = clk_prepare_enable(xspi->ref_clk); 794 786 if (ret) { ··· 861 869 clk_disable_unprepare(xspi->ref_clk); 862 870 clk_dis_apb: 863 871 clk_disable_unprepare(xspi->pclk); 864 - remove_ctlr: 865 - spi_controller_put(ctlr); 872 + 866 873 return ret; 867 874 } 868 875
+6
drivers/spi/spi-cadence-quadspi.c
··· 76 76 u8 cs; 77 77 }; 78 78 79 + static const struct clk_bulk_data cqspi_clks[CLK_QSPI_NUM] = { 80 + [CLK_QSPI_APB] = { .id = "apb" }, 81 + [CLK_QSPI_AHB] = { .id = "ahb" }, 82 + }; 83 + 79 84 struct cqspi_st { 80 85 struct platform_device *pdev; 81 86 struct spi_controller *host; ··· 1828 1823 } 1829 1824 1830 1825 /* Obtain QSPI clocks. */ 1826 + memcpy(&cqspi->clks, &cqspi_clks, sizeof(cqspi->clks)); 1831 1827 ret = devm_clk_bulk_get_optional(dev, CLK_QSPI_NUM, cqspi->clks); 1832 1828 if (ret) 1833 1829 return dev_err_probe(dev, ret, "Failed to get clocks\n");
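Editor's note: a clk_bulk_data array must carry its .id strings before the getter is called; keeping them in a `static const` template and copying it into the per-device array at probe time guarantees the ids are initialized on every probe. The consumer-side pattern, sketched with illustrative names and assuming the standard <linux/clk.h> bulk API:

    #include <linux/clk.h>
    #include <linux/string.h>

    static const struct clk_bulk_data example_clks[] = {
        { .id = "apb" },
        { .id = "ahb" },
    };

    struct example_priv {
        struct clk_bulk_data clks[2];   /* sized to match example_clks */
    };

    static int example_get_clks(struct device *dev, struct example_priv *priv)
    {
        /* seed the ids; the getter fills in the .clk pointers */
        memcpy(priv->clks, example_clks, sizeof(example_clks));
        return devm_clk_bulk_get_optional(dev, ARRAY_SIZE(example_clks),
                                          priv->clks);
    }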
+7 -6
drivers/spi/spi-geni-qcom.c
··· 359 359 writel((spi_slv->mode & SPI_LOOP) ? LOOPBACK_ENABLE : 0, se->base + SE_SPI_LOOPBACK); 360 360 if (cs_changed) 361 361 writel(chipselect, se->base + SE_SPI_DEMUX_SEL); 362 - if (mode_changed & SE_SPI_CPHA) 362 + if (mode_changed & SPI_CPHA) 363 363 writel((spi_slv->mode & SPI_CPHA) ? CPHA : 0, se->base + SE_SPI_CPHA); 364 - if (mode_changed & SE_SPI_CPOL) 364 + if (mode_changed & SPI_CPOL) 365 365 writel((spi_slv->mode & SPI_CPOL) ? CPOL : 0, se->base + SE_SPI_CPOL); 366 366 if ((mode_changed & SPI_CS_HIGH) || (cs_changed && (spi_slv->mode & SPI_CS_HIGH))) 367 367 writel((spi_slv->mode & SPI_CS_HIGH) ? BIT(chipselect) : 0, se->base + SE_SPI_DEMUX_OUTPUT_INV); ··· 906 906 struct spi_controller *spi = data; 907 907 struct spi_geni_master *mas = spi_controller_get_devdata(spi); 908 908 struct geni_se *se = &mas->se; 909 - u32 m_irq; 909 + u32 m_irq, dma_tx_status, dma_rx_status; 910 910 911 911 m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS); 912 - if (!m_irq) 912 + dma_tx_status = readl_relaxed(se->base + SE_DMA_TX_IRQ_STAT); 913 + dma_rx_status = readl_relaxed(se->base + SE_DMA_RX_IRQ_STAT); 914 + 915 + if (!m_irq && !dma_tx_status && !dma_rx_status) 913 916 return IRQ_NONE; 914 917 915 918 if (m_irq & (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN | ··· 960 957 } 961 958 } else if (mas->cur_xfer_mode == GENI_SE_DMA) { 962 959 const struct spi_transfer *xfer = mas->cur_xfer; 963 - u32 dma_tx_status = readl_relaxed(se->base + SE_DMA_TX_IRQ_STAT); 964 - u32 dma_rx_status = readl_relaxed(se->base + SE_DMA_RX_IRQ_STAT); 965 960 966 961 if (dma_tx_status) 967 962 writel(dma_tx_status, se->base + SE_DMA_TX_IRQ_CLR);
+1
drivers/spi/spi-intel-pci.c
··· 96 96 { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info }, 97 97 { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info }, 98 98 { PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info }, 99 + { PCI_VDEVICE(INTEL, 0xd323), (unsigned long)&cnl_info }, 99 100 { PCI_VDEVICE(INTEL, 0xe323), (unsigned long)&cnl_info }, 100 101 { PCI_VDEVICE(INTEL, 0xe423), (unsigned long)&cnl_info }, 101 102 { },
+1 -1
drivers/spi/spi-rockchip-sfc.c
··· 711 711 } 712 712 } 713 713 714 - ret = devm_spi_register_controller(dev, host); 714 + ret = spi_register_controller(host); 715 715 if (ret) 716 716 goto err_register; 717 717
+12 -13
drivers/spi/spi.c
··· 3049 3049 struct spi_controller *ctlr; 3050 3050 3051 3051 ctlr = container_of(dev, struct spi_controller, dev); 3052 + 3053 + free_percpu(ctlr->pcpu_statistics); 3052 3054 kfree(ctlr); 3053 3055 } 3054 3056 ··· 3193 3191 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); 3194 3192 if (!ctlr) 3195 3193 return NULL; 3194 + 3195 + ctlr->pcpu_statistics = spi_alloc_pcpu_stats(NULL); 3196 + if (!ctlr->pcpu_statistics) { 3197 + kfree(ctlr); 3198 + return NULL; 3199 + } 3196 3200 3197 3201 device_initialize(&ctlr->dev); 3198 3202 INIT_LIST_HEAD(&ctlr->queue); ··· 3488 3480 dev_info(dev, "controller is unqueued, this is deprecated\n"); 3489 3481 } else if (ctlr->transfer_one || ctlr->transfer_one_message) { 3490 3482 status = spi_controller_initialize_queue(ctlr); 3491 - if (status) { 3492 - device_del(&ctlr->dev); 3493 - goto free_bus_id; 3494 - } 3495 - } 3496 - /* Add statistics */ 3497 - ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev); 3498 - if (!ctlr->pcpu_statistics) { 3499 - dev_err(dev, "Error allocating per-cpu statistics\n"); 3500 - status = -ENOMEM; 3501 - goto destroy_queue; 3483 + if (status) 3484 + goto del_ctrl; 3502 3485 } 3503 3486 3504 3487 mutex_lock(&board_lock); ··· 3503 3504 acpi_register_spi_devices(ctlr); 3504 3505 return status; 3505 3506 3506 - destroy_queue: 3507 - spi_destroy_queue(ctlr); 3507 + del_ctrl: 3508 + device_del(&ctlr->dev); 3508 3509 free_bus_id: 3509 3510 mutex_lock(&board_lock); 3510 3511 idr_remove(&spi_controller_idr, ctlr->bus_num);
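Editor's note: the per-cpu statistics move from "allocated during registration" to "owned by the controller object": __spi_alloc_controller() now creates them and the struct device release callback frees them, so a failed registration or an early put can no longer leak or double-manage the memory. The general ownership rule, sketched:

    #include <linux/device.h>
    #include <linux/percpu.h>
    #include <linux/slab.h>

    struct example_obj {
        struct device dev;
        u64 __percpu *stats;
    };

    static void example_release(struct device *dev)
    {
        struct example_obj *obj = container_of(dev, struct example_obj, dev);

        free_percpu(obj->stats);    /* mirrors the allocation below */
        kfree(obj);
    }

    static struct example_obj *example_alloc(void)
    {
        struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
            return NULL;

        obj->stats = alloc_percpu(u64);
        if (!obj->stats) {
            kfree(obj);             /* release() not wired up yet */
            return NULL;
        }

        obj->dev.release = example_release;
        device_initialize(&obj->dev);
        return obj;
    }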
+10 -5
drivers/staging/rtl8723bs/core/rtw_ieee80211.c
··· 186 186 187 187 cnt = 0; 188 188 189 - while (cnt < in_len) { 189 + while (cnt + 2 <= in_len) { 190 + u8 ie_len = in_ie[cnt + 1]; 191 + 192 + if (cnt + 2 + ie_len > in_len) 193 + break; 194 + 190 195 if (eid == in_ie[cnt] 191 - && (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) { 196 + && (!oui || (ie_len >= oui_len && !memcmp(&in_ie[cnt + 2], oui, oui_len)))) { 192 197 target_ie = &in_ie[cnt]; 193 198 194 199 if (ie) 195 - memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2); 200 + memcpy(ie, &in_ie[cnt], ie_len + 2); 196 201 197 202 if (ielen) 198 - *ielen = in_ie[cnt+1]+2; 203 + *ielen = ie_len + 2; 199 204 200 205 break; 201 206 } 202 - cnt += in_ie[cnt+1]+2; /* goto next */ 207 + cnt += ie_len + 2; /* goto next */ 203 208 } 204 209 205 210 return target_ie;
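Editor's note: the rewritten loop is the canonical defensive TLV (information-element) walk: verify that the two-byte id/length header fits before reading it, then that the advertised body fits before matching or copying. The same logic as standalone, compilable C (element layout assumed: 1-byte id, 1-byte length, payload):

    #include <stddef.h>
    #include <stdint.h>

    /* Return offset of element 'eid', or -1. Never reads past buf[len-1]. */
    static long find_ie(const uint8_t *buf, size_t len, uint8_t eid)
    {
        size_t off = 0;

        while (off + 2 <= len) {                /* header must fit */
            size_t ie_len = buf[off + 1];

            if (off + 2 + ie_len > len)         /* body must fit */
                return -1;                      /* truncated element */
            if (buf[off] == eid)
                return (long)off;
            off += 2 + ie_len;                  /* next element */
        }
        return -1;
    }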
+4 -1
drivers/staging/rtl8723bs/core/rtw_mlme.c
··· 1988 1988 while (i < in_len) { 1989 1989 ielength = initial_out_len; 1990 1990 1991 - if (in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 && in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 && in_ie[i + 5] == 0x02 && i + 5 < in_len) { /* WMM element ID and OUI */ 1991 + if (i + 5 < in_len && 1992 + in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 && 1993 + in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 && 1994 + in_ie[i + 5] == 0x02) { 1992 1995 for (j = i; j < i + 9; j++) { 1993 1996 out_ie[ielength] = in_ie[j]; 1994 1997 ielength++;
+1
drivers/staging/sm750fb/sm750.c
··· 1123 1123 1124 1124 iounmap(sm750_dev->pvReg); 1125 1125 iounmap(sm750_dev->pvMem); 1126 + pci_release_region(pdev, 1); 1126 1127 kfree(g_settings); 1127 1128 } 1128 1129
+11 -11
drivers/staging/sm750fb/sm750_hw.c
··· 36 36 37 37 pr_info("mmio phyAddr = %lx\n", sm750_dev->vidreg_start); 38 38 39 - /* 40 - * reserve the vidreg space of smi adaptor 41 - * if you do this, you need to add release region code 42 - * in lynxfb_remove, or memory will not be mapped again 43 - * successfully 44 - */ 39 + /* reserve the vidreg space of smi adaptor */ 45 40 ret = pci_request_region(pdev, 1, "sm750fb"); 46 41 if (ret) { 47 42 pr_err("Can not request PCI regions.\n"); 48 - goto exit; 43 + return ret; 49 44 } 50 45 51 46 /* now map mmio and vidmem */ ··· 49 54 if (!sm750_dev->pvReg) { 50 55 pr_err("mmio failed\n"); 51 56 ret = -EFAULT; 52 - goto exit; 57 + goto err_release_region; 53 58 } 54 59 pr_info("mmio virtual addr = %p\n", sm750_dev->pvReg); 55 60 ··· 74 79 sm750_dev->pvMem = 75 80 ioremap_wc(sm750_dev->vidmem_start, sm750_dev->vidmem_size); 76 81 if (!sm750_dev->pvMem) { 77 - iounmap(sm750_dev->pvReg); 78 82 pr_err("Map video memory failed\n"); 79 83 ret = -EFAULT; 80 - goto exit; 84 + goto err_unmap_reg; 81 85 } 82 86 pr_info("video memory vaddr = %p\n", sm750_dev->pvMem); 83 - exit: 87 + 88 + return 0; 89 + 90 + err_unmap_reg: 91 + iounmap(sm750_dev->pvReg); 92 + err_release_region: 93 + pci_release_region(pdev, 1); 84 94 return ret; 85 95 } 86 96
-27
drivers/tee/tee_shm.c
··· 23 23 struct page *page; 24 24 }; 25 25 26 - static void shm_put_kernel_pages(struct page **pages, size_t page_count) 27 - { 28 - size_t n; 29 - 30 - for (n = 0; n < page_count; n++) 31 - put_page(pages[n]); 32 - } 33 - 34 - static void shm_get_kernel_pages(struct page **pages, size_t page_count) 35 - { 36 - size_t n; 37 - 38 - for (n = 0; n < page_count; n++) 39 - get_page(pages[n]); 40 - } 41 - 42 26 static void release_registered_pages(struct tee_shm *shm) 43 27 { 44 28 if (shm->pages) { 45 29 if (shm->flags & TEE_SHM_USER_MAPPED) 46 30 unpin_user_pages(shm->pages, shm->num_pages); 47 - else 48 - shm_put_kernel_pages(shm->pages, shm->num_pages); 49 31 50 32 kfree(shm->pages); 51 33 } ··· 459 477 goto err_put_shm_pages; 460 478 } 461 479 462 - /* 463 - * iov_iter_extract_kvec_pages does not get reference on the pages, 464 - * get a reference on them. 465 - */ 466 - if (iov_iter_is_kvec(iter)) 467 - shm_get_kernel_pages(shm->pages, num_pages); 468 - 469 480 shm->offset = off; 470 481 shm->size = len; 471 482 shm->num_pages = num_pages; ··· 474 499 err_put_shm_pages: 475 500 if (!iov_iter_is_kvec(iter)) 476 501 unpin_user_pages(shm->pages, shm->num_pages); 477 - else 478 - shm_put_kernel_pages(shm->pages, shm->num_pages); 479 502 err_free_shm_pages: 480 503 kfree(shm->pages); 481 504 err_free_shm:
+25
drivers/tty/serial/8250/8250.h
··· 175 175 return value; 176 176 } 177 177 178 + void serial8250_clear_fifos(struct uart_8250_port *p); 178 179 void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p); 180 + void serial8250_fifo_wait_for_lsr_thre(struct uart_8250_port *up, unsigned int count); 179 181 180 182 void serial8250_rpm_get(struct uart_8250_port *p); 181 183 void serial8250_rpm_put(struct uart_8250_port *p); ··· 402 400 403 401 return dma && dma->tx_running; 404 402 } 403 + 404 + static inline void serial8250_tx_dma_pause(struct uart_8250_port *p) 405 + { 406 + struct uart_8250_dma *dma = p->dma; 407 + 408 + if (!dma->tx_running) 409 + return; 410 + 411 + dmaengine_pause(dma->txchan); 412 + } 413 + 414 + static inline void serial8250_tx_dma_resume(struct uart_8250_port *p) 415 + { 416 + struct uart_8250_dma *dma = p->dma; 417 + 418 + if (!dma->tx_running) 419 + return; 420 + 421 + dmaengine_resume(dma->txchan); 422 + } 405 423 #else 406 424 static inline int serial8250_tx_dma(struct uart_8250_port *p) 407 425 { ··· 443 421 { 444 422 return false; 445 423 } 424 + 425 + static inline void serial8250_tx_dma_pause(struct uart_8250_port *p) { } 426 + static inline void serial8250_tx_dma_resume(struct uart_8250_port *p) { } 446 427 #endif 447 428 448 429 static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
+15
drivers/tty/serial/8250/8250_dma.c
··· 162 162 */ 163 163 dma->tx_size = 0; 164 164 165 + /* 166 + * We can't use `dmaengine_terminate_sync` because `uart_flush_buffer` is 167 + * holding the uart port spinlock. 168 + */ 165 169 dmaengine_terminate_async(dma->txchan); 170 + 171 + /* 172 + * The callback might or might not run. If it doesn't run, we need to ensure 173 + * that `tx_running` is cleared so that we can schedule new transactions. 174 + * If it does run, then the zombie callback will clear `tx_running` again 175 + * and perform a no-op since `tx_size` was cleared above. 176 + * 177 + * In either case, we ASSUME the DMA transaction will terminate before we 178 + * issue a new `serial8250_tx_dma`. 179 + */ 180 + dma->tx_running = 0; 166 181 } 167 182 168 183 int serial8250_rx_dma(struct uart_8250_port *p)
+239 -65
drivers/tty/serial/8250/8250_dw.c
··· 9 9 * LCR is written whilst busy. If it is, then a busy detect interrupt is 10 10 * raised, the LCR needs to be rewritten and the uart status register read. 11 11 */ 12 + #include <linux/bitfield.h> 13 + #include <linux/bits.h> 14 + #include <linux/cleanup.h> 12 15 #include <linux/clk.h> 13 16 #include <linux/delay.h> 14 17 #include <linux/device.h> 15 18 #include <linux/io.h> 19 + #include <linux/lockdep.h> 16 20 #include <linux/mod_devicetable.h> 17 21 #include <linux/module.h> 18 22 #include <linux/notifier.h> ··· 44 40 #define RZN1_UART_RDMACR 0x110 /* DMA Control Register Receive Mode */ 45 41 46 42 /* DesignWare specific register fields */ 43 + #define DW_UART_IIR_IID GENMASK(3, 0) 44 + 47 45 #define DW_UART_MCR_SIRE BIT(6) 46 + 47 + #define DW_UART_USR_BUSY BIT(0) 48 48 49 49 /* Renesas specific register fields */ 50 50 #define RZN1_UART_xDMACR_DMA_EN BIT(0) ··· 64 56 #define DW_UART_QUIRK_IS_DMA_FC BIT(3) 65 57 #define DW_UART_QUIRK_APMC0D08 BIT(4) 66 58 #define DW_UART_QUIRK_CPR_VALUE BIT(5) 59 + #define DW_UART_QUIRK_IER_KICK BIT(6) 60 + 61 + /* 62 + * Number of consecutive IIR_NO_INT interrupts required to trigger interrupt 63 + * storm prevention code. 64 + */ 65 + #define DW_UART_QUIRK_IER_KICK_THRES 4 67 66 68 67 struct dw8250_platform_data { 69 68 u8 usr_reg; ··· 92 77 93 78 unsigned int skip_autocfg:1; 94 79 unsigned int uart_16550_compatible:1; 80 + unsigned int in_idle:1; 81 + 82 + u8 no_int_count; 95 83 96 84 static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data) ··· 125 107 return value; 126 108 } 127 109 128 - /* 129 - * This function is being called as part of the uart_port::serial_out() 130 - * routine. Hence, it must not call serial_port_out() or serial_out() 131 - * against the modified registers here, i.e. LCR. 132 - */ 133 - static void dw8250_force_idle(struct uart_port *p) 110 + static void dw8250_idle_exit(struct uart_port *p) 134 111 { 112 + struct dw8250_data *d = to_dw8250_data(p->private_data); 135 113 struct uart_8250_port *up = up_to_u8250p(p); 136 - unsigned int lsr; 137 - 138 - /* 139 - * The following call currently performs serial_out() 140 - * against the FCR register. Because it differs to LCR 141 - * there will be no infinite loop, but if it ever gets 142 - * modified, we might need a new custom version of it 143 - * that avoids infinite recursion. 144 - */ 145 - serial8250_clear_and_reinit_fifos(up); 115 + if (d->uart_16550_compatible) 116 + return; 146 117 147 - /* 148 - * With PSLVERR_RESP_EN parameter set to 1, the device generates an 149 - * error response when an attempt to read an empty RBR with FIFO 150 - * enabled. 151 - */ 152 - if (up->fcr & UART_FCR_ENABLE_FIFO) { 153 - lsr = serial_port_in(p, UART_LSR); 154 - if (!(lsr & UART_LSR_DR)) 155 - return; 118 + if (up->capabilities & UART_CAP_FIFO) 119 + serial_port_out(p, UART_FCR, up->fcr); 120 + serial_port_out(p, UART_MCR, up->mcr); 121 + serial_port_out(p, UART_IER, up->ier); 122 + 123 + /* DMA Rx is restarted by IRQ handler as needed. */ 124 + if (up->dma) 125 + serial8250_tx_dma_resume(up); 126 + 127 + d->in_idle = 0; 128 + } 129 + 130 + /* 131 + * Ensure BUSY is not asserted. If DW UART is configured with 132 + * !uart_16550_compatible, the writes to LCR, DLL, and DLH fail while 133 + * BUSY is asserted. 134 + * 135 + * Context: port's lock must be held 136 + */ 137 + static int dw8250_idle_enter(struct uart_port *p) 138 + { 139 + struct dw8250_data *d = to_dw8250_data(p->private_data); 140 + unsigned int usr_reg = d->pdata ?
d->pdata->usr_reg : DW_UART_USR; 141 + struct uart_8250_port *up = up_to_u8250p(p); 142 + int retries; 143 + u32 lsr; 144 + 145 + lockdep_assert_held_once(&p->lock); 146 + 147 + if (d->uart_16550_compatible) 148 + return 0; 149 + 150 + d->in_idle = 1; 151 + 152 + /* Prevent triggering interrupt from RBR filling */ 153 + serial_port_out(p, UART_IER, 0); 154 + 155 + if (up->dma) { 156 + serial8250_rx_dma_flush(up); 157 + if (serial8250_tx_dma_running(up)) 158 + serial8250_tx_dma_pause(up); 156 159 } 157 160 161 + /* 162 + * Wait until Tx becomes empty + one extra frame time to ensure all bits 163 + * have been sent on the wire. 164 + * 165 + * FIXME: frame_time delay is too long with very low baudrates. 166 + */ 167 + serial8250_fifo_wait_for_lsr_thre(up, p->fifosize); 168 + ndelay(p->frame_time); 169 + 170 + serial_port_out(p, UART_MCR, up->mcr | UART_MCR_LOOP); 171 + 172 + retries = 4; /* Arbitrary limit, 2 was always enough in tests */ 173 + do { 174 + serial8250_clear_fifos(up); 175 + if (!(serial_port_in(p, usr_reg) & DW_UART_USR_BUSY)) 176 + break; 177 + /* FIXME: frame_time delay is too long with very low baudrates. */ 178 + ndelay(p->frame_time); 179 + } while (--retries); 180 + 181 + lsr = serial_lsr_in(up); 182 + if (lsr & UART_LSR_DR) { 183 + serial_port_in(p, UART_RX); 184 + up->lsr_saved_flags = 0; 185 + } 186 + 187 + /* Now guaranteed to have BUSY deasserted? Just sanity check */ 188 + if (serial_port_in(p, usr_reg) & DW_UART_USR_BUSY) { 189 + dw8250_idle_exit(p); 190 + return -EBUSY; 191 + } 192 + 193 + return 0; 194 + } 195 + 196 + static void dw8250_set_divisor(struct uart_port *p, unsigned int baud, 197 + unsigned int quot, unsigned int quot_frac) 198 + { 199 + struct uart_8250_port *up = up_to_u8250p(p); 200 + int ret; 201 + 202 + ret = dw8250_idle_enter(p); 203 + if (ret < 0) 204 + return; 205 + 206 + serial_port_out(p, UART_LCR, up->lcr | UART_LCR_DLAB); 207 + if (!(serial_port_in(p, UART_LCR) & UART_LCR_DLAB)) 208 + goto idle_failed; 209 + 210 + serial_dl_write(up, quot); 211 + serial_port_out(p, UART_LCR, up->lcr); 212 + 213 + idle_failed: 214 + dw8250_idle_exit(p); 159 215 } 160 216 161 217 /* 162 218 * This function is being called as part of the uart_port::serial_out() 163 219 * routine. Hence, special care must be taken when calling serial_port_out() or 164 220 * serial_out() against the modified registers here, i.e. LCR (d->in_idle is 165 221 * used to break the recursion loop).
165 222 */ 166 223 static void dw8250_check_lcr(struct uart_port *p, unsigned int offset, u32 value) 167 224 { 168 225 struct dw8250_data *d = to_dw8250_data(p->private_data); 169 - void __iomem *addr = p->membase + (offset << p->regshift); 170 - int tries = 1000; 226 + u32 lcr; 227 + int ret; 171 228 172 229 if (offset != UART_LCR || d->uart_16550_compatible) 173 230 return; 174 231 232 + lcr = serial_port_in(p, UART_LCR); 233 + 175 234 /* Make sure LCR write wasn't ignored */ 176 - while (tries--) { 177 - u32 lcr = serial_port_in(p, offset); 235 + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) 236 + return; 178 237 179 - if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) 180 - return; 238 + if (d->in_idle) 239 + goto write_err; 181 240 182 - dw8250_force_idle(p); 241 + ret = dw8250_idle_enter(p); 242 + if (ret < 0) 243 + goto write_err; 183 244 184 - #ifdef CONFIG_64BIT 185 - if (p->type == PORT_OCTEON) 186 - __raw_writeq(value & 0xff, addr); 187 - else 188 - #endif 189 - if (p->iotype == UPIO_MEM32) 190 - writel(value, addr); 191 - else if (p->iotype == UPIO_MEM32BE) 192 - iowrite32be(value, addr); 193 - else 194 - writeb(value, addr); 195 - } 245 + serial_port_out(p, UART_LCR, value); 246 + dw8250_idle_exit(p); 247 + return; 248 + 249 + write_err: 196 250 /* 197 251 * FIXME: this deadlocks if port->lock is already held 198 252 * dev_err(p->dev, "Couldn't set LCR to %d\n", value); 199 253 */ 254 + return; /* Silences "label at the end of compound statement" */ 255 + } 256 + 257 + /* 258 + * With BUSY, LCR writes can be very expensive (IRQ + complex retry logic). 259 + * If the write does not change the value of the LCR register, skip it entirely. 260 + */ 261 + static bool dw8250_can_skip_reg_write(struct uart_port *p, unsigned int offset, u32 value) 262 + { 263 + struct dw8250_data *d = to_dw8250_data(p->private_data); 264 + u32 lcr; 265 + 266 + if (offset != UART_LCR || d->uart_16550_compatible) 267 + return false; 268 + 269 + lcr = serial_port_in(p, offset); 270 + return lcr == value; 200 271 } 201 272 202 273 /* Returns once the transmitter is empty or we run out of retries */ ··· 314 207 315 208 static void dw8250_serial_out(struct uart_port *p, unsigned int offset, u32 value) 316 209 { 210 + if (dw8250_can_skip_reg_write(p, offset, value)) 211 + return; 212 + 317 213 writeb(value, p->membase + (offset << p->regshift)); 318 214 dw8250_check_lcr(p, offset, value); 319 215 } 320 216 321 217 static void dw8250_serial_out38x(struct uart_port *p, unsigned int offset, u32 value) 322 218 { 219 + if (dw8250_can_skip_reg_write(p, offset, value)) 220 + return; 221 + 323 222 /* Allow the TX to drain before we reconfigure */ 324 223 if (offset == UART_LCR) 325 224 dw8250_tx_wait_empty(p); ··· 350 237 351 238 static void dw8250_serial_outq(struct uart_port *p, unsigned int offset, u32 value) 352 239 { 240 + if (dw8250_can_skip_reg_write(p, offset, value)) 241 + return; 242 + 353 243 value &= 0xff; 354 244 __raw_writeq(value, p->membase + (offset << p->regshift)); 355 245 /* Read back to ensure register write ordering.
*/ ··· 364 248 365 249 static void dw8250_serial_out32(struct uart_port *p, unsigned int offset, u32 value) 366 250 { 251 + if (dw8250_can_skip_reg_write(p, offset, value)) 252 + return; 253 + 367 254 writel(value, p->membase + (offset << p->regshift)); 368 255 dw8250_check_lcr(p, offset, value); 369 256 } ··· 380 261 381 262 static void dw8250_serial_out32be(struct uart_port *p, unsigned int offset, u32 value) 382 263 { 264 + if (dw8250_can_skip_reg_write(p, offset, value)) 265 + return; 266 + 383 267 iowrite32be(value, p->membase + (offset << p->regshift)); 384 268 dw8250_check_lcr(p, offset, value); 385 269 } ··· 394 272 return dw8250_modify_msr(p, offset, value); 395 273 } 396 274 275 + /* 276 + * INTC10EE UART can IRQ storm while reporting IIR_NO_INT. Inducing IIR value 277 + * change has been observed to break the storm. 278 + * 279 + * If Tx is empty (THRE asserted), we use here IER_THRI to cause IIR_NO_INT -> 280 + * IIR_THRI transition. 281 + */ 282 + static void dw8250_quirk_ier_kick(struct uart_port *p) 283 + { 284 + struct uart_8250_port *up = up_to_u8250p(p); 285 + u32 lsr; 286 + 287 + if (up->ier & UART_IER_THRI) 288 + return; 289 + 290 + lsr = serial_lsr_in(up); 291 + if (!(lsr & UART_LSR_THRE)) 292 + return; 293 + 294 + serial_port_out(p, UART_IER, up->ier | UART_IER_THRI); 295 + serial_port_in(p, UART_LCR); /* safe, no side-effects */ 296 + serial_port_out(p, UART_IER, up->ier); 297 + } 397 298 398 299 static int dw8250_handle_irq(struct uart_port *p) 399 300 { ··· 426 281 bool rx_timeout = (iir & 0x3f) == UART_IIR_RX_TIMEOUT; 427 282 unsigned int quirks = d->pdata->quirks; 428 283 unsigned int status; 429 - unsigned long flags; 284 + 285 + guard(uart_port_lock_irqsave)(p); 286 + 287 + switch (FIELD_GET(DW_UART_IIR_IID, iir)) { 288 + case UART_IIR_NO_INT: 289 + if (d->uart_16550_compatible || up->dma) 290 + return 0; 291 + 292 + if (quirks & DW_UART_QUIRK_IER_KICK && 293 + d->no_int_count == (DW_UART_QUIRK_IER_KICK_THRES - 1)) 294 + dw8250_quirk_ier_kick(p); 295 + d->no_int_count = (d->no_int_count + 1) % DW_UART_QUIRK_IER_KICK_THRES; 296 + 297 + return 0; 298 + 299 + case UART_IIR_BUSY: 300 + /* Clear the USR */ 301 + serial_port_in(p, d->pdata->usr_reg); 302 + 303 + d->no_int_count = 0; 304 + 305 + return 1; 306 + } 307 + 308 + d->no_int_count = 0; 430 309 431 310 /* 432 311 * There are ways to get Designware-based UARTs into a state where ··· 463 294 * so we limit the workaround only to non-DMA mode.
464 295 */ 465 296 if (!up->dma && rx_timeout) { 466 - uart_port_lock_irqsave(p, &flags); 467 297 status = serial_lsr_in(up); 468 298 469 299 if (!(status & (UART_LSR_DR | UART_LSR_BI))) 470 300 serial_port_in(p, UART_RX); 471 - 472 - uart_port_unlock_irqrestore(p, flags); 473 301 } 474 302 475 303 /* Manually stop the Rx DMA transfer when acting as flow controller */ 476 304 if (quirks & DW_UART_QUIRK_IS_DMA_FC && up->dma && up->dma->rx_running && rx_timeout) { 477 - uart_port_lock_irqsave(p, &flags); 478 305 status = serial_lsr_in(up); 479 - uart_port_unlock_irqrestore(p, flags); 480 306 481 307 if (status & (UART_LSR_DR | UART_LSR_BI)) { 482 308 dw8250_writel_ext(p, RZN1_UART_RDMACR, 0); ··· 479 315 } 480 316 } 481 317 482 - if (serial8250_handle_irq(p, iir)) 483 - return 1; 318 + serial8250_handle_irq_locked(p, iir); 484 319 485 - if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { 486 - /* Clear the USR */ 487 - serial_port_in(p, d->pdata->usr_reg); 488 - 489 - return 1; 490 - } 491 - 492 - return 0; 320 + return 1; 493 321 } 494 322 495 323 static void dw8250_clk_work_cb(struct work_struct *work) ··· 683 527 reset_control_assert(data); 684 528 } 685 529 530 + static void dw8250_shutdown(struct uart_port *port) 531 + { 532 + struct dw8250_data *d = to_dw8250_data(port->private_data); 533 + 534 + serial8250_do_shutdown(port); 535 + d->no_int_count = 0; 536 + } 537 + 686 538 static int dw8250_probe(struct platform_device *pdev) 687 539 { 688 540 struct uart_8250_port uart = {}, *up = &uart; ··· 709 545 p->type = PORT_8250; 710 546 p->flags = UPF_FIXED_PORT; 711 547 p->dev = dev; 548 + 712 549 p->set_ldisc = dw8250_set_ldisc; 713 550 p->set_termios = dw8250_set_termios; 551 + p->set_divisor = dw8250_set_divisor; 714 552 715 553 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 716 554 if (!data) ··· 820 654 dw8250_quirks(p, data); 821 655 822 656 /* If the Busy Functionality is not implemented, don't handle it */ 823 - if (data->uart_16550_compatible) 657 + if (data->uart_16550_compatible) { 824 658 p->handle_irq = NULL; 825 - else if (data->pdata) 659 + } else if (data->pdata) { 826 660 p->handle_irq = dw8250_handle_irq; 661 + p->shutdown = dw8250_shutdown; 662 + } 827 663 828 664 dw8250_setup_dma_filter(p, data); 829 665 ··· 957 789 .quirks = DW_UART_QUIRK_SKIP_SET_RATE, 958 790 }; 959 791 792 + static const struct dw8250_platform_data dw8250_intc10ee = { 793 + .usr_reg = DW_UART_USR, 794 + .quirks = DW_UART_QUIRK_IER_KICK, 795 + }; 796 + 960 797 static const struct of_device_id dw8250_of_match[] = { 961 798 { .compatible = "snps,dw-apb-uart", .data = &dw8250_dw_apb }, 962 799 { .compatible = "cavium,octeon-3860-uart", .data = &dw8250_octeon_3860_data }, ··· 991 818 { "INT33C5", (kernel_ulong_t)&dw8250_dw_apb }, 992 819 { "INT3434", (kernel_ulong_t)&dw8250_dw_apb }, 993 820 { "INT3435", (kernel_ulong_t)&dw8250_dw_apb }, 994 - { "INTC10EE", (kernel_ulong_t)&dw8250_dw_apb }, 821 + { "INTC10EE", (kernel_ulong_t)&dw8250_intc10ee }, 995 822 { }, 996 823 }; 997 824 MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); ··· 1009 836 1010 837 module_platform_driver(dw8250_platform_driver); 1011 838 839 + MODULE_IMPORT_NS("SERIAL_8250"); 1012 840 MODULE_AUTHOR("Jamie Iles"); 1013 841 MODULE_LICENSE("GPL"); 1014 842 MODULE_DESCRIPTION("Synopsys DesignWare 8250 serial port driver");
+17
drivers/tty/serial/8250/8250_pci.c
··· 137 137 }; 138 138 139 139 #define PCI_DEVICE_ID_HPE_PCI_SERIAL 0x37e 140 + #define PCIE_VENDOR_ID_ASIX 0x125B 141 + #define PCIE_DEVICE_ID_AX99100 0x9100 140 142 141 143 static const struct pci_device_id pci_use_msi[] = { 142 144 { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900, ··· 151 149 0xA000, 0x1000) }, 152 150 { PCI_DEVICE_SUB(PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL, 153 151 PCI_ANY_ID, PCI_ANY_ID) }, 152 + { PCI_DEVICE_SUB(PCIE_VENDOR_ID_ASIX, PCIE_DEVICE_ID_AX99100, 153 + 0xA000, 0x1000) }, 154 154 { } 155 155 }; 156 156 ··· 924 920 case PCI_DEVICE_ID_NETMOS_9912: 925 921 case PCI_DEVICE_ID_NETMOS_9922: 926 922 case PCI_DEVICE_ID_NETMOS_9900: 923 + case PCIE_DEVICE_ID_AX99100: 927 924 num_serial = pci_netmos_9900_numports(dev); 928 925 break; 929 926 ··· 2543 2538 */ 2544 2539 { 2545 2540 .vendor = PCI_VENDOR_ID_NETMOS, 2541 + .device = PCI_ANY_ID, 2542 + .subvendor = PCI_ANY_ID, 2543 + .subdevice = PCI_ANY_ID, 2544 + .init = pci_netmos_init, 2545 + .setup = pci_netmos_9900_setup, 2546 + }, 2547 + { 2548 + .vendor = PCIE_VENDOR_ID_ASIX, 2546 2549 .device = PCI_ANY_ID, 2547 2550 .subvendor = PCI_ANY_ID, 2548 2551 .subdevice = PCI_ANY_ID, ··· 6077 6064 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900, 6078 6065 0xA000, 0x3002, 6079 6066 0, 0, pbn_NETMOS9900_2s_115200 }, 6067 + 6068 + { PCIE_VENDOR_ID_ASIX, PCIE_DEVICE_ID_AX99100, 6069 + 0xA000, 0x1000, 6070 + 0, 0, pbn_b0_1_115200 }, 6080 6071 6081 6072 /* 6082 6073 * Best Connectivity and Rosewill PCI Multi I/O cards
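Editor's note: supporting the AX99100 takes three coordinated table entries, as the hunk shows: the MSI opt-in in pci_use_msi, a pci_serial_quirks entry reusing the Netmos 9900 setup/port-counting path, and a device table entry carrying the board definition. For reference, PCI_DEVICE_SUB() from <linux/pci.h> is just an initializer for the four ID fields; a sketch of what one pci_use_msi entry denotes:

    #include <linux/pci.h>

    /* PCI_DEVICE_SUB(vend, dev, subvend, subdev) expands to: */
    static const struct pci_device_id example_ax99100_id = {
        .vendor    = 0x125B,    /* PCIE_VENDOR_ID_ASIX */
        .device    = 0x9100,    /* PCIE_DEVICE_ID_AX99100 */
        .subvendor = 0xA000,    /* this specific serial-function variant */
        .subdevice = 0x1000,
    };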
+45 -30
drivers/tty/serial/8250/8250_port.c
··· 18 18 #include <linux/irq.h> 19 19 #include <linux/console.h> 20 20 #include <linux/gpio/consumer.h> 21 + #include <linux/lockdep.h> 21 22 #include <linux/sysrq.h> 22 23 #include <linux/delay.h> 23 24 #include <linux/platform_device.h> ··· 489 488 /* 490 489 * FIFO support. 491 490 */ 492 - static void serial8250_clear_fifos(struct uart_8250_port *p) 491 + void serial8250_clear_fifos(struct uart_8250_port *p) 493 492 { 494 493 if (p->capabilities & UART_CAP_FIFO) { 495 494 serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO); ··· 498 497 serial_out(p, UART_FCR, 0); 499 498 } 500 499 } 500 + EXPORT_SYMBOL_NS_GPL(serial8250_clear_fifos, "SERIAL_8250"); 501 501 502 502 static enum hrtimer_restart serial8250_em485_handle_start_tx(struct hrtimer *t); 503 503 static enum hrtimer_restart serial8250_em485_handle_stop_tx(struct hrtimer *t); ··· 1784 1782 } 1785 1783 1786 1784 /* 1787 - * This handles the interrupt from one port. 1785 + * Context: port's lock must be held by the caller. 1788 1786 */ 1789 - int serial8250_handle_irq(struct uart_port *port, unsigned int iir) 1787 + void serial8250_handle_irq_locked(struct uart_port *port, unsigned int iir) 1790 1788 { 1791 1789 struct uart_8250_port *up = up_to_u8250p(port); 1792 1790 struct tty_port *tport = &port->state->port; 1793 1791 bool skip_rx = false; 1794 - unsigned long flags; 1795 1792 u16 status; 1796 1793 1797 - if (iir & UART_IIR_NO_INT) 1798 - return 0; 1799 - 1800 - uart_port_lock_irqsave(port, &flags); 1794 + lockdep_assert_held_once(&port->lock); 1801 1795 1802 1796 status = serial_lsr_in(up); 1803 1797 ··· 1826 1828 else if (!up->dma->tx_running) 1827 1829 __stop_tx(up); 1828 1830 } 1831 + } 1832 + EXPORT_SYMBOL_NS_GPL(serial8250_handle_irq_locked, "SERIAL_8250"); 1829 1833 1830 - uart_unlock_and_check_sysrq_irqrestore(port, flags); 1834 + /* 1835 + * This handles the interrupt from one port. 1836 + */ 1837 + int serial8250_handle_irq(struct uart_port *port, unsigned int iir) 1838 + { 1839 + if (iir & UART_IIR_NO_INT) 1840 + return 0; 1841 + 1842 + guard(uart_port_lock_irqsave)(port); 1843 + serial8250_handle_irq_locked(port, iir); 1831 1844 1832 1845 return 1; 1833 1846 } ··· 2156 2147 if (up->port.flags & UPF_NO_THRE_TEST) 2157 2148 return; 2158 2149 2159 - if (port->irqflags & IRQF_SHARED) 2160 - disable_irq_nosync(port->irq); 2150 + disable_irq(port->irq); 2161 2151 2162 2152 /* 2163 2153 * Test for UARTs that do not reassert THRE when the transmitter is idle and the interrupt ··· 2178 2170 serial_port_out(port, UART_IER, 0); 2179 2171 } 2180 2172 2181 - if (port->irqflags & IRQF_SHARED) 2182 - enable_irq(port->irq); 2173 + enable_irq(port->irq); 2183 2174 2184 2175 /* 2185 2176 * If the interrupt is not reasserted, or we otherwise don't trust the iir, setup a timer to ··· 2357 2350 void serial8250_do_shutdown(struct uart_port *port) 2358 2351 { 2359 2352 struct uart_8250_port *up = up_to_u8250p(port); 2353 + u32 lcr; 2360 2354 2361 2355 serial8250_rpm_get(up); 2362 2356 /* ··· 2384 2376 port->mctrl &= ~TIOCM_OUT2; 2385 2377 2386 2378 serial8250_set_mctrl(port, port->mctrl); 2379 + 2380 + /* Disable break condition */ 2381 + lcr = serial_port_in(port, UART_LCR); 2382 + lcr &= ~UART_LCR_SBC; 2383 + serial_port_out(port, UART_LCR, lcr); 2387 2384 } 2388 2385 2389 - /* 2390 - * Disable break condition and FIFOs 2391 - */ 2392 - serial_port_out(port, UART_LCR, 2393 - serial_port_in(port, UART_LCR) & ~UART_LCR_SBC); 2394 2386 serial8250_clear_fifos(up); 2395 2387 2396 2388 rsa_disable(up); ··· 2400 2392 * the IRQ chain.
2401 2393 */ 2402 2394 serial_port_in(port, UART_RX); 2395 + /* 2396 + * LCR writes on DW UART can trigger late (unmaskable) IRQs. 2397 + * Handle them before releasing the handler. 2398 + */ 2399 + synchronize_irq(port->irq); 2400 + 2403 2401 serial8250_rpm_put(up); 2404 2402 2405 2403 up->ops->release_irq(up); ··· 3199 3185 } 3200 3186 EXPORT_SYMBOL_GPL(serial8250_set_defaults); 3201 3187 3188 + void serial8250_fifo_wait_for_lsr_thre(struct uart_8250_port *up, unsigned int count) 3189 + { 3190 + unsigned int i; 3191 + 3192 + for (i = 0; i < count; i++) { 3193 + if (wait_for_lsr(up, UART_LSR_THRE)) 3194 + return; 3195 + } 3196 + } 3197 + EXPORT_SYMBOL_NS_GPL(serial8250_fifo_wait_for_lsr_thre, "SERIAL_8250"); 3198 + 3202 3199 #ifdef CONFIG_SERIAL_8250_CONSOLE 3203 3200 3204 3201 static void serial8250_console_putchar(struct uart_port *port, unsigned char ch) ··· 3251 3226 serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); 3252 3227 } 3253 3228 3254 - static void fifo_wait_for_lsr(struct uart_8250_port *up, unsigned int count) 3255 - { 3256 - unsigned int i; 3257 - 3258 - for (i = 0; i < count; i++) { 3259 - if (wait_for_lsr(up, UART_LSR_THRE)) 3260 - return; 3261 - } 3262 - } 3263 - 3264 3229 /* 3265 3230 * Print a string to the serial port using the device FIFO 3266 3231 * ··· 3269 3254 3270 3255 while (s != end) { 3271 3256 /* Allow timeout for each byte of a possibly full FIFO */ 3272 - fifo_wait_for_lsr(up, fifosize); 3257 + serial8250_fifo_wait_for_lsr_thre(up, fifosize); 3273 3258 3274 3259 for (i = 0; i < fifosize && s != end; ++i) { 3275 3260 if (*s == '\n' && !cr_sent) { ··· 3287 3272 * Allow timeout for each byte written since the caller will only wait 3288 3273 * for UART_LSR_BOTH_EMPTY using the timeout of a single character 3289 3274 */ 3290 - fifo_wait_for_lsr(up, tx_count); 3275 + serial8250_fifo_wait_for_lsr_thre(up, tx_count); 3291 3276 } 3292 3277 3293 3278 /*
+4 -1
drivers/tty/serial/serial_core.c
··· 643 643 unsigned int ret; 644 644 645 645 port = uart_port_ref_lock(state, &flags); 646 - ret = kfifo_avail(&state->port.xmit_fifo); 646 + if (!state->port.xmit_buf) 647 + ret = 0; 648 + else 649 + ret = kfifo_avail(&state->port.xmit_fifo); 647 650 uart_port_unlock_deref(port, flags); 648 651 return ret; 649 652 }
+1
drivers/tty/serial/uartlite.c
··· 878 878 pm_runtime_use_autosuspend(&pdev->dev); 879 879 pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT); 880 880 pm_runtime_set_active(&pdev->dev); 881 + pm_runtime_get_noresume(&pdev->dev); 881 882 pm_runtime_enable(&pdev->dev); 882 883 883 884 ret = ulite_assign(&pdev->dev, id, res->start, irq, pdata);
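Editor's note: the added pm_runtime_get_noresume() completes the standard probe-time sequence for hardware that is already powered: mark the device active, take a usage reference so runtime suspend cannot race the rest of probe, then enable runtime PM. A matching put is expected once setup finishes (presumably later in this probe path; the hunk does not show it). The idiom in isolation, with an illustrative setup step:

    #include <linux/pm_runtime.h>

        pm_runtime_set_active(dev);     /* device is already powered up */
        pm_runtime_get_noresume(dev);   /* hold it active across probe */
        pm_runtime_enable(dev);

        ret = example_hw_setup(dev);    /* hypothetical setup step */

        pm_runtime_put(dev);            /* drop the probe-time reference */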
+8
drivers/tty/vt/vt.c
··· 1339 1339 kfree(vc->vc_saved_screen); 1340 1340 vc->vc_saved_screen = NULL; 1341 1341 } 1342 + vc_uniscr_free(vc->vc_saved_uni_lines); 1343 + vc->vc_saved_uni_lines = NULL; 1342 1344 } 1343 1345 return vc; 1344 1346 } ··· 1886 1884 vc->vc_saved_screen = kmemdup((u16 *)vc->vc_origin, size, GFP_KERNEL); 1887 1885 if (vc->vc_saved_screen == NULL) 1888 1886 return; 1887 + vc->vc_saved_uni_lines = vc->vc_uni_lines; 1888 + vc->vc_uni_lines = NULL; 1889 1889 vc->vc_saved_rows = vc->vc_rows; 1890 1890 vc->vc_saved_cols = vc->vc_cols; 1891 1891 save_cur(vc); ··· 1909 1905 dest = ((u16 *)vc->vc_origin) + r * vc->vc_cols; 1910 1906 memcpy(dest, src, 2 * cols); 1911 1907 } 1908 + vc_uniscr_set(vc, vc->vc_saved_uni_lines); 1909 + vc->vc_saved_uni_lines = NULL; 1912 1910 restore_cur(vc); 1913 1911 /* Update the entire screen */ 1914 1912 if (con_should_update(vc)) ··· 2233 2227 if (vc->vc_saved_screen != NULL) { 2234 2228 kfree(vc->vc_saved_screen); 2235 2229 vc->vc_saved_screen = NULL; 2230 + vc_uniscr_free(vc->vc_saved_uni_lines); 2231 + vc->vc_saved_uni_lines = NULL; 2236 2232 vc->vc_saved_rows = 0; 2237 2233 vc->vc_saved_cols = 0; 2238 2234 }
+1 -1
drivers/ufs/core/ufshcd.c
··· 10066 10066 } 10067 10067 10068 10068 flush_work(&hba->eeh_work); 10069 + cancel_delayed_work_sync(&hba->ufs_rtc_update_work); 10069 10070 10070 10071 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); 10071 10072 if (ret) ··· 10121 10120 if (ret) 10122 10121 goto set_link_active; 10123 10122 10124 - cancel_delayed_work_sync(&hba->ufs_rtc_update_work); 10125 10123 goto out; 10126 10124 10127 10125 set_link_active:
+5
drivers/usb/class/cdc-acm.c
··· 1379 1379 acm->ctrl_caps = h.usb_cdc_acm_descriptor->bmCapabilities; 1380 1380 if (quirks & NO_CAP_LINE) 1381 1381 acm->ctrl_caps &= ~USB_CDC_CAP_LINE; 1382 + if (quirks & MISSING_CAP_BRK) 1383 + acm->ctrl_caps |= USB_CDC_CAP_BRK; 1382 1384 acm->ctrlsize = ctrlsize; 1383 1385 acm->readsize = readsize; 1384 1386 acm->rx_buflimit = num_rx_buf; ··· 2003 2001 { USB_DEVICE(0x32a7, 0x0000), 2004 2002 .driver_info = IGNORE_DEVICE, 2005 2003 }, 2004 + 2005 + /* CH343 supports CAP_BRK, but doesn't advertise it */ 2006 + { USB_DEVICE(0x1a86, 0x55d3), .driver_info = MISSING_CAP_BRK, }, 2006 2007 2007 2008 /* control interfaces without any protocol set */ 2008 2009 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+1
drivers/usb/class/cdc-acm.h
··· 113 113 #define CLEAR_HALT_CONDITIONS BIT(5) 114 114 #define SEND_ZERO_PACKET BIT(6) 115 115 #define DISABLE_ECHO BIT(7) 116 + #define MISSING_CAP_BRK BIT(8)
+3 -1
drivers/usb/class/cdc-wdm.c
··· 225 225 /* we may already be in overflow */ 226 226 if (!test_bit(WDM_OVERFLOW, &desc->flags)) { 227 227 memmove(desc->ubuf + desc->length, desc->inbuf, length); 228 - desc->length += length; 228 + smp_wmb(); /* against wdm_read() */ 229 + WRITE_ONCE(desc->length, desc->length + length); 229 230 } 230 231 } 231 232 skip_error: ··· 534 533 return -ERESTARTSYS; 535 534 536 535 cntr = READ_ONCE(desc->length); 536 + smp_rmb(); /* against wdm_in_callback() */ 537 537 if (cntr == 0) { 538 538 desc->read = 0; 539 539 retry:
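The two hunks form a standard publish/consume pairing: the completion handler must make the copied payload globally visible before publishing the enlarged length, and the reader must load the length before trusting the bytes behind it. Condensed from the hunks above:

    /* producer: wdm_in_callback() */
    memmove(desc->ubuf + desc->length, desc->inbuf, length);
    smp_wmb();                      /* payload visible before new length */
    WRITE_ONCE(desc->length, desc->length + length);

    /* consumer: wdm_read() */
    cntr = READ_ONCE(desc->length);
    smp_rmb();                      /* pairs with the smp_wmb() above */
    /* any of the cntr bytes in desc->ubuf are now safe to copy out */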
+3 -3
drivers/usb/class/usbtmc.c
··· 727 727 buffer[1] = data->bTag; 728 728 buffer[2] = ~data->bTag; 729 729 730 - retval = usb_bulk_msg(data->usb_dev, 730 + retval = usb_bulk_msg_killable(data->usb_dev, 731 731 usb_sndbulkpipe(data->usb_dev, 732 732 data->bulk_out), 733 733 buffer, USBTMC_HEADER_SIZE, ··· 1347 1347 buffer[11] = 0; /* Reserved */ 1348 1348 1349 1349 /* Send bulk URB */ 1350 - retval = usb_bulk_msg(data->usb_dev, 1350 + retval = usb_bulk_msg_killable(data->usb_dev, 1351 1351 usb_sndbulkpipe(data->usb_dev, 1352 1352 data->bulk_out), 1353 1353 buffer, USBTMC_HEADER_SIZE, ··· 1419 1419 actual = 0; 1420 1420 1421 1421 /* Send bulk URB */ 1422 - retval = usb_bulk_msg(data->usb_dev, 1422 + retval = usb_bulk_msg_killable(data->usb_dev, 1423 1423 usb_rcvbulkpipe(data->usb_dev, 1424 1424 data->bulk_in), 1425 1425 buffer, bufsize, &actual,
+5 -1
drivers/usb/core/config.c
··· 927 927 dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG; 928 928 } 929 929 930 - if (ncfg < 1) { 930 + if (ncfg < 1 && dev->quirks & USB_QUIRK_FORCE_ONE_CONFIG) { 931 + dev_info(ddev, "Device claims zero configurations, forcing to 1\n"); 932 + dev->descriptor.bNumConfigurations = 1; 933 + ncfg = 1; 934 + } else if (ncfg < 1) { 931 935 dev_err(ddev, "no configurations\n"); 932 936 return -EINVAL; 933 937 }
+79 -21
drivers/usb/core/message.c
··· 42 42 43 43 44 44 /* 45 - * Starts urb and waits for completion or timeout. Note that this call 46 - * is NOT interruptible. Many device driver i/o requests should be 47 - * interruptible and therefore these drivers should implement their 48 - * own interruptible routines. 45 + * Starts urb and waits for completion or timeout. 46 + * Whether or not the wait is killable depends on the flag passed in. 47 + * For example, compare usb_bulk_msg() and usb_bulk_msg_killable(). 48 + * 49 + * For non-killable waits, we enforce a maximum limit on the timeout value. 49 50 */ 50 - static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length) 51 + static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length, 52 + bool killable) 51 53 { 52 54 struct api_context ctx; 53 55 unsigned long expire; 54 56 int retval; 57 + long rc; 55 58 56 59 init_completion(&ctx.done); 57 60 urb->context = &ctx; ··· 63 60 if (unlikely(retval)) 64 61 goto out; 65 62 66 - expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; 67 - if (!wait_for_completion_timeout(&ctx.done, expire)) { 63 + if (!killable && (timeout <= 0 || timeout > USB_MAX_SYNCHRONOUS_TIMEOUT)) 64 + timeout = USB_MAX_SYNCHRONOUS_TIMEOUT; 65 + expire = (timeout > 0) ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; 66 + if (killable) 67 + rc = wait_for_completion_killable_timeout(&ctx.done, expire); 68 + else 69 + rc = wait_for_completion_timeout(&ctx.done, expire); 70 + if (rc <= 0) { 68 71 usb_kill_urb(urb); 69 - retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status); 72 + if (ctx.status != -ENOENT) 73 + retval = ctx.status; 74 + else if (rc == 0) 75 + retval = -ETIMEDOUT; 76 + else 77 + retval = rc; 70 78 71 79 dev_dbg(&urb->dev->dev, 72 - "%s timed out on ep%d%s len=%u/%u\n", 80 + "%s timed out or killed on ep%d%s len=%u/%u\n", 73 81 current->comm, 74 82 usb_endpoint_num(&urb->ep->desc), 75 83 usb_urb_dir_in(urb) ? "in" : "out", ··· 114 100 usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data, 115 101 len, usb_api_blocking_completion, NULL); 116 102 117 - retv = usb_start_wait_urb(urb, timeout, &length); 103 + retv = usb_start_wait_urb(urb, timeout, &length, false); 118 104 if (retv < 0) 119 105 return retv; 120 106 else ··· 131 117 * @index: USB message index value 132 118 * @data: pointer to the data to send 133 119 * @size: length in bytes of the data to send 134 - * @timeout: time in msecs to wait for the message to complete before timing 135 - * out (if 0 the wait is forever) 120 + * @timeout: time in msecs to wait for the message to complete before timing out 136 121 * 137 122 * Context: task context, might sleep. 
138 123 * ··· 186 173 * @index: USB message index value
187 174 * @driver_data: pointer to the data to send
188 175 * @size: length in bytes of the data to send
189 - * @timeout: time in msecs to wait for the message to complete before timing
190 - * out (if 0 the wait is forever)
176 + * @timeout: time in msecs to wait for the message to complete before timing out
191 177 * @memflags: the flags for memory allocation for buffers
192 178 *
193 179 * Context: !in_interrupt () ··· 244 232 * @index: USB message index value
245 233 * @driver_data: pointer to the data to be filled in by the message
246 234 * @size: length in bytes of the data to be received
247 - * @timeout: time in msecs to wait for the message to complete before timing
248 - * out (if 0 the wait is forever)
235 + * @timeout: time in msecs to wait for the message to complete before timing out
249 236 * @memflags: the flags for memory allocation for buffers
250 237 *
251 238 * Context: !in_interrupt () ··· 315 304 * @len: length in bytes of the data to send
316 305 * @actual_length: pointer to a location to put the actual length transferred
317 306 * in bytes
318 - * @timeout: time in msecs to wait for the message to complete before
319 - * timing out (if 0 the wait is forever)
307 + * @timeout: time in msecs to wait for the message to complete before timing out
320 308 *
321 309 * Context: task context, might sleep.
322 310 * ··· 347 337 * @len: length in bytes of the data to send
348 338 * @actual_length: pointer to a location to put the actual length transferred
349 339 * in bytes
350 - * @timeout: time in msecs to wait for the message to complete before
351 - * timing out (if 0 the wait is forever)
340 + * @timeout: time in msecs to wait for the message to complete before timing out
352 341 *
353 342 * Context: task context, might sleep.
354 343 * ··· 394 385 usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
395 386 usb_api_blocking_completion, NULL);
396 387 
397 - return usb_start_wait_urb(urb, timeout, actual_length);
388 + return usb_start_wait_urb(urb, timeout, actual_length, false);
398 389 }
399 390 EXPORT_SYMBOL_GPL(usb_bulk_msg);
391 + 
392 + /**
393 + * usb_bulk_msg_killable - Builds a bulk urb, sends it off and waits for completion in a killable state
394 + * @usb_dev: pointer to the usb device to send the message to
395 + * @pipe: endpoint "pipe" to send the message to
396 + * @data: pointer to the data to send
397 + * @len: length in bytes of the data to send
398 + * @actual_length: pointer to a location to put the actual length transferred
399 + * in bytes
400 + * @timeout: time in msecs to wait for the message to complete before
401 + * timing out (if <= 0, the wait is as long as possible)
402 + * 
403 + * Context: task context, might sleep.
404 + * 
405 + * This function is just like usb_bulk_msg(), except that it waits in a
406 + * killable state and there is no limit on the timeout length.
407 + * 
408 + * Return:
409 + * If successful, 0. Otherwise a negative error number. The number of actual
410 + * bytes transferred will be stored in the @actual_length parameter.
411 + * 412 + */ 413 + int usb_bulk_msg_killable(struct usb_device *usb_dev, unsigned int pipe, 414 + void *data, int len, int *actual_length, int timeout) 415 + { 416 + struct urb *urb; 417 + struct usb_host_endpoint *ep; 418 + 419 + ep = usb_pipe_endpoint(usb_dev, pipe); 420 + if (!ep || len < 0) 421 + return -EINVAL; 422 + 423 + urb = usb_alloc_urb(0, GFP_KERNEL); 424 + if (!urb) 425 + return -ENOMEM; 426 + 427 + if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == 428 + USB_ENDPOINT_XFER_INT) { 429 + pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); 430 + usb_fill_int_urb(urb, usb_dev, pipe, data, len, 431 + usb_api_blocking_completion, NULL, 432 + ep->desc.bInterval); 433 + } else 434 + usb_fill_bulk_urb(urb, usb_dev, pipe, data, len, 435 + usb_api_blocking_completion, NULL); 436 + 437 + return usb_start_wait_urb(urb, timeout, actual_length, true); 438 + } 439 + EXPORT_SYMBOL_GPL(usb_bulk_msg_killable); 400 440 401 441 /*-------------------------------------------------------------------*/ 402 442
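Converting a driver is mechanical, as the usbtmc hunks above show. A hedged sketch with hypothetical endpoint and buffer names:

    int actual = 0;
    int ret;

    ret = usb_bulk_msg_killable(udev,
                                usb_sndbulkpipe(udev, bulk_out_ep),
                                buf, len, &actual, 5000 /* ms */);
    /* a timeout still yields -ETIMEDOUT as with usb_bulk_msg(); a fatal
     * signal aborts the URB and propagates the error from the killable
     * completion wait (-ERESTARTSYS) */
    if (ret)
            return ret;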
+1 -7
drivers/usb/core/phy.c
··· 200 200 list_for_each_entry(roothub_entry, head, list) { 201 201 err = phy_set_mode(roothub_entry->phy, mode); 202 202 if (err) 203 - goto err_out; 203 + return err; 204 204 } 205 205 206 206 return 0; 207 - 208 - err_out: 209 - list_for_each_entry_continue_reverse(roothub_entry, head, list) 210 - phy_power_off(roothub_entry->phy); 211 - 212 - return err; 213 207 } 214 208 EXPORT_SYMBOL_GPL(usb_phy_roothub_set_mode); 215 209
+21
drivers/usb/core/quirks.c
··· 140 140 case 'p':
141 141 flags |= USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT;
142 142 break;
143 + case 'q':
144 + flags |= USB_QUIRK_FORCE_ONE_CONFIG;
145 + break;
143 146 /* Ignore unrecognized flag characters */
144 147 } ··· 208 206 
209 207 /* HP v222w 16GB Mini USB Drive */
210 208 { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
209 + 
210 + /* Huawei 4G LTE module ME906S */
211 + { USB_DEVICE(0x03f0, 0xa31d), .driver_info =
212 + USB_QUIRK_DISCONNECT_SUSPEND },
211 213 
212 214 /* Creative SB Audigy 2 NX */
213 215 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, ··· 382 376 /* SanDisk Extreme 55AE */
383 377 { USB_DEVICE(0x0781, 0x55ae), .driver_info = USB_QUIRK_NO_LPM },
384 378 
379 + /* Avermedia Live Gamer Ultra 2.1 (GC553G2) - BOS descriptor fetch hangs at SuperSpeed Plus */
380 + { USB_DEVICE(0x07ca, 0x2553), .driver_info = USB_QUIRK_NO_BOS },
381 + 
385 382 /* Realforce 87U Keyboard */
386 383 { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
387 384 
··· 444 435 /* ASUS Base Station(T100) */
445 436 { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
446 437 USB_QUIRK_IGNORE_REMOTE_WAKEUP },
438 + 
439 + /* ASUS TUF 4K PRO - BOS descriptor fetch hangs at SuperSpeed Plus */
440 + { USB_DEVICE(0x0b05, 0x1ab9), .driver_info = USB_QUIRK_NO_BOS },
447 441 
448 442 /* Realtek Semiconductor Corp. Mass Storage Device (Multicard Reader)*/
449 443 { USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, ··· 576 564 
577 565 { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
578 566 
567 + /* UGREEN 35871 - BOS descriptor fetch hangs at SuperSpeed Plus */
568 + { USB_DEVICE(0x2b89, 0x5871), .driver_info = USB_QUIRK_NO_BOS },
569 + 
579 570 /* APTIV AUTOMOTIVE HUB */
580 571 { USB_DEVICE(0x2c48, 0x0132), .driver_info =
581 572 USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT }, ··· 589 574 /* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
590 575 { USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
591 576 
577 + /* ezcap401 - BOS descriptor fetch hangs at SuperSpeed Plus */
578 + { USB_DEVICE(0x32ed, 0x0401), .driver_info = USB_QUIRK_NO_BOS },
579 + 
592 580 /* DELL USB GEN2 */
593 581 { USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
594 582 
595 583 /* VCOM device */
596 584 { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
585 + 
586 + /* Noji-MCS SmartCard Reader */
587 + { USB_DEVICE(0x5131, 0x2007), .driver_info = USB_QUIRK_FORCE_ONE_CONFIG },
597 588 
598 589 /* INTEL VALUE SSD */
599 590 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
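For hardware that is not in the static table yet, the same behaviour can be requested through the dynamic quirk list at boot; reusing the Noji-MCS IDs from the new entry above as an example:

    usbcore.quirks=5131:2007:q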
+2
drivers/usb/dwc3/dwc3-pci.c
··· 56 56 #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e 57 57 #define PCI_DEVICE_ID_INTEL_CNPV 0xa3b0 58 58 #define PCI_DEVICE_ID_INTEL_RPL 0xa70e 59 + #define PCI_DEVICE_ID_INTEL_NVLH 0xd37f 59 60 #define PCI_DEVICE_ID_INTEL_PTLH 0xe332 60 61 #define PCI_DEVICE_ID_INTEL_PTLH_PCH 0xe37e 61 62 #define PCI_DEVICE_ID_INTEL_PTLU 0xe432 ··· 448 447 { PCI_DEVICE_DATA(INTEL, CNPH, &dwc3_pci_intel_swnode) }, 449 448 { PCI_DEVICE_DATA(INTEL, CNPV, &dwc3_pci_intel_swnode) }, 450 449 { PCI_DEVICE_DATA(INTEL, RPL, &dwc3_pci_intel_swnode) }, 450 + { PCI_DEVICE_DATA(INTEL, NVLH, &dwc3_pci_intel_swnode) }, 451 451 { PCI_DEVICE_DATA(INTEL, PTLH, &dwc3_pci_intel_swnode) }, 452 452 { PCI_DEVICE_DATA(INTEL, PTLH_PCH, &dwc3_pci_intel_swnode) }, 453 453 { PCI_DEVICE_DATA(INTEL, PTLU, &dwc3_pci_intel_swnode) },
+4
drivers/usb/gadget/function/f_hid.c
··· 1207 1207 if (!hidg->interval_user_set) { 1208 1208 hidg_fs_in_ep_desc.bInterval = 10; 1209 1209 hidg_hs_in_ep_desc.bInterval = 4; 1210 + hidg_ss_in_ep_desc.bInterval = 4; 1210 1211 } else { 1211 1212 hidg_fs_in_ep_desc.bInterval = hidg->interval; 1212 1213 hidg_hs_in_ep_desc.bInterval = hidg->interval; 1214 + hidg_ss_in_ep_desc.bInterval = hidg->interval; 1213 1215 } 1214 1216 1215 1217 hidg_ss_out_comp_desc.wBytesPerInterval = ··· 1241 1239 if (!hidg->interval_user_set) { 1242 1240 hidg_fs_out_ep_desc.bInterval = 10; 1243 1241 hidg_hs_out_ep_desc.bInterval = 4; 1242 + hidg_ss_out_ep_desc.bInterval = 4; 1244 1243 } else { 1245 1244 hidg_fs_out_ep_desc.bInterval = hidg->interval; 1246 1245 hidg_hs_out_ep_desc.bInterval = hidg->interval; 1246 + hidg_ss_out_ep_desc.bInterval = hidg->interval; 1247 1247 } 1248 1248 status = usb_assign_descriptors(f, 1249 1249 hidg_fs_descriptors_intout,
+10 -2
drivers/usb/gadget/function/f_mass_storage.c
··· 180 180 #include <linux/kthread.h> 181 181 #include <linux/sched/signal.h> 182 182 #include <linux/limits.h> 183 + #include <linux/overflow.h> 183 184 #include <linux/pagemap.h> 184 185 #include <linux/rwsem.h> 185 186 #include <linux/slab.h> ··· 1854 1853 int cmnd_size, enum data_direction data_dir, 1855 1854 unsigned int mask, int needs_medium, const char *name) 1856 1855 { 1857 - if (common->curlun) 1858 - common->data_size_from_cmnd <<= common->curlun->blkbits; 1856 + if (common->curlun) { 1857 + if (check_shl_overflow(common->data_size_from_cmnd, 1858 + common->curlun->blkbits, 1859 + &common->data_size_from_cmnd)) { 1860 + common->phase_error = 1; 1861 + return -EINVAL; 1862 + } 1863 + } 1864 + 1859 1865 return check_command(common, cmnd_size, data_dir, 1860 1866 mask, needs_medium, name); 1861 1867 }
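check_shl_overflow() from linux/overflow.h performs the shift in a wider type and reports whether the result still fits the destination, so a hostile block count can no longer silently wrap data_size_from_cmnd. The primitive in isolation (variable names hypothetical):

    #include <linux/overflow.h>

    u32 bytes;

    /* bytes = nr_blocks << blkbits, unless the shift would overflow */
    if (check_shl_overflow(nr_blocks, blkbits, &bytes))
            return -EINVAL;  /* shifted value does not fit in a u32 */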
+79 -65
drivers/usb/gadget/function/f_ncm.c
··· 83 83 return container_of(f, struct f_ncm, port.func); 84 84 } 85 85 86 - static inline struct f_ncm_opts *func_to_ncm_opts(struct usb_function *f) 87 - { 88 - return container_of(f->fi, struct f_ncm_opts, func_inst); 89 - } 90 - 91 86 /*-------------------------------------------------------------------------*/ 92 87 93 88 /* ··· 859 864 static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 860 865 { 861 866 struct f_ncm *ncm = func_to_ncm(f); 862 - struct f_ncm_opts *opts = func_to_ncm_opts(f); 863 867 struct usb_composite_dev *cdev = f->config->cdev; 864 868 865 869 /* Control interface has only altsetting 0 */ ··· 881 887 if (alt > 1) 882 888 goto fail; 883 889 884 - scoped_guard(mutex, &opts->lock) 885 - if (opts->net) { 886 - DBG(cdev, "reset ncm\n"); 887 - opts->net = NULL; 888 - gether_disconnect(&ncm->port); 889 - ncm_reset_values(ncm); 890 - } 890 + if (ncm->netdev) { 891 + DBG(cdev, "reset ncm\n"); 892 + ncm->netdev = NULL; 893 + gether_disconnect(&ncm->port); 894 + ncm_reset_values(ncm); 895 + } 891 896 892 897 /* 893 898 * CDC Network only sends data in non-default altsettings. ··· 919 926 net = gether_connect(&ncm->port); 920 927 if (IS_ERR(net)) 921 928 return PTR_ERR(net); 922 - scoped_guard(mutex, &opts->lock) 923 - opts->net = net; 929 + ncm->netdev = net; 924 930 } 925 931 926 932 spin_lock(&ncm->lock); ··· 1366 1374 static void ncm_disable(struct usb_function *f) 1367 1375 { 1368 1376 struct f_ncm *ncm = func_to_ncm(f); 1369 - struct f_ncm_opts *opts = func_to_ncm_opts(f); 1370 1377 struct usb_composite_dev *cdev = f->config->cdev; 1371 1378 1372 1379 DBG(cdev, "ncm deactivated\n"); 1373 1380 1374 - scoped_guard(mutex, &opts->lock) 1375 - if (opts->net) { 1376 - opts->net = NULL; 1377 - gether_disconnect(&ncm->port); 1378 - } 1381 + if (ncm->netdev) { 1382 + ncm->netdev = NULL; 1383 + gether_disconnect(&ncm->port); 1384 + } 1379 1385 1380 1386 if (ncm->notify->enabled) { 1381 1387 usb_ep_disable(ncm->notify); ··· 1433 1443 { 1434 1444 struct usb_composite_dev *cdev = c->cdev; 1435 1445 struct f_ncm *ncm = func_to_ncm(f); 1436 - struct f_ncm_opts *ncm_opts = func_to_ncm_opts(f); 1437 1446 struct usb_string *us; 1438 1447 int status = 0; 1439 1448 struct usb_ep *ep; 1449 + struct f_ncm_opts *ncm_opts; 1440 1450 1441 1451 struct usb_os_desc_table *os_desc_table __free(kfree) = NULL; 1442 - struct net_device *netdev __free(free_gether_netdev) = NULL; 1452 + struct net_device *net __free(detach_gadget) = NULL; 1443 1453 struct usb_request *request __free(free_usb_request) = NULL; 1444 1454 1445 1455 if (!can_support_ecm(cdev->gadget)) 1446 1456 return -EINVAL; 1457 + 1458 + ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst); 1447 1459 1448 1460 if (cdev->use_os_string) { 1449 1461 os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL); ··· 1453 1461 return -ENOMEM; 1454 1462 } 1455 1463 1456 - netdev = gether_setup_default(); 1457 - if (IS_ERR(netdev)) 1458 - return -ENOMEM; 1464 + scoped_guard(mutex, &ncm_opts->lock) 1465 + if (ncm_opts->bind_count == 0) { 1466 + if (!device_is_registered(&ncm_opts->net->dev)) { 1467 + ncm_opts->net->mtu = (ncm_opts->max_segment_size - ETH_HLEN); 1468 + gether_set_gadget(ncm_opts->net, cdev->gadget); 1469 + status = gether_register_netdev(ncm_opts->net); 1470 + } else 1471 + status = gether_attach_gadget(ncm_opts->net, cdev->gadget); 1459 1472 1460 - scoped_guard(mutex, &ncm_opts->lock) { 1461 - gether_apply_opts(netdev, &ncm_opts->net_opts); 1462 - netdev->mtu = ncm_opts->max_segment_size - 
ETH_HLEN; 1463 - } 1473 + if (status) 1474 + return status; 1475 + net = ncm_opts->net; 1476 + } 1464 1477 1465 - gether_set_gadget(netdev, cdev->gadget); 1466 - status = gether_register_netdev(netdev); 1467 - if (status) 1468 - return status; 1469 - 1470 - /* export host's Ethernet address in CDC format */ 1471 - status = gether_get_host_addr_cdc(netdev, ncm->ethaddr, 1472 - sizeof(ncm->ethaddr)); 1473 - if (status < 12) 1474 - return -EINVAL; 1475 - ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr; 1478 + ncm_string_defs[1].s = ncm->ethaddr; 1476 1479 1477 1480 us = usb_gstrings_attach(cdev, ncm_strings, 1478 1481 ARRAY_SIZE(ncm_string_defs)); ··· 1565 1578 f->os_desc_n = 1; 1566 1579 } 1567 1580 ncm->notify_req = no_free_ptr(request); 1568 - ncm->netdev = no_free_ptr(netdev); 1569 - ncm->port.ioport = netdev_priv(ncm->netdev); 1581 + 1582 + ncm_opts->bind_count++; 1583 + retain_and_null_ptr(net); 1570 1584 1571 1585 DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n", 1572 1586 ncm->port.in_ep->name, ncm->port.out_ep->name, ··· 1582 1594 } 1583 1595 1584 1596 /* f_ncm_item_ops */ 1585 - USB_ETHER_OPTS_ITEM(ncm); 1597 + USB_ETHERNET_CONFIGFS_ITEM(ncm); 1586 1598 1587 1599 /* f_ncm_opts_dev_addr */ 1588 - USB_ETHER_OPTS_ATTR_DEV_ADDR(ncm); 1600 + USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ncm); 1589 1601 1590 1602 /* f_ncm_opts_host_addr */ 1591 - USB_ETHER_OPTS_ATTR_HOST_ADDR(ncm); 1603 + USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ncm); 1592 1604 1593 1605 /* f_ncm_opts_qmult */ 1594 - USB_ETHER_OPTS_ATTR_QMULT(ncm); 1606 + USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm); 1595 1607 1596 1608 /* f_ncm_opts_ifname */ 1597 - USB_ETHER_OPTS_ATTR_IFNAME(ncm); 1609 + USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm); 1598 1610 1599 1611 static ssize_t ncm_opts_max_segment_size_show(struct config_item *item, 1600 1612 char *page) ··· 1660 1672 struct f_ncm_opts *opts; 1661 1673 1662 1674 opts = container_of(f, struct f_ncm_opts, func_inst); 1675 + if (device_is_registered(&opts->net->dev)) 1676 + gether_cleanup(netdev_priv(opts->net)); 1677 + else 1678 + free_netdev(opts->net); 1663 1679 kfree(opts->ncm_interf_group); 1664 1680 kfree(opts); 1665 1681 } 1666 1682 1667 1683 static struct usb_function_instance *ncm_alloc_inst(void) 1668 1684 { 1669 - struct usb_function_instance *ret; 1685 + struct f_ncm_opts *opts; 1670 1686 struct usb_os_desc *descs[1]; 1671 1687 char *names[1]; 1672 1688 struct config_group *ncm_interf_group; 1673 1689 1674 - struct f_ncm_opts *opts __free(kfree) = kzalloc_obj(*opts); 1690 + opts = kzalloc_obj(*opts); 1675 1691 if (!opts) 1676 1692 return ERR_PTR(-ENOMEM); 1677 - 1678 - opts->net = NULL; 1679 1693 opts->ncm_os_desc.ext_compat_id = opts->ncm_ext_compat_id; 1680 - gether_setup_opts_default(&opts->net_opts, "usb"); 1681 1694 1682 1695 mutex_init(&opts->lock); 1683 1696 opts->func_inst.free_func_inst = ncm_free_inst; 1697 + opts->net = gether_setup_default(); 1698 + if (IS_ERR(opts->net)) { 1699 + struct net_device *net = opts->net; 1700 + kfree(opts); 1701 + return ERR_CAST(net); 1702 + } 1684 1703 opts->max_segment_size = ETH_FRAME_LEN; 1685 1704 INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop); 1686 1705 ··· 1698 1703 ncm_interf_group = 1699 1704 usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, 1700 1705 names, THIS_MODULE); 1701 - if (IS_ERR(ncm_interf_group)) 1706 + if (IS_ERR(ncm_interf_group)) { 1707 + ncm_free_inst(&opts->func_inst); 1702 1708 return ERR_CAST(ncm_interf_group); 1709 + } 1703 1710 opts->ncm_interf_group = ncm_interf_group; 1704 1711 1705 - ret = 
&opts->func_inst; 1706 - retain_and_null_ptr(opts); 1707 - return ret; 1712 + return &opts->func_inst; 1708 1713 } 1709 1714 1710 1715 static void ncm_free(struct usb_function *f) 1711 1716 { 1712 - struct f_ncm_opts *opts = func_to_ncm_opts(f); 1717 + struct f_ncm *ncm; 1718 + struct f_ncm_opts *opts; 1713 1719 1714 - scoped_guard(mutex, &opts->lock) 1715 - opts->refcnt--; 1716 - kfree(func_to_ncm(f)); 1720 + ncm = func_to_ncm(f); 1721 + opts = container_of(f->fi, struct f_ncm_opts, func_inst); 1722 + kfree(ncm); 1723 + mutex_lock(&opts->lock); 1724 + opts->refcnt--; 1725 + mutex_unlock(&opts->lock); 1717 1726 } 1718 1727 1719 1728 static void ncm_unbind(struct usb_configuration *c, struct usb_function *f) 1720 1729 { 1721 1730 struct f_ncm *ncm = func_to_ncm(f); 1731 + struct f_ncm_opts *ncm_opts; 1722 1732 1723 1733 DBG(c->cdev, "ncm unbind\n"); 1734 + 1735 + ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst); 1724 1736 1725 1737 hrtimer_cancel(&ncm->task_timer); 1726 1738 ··· 1745 1743 kfree(ncm->notify_req->buf); 1746 1744 usb_ep_free_request(ncm->notify, ncm->notify_req); 1747 1745 1748 - ncm->port.ioport = NULL; 1749 - gether_cleanup(netdev_priv(ncm->netdev)); 1746 + ncm_opts->bind_count--; 1747 + if (ncm_opts->bind_count == 0) 1748 + gether_detach_gadget(ncm_opts->net); 1750 1749 } 1751 1750 1752 1751 static struct usb_function *ncm_alloc(struct usb_function_instance *fi) 1753 1752 { 1754 1753 struct f_ncm *ncm; 1755 1754 struct f_ncm_opts *opts; 1755 + int status; 1756 1756 1757 1757 /* allocate and initialize one new instance */ 1758 1758 ncm = kzalloc(sizeof(*ncm), GFP_KERNEL); ··· 1762 1758 return ERR_PTR(-ENOMEM); 1763 1759 1764 1760 opts = container_of(fi, struct f_ncm_opts, func_inst); 1761 + mutex_lock(&opts->lock); 1762 + opts->refcnt++; 1765 1763 1766 - scoped_guard(mutex, &opts->lock) 1767 - opts->refcnt++; 1764 + /* export host's Ethernet address in CDC format */ 1765 + status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr, 1766 + sizeof(ncm->ethaddr)); 1767 + if (status < 12) { /* strlen("01234567890a") */ 1768 + kfree(ncm); 1769 + mutex_unlock(&opts->lock); 1770 + return ERR_PTR(-EINVAL); 1771 + } 1768 1772 1769 1773 spin_lock_init(&ncm->lock); 1770 1774 ncm_reset_values(ncm); 1775 + ncm->port.ioport = netdev_priv(opts->net); 1776 + mutex_unlock(&opts->lock); 1771 1777 ncm->port.is_fixed = true; 1772 1778 ncm->port.supports_multi_frame = true; 1773 1779
+14
drivers/usb/gadget/function/f_tcm.c
··· 1222 1222 se_cmd = &cmd->se_cmd; 1223 1223 tpg = cmd->fu->tpg; 1224 1224 tv_nexus = tpg->tpg_nexus; 1225 + if (!tv_nexus) { 1226 + struct usb_gadget *gadget = fuas_to_gadget(cmd->fu); 1227 + 1228 + dev_err(&gadget->dev, "Missing nexus, ignoring command\n"); 1229 + return; 1230 + } 1231 + 1225 1232 dir = get_cmd_dir(cmd->cmd_buf); 1226 1233 if (dir < 0) 1227 1234 goto out; ··· 1490 1483 se_cmd = &cmd->se_cmd; 1491 1484 tpg = cmd->fu->tpg; 1492 1485 tv_nexus = tpg->tpg_nexus; 1486 + if (!tv_nexus) { 1487 + struct usb_gadget *gadget = fuas_to_gadget(cmd->fu); 1488 + 1489 + dev_err(&gadget->dev, "Missing nexus, ignoring command\n"); 1490 + return; 1491 + } 1492 + 1493 1493 dir = get_cmd_dir(cmd->cmd_buf); 1494 1494 if (dir < 0) 1495 1495 goto out;
+22 -45
drivers/usb/gadget/function/u_ether.c
··· 897 897 } 898 898 EXPORT_SYMBOL_GPL(gether_set_gadget); 899 899 900 + int gether_attach_gadget(struct net_device *net, struct usb_gadget *g) 901 + { 902 + int ret; 903 + 904 + ret = device_move(&net->dev, &g->dev, DPM_ORDER_DEV_AFTER_PARENT); 905 + if (ret) 906 + return ret; 907 + 908 + gether_set_gadget(net, g); 909 + return 0; 910 + } 911 + EXPORT_SYMBOL_GPL(gether_attach_gadget); 912 + 913 + void gether_detach_gadget(struct net_device *net) 914 + { 915 + struct eth_dev *dev = netdev_priv(net); 916 + 917 + device_move(&net->dev, NULL, DPM_ORDER_NONE); 918 + dev->gadget = NULL; 919 + } 920 + EXPORT_SYMBOL_GPL(gether_detach_gadget); 921 + 900 922 int gether_set_dev_addr(struct net_device *net, const char *dev_addr) 901 923 { 902 924 struct eth_dev *dev; ··· 1062 1040 } 1063 1041 EXPORT_SYMBOL_GPL(gether_set_ifname); 1064 1042 1065 - void gether_setup_opts_default(struct gether_opts *opts, const char *name) 1066 - { 1067 - opts->qmult = QMULT_DEFAULT; 1068 - snprintf(opts->name, sizeof(opts->name), "%s%%d", name); 1069 - eth_random_addr(opts->dev_mac); 1070 - opts->addr_assign_type = NET_ADDR_RANDOM; 1071 - eth_random_addr(opts->host_mac); 1072 - } 1073 - EXPORT_SYMBOL_GPL(gether_setup_opts_default); 1074 - 1075 - void gether_apply_opts(struct net_device *net, struct gether_opts *opts) 1076 - { 1077 - struct eth_dev *dev = netdev_priv(net); 1078 - 1079 - dev->qmult = opts->qmult; 1080 - 1081 - if (opts->ifname_set) { 1082 - strscpy(net->name, opts->name, sizeof(net->name)); 1083 - dev->ifname_set = true; 1084 - } 1085 - 1086 - memcpy(dev->host_mac, opts->host_mac, sizeof(dev->host_mac)); 1087 - 1088 - if (opts->addr_assign_type == NET_ADDR_SET) { 1089 - memcpy(dev->dev_mac, opts->dev_mac, sizeof(dev->dev_mac)); 1090 - net->addr_assign_type = opts->addr_assign_type; 1091 - } 1092 - } 1093 - EXPORT_SYMBOL_GPL(gether_apply_opts); 1094 - 1095 1043 void gether_suspend(struct gether *link) 1096 1044 { 1097 1045 struct eth_dev *dev = link->ioport; ··· 1117 1125 free_netdev(dev->net); 1118 1126 } 1119 1127 EXPORT_SYMBOL_GPL(gether_cleanup); 1120 - 1121 - void gether_unregister_free_netdev(struct net_device *net) 1122 - { 1123 - if (!net) 1124 - return; 1125 - 1126 - struct eth_dev *dev = netdev_priv(net); 1127 - 1128 - if (net->reg_state == NETREG_REGISTERED) { 1129 - unregister_netdev(net); 1130 - flush_work(&dev->work); 1131 - } 1132 - free_netdev(net); 1133 - } 1134 - EXPORT_SYMBOL_GPL(gether_unregister_free_netdev); 1135 1128 1136 1129 /** 1137 1130 * gether_connect - notify network layer that USB link is active
+26 -30
drivers/usb/gadget/function/u_ether.h
··· 38 38 
39 39 struct eth_dev; 40 40 
41 - /**
42 - * struct gether_opts - Options for Ethernet gadget function instances
43 - * @name: Pattern for the network interface name (e.g., "usb%d").
44 - * Used to generate the net device name.
45 - * @qmult: Queue length multiplier for high/super speed.
46 - * @host_mac: The MAC address to be used by the host side.
47 - * @dev_mac: The MAC address to be used by the device side.
48 - * @ifname_set: True if the interface name pattern has been set by userspace.
49 - * @addr_assign_type: The method used for assigning the device MAC address
50 - * (e.g., NET_ADDR_RANDOM, NET_ADDR_SET).
51 - *
52 - * This structure caches network-related settings provided through configfs
53 - * before the net_device is fully instantiated. This allows for early
54 - * configuration while deferring net_device allocation until the function
55 - * is bound.
56 - */
57 - struct gether_opts {
58 - char name[IFNAMSIZ];
59 - unsigned int qmult;
60 - u8 host_mac[ETH_ALEN];
61 - u8 dev_mac[ETH_ALEN];
62 - bool ifname_set;
63 - unsigned char addr_assign_type;
64 - };
65 - 
66 41 /*
67 42 * This represents the USB side of an "ethernet" link, managed by a USB
68 43 * function which provides control and (maybe) framing. Two functions ··· 151 176 void gether_set_gadget(struct net_device *net, struct usb_gadget *g);
152 177 
153 178 /**
179 + * gether_attach_gadget - Reparent net_device to the gadget device.
180 + * @net: The network device to reparent.
181 + * @g: The target USB gadget device to parent to.
182 + *
183 + * This function moves the network device to be a child of the USB gadget
184 + * device in the device hierarchy. This is typically done when the function
185 + * is bound to a configuration.
186 + *
187 + * Returns 0 on success, or a negative error code on failure.
188 + */
189 + int gether_attach_gadget(struct net_device *net, struct usb_gadget *g);
190 + 
191 + /**
192 + * gether_detach_gadget - Detach net_device from its gadget parent.
193 + * @net: The network device to detach.
194 + *
195 + * This function moves the network device back under the virtual device
196 + * parent, effectively detaching it from the USB gadget device
197 + * hierarchy. This is typically done when the function is unbound
198 + * from a configuration but the instance is not yet freed.
199 + */
200 + void gether_detach_gadget(struct net_device *net);
201 + 
202 + DEFINE_FREE(detach_gadget, struct net_device *, if (_T) gether_detach_gadget(_T))
203 + 
204 + /**
154 205 * gether_set_dev_addr - initialize an ethernet-over-usb link with eth address
155 206 * @net: device representing this link
156 207 * @dev_addr: eth address of this device ··· 284 283 int gether_set_ifname(struct net_device *net, const char *name, int len);
285 284 
286 285 void gether_cleanup(struct eth_dev *dev);
287 - void gether_unregister_free_netdev(struct net_device *net);
288 - DEFINE_FREE(free_gether_netdev, struct net_device *, gether_unregister_free_netdev(_T));
289 - 
290 - void gether_setup_opts_default(struct gether_opts *opts, const char *name);
291 - void gether_apply_opts(struct net_device *net, struct gether_opts *opts);
292 286 
293 287 void gether_suspend(struct gether *link);
294 288 void gether_resume(struct gether *link);
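The DEFINE_FREE(detach_gadget, ...) class hooks into the scope-based cleanup helpers from linux/cleanup.h. A hedged sketch of the intended calling pattern, simplified from the f_ncm bind path above:

    struct net_device *net __free(detach_gadget) = NULL;
    int ret;

    ret = gether_attach_gadget(opts->net, gadget);
    if (ret)
            return ret;        /* nothing attached, nothing to undo */
    net = opts->net;           /* from here, any early return auto-detaches */

    if (later_setup_fails)
            return -EIO;       /* __free hook runs gether_detach_gadget() */

    retain_and_null_ptr(net);  /* success: keep the attachment */
    return 0;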
-177
drivers/usb/gadget/function/u_ether_configfs.h
··· 13 13 #ifndef __U_ETHER_CONFIGFS_H 14 14 #define __U_ETHER_CONFIGFS_H 15 15 16 - #include <linux/cleanup.h> 17 - #include <linux/hex.h> 18 - #include <linux/if_ether.h> 19 - #include <linux/mutex.h> 20 - #include <linux/netdevice.h> 21 - #include <linux/rtnetlink.h> 22 - 23 16 #define USB_ETHERNET_CONFIGFS_ITEM(_f_) \ 24 17 static void _f_##_attr_release(struct config_item *item) \ 25 18 { \ ··· 196 203 } \ 197 204 \ 198 205 CONFIGFS_ATTR(_f_##_opts_, _n_) 199 - 200 - #define USB_ETHER_OPTS_ITEM(_f_) \ 201 - static void _f_##_attr_release(struct config_item *item) \ 202 - { \ 203 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 204 - \ 205 - usb_put_function_instance(&opts->func_inst); \ 206 - } \ 207 - \ 208 - static struct configfs_item_operations _f_##_item_ops = { \ 209 - .release = _f_##_attr_release, \ 210 - } 211 - 212 - #define USB_ETHER_OPTS_ATTR_DEV_ADDR(_f_) \ 213 - static ssize_t _f_##_opts_dev_addr_show(struct config_item *item, \ 214 - char *page) \ 215 - { \ 216 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 217 - \ 218 - guard(mutex)(&opts->lock); \ 219 - return sysfs_emit(page, "%pM\n", opts->net_opts.dev_mac); \ 220 - } \ 221 - \ 222 - static ssize_t _f_##_opts_dev_addr_store(struct config_item *item, \ 223 - const char *page, size_t len) \ 224 - { \ 225 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 226 - u8 new_addr[ETH_ALEN]; \ 227 - const char *p = page; \ 228 - \ 229 - guard(mutex)(&opts->lock); \ 230 - if (opts->refcnt) \ 231 - return -EBUSY; \ 232 - \ 233 - for (int i = 0; i < ETH_ALEN; i++) { \ 234 - unsigned char num; \ 235 - if ((*p == '.') || (*p == ':')) \ 236 - p++; \ 237 - num = hex_to_bin(*p++) << 4; \ 238 - num |= hex_to_bin(*p++); \ 239 - new_addr[i] = num; \ 240 - } \ 241 - if (!is_valid_ether_addr(new_addr)) \ 242 - return -EINVAL; \ 243 - memcpy(opts->net_opts.dev_mac, new_addr, ETH_ALEN); \ 244 - opts->net_opts.addr_assign_type = NET_ADDR_SET; \ 245 - return len; \ 246 - } \ 247 - \ 248 - CONFIGFS_ATTR(_f_##_opts_, dev_addr) 249 - 250 - #define USB_ETHER_OPTS_ATTR_HOST_ADDR(_f_) \ 251 - static ssize_t _f_##_opts_host_addr_show(struct config_item *item, \ 252 - char *page) \ 253 - { \ 254 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 255 - \ 256 - guard(mutex)(&opts->lock); \ 257 - return sysfs_emit(page, "%pM\n", opts->net_opts.host_mac); \ 258 - } \ 259 - \ 260 - static ssize_t _f_##_opts_host_addr_store(struct config_item *item, \ 261 - const char *page, size_t len) \ 262 - { \ 263 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 264 - u8 new_addr[ETH_ALEN]; \ 265 - const char *p = page; \ 266 - \ 267 - guard(mutex)(&opts->lock); \ 268 - if (opts->refcnt) \ 269 - return -EBUSY; \ 270 - \ 271 - for (int i = 0; i < ETH_ALEN; i++) { \ 272 - unsigned char num; \ 273 - if ((*p == '.') || (*p == ':')) \ 274 - p++; \ 275 - num = hex_to_bin(*p++) << 4; \ 276 - num |= hex_to_bin(*p++); \ 277 - new_addr[i] = num; \ 278 - } \ 279 - if (!is_valid_ether_addr(new_addr)) \ 280 - return -EINVAL; \ 281 - memcpy(opts->net_opts.host_mac, new_addr, ETH_ALEN); \ 282 - return len; \ 283 - } \ 284 - \ 285 - CONFIGFS_ATTR(_f_##_opts_, host_addr) 286 - 287 - #define USB_ETHER_OPTS_ATTR_QMULT(_f_) \ 288 - static ssize_t _f_##_opts_qmult_show(struct config_item *item, \ 289 - char *page) \ 290 - { \ 291 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 292 - \ 293 - guard(mutex)(&opts->lock); \ 294 - return sysfs_emit(page, "%u\n", opts->net_opts.qmult); \ 295 - } \ 296 - \ 297 - static ssize_t 
_f_##_opts_qmult_store(struct config_item *item, \ 298 - const char *page, size_t len) \ 299 - { \ 300 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 301 - u32 val; \ 302 - int ret; \ 303 - \ 304 - guard(mutex)(&opts->lock); \ 305 - if (opts->refcnt) \ 306 - return -EBUSY; \ 307 - \ 308 - ret = kstrtou32(page, 0, &val); \ 309 - if (ret) \ 310 - return ret; \ 311 - \ 312 - opts->net_opts.qmult = val; \ 313 - return len; \ 314 - } \ 315 - \ 316 - CONFIGFS_ATTR(_f_##_opts_, qmult) 317 - 318 - #define USB_ETHER_OPTS_ATTR_IFNAME(_f_) \ 319 - static ssize_t _f_##_opts_ifname_show(struct config_item *item, \ 320 - char *page) \ 321 - { \ 322 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 323 - const char *name; \ 324 - \ 325 - guard(mutex)(&opts->lock); \ 326 - rtnl_lock(); \ 327 - if (opts->net_opts.ifname_set) \ 328 - name = opts->net_opts.name; \ 329 - else if (opts->net) \ 330 - name = netdev_name(opts->net); \ 331 - else \ 332 - name = "(inactive net_device)"; \ 333 - rtnl_unlock(); \ 334 - return sysfs_emit(page, "%s\n", name); \ 335 - } \ 336 - \ 337 - static ssize_t _f_##_opts_ifname_store(struct config_item *item, \ 338 - const char *page, size_t len) \ 339 - { \ 340 - struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ 341 - char tmp[IFNAMSIZ]; \ 342 - const char *p; \ 343 - size_t c_len = len; \ 344 - \ 345 - if (c_len > 0 && page[c_len - 1] == '\n') \ 346 - c_len--; \ 347 - \ 348 - if (c_len >= sizeof(tmp)) \ 349 - return -E2BIG; \ 350 - \ 351 - strscpy(tmp, page, c_len + 1); \ 352 - if (!dev_valid_name(tmp)) \ 353 - return -EINVAL; \ 354 - \ 355 - /* Require exactly one %d */ \ 356 - p = strchr(tmp, '%'); \ 357 - if (!p || p[1] != 'd' || strchr(p + 2, '%')) \ 358 - return -EINVAL; \ 359 - \ 360 - guard(mutex)(&opts->lock); \ 361 - if (opts->refcnt) \ 362 - return -EBUSY; \ 363 - strscpy(opts->net_opts.name, tmp, sizeof(opts->net_opts.name)); \ 364 - opts->net_opts.ifname_set = true; \ 365 - return len; \ 366 - } \ 367 - \ 368 - CONFIGFS_ATTR(_f_##_opts_, ifname) 369 206 370 207 #endif /* __U_ETHER_CONFIGFS_H */
+1 -3
drivers/usb/gadget/function/u_ncm.h
··· 15 15 16 16 #include <linux/usb/composite.h> 17 17 18 - #include "u_ether.h" 19 - 20 18 struct f_ncm_opts { 21 19 struct usb_function_instance func_inst; 22 20 struct net_device *net; 21 + int bind_count; 23 22 24 - struct gether_opts net_opts; 25 23 struct config_group *ncm_interf_group; 26 24 struct usb_os_desc ncm_os_desc; 27 25 char ncm_ext_compat_id[16];
+1 -1
drivers/usb/gadget/function/uvc_video.c
··· 513 513 return; 514 514 } 515 515 516 - interval_duration = 2 << (video->ep->desc->bInterval - 1); 516 + interval_duration = 1 << (video->ep->desc->bInterval - 1); 517 517 if (cdev->gadget->speed < USB_SPEED_HIGH) 518 518 interval_duration *= 10000; 519 519 else
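The one-character fix matters because high- and super-speed endpoints encode their service interval as 2^(bInterval-1) (micro)frames, so the old `2 <<` doubled every interval:

    /* bInterval = 4 at high speed (125 us microframes):
     *   1 << (4 - 1) =  8 microframes = 1 ms   (correct)
     *   2 << (4 - 1) = 16 microframes = 2 ms   (old code, twice too long)
     */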
+9 -1
drivers/usb/host/xhci-debugfs.c
··· 386 386 static int xhci_portli_show(struct seq_file *s, void *unused) 387 387 { 388 388 struct xhci_port *port = s->private; 389 - struct xhci_hcd *xhci = hcd_to_xhci(port->rhub->hcd); 389 + struct xhci_hcd *xhci; 390 390 u32 portli; 391 391 392 392 portli = readl(&port->port_reg->portli); 393 + 394 + /* port without protocol capability isn't added to a roothub */ 395 + if (!port->rhub) { 396 + seq_printf(s, "0x%08x\n", portli); 397 + return 0; 398 + } 399 + 400 + xhci = hcd_to_xhci(port->rhub->hcd); 393 401 394 402 /* PORTLI fields are valid if port is a USB3 or eUSB2V2 port */ 395 403 if (port->rhub == &xhci->usb3_rhub)
+1
drivers/usb/host/xhci-ring.c
··· 3195 3195 3196 3196 if (status & STS_HCE) { 3197 3197 xhci_warn(xhci, "WARNING: Host Controller Error\n"); 3198 + xhci_halt(xhci); 3198 3199 goto out; 3199 3200 } 3200 3201
+2 -2
drivers/usb/host/xhci.c
··· 4146 4146 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || 4147 4147 (xhci->xhc_state & XHCI_STATE_HALTED)) { 4148 4148 spin_unlock_irqrestore(&xhci->lock, flags); 4149 - kfree(command); 4149 + xhci_free_command(xhci, command); 4150 4150 return -ENODEV; 4151 4151 } 4152 4152 ··· 4154 4154 slot_id); 4155 4155 if (ret) { 4156 4156 spin_unlock_irqrestore(&xhci->lock, flags); 4157 - kfree(command); 4157 + xhci_free_command(xhci, command); 4158 4158 return ret; 4159 4159 } 4160 4160 xhci_ring_cmd_db(xhci);
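xhci_free_command() is the right counterpart here because xhci_alloc_command() can allocate a completion alongside the command, which a bare kfree() leaks. The pairing, sketched:

    cmd = xhci_alloc_command(xhci, true /* with completion */, GFP_KERNEL);
    if (!cmd)
            return -ENOMEM;
    /* ... queue it, or bail out early as the hunk above does ... */
    xhci_free_command(xhci, cmd);  /* frees the completion, then the command */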
+4 -2
drivers/usb/image/mdc800.c
··· 707 707 if (signal_pending (current)) 708 708 { 709 709 mutex_unlock(&mdc800->io_lock); 710 - return -EINTR; 710 + return len == left ? -EINTR : len-left; 711 711 } 712 712 713 713 sts=left > (mdc800->out_count-mdc800->out_ptr)?mdc800->out_count-mdc800->out_ptr:left; ··· 730 730 mutex_unlock(&mdc800->io_lock); 731 731 return len-left; 732 732 } 733 - wait_event_timeout(mdc800->download_wait, 733 + retval = wait_event_timeout(mdc800->download_wait, 734 734 mdc800->downloaded, 735 735 msecs_to_jiffies(TO_DOWNLOAD_GET_READY)); 736 + if (!retval) 737 + usb_kill_urb(mdc800->download_urb); 736 738 mdc800->downloaded = 0; 737 739 if (mdc800->download_urb->status != 0) 738 740 {
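The changed return value follows the usual short-transfer convention: once any bytes have moved, report the partial count and let the signal surface on the next call; only a transfer that moved nothing fails with -EINTR. In sketch form:

    if (signal_pending(current))
            /* partial write: report progress; nothing written: -EINTR */
            return (len == left) ? -EINTR : len - left;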
+1 -1
drivers/usb/misc/uss720.c
··· 736 736 ret = get_1284_register(pp, 0, &reg, GFP_KERNEL); 737 737 dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg); 738 738 if (ret < 0) 739 - return ret; 739 + goto probe_abort; 740 740 741 741 ret = usb_find_last_int_in_endpoint(interface, &epd); 742 742 if (!ret) {
+1 -1
drivers/usb/misc/yurex.c
··· 272 272 dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt, 273 273 dev, 1); 274 274 dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 275 + dev->bbu = -1; 275 276 if (usb_submit_urb(dev->urb, GFP_KERNEL)) { 276 277 retval = -EIO; 277 278 dev_err(&interface->dev, "Could not submitting URB\n"); ··· 281 280 282 281 /* save our data pointer in this interface device */ 283 282 usb_set_intfdata(interface, dev); 284 - dev->bbu = -1; 285 283 286 284 /* we can register the device now, as it is ready */ 287 285 retval = usb_register_dev(interface, &yurex_class);
+9
drivers/usb/renesas_usbhs/common.c
··· 815 815 816 816 usbhs_platform_call(priv, hardware_exit, pdev); 817 817 reset_control_assert(priv->rsts); 818 + 819 + /* 820 + * Explicitly free the IRQ to ensure the interrupt handler is 821 + * disabled and synchronized before freeing resources. 822 + * devm_free_irq() calls free_irq() which waits for any running 823 + * ISR to complete, preventing UAF. 824 + */ 825 + devm_free_irq(&pdev->dev, priv->irq, priv); 826 + 818 827 usbhs_mod_remove(priv); 819 828 usbhs_fifo_remove(priv); 820 829 usbhs_pipe_remove(priv);
+6 -1
drivers/usb/roles/class.c
··· 139 139 static struct usb_role_switch * 140 140 usb_role_switch_is_parent(struct fwnode_handle *fwnode) 141 141 { 142 - struct fwnode_handle *parent = fwnode_get_parent(fwnode); 142 + struct fwnode_handle *parent; 143 143 struct device *dev; 144 + 145 + if (!fwnode_device_is_compatible(fwnode, "usb-b-connector")) 146 + return NULL; 147 + 148 + parent = fwnode_get_parent(fwnode); 144 149 145 150 if (!fwnode_property_present(parent, "usb-role-switch")) { 146 151 fwnode_handle_put(parent);
+6 -1
drivers/usb/typec/altmodes/displayport.c
··· 100 100 { 101 101 u8 pin_assign = 0; 102 102 u32 conf; 103 + u32 signal; 103 104 104 105 /* DP Signalling */ 105 - conf = (dp->data.conf & DP_CONF_SIGNALLING_MASK) >> DP_CONF_SIGNALLING_SHIFT; 106 + signal = DP_CAP_DP_SIGNALLING(dp->port->vdo) & DP_CAP_DP_SIGNALLING(dp->alt->vdo); 107 + if (dp->plug_prime) 108 + signal &= DP_CAP_DP_SIGNALLING(dp->plug_prime->vdo); 109 + 110 + conf = signal << DP_CONF_SIGNALLING_SHIFT; 106 111 107 112 switch (con) { 108 113 case DP_STATUS_CON_DISABLED:
+1 -1
drivers/usb/typec/tcpm/tcpm.c
··· 7890 7890 port->partner_desc.identity = &port->partner_ident; 7891 7891 7892 7892 port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode); 7893 - if (IS_ERR_OR_NULL(port->role_sw)) 7893 + if (!port->role_sw) 7894 7894 port->role_sw = usb_role_switch_get(port->dev); 7895 7895 if (IS_ERR(port->role_sw)) { 7896 7896 err = PTR_ERR(port->role_sw);
+4 -4
fs/afs/addr_list.c
··· 298 298 srx.transport.sin.sin_addr.s_addr = xdr; 299 299 300 300 peer = rxrpc_kernel_lookup_peer(net->socket, &srx, GFP_KERNEL); 301 - if (!peer) 302 - return -ENOMEM; 301 + if (IS_ERR(peer)) 302 + return PTR_ERR(peer); 303 303 304 304 for (i = 0; i < alist->nr_ipv4; i++) { 305 305 if (peer == alist->addrs[i].peer) { ··· 342 342 memcpy(&srx.transport.sin6.sin6_addr, xdr, 16); 343 343 344 344 peer = rxrpc_kernel_lookup_peer(net->socket, &srx, GFP_KERNEL); 345 - if (!peer) 346 - return -ENOMEM; 345 + if (IS_ERR(peer)) 346 + return PTR_ERR(peer); 347 347 348 348 for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) { 349 349 if (peer == alist->addrs[i].peer) {
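rxrpc_kernel_lookup_peer() reports failure through the pointer itself, so the old NULL test could never fire and the hardcoded -ENOMEM hid the real errno. The idiom the fix restores, in isolation:

    peer = rxrpc_kernel_lookup_peer(net->socket, &srx, GFP_KERNEL);
    if (IS_ERR(peer))
            return PTR_ERR(peer);  /* the encoded errno, not a blanket -ENOMEM */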
+6
fs/binfmt_elf_fdpic.c
··· 595 595 #ifdef ELF_HWCAP2 596 596 nitems++; 597 597 #endif 598 + #ifdef ELF_HWCAP3 599 + nitems++; 600 + #endif 601 + #ifdef ELF_HWCAP4 602 + nitems++; 603 + #endif 598 604 599 605 csp = sp; 600 606 sp -= nitems * 2 * sizeof(unsigned long);
+28
fs/btrfs/backref.c
··· 1393 1393 .indirect_missing_keys = PREFTREE_INIT 1394 1394 }; 1395 1395 1396 + if (unlikely(!root)) { 1397 + btrfs_err(ctx->fs_info, 1398 + "missing extent root for extent at bytenr %llu", 1399 + ctx->bytenr); 1400 + return -EUCLEAN; 1401 + } 1402 + 1396 1403 /* Roots ulist is not needed when using a sharedness check context. */ 1397 1404 if (sc) 1398 1405 ASSERT(ctx->roots == NULL); ··· 2211 2204 struct btrfs_extent_item *ei; 2212 2205 struct btrfs_key key; 2213 2206 2207 + if (unlikely(!extent_root)) { 2208 + btrfs_err(fs_info, 2209 + "missing extent root for extent at bytenr %llu", 2210 + logical); 2211 + return -EUCLEAN; 2212 + } 2213 + 2214 2214 key.objectid = logical; 2215 2215 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) 2216 2216 key.type = BTRFS_METADATA_ITEM_KEY; ··· 2865 2851 struct btrfs_key key; 2866 2852 int ret; 2867 2853 2854 + if (unlikely(!extent_root)) { 2855 + btrfs_err(fs_info, 2856 + "missing extent root for extent at bytenr %llu", 2857 + bytenr); 2858 + return -EUCLEAN; 2859 + } 2860 + 2868 2861 key.objectid = bytenr; 2869 2862 key.type = BTRFS_METADATA_ITEM_KEY; 2870 2863 key.offset = (u64)-1; ··· 3008 2987 3009 2988 /* We're at keyed items, there is no inline item, go to the next one */ 3010 2989 extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr); 2990 + if (unlikely(!extent_root)) { 2991 + btrfs_err(iter->fs_info, 2992 + "missing extent root for extent at bytenr %llu", 2993 + iter->bytenr); 2994 + return -EUCLEAN; 2995 + } 2996 + 3011 2997 ret = btrfs_next_item(extent_root, iter->path); 3012 2998 if (ret) 3013 2999 return ret;
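The btrfs hunks in this part of the merge (backref.c above; block-group.c, disk-io.c, extent-tree.c, file-item.c, free-space-tree.c and inode.c below) repeat one defensive pattern: btrfs_extent_root(), btrfs_csum_root() and btrfs_block_group_root() can return NULL on a corrupted or incomplete root tree, and every caller now fails with -EUCLEAN instead of dereferencing the NULL pointer. The guard in isolation:

    struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);

    if (unlikely(!root)) {
            btrfs_err(fs_info,
                      "missing extent root for extent at bytenr %llu",
                      bytenr);
            return -EUCLEAN;  /* report structure corruption, don't crash */
    }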
+36
fs/btrfs/block-group.c
··· 739 739 740 740 last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET); 741 741 extent_root = btrfs_extent_root(fs_info, last); 742 + if (unlikely(!extent_root)) { 743 + btrfs_err(fs_info, 744 + "missing extent root for block group at offset %llu", 745 + block_group->start); 746 + return -EUCLEAN; 747 + } 742 748 743 749 #ifdef CONFIG_BTRFS_DEBUG 744 750 /* ··· 1067 1061 int ret; 1068 1062 1069 1063 root = btrfs_block_group_root(fs_info); 1064 + if (unlikely(!root)) { 1065 + btrfs_err(fs_info, "missing block group root"); 1066 + return -EUCLEAN; 1067 + } 1068 + 1070 1069 key.objectid = block_group->start; 1071 1070 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 1072 1071 key.offset = block_group->length; ··· 1359 1348 struct btrfs_root *root = btrfs_block_group_root(fs_info); 1360 1349 struct btrfs_chunk_map *map; 1361 1350 unsigned int num_items; 1351 + 1352 + if (unlikely(!root)) { 1353 + btrfs_err(fs_info, "missing block group root"); 1354 + return ERR_PTR(-EUCLEAN); 1355 + } 1362 1356 1363 1357 map = btrfs_find_chunk_map(fs_info, chunk_offset, 1); 1364 1358 ASSERT(map != NULL); ··· 2156 2140 int ret; 2157 2141 struct btrfs_key found_key; 2158 2142 2143 + if (unlikely(!root)) { 2144 + btrfs_err(fs_info, "missing block group root"); 2145 + return -EUCLEAN; 2146 + } 2147 + 2159 2148 btrfs_for_each_slot(root, key, &found_key, path, ret) { 2160 2149 if (found_key.objectid >= key->objectid && 2161 2150 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { ··· 2734 2713 size_t size; 2735 2714 int ret; 2736 2715 2716 + if (unlikely(!root)) { 2717 + btrfs_err(fs_info, "missing block group root"); 2718 + return -EUCLEAN; 2719 + } 2720 + 2737 2721 spin_lock(&block_group->lock); 2738 2722 btrfs_set_stack_block_group_v2_used(&bgi, block_group->used); 2739 2723 btrfs_set_stack_block_group_v2_chunk_objectid(&bgi, block_group->global_root_id); ··· 3074 3048 int ret; 3075 3049 bool dirty_bg_running; 3076 3050 3051 + if (unlikely(!root)) { 3052 + btrfs_err(fs_info, "missing block group root"); 3053 + return -EUCLEAN; 3054 + } 3055 + 3077 3056 /* 3078 3057 * This can only happen when we are doing read-only scrub on read-only 3079 3058 * mount. ··· 3222 3191 u32 old_last_identity_remap_count; 3223 3192 u64 used, remap_bytes; 3224 3193 u32 identity_remap_count; 3194 + 3195 + if (unlikely(!root)) { 3196 + btrfs_err(fs_info, "missing block group root"); 3197 + return -EUCLEAN; 3198 + } 3225 3199 3226 3200 /* 3227 3201 * Block group items update can be triggered out of commit transaction
+8 -3
fs/btrfs/compression.c
··· 320 320 
321 321 ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
322 322 ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));
323 - ASSERT(cb->writeback);
323 + /*
324 + * This flag determines if we should clear the writeback flag from the
325 + * page cache. But this function is only used by encoded writes, which
326 + * never go through the page cache.
327 + */
328 + ASSERT(!cb->writeback);
324 329 
325 330 cb->start = ordered->file_offset;
326 331 cb->len = ordered->num_bytes;
332 + ASSERT(cb->bbio.bio.bi_iter.bi_size == ordered->disk_num_bytes);
327 333 cb->compressed_len = ordered->disk_num_bytes;
328 334 cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
329 335 cb->bbio.ordered = ordered; ··· 351 345 cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE, end_bbio_compressed_write);
352 346 cb->start = start;
353 347 cb->len = len;
354 - cb->writeback = true;
355 - 
348 + cb->writeback = false;
356 349 return cb;
357 350 
358 351 }
+23 -4
fs/btrfs/disk-io.c
··· 1591 1591 * this will bump the backup pointer by one when it is 1592 1592 * done 1593 1593 */ 1594 - static void backup_super_roots(struct btrfs_fs_info *info) 1594 + static int backup_super_roots(struct btrfs_fs_info *info) 1595 1595 { 1596 1596 const int next_backup = info->backup_root_index; 1597 1597 struct btrfs_root_backup *root_backup; ··· 1622 1622 if (!btrfs_fs_incompat(info, EXTENT_TREE_V2)) { 1623 1623 struct btrfs_root *extent_root = btrfs_extent_root(info, 0); 1624 1624 struct btrfs_root *csum_root = btrfs_csum_root(info, 0); 1625 + 1626 + if (unlikely(!extent_root)) { 1627 + btrfs_err(info, "missing extent root for extent at bytenr 0"); 1628 + return -EUCLEAN; 1629 + } 1630 + if (unlikely(!csum_root)) { 1631 + btrfs_err(info, "missing csum root for extent at bytenr 0"); 1632 + return -EUCLEAN; 1633 + } 1625 1634 1626 1635 btrfs_set_backup_extent_root(root_backup, 1627 1636 extent_root->node->start); ··· 1679 1670 memcpy(&info->super_copy->super_roots, 1680 1671 &info->super_for_commit->super_roots, 1681 1672 sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); 1673 + 1674 + return 0; 1682 1675 } 1683 1676 1684 1677 /* ··· 3605 3594 } 3606 3595 } 3607 3596 3608 - btrfs_zoned_reserve_data_reloc_bg(fs_info); 3609 3597 btrfs_free_zone_cache(fs_info); 3610 3598 3611 3599 btrfs_check_active_zone_reservation(fs_info); ··· 3631 3621 ret = PTR_ERR(fs_info->transaction_kthread); 3632 3622 goto fail_cleaner; 3633 3623 } 3624 + 3625 + /* 3626 + * Starts a transaction, must be called after the transaction kthread 3627 + * is initialized. 3628 + */ 3629 + btrfs_zoned_reserve_data_reloc_bg(fs_info); 3634 3630 3635 3631 ret = btrfs_read_qgroup_config(fs_info); 3636 3632 if (ret) ··· 4062 4046 * not from fsync where the tree roots in fs_info have not 4063 4047 * been consistent on disk. 4064 4048 */ 4065 - if (max_mirrors == 0) 4066 - backup_super_roots(fs_info); 4049 + if (max_mirrors == 0) { 4050 + ret = backup_super_roots(fs_info); 4051 + if (ret < 0) 4052 + return ret; 4053 + } 4067 4054 4068 4055 sb = fs_info->super_for_commit; 4069 4056 dev_item = &sb->dev_item;
+93 -5
fs/btrfs/extent-tree.c
··· 75 75 struct btrfs_key key; 76 76 BTRFS_PATH_AUTO_FREE(path); 77 77 78 + if (unlikely(!root)) { 79 + btrfs_err(fs_info, 80 + "missing extent root for extent at bytenr %llu", start); 81 + return -EUCLEAN; 82 + } 83 + 78 84 path = btrfs_alloc_path(); 79 85 if (!path) 80 86 return -ENOMEM; ··· 137 131 key.offset = offset; 138 132 139 133 extent_root = btrfs_extent_root(fs_info, bytenr); 134 + if (unlikely(!extent_root)) { 135 + btrfs_err(fs_info, 136 + "missing extent root for extent at bytenr %llu", bytenr); 137 + return -EUCLEAN; 138 + } 139 + 140 140 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 141 141 if (ret < 0) 142 142 return ret; ··· 448 436 int recow; 449 437 int ret; 450 438 439 + if (unlikely(!root)) { 440 + btrfs_err(trans->fs_info, 441 + "missing extent root for extent at bytenr %llu", bytenr); 442 + return -EUCLEAN; 443 + } 444 + 451 445 key.objectid = bytenr; 452 446 if (parent) { 453 447 key.type = BTRFS_SHARED_DATA_REF_KEY; ··· 527 509 u32 size; 528 510 u32 num_refs; 529 511 int ret; 512 + 513 + if (unlikely(!root)) { 514 + btrfs_err(trans->fs_info, 515 + "missing extent root for extent at bytenr %llu", bytenr); 516 + return -EUCLEAN; 517 + } 530 518 531 519 key.objectid = bytenr; 532 520 if (node->parent) { ··· 692 668 struct btrfs_key key; 693 669 int ret; 694 670 671 + if (unlikely(!root)) { 672 + btrfs_err(trans->fs_info, 673 + "missing extent root for extent at bytenr %llu", bytenr); 674 + return -EUCLEAN; 675 + } 676 + 695 677 key.objectid = bytenr; 696 678 if (parent) { 697 679 key.type = BTRFS_SHARED_BLOCK_REF_KEY; ··· 721 691 struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr); 722 692 struct btrfs_key key; 723 693 int ret; 694 + 695 + if (unlikely(!root)) { 696 + btrfs_err(trans->fs_info, 697 + "missing extent root for extent at bytenr %llu", bytenr); 698 + return -EUCLEAN; 699 + } 724 700 725 701 key.objectid = bytenr; 726 702 if (node->parent) { ··· 817 781 int ret; 818 782 bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); 819 783 int needed; 784 + 785 + if (unlikely(!root)) { 786 + btrfs_err(fs_info, 787 + "missing extent root for extent at bytenr %llu", bytenr); 788 + return -EUCLEAN; 789 + } 820 790 821 791 key.objectid = bytenr; 822 792 key.type = BTRFS_EXTENT_ITEM_KEY; ··· 1722 1680 } 1723 1681 1724 1682 root = btrfs_extent_root(fs_info, key.objectid); 1683 + if (unlikely(!root)) { 1684 + btrfs_err(fs_info, 1685 + "missing extent root for extent at bytenr %llu", 1686 + key.objectid); 1687 + return -EUCLEAN; 1688 + } 1725 1689 again: 1726 1690 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 1727 1691 if (ret < 0) { ··· 1974 1926 struct btrfs_root *csum_root; 1975 1927 1976 1928 csum_root = btrfs_csum_root(fs_info, head->bytenr); 1977 - ret = btrfs_del_csums(trans, csum_root, head->bytenr, 1978 - head->num_bytes); 1929 + if (unlikely(!csum_root)) { 1930 + btrfs_err(fs_info, 1931 + "missing csum root for extent at bytenr %llu", 1932 + head->bytenr); 1933 + ret = -EUCLEAN; 1934 + } else { 1935 + ret = btrfs_del_csums(trans, csum_root, head->bytenr, 1936 + head->num_bytes); 1937 + } 1979 1938 } 1980 1939 } 1981 1940 ··· 2433 2378 u32 expected_size; 2434 2379 int type; 2435 2380 int ret; 2381 + 2382 + if (unlikely(!extent_root)) { 2383 + btrfs_err(fs_info, 2384 + "missing extent root for extent at bytenr %llu", bytenr); 2385 + return -EUCLEAN; 2386 + } 2436 2387 2437 2388 key.objectid = bytenr; 2438 2389 key.type = BTRFS_EXTENT_ITEM_KEY; ··· 3154 3093 struct btrfs_root *csum_root; 3155 3094 3156 3095 
csum_root = btrfs_csum_root(trans->fs_info, bytenr); 3096 + if (unlikely(!csum_root)) { 3097 + ret = -EUCLEAN; 3098 + btrfs_abort_transaction(trans, ret); 3099 + btrfs_err(trans->fs_info, 3100 + "missing csum root for extent at bytenr %llu", 3101 + bytenr); 3102 + return ret; 3103 + } 3104 + 3157 3105 ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes); 3158 3106 if (unlikely(ret)) { 3159 3107 btrfs_abort_transaction(trans, ret); ··· 3292 3222 u64 delayed_ref_root = href->owning_root; 3293 3223 3294 3224 extent_root = btrfs_extent_root(info, bytenr); 3295 - ASSERT(extent_root); 3225 + if (unlikely(!extent_root)) { 3226 + btrfs_err(info, 3227 + "missing extent root for extent at bytenr %llu", bytenr); 3228 + return -EUCLEAN; 3229 + } 3296 3230 3297 3231 path = btrfs_alloc_path(); 3298 3232 if (!path) ··· 5013 4939 size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY); 5014 4940 size += btrfs_extent_inline_ref_size(type); 5015 4941 4942 + extent_root = btrfs_extent_root(fs_info, ins->objectid); 4943 + if (unlikely(!extent_root)) { 4944 + btrfs_err(fs_info, 4945 + "missing extent root for extent at bytenr %llu", 4946 + ins->objectid); 4947 + return -EUCLEAN; 4948 + } 4949 + 5016 4950 path = btrfs_alloc_path(); 5017 4951 if (!path) 5018 4952 return -ENOMEM; 5019 4953 5020 - extent_root = btrfs_extent_root(fs_info, ins->objectid); 5021 4954 ret = btrfs_insert_empty_item(trans, extent_root, path, ins, size); 5022 4955 if (ret) { 5023 4956 btrfs_free_path(path); ··· 5100 5019 size += sizeof(*block_info); 5101 5020 } 5102 5021 5022 + extent_root = btrfs_extent_root(fs_info, extent_key.objectid); 5023 + if (unlikely(!extent_root)) { 5024 + btrfs_err(fs_info, 5025 + "missing extent root for extent at bytenr %llu", 5026 + extent_key.objectid); 5027 + return -EUCLEAN; 5028 + } 5029 + 5103 5030 path = btrfs_alloc_path(); 5104 5031 if (!path) 5105 5032 return -ENOMEM; 5106 5033 5107 - extent_root = btrfs_extent_root(fs_info, extent_key.objectid); 5108 5034 ret = btrfs_insert_empty_item(trans, extent_root, path, &extent_key, 5109 5035 size); 5110 5036 if (ret) {
+1
fs/btrfs/extent_io.c
··· 4507 4507 */ 4508 4508 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { 4509 4509 spin_unlock(&eb->refs_lock); 4510 + rcu_read_lock(); 4510 4511 break; 4511 4512 } 4512 4513
+7
fs/btrfs/file-item.c
··· 308 308 /* Current item doesn't contain the desired range, search again */ 309 309 btrfs_release_path(path); 310 310 csum_root = btrfs_csum_root(fs_info, disk_bytenr); 311 + if (unlikely(!csum_root)) { 312 + btrfs_err(fs_info, 313 + "missing csum root for extent at bytenr %llu", 314 + disk_bytenr); 315 + return -EUCLEAN; 316 + } 317 + 311 318 item = btrfs_lookup_csum(NULL, csum_root, path, disk_bytenr, 0); 312 319 if (IS_ERR(item)) { 313 320 ret = PTR_ERR(item);
+8 -1
fs/btrfs/free-space-tree.c
··· 1073 1073 if (ret) 1074 1074 return ret; 1075 1075 1076 + extent_root = btrfs_extent_root(trans->fs_info, block_group->start); 1077 + if (unlikely(!extent_root)) { 1078 + btrfs_err(trans->fs_info, 1079 + "missing extent root for block group at offset %llu", 1080 + block_group->start); 1081 + return -EUCLEAN; 1082 + } 1083 + 1076 1084 mutex_lock(&block_group->free_space_lock); 1077 1085 1078 1086 /* ··· 1094 1086 key.type = BTRFS_EXTENT_ITEM_KEY; 1095 1087 key.offset = 0; 1096 1088 1097 - extent_root = btrfs_extent_root(trans->fs_info, key.objectid); 1098 1089 ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0); 1099 1090 if (ret < 0) 1100 1091 goto out_locked;
+39 -5
fs/btrfs/inode.c
··· 2012 2012 */ 2013 2013 2014 2014 csum_root = btrfs_csum_root(root->fs_info, io_start); 2015 + if (unlikely(!csum_root)) { 2016 + btrfs_err(root->fs_info, 2017 + "missing csum root for extent at bytenr %llu", io_start); 2018 + ret = -EUCLEAN; 2019 + goto out; 2020 + } 2021 + 2015 2022 ret = btrfs_lookup_csums_list(csum_root, io_start, 2016 2023 io_start + args->file_extent.num_bytes - 1, 2017 2024 NULL, nowait); ··· 2756 2749 int ret; 2757 2750 2758 2751 list_for_each_entry(sum, list, list) { 2759 - trans->adding_csums = true; 2760 - if (!csum_root) 2752 + if (!csum_root) { 2761 2753 csum_root = btrfs_csum_root(trans->fs_info, 2762 2754 sum->logical); 2755 + if (unlikely(!csum_root)) { 2756 + btrfs_err(trans->fs_info, 2757 + "missing csum root for extent at bytenr %llu", 2758 + sum->logical); 2759 + return -EUCLEAN; 2760 + } 2761 + } 2762 + trans->adding_csums = true; 2763 2763 ret = btrfs_csum_file_blocks(trans, csum_root, sum); 2764 2764 trans->adding_csums = false; 2765 2765 if (ret) ··· 6626 6612 int ret; 6627 6613 bool xa_reserved = false; 6628 6614 6615 + if (!args->orphan && !args->subvol) { 6616 + /* 6617 + * Before anything else, check if we can add the name to the 6618 + * parent directory. We want to avoid a dir item overflow in 6619 + * case we have an existing dir item due to existing name 6620 + * hash collisions. We do this check here before we call 6621 + * btrfs_add_link() down below so that we can avoid a 6622 + * transaction abort (which could be exploited by malicious 6623 + * users). 6624 + * 6625 + * For subvolumes we already do this in btrfs_mksubvol(). 6626 + */ 6627 + ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root, 6628 + btrfs_ino(BTRFS_I(dir)), 6629 + name); 6630 + if (ret < 0) 6631 + return ret; 6632 + } 6633 + 6629 6634 path = btrfs_alloc_path(); 6630 6635 if (!path) 6631 6636 return -ENOMEM; ··· 9888 9855 int compression; 9889 9856 size_t orig_count; 9890 9857 const u32 min_folio_size = btrfs_min_folio_size(fs_info); 9858 + const u32 blocksize = fs_info->sectorsize; 9891 9859 u64 start, end; 9892 9860 u64 num_bytes, ram_bytes, disk_num_bytes; 9893 9861 struct btrfs_key ins; ··· 9999 9965 ret = -EFAULT; 10000 9966 goto out_cb; 10001 9967 } 10002 - if (bytes < min_folio_size) 10003 - folio_zero_range(folio, bytes, min_folio_size - bytes); 10004 - ret = bio_add_folio(&cb->bbio.bio, folio, folio_size(folio), 0); 9968 + if (!IS_ALIGNED(bytes, blocksize)) 9969 + folio_zero_range(folio, bytes, round_up(bytes, blocksize) - bytes); 9970 + ret = bio_add_folio(&cb->bbio.bio, folio, round_up(bytes, blocksize), 0); 10005 9971 if (unlikely(!ret)) { 10006 9972 folio_put(folio); 10007 9973 ret = -EINVAL;
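The last inode.c hunk above stops padding a short tail out to the minimum folio size and instead zero-fills only to the next filesystem block boundary, then adds that block-aligned length to the bio. A standalone illustration of the round-up arithmetic (plain C; this round_up() matches the kernel's for power-of-two alignments):

#include <stdio.h>

/* Same as the kernel's round_up() for power-of-two alignments. */
#define round_up(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int blocksize = 4096;	/* fs_info->sectorsize */
	unsigned int bytes = 3000;	/* payload copied into the folio */
	unsigned int padded = round_up(bytes, blocksize);

	/* Zero the gap [bytes, padded), then submit 'padded' bytes. */
	printf("zero %u bytes, add %u bytes to the bio\n",
	       padded - bytes, padded);	/* zero 1096, add 4096 */
	return 0;
}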
+37 -7
fs/btrfs/ioctl.c
··· 672 672 goto out; 673 673 } 674 674 675 + /* 676 + * Subvolumes have orphans cleaned on first dentry lookup. A new 677 + * subvolume cannot have any orphans, so we should set the bit before we 678 + * add the subvolume dentry to the dentry cache, so that it is in the 679 + * same state as a subvolume after first lookup. 680 + */ 681 + set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &new_root->state); 675 682 d_instantiate_new(dentry, new_inode_args.inode); 676 683 new_inode_args.inode = NULL; 677 684 ··· 3617 3610 } 3618 3611 } 3619 3612 3620 - trans = btrfs_join_transaction(root); 3613 + /* 2 BTRFS_QGROUP_RELATION_KEY items. */ 3614 + trans = btrfs_start_transaction(root, 2); 3621 3615 if (IS_ERR(trans)) { 3622 3616 ret = PTR_ERR(trans); 3623 3617 goto out; ··· 3690 3682 goto out; 3691 3683 } 3692 3684 3693 - trans = btrfs_join_transaction(root); 3685 + /* 3686 + * 1 BTRFS_QGROUP_INFO_KEY item. 3687 + * 1 BTRFS_QGROUP_LIMIT_KEY item. 3688 + */ 3689 + trans = btrfs_start_transaction(root, 2); 3694 3690 if (IS_ERR(trans)) { 3695 3691 ret = PTR_ERR(trans); 3696 3692 goto out; ··· 3743 3731 goto drop_write; 3744 3732 } 3745 3733 3746 - trans = btrfs_join_transaction(root); 3734 + /* 1 BTRFS_QGROUP_LIMIT_KEY item. */ 3735 + trans = btrfs_start_transaction(root, 1); 3747 3736 if (IS_ERR(trans)) { 3748 3737 ret = PTR_ERR(trans); 3749 3738 goto out; ··· 3865 3852 goto out; 3866 3853 } 3867 3854 3855 + received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid, 3856 + BTRFS_UUID_SIZE); 3857 + 3858 + /* 3859 + * Before we attempt to add the new received uuid, check if we have room 3860 + * for it in case there's already an item. If the size of the existing 3861 + * item plus this root's ID (u64) exceeds the maximum item size, we can 3862 + * return here without the need to abort a transaction. If we don't do 3863 + * this check, the btrfs_uuid_tree_add() call below would fail with 3864 + * -EOVERFLOW and result in a transaction abort. Malicious users could 3865 + * exploit this to turn the fs into RO mode. 3866 + */ 3867 + if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) { 3868 + ret = btrfs_uuid_tree_check_overflow(fs_info, sa->uuid, 3869 + BTRFS_UUID_KEY_RECEIVED_SUBVOL); 3870 + if (ret < 0) 3871 + goto out; 3872 + } 3873 + 3868 3874 /* 3869 3875 * 1 - root item 3870 3876 * 2 - uuid items (received uuid + subvol uuid) ··· 3899 3867 sa->rtime.sec = ct.tv_sec; 3900 3868 sa->rtime.nsec = ct.tv_nsec; 3901 3869 3902 - received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid, 3903 - BTRFS_UUID_SIZE); 3904 3870 if (received_uuid_changed && 3905 3871 !btrfs_is_empty_uuid(root_item->received_uuid)) { 3906 3872 ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid, 3907 3873 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 3908 3874 btrfs_root_id(root)); 3909 3875 if (unlikely(ret && ret != -ENOENT)) { 3910 - btrfs_abort_transaction(trans, ret); 3911 3876 btrfs_end_transaction(trans); 3912 3877 goto out; 3913 3878 } ··· 3919 3890 3920 3891 ret = btrfs_update_root(trans, fs_info->tree_root, 3921 3892 &root->root_key, &root->root_item); 3922 - if (ret < 0) { 3893 + if (unlikely(ret < 0)) { 3894 + btrfs_abort_transaction(trans, ret); 3923 3895 btrfs_end_transaction(trans); 3924 3896 goto out; 3925 3897 }
+2 -2
fs/btrfs/lzo.c
··· 429 429 int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb) 430 430 { 431 431 struct workspace *workspace = list_entry(ws, struct workspace, list); 432 - const struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info; 432 + struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info; 433 433 const u32 sectorsize = fs_info->sectorsize; 434 434 struct folio_iter fi; 435 435 char *kaddr; ··· 447 447 /* There must be a compressed folio and matches the sectorsize. */ 448 448 if (unlikely(!fi.folio)) 449 449 return -EINVAL; 450 - ASSERT(folio_size(fi.folio) == sectorsize); 450 + ASSERT(folio_size(fi.folio) == btrfs_min_folio_size(fs_info)); 451 451 kaddr = kmap_local_folio(fi.folio, 0); 452 452 len_in = read_compress_length(kaddr); 453 453 kunmap_local(kaddr);
-3
fs/btrfs/messages.h
··· 31 31 #define btrfs_printk_in_rcu(fs_info, level, fmt, args...) \ 32 32 btrfs_no_printk(fs_info, fmt, ##args) 33 33 34 - #define btrfs_printk_in_rcu(fs_info, level, fmt, args...) \ 35 - btrfs_no_printk(fs_info, fmt, ##args) 36 - 37 34 #define btrfs_printk_rl_in_rcu(fs_info, level, fmt, args...) \ 38 35 btrfs_no_printk(fs_info, fmt, ##args) 39 36
+10
fs/btrfs/print-tree.c
··· 38 38 { BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" }, 39 39 { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" }, 40 40 { BTRFS_RAID_STRIPE_TREE_OBJECTID, "RAID_STRIPE_TREE" }, 41 + { BTRFS_REMAP_TREE_OBJECTID, "REMAP_TREE" }, 41 42 }; 42 43 43 44 const char *btrfs_root_name(const struct btrfs_key *key, char *buf) ··· 416 415 [BTRFS_UUID_KEY_SUBVOL] = "UUID_KEY_SUBVOL", 417 416 [BTRFS_UUID_KEY_RECEIVED_SUBVOL] = "UUID_KEY_RECEIVED_SUBVOL", 418 417 [BTRFS_RAID_STRIPE_KEY] = "RAID_STRIPE", 418 + [BTRFS_IDENTITY_REMAP_KEY] = "IDENTITY_REMAP", 419 + [BTRFS_REMAP_KEY] = "REMAP", 420 + [BTRFS_REMAP_BACKREF_KEY] = "REMAP_BACKREF", 419 421 }; 420 422 421 423 if (key->type == 0 && key->objectid == BTRFS_FREE_SPACE_OBJECTID) ··· 439 435 struct btrfs_extent_data_ref *dref; 440 436 struct btrfs_shared_data_ref *sref; 441 437 struct btrfs_dev_extent *dev_extent; 438 + struct btrfs_remap_item *remap; 442 439 struct btrfs_key key; 443 440 444 441 if (!l) ··· 573 568 case BTRFS_RAID_STRIPE_KEY: 574 569 print_raid_stripe_key(l, btrfs_item_size(l, i), 575 570 btrfs_item_ptr(l, i, struct btrfs_stripe_extent)); 571 + break; 572 + case BTRFS_REMAP_KEY: 573 + case BTRFS_REMAP_BACKREF_KEY: 574 + remap = btrfs_item_ptr(l, i, struct btrfs_remap_item); 575 + pr_info("\t\taddress %llu\n", btrfs_remap_address(l, remap)); 576 576 break; 577 577 } 578 578 }
+8
fs/btrfs/qgroup.c
··· 3739 3739 mutex_lock(&fs_info->qgroup_rescan_lock); 3740 3740 extent_root = btrfs_extent_root(fs_info, 3741 3741 fs_info->qgroup_rescan_progress.objectid); 3742 + if (unlikely(!extent_root)) { 3743 + btrfs_err(fs_info, 3744 + "missing extent root for extent at bytenr %llu", 3745 + fs_info->qgroup_rescan_progress.objectid); 3746 + mutex_unlock(&fs_info->qgroup_rescan_lock); 3747 + return -EUCLEAN; 3748 + } 3749 + 3742 3750 ret = btrfs_search_slot_for_read(extent_root, 3743 3751 &fs_info->qgroup_rescan_progress, 3744 3752 path, 1, 0);
+10 -2
fs/btrfs/raid56.c
··· 2297 2297 static void fill_data_csums(struct btrfs_raid_bio *rbio) 2298 2298 { 2299 2299 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; 2300 - struct btrfs_root *csum_root = btrfs_csum_root(fs_info, 2301 - rbio->bioc->full_stripe_logical); 2300 + struct btrfs_root *csum_root; 2302 2301 const u64 start = rbio->bioc->full_stripe_logical; 2303 2302 const u32 len = (rbio->nr_data * rbio->stripe_nsectors) << 2304 2303 fs_info->sectorsize_bits; ··· 2327 2328 GFP_NOFS); 2328 2329 if (!rbio->csum_buf || !rbio->csum_bitmap) { 2329 2330 ret = -ENOMEM; 2331 + goto error; 2332 + } 2333 + 2334 + csum_root = btrfs_csum_root(fs_info, rbio->bioc->full_stripe_logical); 2335 + if (unlikely(!csum_root)) { 2336 + btrfs_err(fs_info, 2337 + "missing csum root for extent at bytenr %llu", 2338 + rbio->bioc->full_stripe_logical); 2339 + ret = -EUCLEAN; 2330 2340 goto error; 2331 2341 } 2332 2342
+34 -7
fs/btrfs/relocation.c
··· 4185 4185 dest_addr = ins.objectid; 4186 4186 dest_length = ins.offset; 4187 4187 4188 + dest_bg = btrfs_lookup_block_group(fs_info, dest_addr); 4189 + 4188 4190 if (!is_data && !IS_ALIGNED(dest_length, fs_info->nodesize)) { 4189 4191 u64 new_length = ALIGN_DOWN(dest_length, fs_info->nodesize); 4190 4192 ··· 4297 4295 if (unlikely(ret)) 4298 4296 goto end; 4299 4297 4300 - dest_bg = btrfs_lookup_block_group(fs_info, dest_addr); 4301 - 4302 4298 adjust_block_group_remap_bytes(trans, dest_bg, dest_length); 4303 4299 4304 4300 mutex_lock(&dest_bg->free_space_lock); 4305 4301 bg_needs_free_space = test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, 4306 4302 &dest_bg->runtime_flags); 4307 4303 mutex_unlock(&dest_bg->free_space_lock); 4308 - btrfs_put_block_group(dest_bg); 4309 4304 4310 4305 if (bg_needs_free_space) { 4311 4306 ret = btrfs_add_block_group_free_space(trans, dest_bg); ··· 4332 4333 btrfs_end_transaction(trans); 4333 4334 } 4334 4335 } else { 4335 - dest_bg = btrfs_lookup_block_group(fs_info, dest_addr); 4336 4336 btrfs_free_reserved_bytes(dest_bg, dest_length, 0); 4337 - btrfs_put_block_group(dest_bg); 4338 4337 4339 4338 ret = btrfs_commit_transaction(trans); 4340 4339 } 4340 + 4341 + btrfs_put_block_group(dest_bg); 4341 4342 4342 4343 return ret; 4343 4344 } ··· 4398 4399 4399 4400 leaf = path->nodes[0]; 4400 4401 } 4402 + 4403 + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4401 4404 } 4402 4405 4403 4406 remap = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item); ··· 4953 4952 struct btrfs_space_info *sinfo = src_bg->space_info; 4954 4953 4955 4954 extent_root = btrfs_extent_root(fs_info, src_bg->start); 4955 + if (unlikely(!extent_root)) { 4956 + btrfs_err(fs_info, 4957 + "missing extent root for block group at offset %llu", 4958 + src_bg->start); 4959 + return -EUCLEAN; 4960 + } 4956 4961 4957 4962 trans = btrfs_start_transaction(extent_root, 0); 4958 4963 if (IS_ERR(trans)) ··· 5311 5304 int ret; 5312 5305 bool bg_is_ro = false; 5313 5306 5307 + if (unlikely(!extent_root)) { 5308 + btrfs_err(fs_info, 5309 + "missing extent root for block group at offset %llu", 5310 + group_start); 5311 + return -EUCLEAN; 5312 + } 5313 + 5314 5314 /* 5315 5315 * This only gets set if we had a half-deleted snapshot on mount. We 5316 5316 * cannot allow relocation to start while we're still trying to clean up ··· 5548 5534 goto out; 5549 5535 } 5550 5536 5537 + rc->extent_root = btrfs_extent_root(fs_info, 0); 5538 + if (unlikely(!rc->extent_root)) { 5539 + btrfs_err(fs_info, "missing extent root for extent at bytenr 0"); 5540 + ret = -EUCLEAN; 5541 + goto out; 5542 + } 5543 + 5551 5544 ret = reloc_chunk_start(fs_info); 5552 5545 if (ret < 0) 5553 5546 goto out_end; 5554 - 5555 - rc->extent_root = btrfs_extent_root(fs_info, 0); 5556 5547 5557 5548 set_reloc_control(rc); 5558 5549 ··· 5652 5633 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr); 5653 5634 LIST_HEAD(list); 5654 5635 int ret; 5636 + 5637 + if (unlikely(!csum_root)) { 5638 + btrfs_mark_ordered_extent_error(ordered); 5639 + btrfs_err(fs_info, 5640 + "missing csum root for extent at bytenr %llu", 5641 + disk_bytenr); 5642 + return -EUCLEAN; 5643 + } 5655 5644 5656 5645 ret = btrfs_lookup_csums_list(csum_root, disk_bytenr, 5657 5646 disk_bytenr + ordered->num_bytes - 1,
+4 -1
fs/btrfs/space-info.c
··· 2194 2194 if (!btrfs_should_periodic_reclaim(space_info)) 2195 2195 continue; 2196 2196 for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) { 2197 - if (do_reclaim_sweep(space_info, raid)) 2197 + if (do_reclaim_sweep(space_info, raid)) { 2198 + spin_lock(&space_info->lock); 2198 2199 btrfs_set_periodic_reclaim_ready(space_info, false); 2200 + spin_unlock(&space_info->lock); 2201 + } 2199 2202 } 2200 2203 } 2201 2204 }
+16
fs/btrfs/transaction.c
··· 1905 1905 ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid, 1906 1906 BTRFS_UUID_KEY_RECEIVED_SUBVOL, 1907 1907 objectid); 1908 + /* 1909 + * We are creating a lot of snapshots of the same root that was 1910 + * received (has a received UUID) and its UUID item has reached 1911 + * a leaf's size limit. We can safely ignore this and avoid a 1912 + * transaction abort. A deletion of this snapshot will still work 1913 + * since we ignore if an item with a BTRFS_UUID_KEY_RECEIVED_SUBVOL 1914 + * key is missing (see btrfs_delete_subvolume()). Send/receive will 1915 + * work too since it picks the first root id from the existing 1916 + * item (it could pick any), and in case it's missing it 1917 + * falls back to search by BTRFS_UUID_KEY_SUBVOL keys. 1918 + * Creation of a snapshot does not require CAP_SYS_ADMIN, so 1919 + * we don't want users triggering transaction aborts, either 1920 + * intentionally or not. 1921 + */ 1922 + if (ret == -EOVERFLOW) 1923 + ret = 0; 1908 1924 if (unlikely(ret && ret != -EEXIST)) { 1909 1925 btrfs_abort_transaction(trans, ret); 1910 1926 goto fail;
+18 -1
fs/btrfs/tree-checker.c
··· 1284 1284 } 1285 1285 if (unlikely(btrfs_root_drop_level(&ri) >= BTRFS_MAX_LEVEL)) { 1286 1286 generic_err(leaf, slot, 1287 - "invalid root level, have %u expect [0, %u]", 1287 + "invalid root drop_level, have %u expect [0, %u]", 1288 1288 btrfs_root_drop_level(&ri), BTRFS_MAX_LEVEL - 1); 1289 + return -EUCLEAN; 1290 + } 1291 + /* 1292 + * If drop_progress.objectid is non-zero, a btrfs_drop_snapshot() was 1293 + * interrupted and the resume point was recorded in drop_progress and 1294 + * drop_level. In that case drop_level must be >= 1: level 0 is the 1295 + * leaf level and drop_snapshot never saves a checkpoint there (it 1296 + * only records checkpoints at internal node levels in DROP_REFERENCE 1297 + * stage). A zero drop_level combined with a non-zero drop_progress 1298 + * objectid indicates on-disk corruption and would cause a BUG_ON in 1299 + * merge_reloc_root() and btrfs_drop_snapshot() at mount time. 1300 + */ 1301 + if (unlikely(btrfs_disk_key_objectid(&ri.drop_progress) != 0 && 1302 + btrfs_root_drop_level(&ri) == 0)) { 1303 + generic_err(leaf, slot, 1304 + "invalid root drop_level 0 with non-zero drop_progress objectid %llu", 1305 + btrfs_disk_key_objectid(&ri.drop_progress)); 1289 1306 return -EUCLEAN; 1290 1307 } 1291 1308
+27
fs/btrfs/tree-log.c
··· 984 984 985 985 sums = list_first_entry(&ordered_sums, struct btrfs_ordered_sum, list); 986 986 csum_root = btrfs_csum_root(fs_info, sums->logical); 987 + if (unlikely(!csum_root)) { 988 + btrfs_err(fs_info, 989 + "missing csum root for extent at bytenr %llu", 990 + sums->logical); 991 + ret = -EUCLEAN; 992 + } 993 + 987 994 if (!ret) { 988 995 ret = btrfs_del_csums(trans, csum_root, sums->logical, 989 996 sums->len); ··· 4897 4890 } 4898 4891 4899 4892 csum_root = btrfs_csum_root(trans->fs_info, disk_bytenr); 4893 + if (unlikely(!csum_root)) { 4894 + btrfs_err(trans->fs_info, 4895 + "missing csum root for extent at bytenr %llu", 4896 + disk_bytenr); 4897 + return -EUCLEAN; 4898 + } 4899 + 4900 4900 disk_bytenr += extent_offset; 4901 4901 ret = btrfs_lookup_csums_list(csum_root, disk_bytenr, 4902 4902 disk_bytenr + extent_num_bytes - 1, ··· 5100 5086 /* block start is already adjusted for the file extent offset. */ 5101 5087 block_start = btrfs_extent_map_block_start(em); 5102 5088 csum_root = btrfs_csum_root(trans->fs_info, block_start); 5089 + if (unlikely(!csum_root)) { 5090 + btrfs_err(trans->fs_info, 5091 + "missing csum root for extent at bytenr %llu", 5092 + block_start); 5093 + return -EUCLEAN; 5094 + } 5095 + 5103 5096 ret = btrfs_lookup_csums_list(csum_root, block_start + csum_offset, 5104 5097 block_start + csum_offset + csum_len - 1, 5105 5098 &ordered_sums, false); ··· 6216 6195 struct btrfs_root *root, 6217 6196 struct btrfs_log_ctx *ctx) 6218 6197 { 6198 + const bool orig_log_new_dentries = ctx->log_new_dentries; 6219 6199 int ret = 0; 6220 6200 6221 6201 /* ··· 6278 6256 * dir index key range logged for the directory. So we 6279 6257 * must make sure the deletion is recorded. 6280 6258 */ 6259 + ctx->log_new_dentries = false; 6281 6260 ret = btrfs_log_inode(trans, inode, LOG_INODE_ALL, ctx); 6261 + if (!ret && ctx->log_new_dentries) 6262 + ret = log_new_dir_dentries(trans, inode, ctx); 6263 + 6282 6264 btrfs_add_delayed_iput(inode); 6283 6265 if (ret) 6284 6266 break; ··· 6317 6291 break; 6318 6292 } 6319 6293 6294 + ctx->log_new_dentries = orig_log_new_dentries; 6320 6295 ctx->logging_conflict_inodes = false; 6321 6296 if (ret) 6322 6297 free_conflicting_inodes(ctx);
+38
fs/btrfs/uuid-tree.c
··· 199 199 return 0; 200 200 } 201 201 202 + /* 203 + * Check if we can add one root ID to a UUID key. 204 + * If the key does not yet exist, we can; otherwise only if the extended item 205 + * does not exceed the maximum item size permitted by the leaf size. 206 + * 207 + * Returns 0 on success, negative value on error. 208 + */ 209 + int btrfs_uuid_tree_check_overflow(struct btrfs_fs_info *fs_info, 210 + const u8 *uuid, u8 type) 211 + { 212 + BTRFS_PATH_AUTO_FREE(path); 213 + int ret; 214 + u32 item_size; 215 + struct btrfs_key key; 216 + 217 + if (WARN_ON_ONCE(!fs_info->uuid_root)) 218 + return -EINVAL; 219 + 220 + path = btrfs_alloc_path(); 221 + if (!path) 222 + return -ENOMEM; 223 + 224 + btrfs_uuid_to_key(uuid, type, &key); 225 + ret = btrfs_search_slot(NULL, fs_info->uuid_root, &key, path, 0, 0); 226 + if (ret < 0) 227 + return ret; 228 + if (ret > 0) 229 + return 0; 230 + 231 + item_size = btrfs_item_size(path->nodes[0], path->slots[0]); 232 + 233 + if (sizeof(struct btrfs_item) + item_size + sizeof(u64) > 234 + BTRFS_LEAF_DATA_SIZE(fs_info)) 235 + return -EOVERFLOW; 236 + 237 + return 0; 238 + } 239 + 202 240 static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type, 203 241 u64 subid) 204 242 {
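btrfs stores one UUID item per (uuid, type) key and appends an 8-byte subvolume ID to its payload for each root that claims the UUID; the helper above refuses the append when the item header plus the current payload plus one more u64 would no longer fit in a leaf. A standalone restatement of that capacity check; the header and leaf sizes below are illustrative assumptions, not values taken from the patch:

#include <stdio.h>
#include <stdint.h>

/*
 * Sketch of the check in btrfs_uuid_tree_check_overflow(). Assumed
 * sizes for illustration: 25 bytes of item header per leaf item, and
 * a 16K nodesize minus the node header as usable leaf space.
 */
#define ITEM_HEADER_SIZE	25u
#define LEAF_DATA_SIZE		16283u

static int can_extend_uuid_item(uint32_t item_size)
{
	return ITEM_HEADER_SIZE + item_size + sizeof(uint64_t) <=
	       LEAF_DATA_SIZE;
}

int main(void)
{
	unsigned int ids = (LEAF_DATA_SIZE - ITEM_HEADER_SIZE) / 8;

	printf("room for ~%u subvolume ids per uuid item\n", ids);
	printf("extend at %u ids: %s\n", ids,
	       can_extend_uuid_item(ids * 8) ? "yes" : "no (-EOVERFLOW)");
	return 0;
}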
+2
fs/btrfs/uuid-tree.h
··· 12 12 u64 subid); 13 13 int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type, 14 14 u64 subid); 15 + int btrfs_uuid_tree_check_overflow(struct btrfs_fs_info *fs_info, 16 + const u8 *uuid, u8 type); 15 17 int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info); 16 18 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info); 17 19 int btrfs_uuid_scan_kthread(void *data);
+18 -9
fs/btrfs/volumes.c
··· 3587 3587 3588 3588 /* step one, relocate all the extents inside this chunk */ 3589 3589 btrfs_scrub_pause(fs_info); 3590 - ret = btrfs_relocate_block_group(fs_info, chunk_offset, true); 3590 + ret = btrfs_relocate_block_group(fs_info, chunk_offset, verbose); 3591 3591 btrfs_scrub_continue(fs_info); 3592 3592 if (ret) { 3593 3593 /* ··· 4277 4277 end: 4278 4278 while (!list_empty(chunks)) { 4279 4279 bool is_unused; 4280 + struct btrfs_block_group *bg; 4280 4281 4281 4282 rci = list_first_entry(chunks, struct remap_chunk_info, list); 4282 4283 4283 - spin_lock(&rci->bg->lock); 4284 - is_unused = !btrfs_is_block_group_used(rci->bg); 4285 - spin_unlock(&rci->bg->lock); 4284 + bg = rci->bg; 4285 + if (bg) { 4286 + /* 4287 + * This is a bit racy and the 'used' status can change 4288 + * but this is not a problem as later functions will 4289 + * verify it again. 4290 + */ 4291 + spin_lock(&bg->lock); 4292 + is_unused = !btrfs_is_block_group_used(bg); 4293 + spin_unlock(&bg->lock); 4286 4294 4287 - if (is_unused) 4288 - btrfs_mark_bg_unused(rci->bg); 4295 + if (is_unused) 4296 + btrfs_mark_bg_unused(bg); 4289 4297 4290 - if (rci->made_ro) 4291 - btrfs_dec_block_group_ro(rci->bg); 4298 + if (rci->made_ro) 4299 + btrfs_dec_block_group_ro(bg); 4292 4300 4293 - btrfs_put_block_group(rci->bg); 4301 + btrfs_put_block_group(bg); 4302 + } 4294 4303 4295 4304 list_del(&rci->list); 4296 4305 kfree(rci);
+11 -2
fs/btrfs/zoned.c
··· 337 337 if (!btrfs_fs_incompat(fs_info, ZONED)) 338 338 return 0; 339 339 340 - mutex_lock(&fs_devices->device_list_mutex); 340 + /* 341 + * No need to take the device_list mutex here, we're still in the mount 342 + * path and devices cannot be added to or removed from the list yet. 343 + */ 341 344 list_for_each_entry(device, &fs_devices->devices, dev_list) { 342 345 /* We can skip reading of zone info for missing devices */ 343 346 if (!device->bdev) ··· 350 347 if (ret) 351 348 break; 352 349 } 353 - mutex_unlock(&fs_devices->device_list_mutex); 354 350 355 351 return ret; 356 352 } ··· 1261 1259 key.offset = 0; 1262 1260 1263 1261 root = btrfs_extent_root(fs_info, key.objectid); 1262 + if (unlikely(!root)) { 1263 + btrfs_err(fs_info, 1264 + "missing extent root for extent at bytenr %llu", 1265 + key.objectid); 1266 + return -EUCLEAN; 1267 + } 1268 + 1264 1269 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1265 1270 /* We should not find the exact match */ 1266 1271 if (unlikely(!ret))
+1 -1
fs/btrfs/zstd.c
··· 600 600 bio_first_folio(&fi, &cb->bbio.bio, 0); 601 601 if (unlikely(!fi.folio)) 602 602 return -EINVAL; 603 - ASSERT(folio_size(fi.folio) == blocksize); 603 + ASSERT(folio_size(fi.folio) == min_folio_size); 604 604 605 605 stream = zstd_init_dstream( 606 606 ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
-1
fs/ceph/addr.c
··· 1326 1326 continue; 1327 1327 } else if (rc == -E2BIG) { 1328 1328 folio_unlock(folio); 1329 - ceph_wbc->fbatch.folios[i] = NULL; 1330 1329 break; 1331 1330 } 1332 1331
+2 -2
fs/ceph/debugfs.c
··· 79 79 if (req->r_inode) { 80 80 seq_printf(s, " #%llx", ceph_ino(req->r_inode)); 81 81 } else if (req->r_dentry) { 82 - struct ceph_path_info path_info; 82 + struct ceph_path_info path_info = {0}; 83 83 path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0); 84 84 if (IS_ERR(path)) 85 85 path = NULL; ··· 98 98 } 99 99 100 100 if (req->r_old_dentry) { 101 - struct ceph_path_info path_info; 101 + struct ceph_path_info path_info = {0}; 102 102 path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &path_info, 0); 103 103 if (IS_ERR(path)) 104 104 path = NULL;
+15 -2
fs/ceph/dir.c
··· 1339 1339 struct ceph_client *cl = fsc->client; 1340 1340 struct ceph_mds_client *mdsc = fsc->mdsc; 1341 1341 struct inode *inode = d_inode(dentry); 1342 + struct ceph_inode_info *ci = ceph_inode(inode); 1342 1343 struct ceph_mds_request *req; 1343 1344 bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS); 1344 1345 struct dentry *dn; ··· 1364 1363 if (!dn) { 1365 1364 try_async = false; 1366 1365 } else { 1367 - struct ceph_path_info path_info; 1366 + struct ceph_path_info path_info = {0}; 1368 1367 path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0); 1369 1368 if (IS_ERR(path)) { 1370 1369 try_async = false; ··· 1425 1424 * We have enough caps, so we assume that the unlink 1426 1425 * will succeed. Fix up the target inode and dcache. 1427 1426 */ 1428 - drop_nlink(inode); 1427 + 1428 + /* 1429 + * Protect the i_nlink update with i_ceph_lock 1430 + * to prevent racing against ceph_fill_inode() 1431 + * handling our completion on a worker thread 1432 + * and don't decrement if i_nlink has already 1433 + * been updated to zero by this completion. 1434 + */ 1435 + spin_lock(&ci->i_ceph_lock); 1436 + if (inode->i_nlink > 0) 1437 + drop_nlink(inode); 1438 + spin_unlock(&ci->i_ceph_lock); 1439 + 1429 1440 d_delete(dentry); 1430 1441 } else { 1431 1442 spin_lock(&fsc->async_unlink_conflict_lock);
+2 -2
fs/ceph/file.c
··· 397 397 if (!dentry) { 398 398 do_sync = true; 399 399 } else { 400 - struct ceph_path_info path_info; 400 + struct ceph_path_info path_info = {0}; 401 401 path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0); 402 402 if (IS_ERR(path)) { 403 403 do_sync = true; ··· 807 807 if (!dn) { 808 808 try_async = false; 809 809 } else { 810 - struct ceph_path_info path_info; 810 + struct ceph_path_info path_info = {0}; 811 811 path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0); 812 812 if (IS_ERR(path)) { 813 813 try_async = false;
+1 -1
fs/ceph/inode.c
··· 2551 2551 if (!dentry) { 2552 2552 do_sync = true; 2553 2553 } else { 2554 - struct ceph_path_info path_info; 2554 + struct ceph_path_info path_info = {0}; 2555 2555 path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0); 2556 2556 if (IS_ERR(path)) { 2557 2557 do_sync = true;
+3
fs/ceph/mds_client.c
··· 2768 2768 if (ret < 0) { 2769 2769 dput(parent); 2770 2770 dput(cur); 2771 + __putname(path); 2771 2772 return ERR_PTR(ret); 2772 2773 } 2773 2774 ··· 2778 2777 if (len < 0) { 2779 2778 dput(parent); 2780 2779 dput(cur); 2780 + __putname(path); 2781 2781 return ERR_PTR(len); 2782 2782 } 2783 2783 } ··· 2815 2813 * cannot ever succeed. Creating paths that long is 2816 2814 * possible with Ceph, but Linux cannot use them. 2817 2815 */ 2816 + __putname(path); 2818 2817 return ERR_PTR(-ENAMETOOLONG); 2819 2818 } 2820 2819
+2 -1
fs/nfs/Kconfig
··· 87 87 space programs which can be found in the Linux nfs-utils package, 88 88 available from http://linux-nfs.org/. 89 89 90 - If unsure, say Y. 90 + If unsure, say N. 91 91 92 92 config NFS_SWAP 93 93 bool "Provide swap over NFS support" ··· 100 100 config NFS_V4_0 101 101 bool "NFS client support for NFSv4.0" 102 102 depends on NFS_V4 103 + default y 103 104 help 104 105 This option enables support for minor version 0 of the NFSv4 protocol 105 106 (RFC 3530) in the kernel's NFS client.
+6 -1
fs/nfs/nfs3proc.c
··· 392 392 if (status != 0) 393 393 goto out_release_acls; 394 394 395 - if (d_alias) 395 + if (d_alias) { 396 + if (d_is_dir(d_alias)) { 397 + status = -EISDIR; 398 + goto out_dput; 399 + } 396 400 dentry = d_alias; 401 + } 397 402 398 403 /* When we created the file with exclusive semantics, make 399 404 * sure we set the attributes afterwards. */
+54 -9
fs/nfsd/export.c
··· 36 36 * second map contains a reference to the entry in the first map. 37 37 */ 38 38 39 + static struct workqueue_struct *nfsd_export_wq; 40 + 39 41 #define EXPKEY_HASHBITS 8 40 42 #define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS) 41 43 #define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1) 42 44 43 - static void expkey_put(struct kref *ref) 45 + static void expkey_release(struct work_struct *work) 44 46 { 45 - struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); 47 + struct svc_expkey *key = container_of(to_rcu_work(work), 48 + struct svc_expkey, ek_rwork); 46 49 47 50 if (test_bit(CACHE_VALID, &key->h.flags) && 48 51 !test_bit(CACHE_NEGATIVE, &key->h.flags)) 49 52 path_put(&key->ek_path); 50 53 auth_domain_put(key->ek_client); 51 - kfree_rcu(key, ek_rcu); 54 + kfree(key); 55 + } 56 + 57 + static void expkey_put(struct kref *ref) 58 + { 59 + struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); 60 + 61 + INIT_RCU_WORK(&key->ek_rwork, expkey_release); 62 + queue_rcu_work(nfsd_export_wq, &key->ek_rwork); 52 63 } 53 64 54 65 static int expkey_upcall(struct cache_detail *cd, struct cache_head *h) ··· 364 353 EXP_STATS_COUNTERS_NUM); 365 354 } 366 355 367 - static void svc_export_release(struct rcu_head *rcu_head) 356 + static void svc_export_release(struct work_struct *work) 368 357 { 369 - struct svc_export *exp = container_of(rcu_head, struct svc_export, 370 - ex_rcu); 358 + struct svc_export *exp = container_of(to_rcu_work(work), 359 + struct svc_export, ex_rwork); 371 360 361 + path_put(&exp->ex_path); 362 + auth_domain_put(exp->ex_client); 372 363 nfsd4_fslocs_free(&exp->ex_fslocs); 373 364 export_stats_destroy(exp->ex_stats); 374 365 kfree(exp->ex_stats); ··· 382 369 { 383 370 struct svc_export *exp = container_of(ref, struct svc_export, h.ref); 384 371 385 - path_put(&exp->ex_path); 386 - auth_domain_put(exp->ex_client); 387 - call_rcu(&exp->ex_rcu, svc_export_release); 372 + INIT_RCU_WORK(&exp->ex_rwork, svc_export_release); 373 + queue_rcu_work(nfsd_export_wq, &exp->ex_rwork); 388 374 } 389 375 390 376 static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h) ··· 1491 1479 .show = e_show, 1492 1480 }; 1493 1481 1482 + /** 1483 + * nfsd_export_wq_init - allocate the export release workqueue 1484 + * 1485 + * Called once at module load. The workqueue runs deferred svc_export and 1486 + * svc_expkey release work scheduled by queue_rcu_work() in the cache put 1487 + * callbacks. 1488 + * 1489 + * Return values: 1490 + * %0: workqueue allocated 1491 + * %-ENOMEM: allocation failed 1492 + */ 1493 + int nfsd_export_wq_init(void) 1494 + { 1495 + nfsd_export_wq = alloc_workqueue("nfsd_export", WQ_UNBOUND, 0); 1496 + if (!nfsd_export_wq) 1497 + return -ENOMEM; 1498 + return 0; 1499 + } 1500 + 1501 + /** 1502 + * nfsd_export_wq_shutdown - drain and free the export release workqueue 1503 + * 1504 + * Called once at module unload. Per-namespace teardown in 1505 + * nfsd_export_shutdown() has already drained all deferred work. 1506 + */ 1507 + void nfsd_export_wq_shutdown(void) 1508 + { 1509 + destroy_workqueue(nfsd_export_wq); 1510 + } 1511 + 1494 1512 /* 1495 1513 * Initialize the exports module. 1496 1514 */ ··· 1582 1540 1583 1541 cache_unregister_net(nn->svc_expkey_cache, net); 1584 1542 cache_unregister_net(nn->svc_export_cache, net); 1543 + /* Drain deferred export and expkey release work. 
*/ 1544 + rcu_barrier(); 1545 + flush_workqueue(nfsd_export_wq); 1585 1546 cache_destroy_net(nn->svc_expkey_cache, net); 1586 1547 cache_destroy_net(nn->svc_export_cache, net); 1587 1548 svcauth_unix_purge(net);
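The export.c rework above replaces kfree_rcu()/call_rcu() with queue_rcu_work() on a dedicated workqueue: path_put() and auth_domain_put() can sleep, which is forbidden in an RCU callback (softirq context) but fine in a work item that runs after the grace period. A minimal sketch of the pattern with hypothetical names, using the same workqueue API the patch uses:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
	struct rcu_work rwork;
	/* ... payload whose teardown may sleep ... */
};

static struct workqueue_struct *my_wq;	/* from alloc_workqueue() */

static void my_obj_release(struct work_struct *work)
{
	struct my_obj *obj = container_of(to_rcu_work(work),
					  struct my_obj, rwork);

	/* Process context: sleeping (e.g. path_put()) is safe here. */
	kfree(obj);
}

static void my_obj_put(struct my_obj *obj)
{
	/* Run my_obj_release() on my_wq after an RCU grace period. */
	INIT_RCU_WORK(&obj->rwork, my_obj_release);
	queue_rcu_work(my_wq, &obj->rwork);
}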
+5 -2
fs/nfsd/export.h
··· 7 7 8 8 #include <linux/sunrpc/cache.h> 9 9 #include <linux/percpu_counter.h> 10 + #include <linux/workqueue.h> 10 11 #include <uapi/linux/nfsd/export.h> 11 12 #include <linux/nfs4.h> 12 13 ··· 76 75 u32 ex_layout_types; 77 76 struct nfsd4_deviceid_map *ex_devid_map; 78 77 struct cache_detail *cd; 79 - struct rcu_head ex_rcu; 78 + struct rcu_work ex_rwork; 80 79 unsigned long ex_xprtsec_modes; 81 80 struct export_stats *ex_stats; 82 81 }; ··· 93 92 u32 ek_fsid[6]; 94 93 95 94 struct path ek_path; 96 - struct rcu_head ek_rcu; 95 + struct rcu_work ek_rwork; 97 96 }; 98 97 99 98 #define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) ··· 111 110 /* 112 111 * Function declarations 113 112 */ 113 + int nfsd_export_wq_init(void); 114 + void nfsd_export_wq_shutdown(void); 114 115 int nfsd_export_init(struct net *); 115 116 void nfsd_export_shutdown(struct net *); 116 117 void nfsd_export_flush(struct net *);
+7 -2
fs/nfsd/nfs4xdr.c
··· 6281 6281 int len = xdr->buf->len - (op_status_offset + XDR_UNIT); 6282 6282 6283 6283 so->so_replay.rp_status = op->status; 6284 - so->so_replay.rp_buflen = len; 6285 - read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT, 6284 + if (len <= NFSD4_REPLAY_ISIZE) { 6285 + so->so_replay.rp_buflen = len; 6286 + read_bytes_from_xdr_buf(xdr->buf, 6287 + op_status_offset + XDR_UNIT, 6286 6288 so->so_replay.rp_buf, len); 6289 + } else { 6290 + so->so_replay.rp_buflen = 0; 6291 + } 6287 6292 } 6288 6293 status: 6289 6294 op->status = nfsd4_map_status(op->status,
+19 -3
fs/nfsd/nfsctl.c
··· 149 149 150 150 seq = file->private_data; 151 151 seq->private = nn->svc_export_cache; 152 + get_net(net); 152 153 return 0; 154 + } 155 + 156 + static int exports_release(struct inode *inode, struct file *file) 157 + { 158 + struct seq_file *seq = file->private_data; 159 + struct cache_detail *cd = seq->private; 160 + 161 + put_net(cd->net); 162 + return seq_release(inode, file); 153 163 } 154 164 155 165 static int exports_nfsd_open(struct inode *inode, struct file *file) ··· 171 161 .open = exports_nfsd_open, 172 162 .read = seq_read, 173 163 .llseek = seq_lseek, 174 - .release = seq_release, 164 + .release = exports_release, 175 165 }; 176 166 177 167 static int export_features_show(struct seq_file *m, void *v) ··· 1386 1376 .proc_open = exports_proc_open, 1387 1377 .proc_read = seq_read, 1388 1378 .proc_lseek = seq_lseek, 1389 - .proc_release = seq_release, 1379 + .proc_release = exports_release, 1390 1380 }; 1391 1381 1392 1382 static int create_proc_exports_entry(void) ··· 2269 2259 if (retval) 2270 2260 goto out_free_pnfs; 2271 2261 nfsd_lockd_init(); /* lockd->nfsd callbacks */ 2262 + retval = nfsd_export_wq_init(); 2263 + if (retval) 2264 + goto out_free_lockd; 2272 2265 retval = register_pernet_subsys(&nfsd_net_ops); 2273 2266 if (retval < 0) 2274 - goto out_free_lockd; 2267 + goto out_free_export_wq; 2275 2268 retval = register_cld_notifier(); 2276 2269 if (retval) 2277 2270 goto out_free_subsys; ··· 2303 2290 unregister_cld_notifier(); 2304 2291 out_free_subsys: 2305 2292 unregister_pernet_subsys(&nfsd_net_ops); 2293 + out_free_export_wq: 2294 + nfsd_export_wq_shutdown(); 2306 2295 out_free_lockd: 2307 2296 nfsd_lockd_shutdown(); 2308 2297 nfsd_drc_slab_free(); ··· 2325 2310 nfsd4_destroy_laundry_wq(); 2326 2311 unregister_cld_notifier(); 2327 2312 unregister_pernet_subsys(&nfsd_net_ops); 2313 + nfsd_export_wq_shutdown(); 2328 2314 nfsd_drc_slab_free(); 2329 2315 nfsd_lockd_shutdown(); 2330 2316 nfsd4_free_slabs();
+12 -5
fs/nfsd/state.h
··· 541 541 struct xdr_netobj cr_princhash; 542 542 }; 543 543 544 - /* A reasonable value for REPLAY_ISIZE was estimated as follows: 545 - * The OPEN response, typically the largest, requires 546 - * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) + 547 - * 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) + 548 - * 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes 544 + /* 545 + * REPLAY_ISIZE is sized for an OPEN response with delegation: 546 + * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 547 + * 8(verifier) + 4(deleg. type) + 8(deleg. stateid) + 548 + * 4(deleg. recall flag) + 20(deleg. space limit) + 549 + * ~32(deleg. ace) = 112 bytes 550 + * 551 + * Some responses can exceed this. A LOCK denial includes the conflicting 552 + * lock owner, which can be up to 1024 bytes (NFS4_OPAQUE_LIMIT). Responses 553 + * larger than REPLAY_ISIZE are not cached in rp_ibuf; only rp_status is 554 + * saved. Enlarging this constant increases the size of every 555 + * nfs4_stateowner. 549 556 */ 550 557 551 558 #define NFSD4_REPLAY_ISIZE 112
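The rewritten comment above is a byte budget, and its terms sum to exactly the 112-byte constant. A compile-time restatement of that arithmetic (a sketch using C11 static_assert; not part of the patch):

/* Sketch: the NFSD4_REPLAY_ISIZE budget from the comment, spelled out. */
#define OPEN_REPLY_MAX	(4 /* status */ + 8 /* stateid */ +		\
			 20 /* changeinfo */ + 4 /* rflags */ +		\
			 8 /* verifier */ + 4 /* deleg. type */ +	\
			 8 /* deleg. stateid */ + 4 /* recall flag */ +	\
			 20 /* space limit */ + 32 /* deleg. ace */)

static_assert(OPEN_REPLY_MAX == 112, "NFSD4_REPLAY_ISIZE budget drifted");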
+1 -1
fs/smb/client/cifsacl.c
··· 1489 1489 struct cifsFileInfo *open_file = NULL; 1490 1490 1491 1491 if (inode) 1492 - open_file = find_readable_file(CIFS_I(inode), true); 1492 + open_file = find_readable_file(CIFS_I(inode), FIND_FSUID_ONLY); 1493 1493 if (!open_file) 1494 1494 return get_cifs_acl_by_path(cifs_sb, path, pacllen, info); 1495 1495
+1 -1
fs/smb/client/cifsfs.c
··· 1269 1269 struct cifsFileInfo *writeable_srcfile; 1270 1270 int rc = -EINVAL; 1271 1271 1272 - writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY); 1272 + writeable_srcfile = find_writable_file(src_cifsi, FIND_FSUID_ONLY); 1273 1273 if (writeable_srcfile) { 1274 1274 if (src_tcon->ses->server->ops->set_file_size) 1275 1275 rc = src_tcon->ses->server->ops->set_file_size(
+23 -6
fs/smb/client/cifsglob.h
··· 20 20 #include <linux/utsname.h> 21 21 #include <linux/sched/mm.h> 22 22 #include <linux/netfs.h> 23 + #include <linux/fcntl.h> 23 24 #include "cifs_fs_sb.h" 24 25 #include "cifsacl.h" 25 26 #include <crypto/internal/hash.h> ··· 1885 1884 } 1886 1885 1887 1886 1888 - /* cifs_get_writable_file() flags */ 1889 - enum cifs_writable_file_flags { 1890 - FIND_WR_ANY = 0U, 1891 - FIND_WR_FSUID_ONLY = (1U << 0), 1892 - FIND_WR_WITH_DELETE = (1U << 1), 1893 - FIND_WR_NO_PENDING_DELETE = (1U << 2), 1887 + enum cifs_find_flags { 1888 + FIND_ANY = 0U, 1889 + FIND_FSUID_ONLY = (1U << 0), 1890 + FIND_WITH_DELETE = (1U << 1), 1891 + FIND_NO_PENDING_DELETE = (1U << 2), 1892 + FIND_OPEN_FLAGS = (1U << 3), 1894 1893 }; 1895 1894 1896 1895 #define MID_FREE 0 ··· 2375 2374 { 2376 2375 return cifs_sb_flags(sbi) & CIFS_MOUNT_SHUTDOWN; 2377 2376 } 2377 + 2378 + static inline int cifs_open_create_options(unsigned int oflags, int opts) 2379 + { 2380 + /* O_SYNC also has bit for O_DSYNC so following check picks up either */ 2381 + if (oflags & O_SYNC) 2382 + opts |= CREATE_WRITE_THROUGH; 2383 + if (oflags & O_DIRECT) 2384 + opts |= CREATE_NO_BUFFER; 2385 + return opts; 2386 + } 2387 + 2388 + /* 2389 + * The number of blocks is not related to (i_size / i_blksize); block 2390 + * counts are expressed in 512-byte (2**9) units, rounded up. 2391 + */ 2392 + #define CIFS_INO_BLOCKS(size) DIV_ROUND_UP_ULL((u64)(size), 512) 2393 + 2378 2394 #endif /* _CIFS_GLOB_H */
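CIFS_INO_BLOCKS() above centralizes the i_blocks computation that inode.c and smb2ops.c previously open-coded as (512 - 1 + size) >> 9: st_blocks counts 512-byte units regardless of the filesystem block size, and rounding up keeps a 1-byte file from reporting zero blocks. A userspace check of the edge cases, with DIV_ROUND_UP_ULL restated as plain arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Userspace restatement of CIFS_INO_BLOCKS(). */
#define CIFS_INO_BLOCKS(size)	(((uint64_t)(size) + 511) / 512)

int main(void)
{
	printf("%llu\n", (unsigned long long)CIFS_INO_BLOCKS(0));	/* 0 */
	printf("%llu\n", (unsigned long long)CIFS_INO_BLOCKS(1));	/* 1 */
	printf("%llu\n", (unsigned long long)CIFS_INO_BLOCKS(512));	/* 1 */
	printf("%llu\n", (unsigned long long)CIFS_INO_BLOCKS(513));	/* 2 */
	return 0;
}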
+22 -4
fs/smb/client/cifsproto.h
··· 138 138 ssize_t result); 139 139 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode, 140 140 int flags); 141 - int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags, 142 - struct cifsFileInfo **ret_file); 141 + int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, 142 + unsigned int find_flags, unsigned int open_flags, 143 + struct cifsFileInfo **ret_file); 143 144 int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, int flags, 144 145 struct cifsFileInfo **ret_file); 145 - struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 146 - bool fsuid_only); 146 + struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode, 147 + unsigned int find_flags, 148 + unsigned int open_flags); 147 149 int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, 148 150 struct cifsFileInfo **ret_file); 149 151 int cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode, ··· 596 594 sg_set_page(&sgtable->sgl[sgtable->nents++], 597 595 virt_to_page((void *)addr), buflen, off); 598 596 } 597 + } 598 + 599 + static inline int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, 600 + unsigned int find_flags, 601 + struct cifsFileInfo **ret_file) 602 + { 603 + find_flags &= ~FIND_OPEN_FLAGS; 604 + return __cifs_get_writable_file(cifs_inode, find_flags, 0, ret_file); 605 + } 606 + 607 + static inline struct cifsFileInfo * 608 + find_readable_file(struct cifsInodeInfo *cinode, unsigned int find_flags) 609 + { 610 + find_flags &= ~FIND_OPEN_FLAGS; 611 + find_flags |= FIND_NO_PENDING_DELETE; 612 + return __find_readable_file(cinode, find_flags, 0); 599 613 } 600 614 601 615 #endif /* _CIFSPROTO_H */
+4
fs/smb/client/connect.c
··· 1955 1955 case Kerberos: 1956 1956 if (!uid_eq(ctx->cred_uid, ses->cred_uid)) 1957 1957 return 0; 1958 + if (strncmp(ses->user_name ?: "", 1959 + ctx->username ?: "", 1960 + CIFS_MAX_USERNAME_LEN)) 1961 + return 0; 1958 1962 break; 1959 1963 case NTLMv2: 1960 1964 case RawNTLMSSP:
+2 -2
fs/smb/client/dir.c
··· 187 187 const char *full_path; 188 188 void *page = alloc_dentry_path(); 189 189 struct inode *newinode = NULL; 190 - unsigned int sbflags; 190 + unsigned int sbflags = cifs_sb_flags(cifs_sb); 191 191 int disposition; 192 192 struct TCP_Server_Info *server = tcon->ses->server; 193 193 struct cifs_open_parms oparms; ··· 308 308 goto out; 309 309 } 310 310 311 + create_options |= cifs_open_create_options(oflags, create_options); 311 312 /* 312 313 * if we're not using unix extensions, see if we need to set 313 314 * ATTR_READONLY on the create call ··· 368 367 * If Open reported that we actually created a file then we now have to 369 368 * set the mode if possible. 370 369 */ 371 - sbflags = cifs_sb_flags(cifs_sb); 372 370 if ((tcon->unix_ext) && (*oplock & CIFS_CREATE_ACTION)) { 373 371 struct cifs_unix_set_info_args args = { 374 372 .mode = mode,
+69 -61
fs/smb/client/file.c
··· 255 255 struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq); 256 256 int ret; 257 257 258 - ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile); 258 + ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_ANY, &req->cfile); 259 259 if (ret) { 260 260 cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret); 261 261 return; ··· 584 584 *********************************************************************/ 585 585 586 586 disposition = cifs_get_disposition(f_flags); 587 - 588 587 /* BB pass O_SYNC flag through on file attributes .. BB */ 589 - 590 - /* O_SYNC also has bit for O_DSYNC so following check picks up either */ 591 - if (f_flags & O_SYNC) 592 - create_options |= CREATE_WRITE_THROUGH; 593 - 594 - if (f_flags & O_DIRECT) 595 - create_options |= CREATE_NO_BUFFER; 588 + create_options |= cifs_open_create_options(f_flags, create_options); 596 589 597 590 retry_open: 598 591 oparms = (struct cifs_open_parms) { ··· 956 963 return tcon->ses->server->ops->flush(xid, tcon, 957 964 &cfile->fid); 958 965 } 959 - rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile); 966 + rc = cifs_get_writable_file(CIFS_I(inode), FIND_ANY, &cfile); 960 967 if (!rc) { 961 968 tcon = tlink_tcon(cfile->tlink); 962 969 rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid); ··· 981 988 return -ERESTARTSYS; 982 989 mapping_set_error(inode->i_mapping, rc); 983 990 984 - cfile = find_writable_file(cinode, FIND_WR_FSUID_ONLY); 991 + cfile = find_writable_file(cinode, FIND_FSUID_ONLY); 985 992 rc = cifs_file_flush(xid, inode, cfile); 986 993 if (!rc) { 987 994 if (cfile) { ··· 993 1000 if (!rc) { 994 1001 netfs_resize_file(&cinode->netfs, 0, true); 995 1002 cifs_setsize(inode, 0); 996 - inode->i_blocks = 0; 997 1003 } 998 1004 } 999 1005 if (cfile) ··· 1060 1068 1061 1069 /* Get the cached handle as SMB2 close is deferred */ 1062 1070 if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) { 1063 - rc = cifs_get_writable_path(tcon, full_path, 1064 - FIND_WR_FSUID_ONLY | 1065 - FIND_WR_NO_PENDING_DELETE, 1066 - &cfile); 1071 + rc = __cifs_get_writable_file(CIFS_I(inode), 1072 + FIND_FSUID_ONLY | 1073 + FIND_NO_PENDING_DELETE | 1074 + FIND_OPEN_FLAGS, 1075 + file->f_flags, &cfile); 1067 1076 } else { 1068 - rc = cifs_get_readable_path(tcon, full_path, &cfile); 1077 + cfile = __find_readable_file(CIFS_I(inode), 1078 + FIND_NO_PENDING_DELETE | 1079 + FIND_OPEN_FLAGS, 1080 + file->f_flags); 1081 + rc = cfile ? 
0 : -ENOENT; 1069 1082 } 1070 1083 if (rc == 0) { 1071 - unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC); 1072 - unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC); 1073 - 1074 - if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) && 1075 - (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) { 1076 - file->private_data = cfile; 1077 - spin_lock(&CIFS_I(inode)->deferred_lock); 1078 - cifs_del_deferred_close(cfile); 1079 - spin_unlock(&CIFS_I(inode)->deferred_lock); 1080 - goto use_cache; 1081 - } 1082 - _cifsFileInfo_put(cfile, true, false); 1083 - } else { 1084 - /* hard link on the defeered close file */ 1085 - rc = cifs_get_hardlink_path(tcon, inode, file); 1086 - if (rc) 1087 - cifs_close_deferred_file(CIFS_I(inode)); 1084 + file->private_data = cfile; 1085 + spin_lock(&CIFS_I(inode)->deferred_lock); 1086 + cifs_del_deferred_close(cfile); 1087 + spin_unlock(&CIFS_I(inode)->deferred_lock); 1088 + goto use_cache; 1088 1089 } 1090 + /* hard link on the deferred close file */ 1091 + rc = cifs_get_hardlink_path(tcon, inode, file); 1092 + if (rc) 1093 + cifs_close_deferred_file(CIFS_I(inode)); 1089 1094 1090 1095 if (server->oplocks) 1091 1096 oplock = REQ_OPLOCK; ··· 1303 1314 rdwr_for_fscache = 1; 1304 1315 1305 1316 desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache); 1306 - 1307 - /* O_SYNC also has bit for O_DSYNC so following check picks up either */ 1308 - if (cfile->f_flags & O_SYNC) 1309 - create_options |= CREATE_WRITE_THROUGH; 1310 - 1311 - if (cfile->f_flags & O_DIRECT) 1312 - create_options |= CREATE_NO_BUFFER; 1317 + create_options |= cifs_open_create_options(cfile->f_flags, 1318 + create_options); 1313 1319 1314 1320 if (server->ops->get_lease_key) 1315 1321 server->ops->get_lease_key(inode, &cfile->fid); ··· 2508 2524 netfs_write_subrequest_terminated(&wdata->subreq, result); 2509 2525 } 2510 2526 2511 - struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 2512 - bool fsuid_only) 2527 + static bool open_flags_match(struct cifsInodeInfo *cinode, 2528 + unsigned int oflags, unsigned int cflags) 2529 + { 2530 + struct inode *inode = &cinode->netfs.inode; 2531 + int crw = 0, orw = 0; 2532 + 2533 + oflags &= ~(O_CREAT | O_EXCL | O_TRUNC); 2534 + cflags &= ~(O_CREAT | O_EXCL | O_TRUNC); 2535 + 2536 + if (cifs_fscache_enabled(inode)) { 2537 + if (OPEN_FMODE(cflags) & FMODE_WRITE) 2538 + crw = 1; 2539 + if (OPEN_FMODE(oflags) & FMODE_WRITE) 2540 + orw = 1; 2541 + } 2542 + if (cifs_convert_flags(oflags, orw) != cifs_convert_flags(cflags, crw)) 2543 + return false; 2544 + 2545 + return (oflags & (O_SYNC | O_DIRECT)) == (cflags & (O_SYNC | O_DIRECT)); 2546 + } 2547 + 2548 + struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode, 2549 + unsigned int find_flags, 2550 + unsigned int open_flags) 2513 2551 { 2514 2552 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode); 2553 + bool fsuid_only = find_flags & FIND_FSUID_ONLY; 2515 2554 struct cifsFileInfo *open_file = NULL; 2516 2555 2517 2556 /* only filter by fsuid on multiuser mounts */ ··· 2547 2540 have a close pending, we go through the whole list */ 2548 2541 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 2549 2542 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid())) 2543 + continue; 2544 + if ((find_flags & FIND_NO_PENDING_DELETE) && 2545 + open_file->status_file_deleted) 2546 + continue; 2547 + if ((find_flags & FIND_OPEN_FLAGS) && 2548 + !open_flags_match(cifs_inode, open_flags, 2549 + 
open_file->f_flags)) 2550 2550 continue; 2551 2551 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) { 2552 2552 if ((!open_file->invalidHandle)) { ··· 2573 2559 } 2574 2560 2575 2561 /* Return -EBADF if no handle is found and general rc otherwise */ 2576 - int 2577 - cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags, 2578 - struct cifsFileInfo **ret_file) 2562 + int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, 2563 + unsigned int find_flags, unsigned int open_flags, 2564 + struct cifsFileInfo **ret_file) 2579 2565 { 2580 2566 struct cifsFileInfo *open_file, *inv_file = NULL; 2581 2567 struct cifs_sb_info *cifs_sb; 2582 2568 bool any_available = false; 2583 2569 int rc = -EBADF; 2584 2570 unsigned int refind = 0; 2585 - bool fsuid_only = flags & FIND_WR_FSUID_ONLY; 2586 - bool with_delete = flags & FIND_WR_WITH_DELETE; 2571 + bool fsuid_only = find_flags & FIND_FSUID_ONLY; 2572 + bool with_delete = find_flags & FIND_WITH_DELETE; 2587 2573 *ret_file = NULL; 2588 2574 2589 2575 /* ··· 2617 2603 continue; 2618 2604 if (with_delete && !(open_file->fid.access & DELETE)) 2619 2605 continue; 2620 - if ((flags & FIND_WR_NO_PENDING_DELETE) && 2606 + if ((find_flags & FIND_NO_PENDING_DELETE) && 2621 2607 open_file->status_file_deleted) 2608 + continue; 2609 + if ((find_flags & FIND_OPEN_FLAGS) && 2610 + !open_flags_match(cifs_inode, open_flags, 2611 + open_file->f_flags)) 2622 2612 continue; 2623 2613 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { 2624 2614 if (!open_file->invalidHandle) { ··· 2740 2722 cinode = CIFS_I(d_inode(cfile->dentry)); 2741 2723 spin_unlock(&tcon->open_file_lock); 2742 2724 free_dentry_path(page); 2743 - *ret_file = find_readable_file(cinode, 0); 2744 - if (*ret_file) { 2745 - spin_lock(&cinode->open_file_lock); 2746 - if ((*ret_file)->status_file_deleted) { 2747 - spin_unlock(&cinode->open_file_lock); 2748 - cifsFileInfo_put(*ret_file); 2749 - *ret_file = NULL; 2750 - } else { 2751 - spin_unlock(&cinode->open_file_lock); 2752 - } 2753 - } 2725 + *ret_file = find_readable_file(cinode, FIND_ANY); 2754 2726 return *ret_file ? 0 : -ENOENT; 2755 2727 } 2756 2728 ··· 2812 2804 } 2813 2805 2814 2806 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) { 2815 - smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY); 2807 + smbfile = find_writable_file(CIFS_I(inode), FIND_ANY); 2816 2808 if (smbfile) { 2817 2809 rc = server->ops->flush(xid, tcon, &smbfile->fid); 2818 2810 cifsFileInfo_put(smbfile);
+1 -1
fs/smb/client/fs_context.c
··· 1997 1997 ctx->backupuid_specified = false; /* no backup intent for a user */ 1998 1998 ctx->backupgid_specified = false; /* no backup intent for a group */ 1999 1999 2000 - ctx->retrans = 1; 2000 + ctx->retrans = 0; 2001 2001 ctx->reparse_type = CIFS_REPARSE_TYPE_DEFAULT; 2002 2002 ctx->symlink_type = CIFS_SYMLINK_TYPE_DEFAULT; 2003 2003 ctx->nonativesocket = 0;
+9 -18
fs/smb/client/inode.c
··· 219 219 */ 220 220 if (is_size_safe_to_change(cifs_i, fattr->cf_eof, from_readdir)) { 221 221 i_size_write(inode, fattr->cf_eof); 222 - 223 - /* 224 - * i_blocks is not related to (i_size / i_blksize), 225 - * but instead 512 byte (2**9) size is required for 226 - * calculating num blocks. 227 - */ 228 - inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9; 222 + inode->i_blocks = CIFS_INO_BLOCKS(fattr->cf_bytes); 229 223 230 224 if (S_ISLNK(fattr->cf_mode) && fattr->cf_symlink_target) { ··· 2991 2997 } 2992 2998 } 2993 2999 2994 - cfile = find_readable_file(cifs_i, false); 3000 + cfile = find_readable_file(cifs_i, FIND_ANY); 2995 3001 if (cfile == NULL) 2996 3002 return -EINVAL; 2997 3003 ··· 3009 3015 { 3010 3016 spin_lock(&inode->i_lock); 3011 3017 i_size_write(inode, offset); 3018 + /* 3019 + * Until we can query the server for actual allocation size, 3020 + * this is the best estimate we have for blocks allocated to a file. 3021 + */ 3022 + inode->i_blocks = CIFS_INO_BLOCKS(offset); 3012 3023 spin_unlock(&inode->i_lock); 3013 3024 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 3014 3025 truncate_pagecache(inode, offset); ··· 3049 3050 size, false); 3050 3051 cifs_dbg(FYI, "%s: set_file_size: rc = %d\n", __func__, rc); 3051 3052 } else { 3052 - open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); 3053 + open_file = find_writable_file(cifsInode, FIND_FSUID_ONLY); 3053 3054 if (open_file) { 3054 3055 tcon = tlink_tcon(open_file->tlink); 3055 3056 server = tcon->ses->server; ··· 3086 3087 if (rc == 0) { 3087 3088 netfs_resize_file(&cifsInode->netfs, size, true); 3088 3089 cifs_setsize(inode, size); 3089 - /* 3090 - * i_blocks is not related to (i_size / i_blksize), but instead 3091 - * 512 byte (2**9) size is required for calculating num blocks. 3092 - * Until we can query the server for actual allocation size, 3093 - * this is best estimate we have for blocks allocated for a file 3094 - * Number of blocks must be rounded up so size 1 is not 0 blocks 3095 - */ 3096 - inode->i_blocks = (512 - 1 + size) >> 9; 3097 3090 3098 3091 return rc; ··· 3210 3219 open_file->fid.netfid, 3211 3220 open_file->pid); 3212 3221 } else { 3213 - open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY); 3222 + open_file = find_writable_file(cifsInode, FIND_FSUID_ONLY); 3214 3223 if (open_file) { 3215 3224 pTcon = tlink_tcon(open_file->tlink); 3216 3225 rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args,
+1 -1
fs/smb/client/smb1ops.c
··· 960 960 struct cifs_tcon *tcon; 961 961 962 962 /* if the file is already open for write, just use that fileid */ 963 - open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY); 963 + open_file = find_writable_file(cinode, FIND_FSUID_ONLY); 964 964 965 965 if (open_file) { 966 966 fid.netfid = open_file->fid.netfid;
+1 -1
fs/smb/client/smb1transport.c
··· 460 460 return 0; 461 461 462 462 /* 463 - * Windows NT server returns error resposne (e.g. STATUS_DELETE_PENDING 463 + * Windows NT server returns error response (e.g. STATUS_DELETE_PENDING 464 464 * or STATUS_OBJECT_NAME_NOT_FOUND or ERRDOS/ERRbadfile or any other) 465 465 * for some TRANS2 requests without the RESPONSE flag set in header. 466 466 */
+10 -12
fs/smb/client/smb2inode.c
··· 1156 1156 cifs_i = CIFS_I(inode); 1157 1157 dosattrs = cifs_i->cifsAttrs | ATTR_READONLY; 1158 1158 data.Attributes = cpu_to_le32(dosattrs); 1159 - cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile); 1159 + cifs_get_writable_path(tcon, name, FIND_ANY, &cfile); 1160 1160 oparms = CIFS_OPARMS(cifs_sb, tcon, name, FILE_WRITE_ATTRIBUTES, 1161 1161 FILE_CREATE, CREATE_NOT_FILE, ACL_NO_MODE); 1162 1162 tmprc = smb2_compound_op(xid, tcon, cifs_sb, name, ··· 1336 1336 __u32 co = file_create_options(source_dentry); 1337 1337 1338 1338 drop_cached_dir_by_name(xid, tcon, from_name, cifs_sb); 1339 - cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile); 1339 + cifs_get_writable_path(tcon, from_name, FIND_WITH_DELETE, &cfile); 1340 1340 1341 1341 int rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb, 1342 1342 co, DELETE, SMB2_OP_RENAME, cfile, source_dentry); 1343 1343 if (rc == -EINVAL) { 1344 1344 cifs_dbg(FYI, "invalid lease key, resending request without lease"); 1345 - cifs_get_writable_path(tcon, from_name, 1346 - FIND_WR_WITH_DELETE, &cfile); 1345 + cifs_get_writable_path(tcon, from_name, FIND_WITH_DELETE, &cfile); 1347 1346 rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb, 1348 1347 co, DELETE, SMB2_OP_RENAME, cfile, NULL); 1349 1348 } ··· 1376 1377 1377 1378 in_iov.iov_base = &eof; 1378 1379 in_iov.iov_len = sizeof(eof); 1379 - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1380 + cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile); 1380 1381 1381 1382 oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_DATA, 1382 1383 FILE_OPEN, 0, ACL_NO_MODE); ··· 1386 1387 cfile, NULL, NULL, dentry); 1387 1388 if (rc == -EINVAL) { 1388 1389 cifs_dbg(FYI, "invalid lease key, resending request without lease"); 1389 - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1390 + cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile); 1390 1391 rc = smb2_compound_op(xid, tcon, cifs_sb, 1391 1392 full_path, &oparms, &in_iov, 1392 1393 &(int){SMB2_OP_SET_EOF}, 1, ··· 1416 1417 (buf->LastWriteTime == 0) && (buf->ChangeTime == 0)) { 1417 1418 if (buf->Attributes == 0) 1418 1419 goto out; /* would be a no op, no sense sending this */ 1419 - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1420 + cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile); 1420 1421 } 1421 1422 1422 1423 oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_ATTRIBUTES, ··· 1475 1476 1476 1477 if (tcon->posix_extensions) { 1477 1478 cmds[1] = SMB2_OP_POSIX_QUERY_INFO; 1478 - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1479 + cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile); 1479 1480 rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, 1480 1481 in_iov, cmds, 2, cfile, out_iov, out_buftype, NULL); 1481 1482 if (!rc) { ··· 1484 1485 } 1485 1486 } else { 1486 1487 cmds[1] = SMB2_OP_QUERY_INFO; 1487 - cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); 1488 + cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile); 1488 1489 rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, 1489 1490 in_iov, cmds, 2, cfile, out_iov, out_buftype, NULL); 1490 1491 if (!rc) { ··· 1635 1636 iov[1].iov_base = utf16_path; 1636 1637 iov[1].iov_len = sizeof(*utf16_path) * UniStrlen((wchar_t *)utf16_path); 1637 1638 1638 - cifs_get_writable_path(tcon, full_path, FIND_WR_WITH_DELETE, &cfile); 1639 + cifs_get_writable_path(tcon, full_path, FIND_WITH_DELETE, &cfile); 1639 1640 rc = smb2_compound_op(xid, 
tcon, cifs_sb, full_path, &oparms, iov, 1640 1641 cmds, num_cmds, cfile, NULL, NULL, dentry); 1641 1642 if (rc == -EINVAL) { 1642 1643 cifs_dbg(FYI, "invalid lease key, resending request without lease\n"); 1643 - cifs_get_writable_path(tcon, full_path, 1644 - FIND_WR_WITH_DELETE, &cfile); 1644 + cifs_get_writable_path(tcon, full_path, FIND_WITH_DELETE, &cfile); 1645 1645 rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, iov, 1646 1646 cmds, num_cmds, cfile, NULL, NULL, NULL); 1647 1647 }
+6 -3
fs/smb/client/smb2maperror.c
··· 109 109 } 110 110 111 111 #if IS_ENABLED(CONFIG_SMB_KUNIT_TESTS) 112 + #define EXPORT_SYMBOL_FOR_SMB_TEST(sym) \ 113 + EXPORT_SYMBOL_FOR_MODULES(sym, "smb2maperror_test") 114 + 112 115 /* Previous prototype for eliminating the build warning. */ 113 116 const struct status_to_posix_error *smb2_get_err_map_test(__u32 smb2_status); 114 117 ··· 119 116 { 120 117 return smb2_get_err_map(smb2_status); 121 118 } 122 - EXPORT_SYMBOL_GPL(smb2_get_err_map_test); 119 + EXPORT_SYMBOL_FOR_SMB_TEST(smb2_get_err_map_test); 123 120 124 121 const struct status_to_posix_error *smb2_error_map_table_test = smb2_error_map_table; 125 - EXPORT_SYMBOL_GPL(smb2_error_map_table_test); 122 + EXPORT_SYMBOL_FOR_SMB_TEST(smb2_error_map_table_test); 126 123 127 124 unsigned int smb2_error_map_num = ARRAY_SIZE(smb2_error_map_table); 128 - EXPORT_SYMBOL_GPL(smb2_error_map_num); 125 + EXPORT_SYMBOL_FOR_SMB_TEST(smb2_error_map_num); 129 126 #endif
+18 -20
fs/smb/client/smb2ops.c
··· 628 628 struct smb_sockaddr_in6 *p6; 629 629 struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL; 630 630 struct cifs_server_iface tmp_iface; 631 + __be16 port; 631 632 ssize_t bytes_left; 632 633 size_t next = 0; 633 634 int nb_iface = 0; ··· 663 662 goto out; 664 663 } 665 664 665 + spin_lock(&ses->server->srv_lock); 666 + if (ses->server->dstaddr.ss_family == AF_INET) 667 + port = ((struct sockaddr_in *)&ses->server->dstaddr)->sin_port; 668 + else if (ses->server->dstaddr.ss_family == AF_INET6) 669 + port = ((struct sockaddr_in6 *)&ses->server->dstaddr)->sin6_port; 670 + else 671 + port = cpu_to_be16(CIFS_PORT); 672 + spin_unlock(&ses->server->srv_lock); 673 + 666 674 while (bytes_left >= (ssize_t)sizeof(*p)) { 667 675 memset(&tmp_iface, 0, sizeof(tmp_iface)); 668 676 /* default to 1Gbps when link speed is unset */ ··· 692 682 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4); 693 683 694 684 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */ 695 - addr4->sin_port = cpu_to_be16(CIFS_PORT); 685 + addr4->sin_port = port; 696 686 697 687 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__, 698 688 &addr4->sin_addr); ··· 706 696 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */ 707 697 addr6->sin6_flowinfo = 0; 708 698 addr6->sin6_scope_id = 0; 709 - addr6->sin6_port = cpu_to_be16(CIFS_PORT); 699 + addr6->sin6_port = port; 710 700 711 701 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__, 712 702 &addr6->sin6_addr); ··· 1497 1487 { 1498 1488 struct smb2_file_network_open_info file_inf; 1499 1489 struct inode *inode; 1490 + u64 asize; 1500 1491 int rc; 1501 1492 1502 1493 rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid, ··· 1521 1510 inode_set_atime_to_ts(inode, 1522 1511 cifs_NTtimeToUnix(file_inf.LastAccessTime)); 1523 1512 1524 - /* 1525 - * i_blocks is not related to (i_size / i_blksize), 1526 - * but instead 512 byte (2**9) size is required for 1527 - * calculating num blocks. 1528 - */ 1529 - if (le64_to_cpu(file_inf.AllocationSize) > 4096) 1530 - inode->i_blocks = 1531 - (512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9; 1513 + asize = le64_to_cpu(file_inf.AllocationSize); 1514 + if (asize > 4096) 1515 + inode->i_blocks = CIFS_INO_BLOCKS(asize); 1532 1516 1533 1517 /* End of file and Attributes should not have to be updated on close */ 1534 1518 spin_unlock(&inode->i_lock); ··· 2200 2194 rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false); 2201 2195 if (rc) 2202 2196 goto duplicate_extents_out; 2203 - 2204 - /* 2205 - * Although also could set plausible allocation size (i_blocks) 2206 - * here in addition to setting the file size, in reflink 2207 - * it is likely that the target file is sparse. Its allocation 2208 - * size will be queried on next revalidate, but it is important 2209 - * to make sure that file's cached size is updated immediately 2210 - */ 2211 2197 netfs_resize_file(netfs_inode(inode), dest_off + len, true); 2212 2198 cifs_setsize(inode, dest_off + len); 2213 2199 } ··· 3350 3352 struct cifsFileInfo *open_file = NULL; 3351 3353 3352 3354 if (inode && !(info & SACL_SECINFO)) 3353 - open_file = find_readable_file(CIFS_I(inode), true); 3355 + open_file = find_readable_file(CIFS_I(inode), FIND_FSUID_ONLY); 3354 3356 if (!open_file || (info & SACL_SECINFO)) 3355 3357 return get_smb2_acl_by_path(cifs_sb, path, pacllen, info); 3356 3358 ··· 3896 3898 * some servers (Windows2016) will not reflect recent writes in 3897 3899 * QUERY_ALLOCATED_RANGES until SMB2_flush is called. 
3898 3900 */ 3899 - wrcfile = find_writable_file(cifsi, FIND_WR_ANY); 3901 + wrcfile = find_writable_file(cifsi, FIND_ANY); 3900 3902 if (wrcfile) { 3901 3903 filemap_write_and_wait(inode->i_mapping); 3902 3904 smb2_flush_file(xid, tcon, &wrcfile->fid);
+4 -1
fs/smb/client/smb2pdu.c
··· 5307 5307 5308 5308 memset(&rqst, 0, sizeof(struct smb_rqst)); 5309 5309 rqst.rq_iov = iov; 5310 - rqst.rq_nvec = n_vec + 1; 5310 + /* iov[0] is the SMB header; move payload to rq_iter for encryption safety */ 5311 + rqst.rq_nvec = 1; 5312 + iov_iter_kvec(&rqst.rq_iter, ITER_SOURCE, &iov[1], n_vec, 5313 + io_parms->length); 5311 5314 5312 5315 if (retries) { 5313 5316 /* Back-off before retry */
+2 -20
fs/smb/server/auth.c
··· 589 589 if (!(conn->dialect >= SMB30_PROT_ID && signing->binding)) 590 590 memcpy(chann->smb3signingkey, key, SMB3_SIGN_KEY_SIZE); 591 591 592 - ksmbd_debug(AUTH, "dumping generated AES signing keys\n"); 592 + ksmbd_debug(AUTH, "generated SMB3 signing key\n"); 593 593 ksmbd_debug(AUTH, "Session Id %llu\n", sess->id); 594 - ksmbd_debug(AUTH, "Session Key %*ph\n", 595 - SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key); 596 - ksmbd_debug(AUTH, "Signing Key %*ph\n", 597 - SMB3_SIGN_KEY_SIZE, key); 598 594 return 0; 599 595 } 600 596 ··· 648 652 ptwin->decryption.context, 649 653 sess->smb3decryptionkey, SMB3_ENC_DEC_KEY_SIZE); 650 654 651 - ksmbd_debug(AUTH, "dumping generated AES encryption keys\n"); 655 + ksmbd_debug(AUTH, "generated SMB3 encryption/decryption keys\n"); 652 656 ksmbd_debug(AUTH, "Cipher type %d\n", conn->cipher_type); 653 657 ksmbd_debug(AUTH, "Session Id %llu\n", sess->id); 654 - ksmbd_debug(AUTH, "Session Key %*ph\n", 655 - SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key); 656 - if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM || 657 - conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) { 658 - ksmbd_debug(AUTH, "ServerIn Key %*ph\n", 659 - SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3encryptionkey); 660 - ksmbd_debug(AUTH, "ServerOut Key %*ph\n", 661 - SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3decryptionkey); 662 - } else { 663 - ksmbd_debug(AUTH, "ServerIn Key %*ph\n", 664 - SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3encryptionkey); 665 - ksmbd_debug(AUTH, "ServerOut Key %*ph\n", 666 - SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3decryptionkey); 667 - } 668 658 } 669 659 670 660 void ksmbd_gen_smb30_encryptionkey(struct ksmbd_conn *conn,
+6 -3
fs/smb/server/mgmt/tree_connect.c
··· 102 102 103 103 void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon) 104 104 { 105 - if (atomic_dec_and_test(&tcon->refcount)) 105 + if (atomic_dec_and_test(&tcon->refcount)) { 106 + ksmbd_share_config_put(tcon->share_conf); 106 107 kfree(tcon); 108 + } 107 109 } 108 110 109 111 static int __ksmbd_tree_conn_disconnect(struct ksmbd_session *sess, ··· 115 113 116 114 ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id); 117 115 ksmbd_release_tree_conn_id(sess, tree_conn->id); 118 - ksmbd_share_config_put(tree_conn->share_conf); 119 116 ksmbd_counter_dec(KSMBD_COUNTER_TREE_CONNS); 120 - if (atomic_dec_and_test(&tree_conn->refcount)) 117 + if (atomic_dec_and_test(&tree_conn->refcount)) { 118 + ksmbd_share_config_put(tree_conn->share_conf); 121 119 kfree(tree_conn); 120 + } 122 121 return ret; 123 122 } 124 123
+25 -10
fs/smb/server/oplock.c
··· 120 120 kfree(lease); 121 121 } 122 122 123 - static void free_opinfo(struct oplock_info *opinfo) 123 + static void __free_opinfo(struct oplock_info *opinfo) 124 124 { 125 125 if (opinfo->is_lease) 126 126 free_lease(opinfo); 127 127 if (opinfo->conn && atomic_dec_and_test(&opinfo->conn->refcnt)) 128 128 kfree(opinfo->conn); 129 129 kfree(opinfo); 130 + } 131 + 132 + static void free_opinfo_rcu(struct rcu_head *rcu) 133 + { 134 + struct oplock_info *opinfo = container_of(rcu, struct oplock_info, rcu); 135 + 136 + __free_opinfo(opinfo); 137 + } 138 + 139 + static void free_opinfo(struct oplock_info *opinfo) 140 + { 141 + call_rcu(&opinfo->rcu, free_opinfo_rcu); 130 142 } 131 143 132 144 struct oplock_info *opinfo_get(struct ksmbd_file *fp) ··· 188 176 free_opinfo(opinfo); 189 177 } 190 178 191 - static void opinfo_add(struct oplock_info *opinfo) 179 + static void opinfo_add(struct oplock_info *opinfo, struct ksmbd_file *fp) 192 180 { 193 - struct ksmbd_inode *ci = opinfo->o_fp->f_ci; 181 + struct ksmbd_inode *ci = fp->f_ci; 194 182 195 183 down_write(&ci->m_lock); 196 184 list_add(&opinfo->op_entry, &ci->m_op_list); ··· 1135 1123 1136 1124 rcu_read_lock(); 1137 1125 opinfo = rcu_dereference(fp->f_opinfo); 1138 - rcu_read_unlock(); 1139 1126 1140 - if (!opinfo || !opinfo->is_lease || opinfo->o_lease->version != 2) 1127 + if (!opinfo || !opinfo->is_lease || opinfo->o_lease->version != 2) { 1128 + rcu_read_unlock(); 1141 1129 return; 1130 + } 1131 + rcu_read_unlock(); 1142 1132 1143 1133 p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent); 1144 1134 if (!p_ci) ··· 1291 1277 set_oplock_level(opinfo, req_op_level, lctx); 1292 1278 1293 1279 out: 1294 - rcu_assign_pointer(fp->f_opinfo, opinfo); 1295 - opinfo->o_fp = fp; 1296 - 1297 1280 opinfo_count_inc(fp); 1298 - opinfo_add(opinfo); 1281 + opinfo_add(opinfo, fp); 1282 + 1299 1283 if (opinfo->is_lease) { 1300 1284 err = add_lease_global_list(opinfo); 1301 1285 if (err) 1302 1286 goto err_out; 1303 1287 } 1304 1288 1289 + rcu_assign_pointer(fp->f_opinfo, opinfo); 1290 + opinfo->o_fp = fp; 1291 + 1305 1292 return 0; 1306 1293 err_out: 1307 - free_opinfo(opinfo); 1294 + __free_opinfo(opinfo); 1308 1295 return err; 1309 1296 } 1310 1297
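The free_opinfo() rework above defers the actual kfree() with call_rcu() so that lockless readers doing rcu_dereference(fp->f_opinfo) can never follow a pointer into freed memory. A minimal sketch of this pattern, with illustrative foo_* names (only rcu_head, call_rcu(), container_of() and kfree() are real kernel API here):

struct foo {
        int data;
        struct rcu_head rcu;            /* embedded so call_rcu() can find us */
};

static void foo_free_rcu(struct rcu_head *rcu)
{
        /* Runs only after all pre-existing RCU readers have finished. */
        kfree(container_of(rcu, struct foo, rcu));
}

static void foo_free(struct foo *f)
{
        /* Unpublish first (e.g. rcu_assign_pointer(p, NULL)), then defer. */
        call_rcu(&f->rcu, foo_free_rcu);
}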
+3 -2
fs/smb/server/oplock.h
··· 69 69 struct lease *o_lease; 70 70 struct list_head op_entry; 71 71 struct list_head lease_entry; 72 - wait_queue_head_t oplock_q; /* Other server threads */ 73 - wait_queue_head_t oplock_brk; /* oplock breaking wait */ 72 + wait_queue_head_t oplock_q; /* Other server threads */ 73 + wait_queue_head_t oplock_brk; /* oplock breaking wait */ 74 + struct rcu_head rcu; 74 75 }; 75 76 76 77 struct lease_break_info {
+16 -9
fs/smb/server/smb2pdu.c
··· 126 126 pr_err("The first operation in the compound does not have tcon\n"); 127 127 return -EINVAL; 128 128 } 129 + if (work->tcon->t_state != TREE_CONNECTED) 130 + return -ENOENT; 129 131 if (tree_id != UINT_MAX && work->tcon->id != tree_id) { 130 132 pr_err("tree id(%u) is different with id(%u) in first operation\n", 131 133 tree_id, work->tcon->id); ··· 1950 1948 } 1951 1949 } 1952 1950 smb2_set_err_rsp(work); 1951 + conn->binding = false; 1953 1952 } else { 1954 1953 unsigned int iov_len; 1955 1954 ··· 2831 2828 goto out; 2832 2829 } 2833 2830 2834 - dh_info->fp->conn = conn; 2831 + if (dh_info->fp->conn) { 2832 + ksmbd_put_durable_fd(dh_info->fp); 2833 + err = -EBADF; 2834 + goto out; 2835 + } 2835 2836 dh_info->reconnected = true; 2836 2837 goto out; 2837 2838 } ··· 3019 3012 goto err_out2; 3020 3013 } 3021 3014 3015 + fp = dh_info.fp; 3016 + 3022 3017 if (ksmbd_override_fsids(work)) { 3023 3018 rc = -ENOMEM; 3024 3019 ksmbd_put_durable_fd(dh_info.fp); 3025 3020 goto err_out2; 3026 3021 } 3027 3022 3028 - fp = dh_info.fp; 3029 3023 file_info = FILE_OPENED; 3030 3024 3031 3025 rc = ksmbd_vfs_getattr(&fp->filp->f_path, &stat); ··· 3624 3616 3625 3617 reconnected_fp: 3626 3618 rsp->StructureSize = cpu_to_le16(89); 3627 - rcu_read_lock(); 3628 - opinfo = rcu_dereference(fp->f_opinfo); 3619 + opinfo = opinfo_get(fp); 3629 3620 rsp->OplockLevel = opinfo != NULL ? opinfo->level : 0; 3630 - rcu_read_unlock(); 3631 3621 rsp->Flags = 0; 3632 3622 rsp->CreateAction = cpu_to_le32(file_info); 3633 3623 rsp->CreationTime = cpu_to_le64(fp->create_time); ··· 3666 3660 next_ptr = &lease_ccontext->Next; 3667 3661 next_off = conn->vals->create_lease_size; 3668 3662 } 3663 + opinfo_put(opinfo); 3669 3664 3670 3665 if (maximal_access_ctxt) { 3671 3666 struct create_context *mxac_ccontext; ··· 5459 5452 struct smb2_query_info_req *req, 5460 5453 struct smb2_query_info_rsp *rsp) 5461 5454 { 5462 - struct ksmbd_session *sess = work->sess; 5463 5455 struct ksmbd_conn *conn = work->conn; 5464 5456 struct ksmbd_share_config *share = work->tcon->share_conf; 5465 5457 int fsinfoclass = 0; ··· 5595 5589 5596 5590 info = (struct object_id_info *)(rsp->Buffer); 5597 5591 5598 - if (!user_guest(sess->user)) 5599 - memcpy(info->objid, user_passkey(sess->user), 16); 5592 + if (path.mnt->mnt_sb->s_uuid_len == 16) 5593 + memcpy(info->objid, path.mnt->mnt_sb->s_uuid.b, 5594 + path.mnt->mnt_sb->s_uuid_len); 5600 5595 else 5601 - memset(info->objid, 0, 16); 5596 + memcpy(info->objid, &stfs.f_fsid, sizeof(stfs.f_fsid)); 5602 5597 5603 5598 info->extended_info.magic = cpu_to_le32(EXTENDED_INFO_MAGIC); 5604 5599 info->extended_info.version = cpu_to_le32(1);
+5 -5
fs/smb/server/vfs_cache.c
··· 87 87 88 88 rcu_read_lock(); 89 89 opinfo = rcu_dereference(fp->f_opinfo); 90 - rcu_read_unlock(); 91 - 92 - if (!opinfo) { 93 - seq_printf(m, " %-15s", " "); 94 - } else { 90 + if (opinfo) { 95 91 const struct ksmbd_const_name *const_names; 96 92 int count; 97 93 unsigned int level; ··· 101 105 count = ARRAY_SIZE(ksmbd_oplock_const_names); 102 106 level = opinfo->level; 103 107 } 108 + rcu_read_unlock(); 104 109 ksmbd_proc_show_const_name(m, " %-15s", 105 110 const_names, count, level); 111 + } else { 112 + rcu_read_unlock(); 113 + seq_printf(m, " %-15s", " "); 106 114 } 107 115 108 116 seq_printf(m, " %#010x %#010x %s\n",
-3
fs/tests/exec_kunit.c
··· 94 94 { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * 3 + sizeof(void *)), 95 95 .argc = 0, .envc = 0 }, 96 96 .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) }, 97 - { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * (_STK_LIM / 4 * + sizeof(void *)), 98 - .argc = 0, .envc = 0 }, 99 - .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) }, 100 97 { { .p = ULONG_MAX, .rlim_stack.rlim_cur = 4 * _STK_LIM, 101 98 .argc = 0, .envc = 0 }, 102 99 .expected_argmin = ULONG_MAX - (_STK_LIM / 4 * 3) + sizeof(void *) },
+2 -6
fs/xfs/libxfs/xfs_da_btree.c
··· 2716 2716 * larger one that needs to be free by the caller. 2717 2717 */ 2718 2718 if (nirecs > 1) { 2719 - map = kzalloc(nirecs * sizeof(struct xfs_buf_map), 2720 - GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); 2721 - if (!map) { 2722 - error = -ENOMEM; 2723 - goto out_free_irecs; 2724 - } 2719 + map = kcalloc(nirecs, sizeof(struct xfs_buf_map), 2720 + GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); 2725 2721 *mapp = map; 2726 2722 } 2727 2723
+1 -1
fs/xfs/libxfs/xfs_defer.c
··· 809 809 810 810 /* Paused items cannot absorb more work */ 811 811 if (dfp->dfp_flags & XFS_DEFER_PAUSED) 812 - return NULL; 812 + return false; 813 813 814 814 /* Already full? */ 815 815 if (ops->max_items && dfp->dfp_count >= ops->max_items)
+1 -1
fs/xfs/xfs_bmap_item.c
··· 245 245 struct xfs_bmap_intent *ba = bi_entry(a); 246 246 struct xfs_bmap_intent *bb = bi_entry(b); 247 247 248 - return ba->bi_owner->i_ino - bb->bi_owner->i_ino; 248 + return cmp_int(ba->bi_owner->i_ino, bb->bi_owner->i_ino); 249 249 } 250 250 251 251 /* Log bmap updates in the intent item. */
+7 -1
fs/xfs/xfs_dquot.c
··· 1439 1439 return 0; 1440 1440 1441 1441 out_abort: 1442 + /* 1443 + * Shut down the log before removing the dquot item from the AIL. 1444 + * Otherwise, the log tail may advance past this item's LSN while 1445 + * log writes are still in progress, making these unflushed changes 1446 + * unrecoverable on the next mount. 1447 + */ 1448 + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1442 1449 dqp->q_flags &= ~XFS_DQFLAG_DIRTY; 1443 1450 xfs_trans_ail_delete(lip, 0); 1444 - xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1445 1451 xfs_dqfunlock(dqp); 1446 1452 return error; 1447 1453 }
+10 -7
fs/xfs/xfs_healthmon.c
··· 141 141 hm->mount_cookie = DETACHED_MOUNT_COOKIE; 142 142 spin_unlock(&xfs_healthmon_lock); 143 143 144 + /* 145 + * Wake up any readers that might remain. This can happen if unmount 146 + * races with the healthmon fd owner entering ->read_iter, having 147 + * already emptied the event queue. 148 + * 149 + * In the ->release case there shouldn't be any readers because the 150 + * only users of the waiter are read and poll. 151 + */ 152 + wake_up_all(&hm->wait); 153 + 144 154 trace_xfs_healthmon_detach(hm); 145 155 xfs_healthmon_put(hm); 146 156 } ··· 1037 1027 * process can create another health monitor file. 1038 1028 */ 1039 1029 xfs_healthmon_detach(hm); 1040 - 1041 - /* 1042 - * Wake up any readers that might be left. There shouldn't be any 1043 - * because the only users of the waiter are read and poll. 1044 - */ 1045 - wake_up_all(&hm->wait); 1046 - 1047 1030 xfs_healthmon_put(hm); 1048 1031 return 0; 1049 1032 }
-1
fs/xfs/xfs_icache.c
··· 159 159 ASSERT(!test_bit(XFS_LI_IN_AIL, 160 160 &ip->i_itemp->ili_item.li_flags)); 161 161 xfs_inode_item_destroy(ip); 162 - ip->i_itemp = NULL; 163 162 } 164 163 165 164 kmem_cache_free(xfs_inode_cache, ip);
+2
fs/xfs/xfs_log.c
··· 1357 1357 1358 1358 if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1) 1359 1359 log->l_iclog_roundoff = mp->m_sb.sb_logsunit; 1360 + else if (mp->m_sb.sb_logsectsize > 0) 1361 + log->l_iclog_roundoff = mp->m_sb.sb_logsectsize; 1360 1362 else 1361 1363 log->l_iclog_roundoff = BBSIZE; 1362 1364
-2
fs/xfs/xfs_zone_gc.c
··· 96 96 */ 97 97 xfs_fsblock_t old_startblock; 98 98 xfs_daddr_t new_daddr; 99 - struct xfs_zone_scratch *scratch; 100 99 101 100 /* Are we writing to a sequential write required zone? */ 102 101 bool is_seq; ··· 778 779 ihold(VFS_I(chunk->ip)); 779 780 split_chunk->ip = chunk->ip; 780 781 split_chunk->is_seq = chunk->is_seq; 781 - split_chunk->scratch = chunk->scratch; 782 782 split_chunk->offset = chunk->offset; 783 783 split_chunk->len = split_len; 784 784 split_chunk->old_startblock = chunk->old_startblock;
+2 -1
include/hyperv/hvgdk_mini.h
··· 477 477 #define HVCALL_NOTIFY_PARTITION_EVENT 0x0087 478 478 #define HVCALL_ENTER_SLEEP_STATE 0x0084 479 479 #define HVCALL_NOTIFY_PORT_RING_EMPTY 0x008b 480 - #define HVCALL_SCRUB_PARTITION 0x008d 481 480 #define HVCALL_REGISTER_INTERCEPT_RESULT 0x0091 482 481 #define HVCALL_ASSERT_VIRTUAL_INTERRUPT 0x0094 483 482 #define HVCALL_CREATE_PORT 0x0095 ··· 1120 1121 HV_X64_REGISTER_MSR_MTRR_FIX4KF8000 = 0x0008007A, 1121 1122 1122 1123 HV_X64_REGISTER_REG_PAGE = 0x0009001C, 1124 + #elif defined(CONFIG_ARM64) 1125 + HV_ARM64_REGISTER_SINT_RESERVED_INTERRUPT_ID = 0x00070001, 1123 1126 #endif 1124 1127 }; 1125 1128
+1 -1
include/linux/auxvec.h
··· 4 4 5 5 #include <uapi/linux/auxvec.h> 6 6 7 - #define AT_VECTOR_SIZE_BASE 22 /* NEW_AUX_ENT entries in auxiliary table */ 7 + #define AT_VECTOR_SIZE_BASE 24 /* NEW_AUX_ENT entries in auxiliary table */ 8 8 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */ 9 9 #endif /* _LINUX_AUXVEC_H */
+3 -1
include/linux/build_bug.h
··· 32 32 /** 33 33 * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied 34 34 * error message. 35 - * @condition: the condition which the compiler should know is false. 35 + * @cond: the condition which the compiler should know is false. 36 + * @msg: build-time error message 36 37 * 37 38 * See BUILD_BUG_ON for description. 38 39 */ ··· 61 60 62 61 /** 63 62 * static_assert - check integer constant expression at build time 63 + * @expr: expression to be checked 64 64 * 65 65 * static_assert() is a wrapper for the C11 _Static_assert, with a 66 66 * little macro magic to make the message optional (defaulting to the
+1
include/linux/console_struct.h
··· 160 160 struct uni_pagedict **uni_pagedict_loc; /* [!] Location of uni_pagedict variable for this console */ 161 161 u32 **vc_uni_lines; /* unicode screen content */ 162 162 u16 *vc_saved_screen; 163 + u32 **vc_saved_uni_lines; 163 164 unsigned int vc_saved_cols; 164 165 unsigned int vc_saved_rows; 165 166 /* additional information is in vt_kern.h */
+54
include/linux/device.h
··· 483 483 * on. This shrinks the "Board Support Packages" (BSPs) and 484 484 * minimizes board-specific #ifdefs in drivers. 485 485 * @driver_data: Private pointer for driver specific info. 486 + * @driver_override: Driver name to force a match. Do not touch directly; use 487 + * device_set_driver_override() instead. 486 488 * @links: Links to suppliers and consumers of this device. 487 489 * @power: For device power management. 488 490 * See Documentation/driver-api/pm/devices.rst for details. ··· 578 576 core doesn't touch it */ 579 577 void *driver_data; /* Driver data, set and get with 580 578 dev_set_drvdata/dev_get_drvdata */ 579 + struct { 580 + const char *name; 581 + spinlock_t lock; 582 + } driver_override; 581 583 struct mutex mutex; /* mutex to synchronize calls to 582 584 * its driver. 583 585 */ ··· 706 700 }; 707 701 708 702 #define kobj_to_dev(__kobj) container_of_const(__kobj, struct device, kobj) 703 + 704 + int __device_set_driver_override(struct device *dev, const char *s, size_t len); 705 + 706 + /** 707 + * device_set_driver_override() - Helper to set or clear driver override. 708 + * @dev: Device to change 709 + * @s: NUL-terminated string, new driver name to force a match, pass empty 710 + * string to clear it ("" or "\n", where the latter is only for sysfs 711 + * interface). 712 + * 713 + * Helper to set or clear driver override of a device. 714 + * 715 + * Returns: 0 on success or a negative error code on failure. 716 + */ 717 + static inline int device_set_driver_override(struct device *dev, const char *s) 718 + { 719 + return __device_set_driver_override(dev, s, s ? strlen(s) : 0); 720 + } 721 + 722 + /** 723 + * device_has_driver_override() - Check if a driver override has been set. 724 + * @dev: device to check 725 + * 726 + * Returns true if a driver override has been set for this device. 727 + */ 728 + static inline bool device_has_driver_override(struct device *dev) 729 + { 730 + guard(spinlock)(&dev->driver_override.lock); 731 + return !!dev->driver_override.name; 732 + } 733 + 734 + /** 735 + * device_match_driver_override() - Match a driver against the device's driver_override. 736 + * @dev: device to check 737 + * @drv: driver to match against 738 + * 739 + * Returns > 0 if a driver override is set and matches the given driver, 0 if a 740 + * driver override is set but does not match, or < 0 if a driver override is not 741 + * set at all. 742 + */ 743 + static inline int device_match_driver_override(struct device *dev, 744 + const struct device_driver *drv) 745 + { 746 + guard(spinlock)(&dev->driver_override.lock); 747 + if (dev->driver_override.name) 748 + return !strcmp(dev->driver_override.name, drv->name); 749 + return -1; 750 + } 709 751 710 752 /** 711 753 * device_iommu_mapped - Returns true when the device DMA is translated
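The kernel-doc above pins down the calling convention for buses: device_match_driver_override() returns >= 0 when an override is set (match or no match) and < 0 when none is set, so a bus's ->match() consults it first and only falls back to ID matching when there is no override. A hedged sketch, where foo_bus_match() and foo_match_id() are hypothetical:

static int foo_bus_match(struct device *dev, const struct device_driver *drv)
{
        int ret = device_match_driver_override(dev, drv);

        /* >= 0: an override is set; obey it whether or not it matched. */
        if (ret >= 0)
                return ret;

        /* < 0: no override, fall back to the bus's usual ID matching. */
        return foo_match_id(dev, drv);
}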
+4
include/linux/device/bus.h
··· 65 65 * this bus. 66 66 * @pm: Power management operations of this bus, callback the specific 67 67 * device driver's pm-ops. 68 + * @driver_override: Set to true if this bus supports the driver_override 69 + * mechanism, which allows userspace to force a specific 70 + * driver to bind to a device via a sysfs attribute. 68 71 * @need_parent_lock: When probing or removing a device on this bus, the 69 72 * device core should lock the device's parent. 70 73 * ··· 109 106 110 107 const struct dev_pm_ops *pm; 111 108 109 + bool driver_override; 112 110 bool need_parent_lock; 113 111 }; 114 112
+2 -1
include/linux/etherdevice.h
··· 42 42 43 43 int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, 44 44 const void *daddr, const void *saddr, unsigned len); 45 - int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); 45 + int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev, 46 + unsigned char *haddr); 46 47 int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, 47 48 __be16 type); 48 49 void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
+4 -4
include/linux/firmware/intel/stratix10-svc-client.h
··· 68 68 * timeout value used in Stratix10 FPGA manager driver. 69 69 * timeout value used in RSU driver 70 70 */ 71 - #define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300 72 - #define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720 73 - #define SVC_RSU_REQUEST_TIMEOUT_MS 300 71 + #define SVC_RECONFIG_REQUEST_TIMEOUT_MS 5000 72 + #define SVC_RECONFIG_BUFFER_TIMEOUT_MS 5000 73 + #define SVC_RSU_REQUEST_TIMEOUT_MS 2000 74 74 #define SVC_FCS_REQUEST_TIMEOUT_MS 2000 75 75 #define SVC_COMPLETED_TIMEOUT_MS 30000 76 - #define SVC_HWMON_REQUEST_TIMEOUT_MS 300 76 + #define SVC_HWMON_REQUEST_TIMEOUT_MS 2000 77 77 78 78 struct stratix10_svc_chan; 79 79
+1
include/linux/hid.h
··· 682 682 __s32 battery_charge_status; 683 683 enum hid_battery_status battery_status; 684 684 bool battery_avoid_query; 685 + bool battery_present; 685 686 ktime_t battery_ratelimit_time; 686 687 #endif 687 688
+2 -1
include/linux/if_ether.h
··· 40 40 return (struct ethhdr *)skb_inner_mac_header(skb); 41 41 } 42 42 43 - int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); 43 + int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev, 44 + unsigned char *haddr); 44 45 45 46 extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); 46 47
+8 -2
include/linux/io-pgtable.h
··· 53 53 * tables. 54 54 * @ias: Input address (iova) size, in bits. 55 55 * @oas: Output address (paddr) size, in bits. 56 - * @coherent_walk A flag to indicate whether or not page table walks made 56 + * @coherent_walk: A flag to indicate whether or not page table walks made 57 57 * by the IOMMU are coherent with the CPU caches. 58 58 * @tlb: TLB management callbacks for this set of tables. 59 59 * @iommu_dev: The device representing the DMA configuration for the ··· 136 136 void (*free)(void *cookie, void *pages, size_t size); 137 137 138 138 /* Low-level data specific to the table format */ 139 + /* private: */ 139 140 union { 140 141 struct { 141 142 u64 ttbr; ··· 204 203 * @unmap_pages: Unmap a range of virtually contiguous pages of the same size. 205 204 * @iova_to_phys: Translate iova to physical address. 206 205 * @pgtable_walk: (optional) Perform a page table walk for a given iova. 206 + * @read_and_clear_dirty: Record dirty info per IOVA. If an IOVA is dirty, 207 + * clear its dirty state from the PTE unless the 208 + * IOMMU_DIRTY_NO_CLEAR flag is passed in. 207 209 * 208 210 * These functions map directly onto the iommu_ops member functions with 209 211 * the same names. ··· 235 231 * the configuration actually provided by the allocator (e.g. the 236 232 * pgsize_bitmap may be restricted). 237 233 * @cookie: An opaque token provided by the IOMMU driver and passed back to 238 - * the callback routines in cfg->tlb. 234 + * the callback routines. 235 + * 236 + * Returns: Pointer to the &struct io_pgtable_ops for this set of page tables. 239 237 */ 240 238 struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, 241 239 struct io_pgtable_cfg *cfg,
+4
include/linux/io_uring_types.h
··· 388 388 * regularly bounce b/w CPUs. 389 389 */ 390 390 struct { 391 + struct io_rings __rcu *rings_rcu; 391 392 struct llist_head work_llist; 392 393 struct llist_head retry_llist; 393 394 unsigned long check_cq; ··· 541 540 REQ_F_BL_NO_RECYCLE_BIT, 542 541 REQ_F_BUFFERS_COMMIT_BIT, 543 542 REQ_F_BUF_NODE_BIT, 543 + REQ_F_BUF_MORE_BIT, 544 544 REQ_F_HAS_METADATA_BIT, 545 545 REQ_F_IMPORT_BUFFER_BIT, 546 546 REQ_F_SQE_COPIED_BIT, ··· 627 625 REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT), 628 626 /* buf node is valid */ 629 627 REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT), 628 + /* incremental buffer consumption, more space available */ 629 + REQ_F_BUF_MORE = IO_REQ_FLAG(REQ_F_BUF_MORE_BIT), 630 630 /* request has read/write metadata assigned */ 631 631 REQ_F_HAS_METADATA = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT), 632 632 /*
+6 -1
include/linux/ipv6.h
··· 333 333 }; 334 334 335 335 #if IS_ENABLED(CONFIG_IPV6) 336 - bool ipv6_mod_enabled(void); 336 + extern int disable_ipv6_mod; 337 + 338 + static inline bool ipv6_mod_enabled(void) 339 + { 340 + return disable_ipv6_mod == 0; 341 + } 337 342 338 343 static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk) 339 344 {
+35 -48
include/linux/kvm_host.h
··· 1940 1940 1941 1941 struct kvm_stat_data { 1942 1942 struct kvm *kvm; 1943 - const struct _kvm_stats_desc *desc; 1943 + const struct kvm_stats_desc *desc; 1944 1944 enum kvm_stat_kind kind; 1945 1945 }; 1946 1946 1947 - struct _kvm_stats_desc { 1948 - struct kvm_stats_desc desc; 1949 - char name[KVM_STATS_NAME_SIZE]; 1950 - }; 1951 - 1952 - #define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \ 1953 - .flags = type | unit | base | \ 1954 - BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ 1955 - BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ 1956 - BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ 1957 - .exponent = exp, \ 1958 - .size = sz, \ 1947 + #define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \ 1948 + .flags = type | unit | base | \ 1949 + BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ 1950 + BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ 1951 + BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ 1952 + .exponent = exp, \ 1953 + .size = sz, \ 1959 1954 .bucket_size = bsz 1960 1955 1961 - #define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1962 - { \ 1963 - { \ 1964 - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1965 - .offset = offsetof(struct kvm_vm_stat, generic.stat) \ 1966 - }, \ 1967 - .name = #stat, \ 1968 - } 1969 - #define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1970 - { \ 1971 - { \ 1972 - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1973 - .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \ 1974 - }, \ 1975 - .name = #stat, \ 1976 - } 1977 - #define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1978 - { \ 1979 - { \ 1980 - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1981 - .offset = offsetof(struct kvm_vm_stat, stat) \ 1982 - }, \ 1983 - .name = #stat, \ 1984 - } 1985 - #define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1986 - { \ 1987 - { \ 1988 - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1989 - .offset = offsetof(struct kvm_vcpu_stat, stat) \ 1990 - }, \ 1991 - .name = #stat, \ 1992 - } 1956 + #define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1957 + { \ 1958 + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1959 + .offset = offsetof(struct kvm_vm_stat, generic.stat), \ 1960 + .name = #stat, \ 1961 + } 1962 + #define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1963 + { \ 1964 + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1965 + .offset = offsetof(struct kvm_vcpu_stat, generic.stat), \ 1966 + .name = #stat, \ 1967 + } 1968 + #define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1969 + { \ 1970 + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1971 + .offset = offsetof(struct kvm_vm_stat, stat), \ 1972 + .name = #stat, \ 1973 + } 1974 + #define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ 1975 + { \ 1976 + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ 1977 + .offset = offsetof(struct kvm_vcpu_stat, stat), \ 1978 + .name = #stat, \ 1979 + } 1993 1980 /* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */ 1994 1981 #define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \ 1995 1982 SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz) ··· 2053 2066 STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) 2054 2067 2055 2068 ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, 2056 - const struct _kvm_stats_desc *desc, 2069 + const struct kvm_stats_desc *desc, 2057 2070 void *stats, size_t size_stats, 2058 2071 char __user *user_buffer, size_t size, 
loff_t *offset); 2059 2072 ··· 2098 2111 2099 2112 2100 2113 extern const struct kvm_stats_header kvm_vm_stats_header; 2101 - extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; 2114 + extern const struct kvm_stats_desc kvm_vm_stats_desc[]; 2102 2115 extern const struct kvm_stats_header kvm_vcpu_stats_header; 2103 - extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; 2116 + extern const struct kvm_stats_desc kvm_vcpu_stats_desc[]; 2104 2117 2105 2118 static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) 2106 2119 {
+1 -1
include/linux/local_lock_internal.h
··· 315 315 316 316 #endif /* CONFIG_PREEMPT_RT */ 317 317 318 - #if defined(WARN_CONTEXT_ANALYSIS) 318 + #if defined(WARN_CONTEXT_ANALYSIS) && !defined(__CHECKER__) 319 319 /* 320 320 * Because the compiler only knows about the base per-CPU variable, use this 321 321 * helper function to make the compiler think we lock/unlock the @base variable,
+6 -11
include/linux/mm.h
··· 3514 3514 static inline void ptlock_free(struct ptdesc *ptdesc) {} 3515 3515 #endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */ 3516 3516 3517 - static inline unsigned long ptdesc_nr_pages(const struct ptdesc *ptdesc) 3518 - { 3519 - return compound_nr(ptdesc_page(ptdesc)); 3520 - } 3521 - 3522 3517 static inline void __pagetable_ctor(struct ptdesc *ptdesc) 3523 3518 { 3524 - pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags)); 3519 + struct folio *folio = ptdesc_folio(ptdesc); 3525 3520 3526 - __SetPageTable(ptdesc_page(ptdesc)); 3527 - mod_node_page_state(pgdat, NR_PAGETABLE, ptdesc_nr_pages(ptdesc)); 3521 + __folio_set_pgtable(folio); 3522 + lruvec_stat_add_folio(folio, NR_PAGETABLE); 3528 3523 } 3529 3524 3530 3525 static inline void pagetable_dtor(struct ptdesc *ptdesc) 3531 3526 { 3532 - pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags)); 3527 + struct folio *folio = ptdesc_folio(ptdesc); 3533 3528 3534 3529 ptlock_free(ptdesc); 3535 - __ClearPageTable(ptdesc_page(ptdesc)); 3536 - mod_node_page_state(pgdat, NR_PAGETABLE, -ptdesc_nr_pages(ptdesc)); 3530 + __folio_clear_pgtable(folio); 3531 + lruvec_stat_sub_folio(folio, NR_PAGETABLE); 3537 3532 } 3538 3533 3539 3534 static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
+16 -15
include/linux/mmu_notifier.h
··· 234 234 }; 235 235 236 236 /** 237 - * struct mmu_interval_notifier_ops 237 + * struct mmu_interval_notifier_ops - callback for range notification 238 238 * @invalidate: Upon return the caller must stop using any SPTEs within this 239 239 * range. This function can sleep. Return false only if sleeping 240 240 * was required but mmu_notifier_range_blockable(range) is false. ··· 309 309 310 310 /** 311 311 * mmu_interval_set_seq - Save the invalidation sequence 312 - * @interval_sub - The subscription passed to invalidate 313 - * @cur_seq - The cur_seq passed to the invalidate() callback 312 + * @interval_sub: The subscription passed to invalidate 313 + * @cur_seq: The cur_seq passed to the invalidate() callback 314 314 * 315 315 * This must be called unconditionally from the invalidate callback of a 316 316 * struct mmu_interval_notifier_ops under the same lock that is used to call ··· 329 329 330 330 /** 331 331 * mmu_interval_read_retry - End a read side critical section against a VA range 332 - * interval_sub: The subscription 333 - * seq: The return of the paired mmu_interval_read_begin() 332 + * @interval_sub: The subscription 333 + * @seq: The return of the paired mmu_interval_read_begin() 334 334 * 335 335 * This MUST be called under a user provided lock that is also held 336 336 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq(). ··· 338 338 * Each call should be paired with a single mmu_interval_read_begin() and 339 339 * should be used to conclude the read side. 340 340 * 341 - * Returns true if an invalidation collided with this critical section, and 341 + * Returns: true if an invalidation collided with this critical section, and 342 342 * the caller should retry. 343 343 */ 344 344 static inline bool ··· 350 350 351 351 /** 352 352 * mmu_interval_check_retry - Test if a collision has occurred 353 - * interval_sub: The subscription 354 - * seq: The return of the matching mmu_interval_read_begin() 353 + * @interval_sub: The subscription 354 + * @seq: The return of the matching mmu_interval_read_begin() 355 355 * 356 356 * This can be used in the critical section between mmu_interval_read_begin() 357 - * and mmu_interval_read_retry(). A return of true indicates an invalidation 358 - * has collided with this critical region and a future 359 - * mmu_interval_read_retry() will return true. 360 - * 361 - * False is not reliable and only suggests a collision may not have 362 - * occurred. It can be called many times and does not have to hold the user 363 - * provided lock. 357 + * and mmu_interval_read_retry(). 364 358 * 365 359 * This call can be used as part of loops and other expensive operations to 366 360 * expedite a retry. 361 + * It can be called many times and does not have to hold the user 362 + * provided lock. 363 + * 364 + * Returns: true indicates an invalidation has collided with this critical 365 + * region and a future mmu_interval_read_retry() will return true. 366 + * False is not reliable and only suggests a collision may not have 367 + * occurred. 367 368 */ 368 369 static inline bool 369 370 mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
+37 -4
include/linux/netdevice.h
··· 311 311 int (*create) (struct sk_buff *skb, struct net_device *dev, 312 312 unsigned short type, const void *daddr, 313 313 const void *saddr, unsigned int len); 314 - int (*parse)(const struct sk_buff *skb, unsigned char *haddr); 314 + int (*parse)(const struct sk_buff *skb, 315 + const struct net_device *dev, 316 + unsigned char *haddr); 315 317 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); 316 318 void (*cache_update)(struct hh_cache *hh, 317 319 const struct net_device *dev, ··· 2157 2155 unsigned long state; 2158 2156 unsigned int flags; 2159 2157 unsigned short hard_header_len; 2158 + enum netdev_stat_type pcpu_stat_type:8; 2160 2159 netdev_features_t features; 2161 2160 struct inet6_dev __rcu *ip6_ptr; 2162 2161 __cacheline_group_end(net_device_read_txrx); ··· 2406 2403 /* mid-layer private */ 2407 2404 void *ml_priv; 2408 2405 enum netdev_ml_priv_type ml_priv_type; 2409 - 2410 - enum netdev_stat_type pcpu_stat_type:8; 2411 2406 2412 2407 #if IS_ENABLED(CONFIG_GARP) 2413 2408 struct garp_port __rcu *garp_port; ··· 3447 3446 3448 3447 if (!dev->header_ops || !dev->header_ops->parse) 3449 3448 return 0; 3450 - return dev->header_ops->parse(skb, haddr); 3449 + return dev->header_ops->parse(skb, dev, haddr); 3451 3450 } 3452 3451 3453 3452 static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) ··· 3577 3576 }; 3578 3577 DECLARE_PER_CPU(struct page_pool_bh, system_page_pool); 3579 3578 3579 + #define XMIT_RECURSION_LIMIT 8 3580 + 3580 3581 #ifndef CONFIG_PREEMPT_RT 3581 3582 static inline int dev_recursion_level(void) 3582 3583 { 3583 3584 return this_cpu_read(softnet_data.xmit.recursion); 3585 + } 3586 + 3587 + static inline bool dev_xmit_recursion(void) 3588 + { 3589 + return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > 3590 + XMIT_RECURSION_LIMIT); 3591 + } 3592 + 3593 + static inline void dev_xmit_recursion_inc(void) 3594 + { 3595 + __this_cpu_inc(softnet_data.xmit.recursion); 3596 + } 3597 + 3598 + static inline void dev_xmit_recursion_dec(void) 3599 + { 3600 + __this_cpu_dec(softnet_data.xmit.recursion); 3584 3601 } 3585 3602 #else 3586 3603 static inline int dev_recursion_level(void) ··· 3606 3587 return current->net_xmit.recursion; 3607 3588 } 3608 3589 3590 + static inline bool dev_xmit_recursion(void) 3591 + { 3592 + return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT); 3593 + } 3594 + 3595 + static inline void dev_xmit_recursion_inc(void) 3596 + { 3597 + current->net_xmit.recursion++; 3598 + } 3599 + 3600 + static inline void dev_xmit_recursion_dec(void) 3601 + { 3602 + current->net_xmit.recursion--; 3603 + } 3609 3604 #endif 3610 3605 3611 3606 void __netif_schedule(struct Qdisc *q);
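The dev_xmit_recursion() trio implements a per-context transmit depth counter (per-CPU softnet state without PREEMPT_RT, per-task with it). A sketch of how a re-entrant transmit path would use it; the drop-on-overflow policy and the foo_dev_xmit() name are assumptions:

static int foo_dev_xmit(struct sk_buff *skb)
{
        int ret;

        if (dev_xmit_recursion()) {
                /* Deeper than XMIT_RECURSION_LIMIT: almost certainly a
                 * stacked-device loop, so drop instead of recursing. */
                kfree_skb(skb);
                return NET_XMIT_DROP;
        }

        dev_xmit_recursion_inc();
        ret = dev_queue_xmit(skb);      /* may re-enter via a stacked device */
        dev_xmit_recursion_dec();
        return ret;
}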
+1 -1
include/linux/nvme-auth.h
··· 11 11 struct nvme_dhchap_key { 12 12 size_t len; 13 13 u8 hash; 14 - u8 key[]; 14 + u8 key[] __counted_by(len); 15 15 }; 16 16 17 17 u32 nvme_auth_get_seqnum(void);
-5
include/linux/platform_device.h
··· 31 31 struct resource *resource; 32 32 33 33 const struct platform_device_id *id_entry; 34 - /* 35 - * Driver name to force a match. Do not set directly, because core 36 - * frees it. Use driver_set_override() to set or clear it. 37 - */ 38 - const char *driver_override; 39 34 40 35 /* MFD cell pointer */ 41 36 struct mfd_cell *mfd_cell;
+5 -1
include/linux/rseq_types.h
··· 133 133 * @active: MM CID is active for the task 134 134 * @cid: The CID associated to the task either permanently or 135 135 * borrowed from the CPU 136 + * @node: Queued in the per MM MMCID list 136 137 */ 137 138 struct sched_mm_cid { 138 139 unsigned int active; 139 140 unsigned int cid; 141 + struct hlist_node node; 140 142 }; 141 143 142 144 /** ··· 159 157 * @work: Regular work to handle the affinity mode change case 160 158 * @lock: Spinlock to protect against affinity setting which can't take @mutex 161 159 * @mutex: Mutex to serialize forks and exits related to this mm 160 + * @user_list: List of the MM CID users of a MM 162 161 * @nr_cpus_allowed: The number of CPUs in the per MM allowed CPUs map. The map 163 162 * is growth only. 164 163 * @users: The number of tasks sharing this MM. Separate from mm::mm_users ··· 180 177 181 178 raw_spinlock_t lock; 182 179 struct mutex mutex; 180 + struct hlist_head user_list; 183 181 184 182 /* Low frequency modified */ 185 183 unsigned int nr_cpus_allowed; 186 184 unsigned int users; 187 185 unsigned int pcpu_thrs; 188 186 unsigned int update_deferred; 189 - }____cacheline_aligned_in_smp; 187 + } ____cacheline_aligned; 190 188 #else /* CONFIG_SCHED_MM_CID */ 191 189 struct mm_mm_cid { }; 192 190 struct sched_mm_cid { };
-2
include/linux/sched.h
··· 2354 2354 #ifdef CONFIG_SCHED_MM_CID 2355 2355 void sched_mm_cid_before_execve(struct task_struct *t); 2356 2356 void sched_mm_cid_after_execve(struct task_struct *t); 2357 - void sched_mm_cid_fork(struct task_struct *t); 2358 2357 void sched_mm_cid_exit(struct task_struct *t); 2359 2358 static __always_inline int task_mm_cid(struct task_struct *t) 2360 2359 { ··· 2362 2363 #else 2363 2364 static inline void sched_mm_cid_before_execve(struct task_struct *t) { } 2364 2365 static inline void sched_mm_cid_after_execve(struct task_struct *t) { } 2365 - static inline void sched_mm_cid_fork(struct task_struct *t) { } 2366 2366 static inline void sched_mm_cid_exit(struct task_struct *t) { } 2367 2367 static __always_inline int task_mm_cid(struct task_struct *t) 2368 2368 {
+1
include/linux/serial_8250.h
··· 195 195 void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud, 196 196 unsigned int quot); 197 197 int fsl8250_handle_irq(struct uart_port *port); 198 + void serial8250_handle_irq_locked(struct uart_port *port, unsigned int iir); 198 199 int serial8250_handle_irq(struct uart_port *port, unsigned int iir); 199 200 u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr); 200 201 void serial8250_read_char(struct uart_8250_port *up, u16 lsr);
+2 -2
include/linux/uaccess.h
··· 792 792 793 793 /** 794 794 * scoped_user_rw_access_size - Start a scoped user read/write access with given size 795 - * @uptr Pointer to the user space address to read from and write to 795 + * @uptr: Pointer to the user space address to read from and write to 796 796 * @size: Size of the access starting from @uptr 797 797 * @elbl: Error label to goto when the access region is rejected 798 798 * ··· 803 803 804 804 /** 805 805 * scoped_user_rw_access - Start a scoped user read/write access 806 - * @uptr Pointer to the user space address to read from and write to 806 + * @uptr: Pointer to the user space address to read from and write to 807 807 * @elbl: Error label to goto when the access region is rejected 808 808 * 809 809 * The size of the access starting from @uptr is determined via sizeof(*@uptr).
+6 -2
include/linux/usb.h
··· 1862 1862 * SYNCHRONOUS CALL SUPPORT * 1863 1863 *-------------------------------------------------------------------*/ 1864 1864 1865 + /* Maximum value allowed for timeout in synchronous routines below */ 1866 + #define USB_MAX_SYNCHRONOUS_TIMEOUT 60000 /* ms */ 1867 + 1865 1868 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe, 1866 1869 __u8 request, __u8 requesttype, __u16 value, __u16 index, 1867 1870 void *data, __u16 size, int timeout); 1868 1871 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, 1869 1872 void *data, int len, int *actual_length, int timeout); 1870 1873 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, 1871 - void *data, int len, int *actual_length, 1872 - int timeout); 1874 + void *data, int len, int *actual_length, int timeout); 1875 + extern int usb_bulk_msg_killable(struct usb_device *usb_dev, unsigned int pipe, 1876 + void *data, int len, int *actual_length, int timeout); 1873 1877 1874 1878 /* wrappers around usb_control_msg() for the most common standard requests */ 1875 1879 int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request,
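Going by the _killable suffix, usb_bulk_msg_killable() should take the same arguments as usb_bulk_msg() while letting a fatal signal abort the wait; a hedged usage sketch (the error value returned on a kill is an assumption):

int actual = 0;
int ret;

/* Same convention as usb_bulk_msg(): device, pipe, buffer, length,
 * bytes-transferred out-parameter, timeout in milliseconds. */
ret = usb_bulk_msg_killable(udev, usb_rcvbulkpipe(udev, 1),
                            buf, len, &actual, 5000);
if (ret)        /* presumably -EINTR when killed, usual USB errors otherwise */
        dev_err(&udev->dev, "bulk read failed: %d (%d bytes)\n", ret, actual);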
+3
include/linux/usb/quirks.h
··· 78 78 /* skip BOS descriptor request */ 79 79 #define USB_QUIRK_NO_BOS BIT(17) 80 80 81 + /* Device claims zero configurations, forcing to 1 */ 82 + #define USB_QUIRK_FORCE_ONE_CONFIG BIT(18) 83 + 81 84 #endif /* __LINUX_USB_QUIRKS_H */
+1
include/linux/usb/usbnet.h
··· 132 132 #define FLAG_MULTI_PACKET 0x2000 133 133 #define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ 134 134 #define FLAG_NOARP 0x8000 /* device can't do ARP */ 135 + #define FLAG_NOMAXMTU 0x10000 /* allow max_mtu above hard_mtu */ 135 136 136 137 /* init device ... can sleep, or cause probe() failure */ 137 138 int (*bind)(struct usbnet *, struct usb_interface *);
+14
include/net/ip6_tunnel.h
··· 156 156 { 157 157 int pkt_len, err; 158 158 159 + if (unlikely(dev_recursion_level() > IP_TUNNEL_RECURSION_LIMIT)) { 160 + if (dev) { 161 + net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 162 + dev->name); 163 + DEV_STATS_INC(dev, tx_errors); 164 + } 165 + kfree_skb(skb); 166 + return; 167 + } 168 + 169 + dev_xmit_recursion_inc(); 170 + 159 171 memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); 160 172 IP6CB(skb)->flags = ip6cb_flags; 161 173 pkt_len = skb->len - skb_inner_network_offset(skb); ··· 178 166 pkt_len = -1; 179 167 iptunnel_xmit_stats(dev, pkt_len); 180 168 } 169 + 170 + dev_xmit_recursion_dec(); 181 171 } 182 172 #endif 183 173 #endif
+29 -6
include/net/ip_tunnels.h
··· 27 27 #include <net/ip6_route.h> 28 28 #endif 29 29 30 + /* Recursion limit for tunnel xmit to detect routing loops. 31 + * Unlike XMIT_RECURSION_LIMIT (8) used in the no-qdisc path, tunnel 32 + * recursion involves route lookups and full IP output, consuming much 33 + * more stack per level, so a lower limit is needed. 34 + */ 35 + #define IP_TUNNEL_RECURSION_LIMIT 4 36 + 30 37 /* Keep error state on tunnel for 30 sec */ 31 38 #define IPTUNNEL_ERR_TIMEO (30*HZ) 32 39 ··· 665 658 static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len) 666 659 { 667 660 if (pkt_len > 0) { 668 - struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats); 661 + if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) { 662 + struct pcpu_dstats *dstats = get_cpu_ptr(dev->dstats); 669 663 670 - u64_stats_update_begin(&tstats->syncp); 671 - u64_stats_add(&tstats->tx_bytes, pkt_len); 672 - u64_stats_inc(&tstats->tx_packets); 673 - u64_stats_update_end(&tstats->syncp); 674 - put_cpu_ptr(tstats); 664 + u64_stats_update_begin(&dstats->syncp); 665 + u64_stats_add(&dstats->tx_bytes, pkt_len); 666 + u64_stats_inc(&dstats->tx_packets); 667 + u64_stats_update_end(&dstats->syncp); 668 + put_cpu_ptr(dstats); 669 + return; 670 + } 671 + if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) { 672 + struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats); 673 + 674 + u64_stats_update_begin(&tstats->syncp); 675 + u64_stats_add(&tstats->tx_bytes, pkt_len); 676 + u64_stats_inc(&tstats->tx_packets); 677 + u64_stats_update_end(&tstats->syncp); 678 + put_cpu_ptr(tstats); 679 + return; 680 + } 681 + pr_err_once("iptunnel_xmit_stats pcpu_stat_type=%d\n", 682 + dev->pcpu_stat_type); 683 + WARN_ON_ONCE(1); 675 684 return; 676 685 } 677 686
+3 -1
include/net/mac80211.h
··· 7407 7407 * @band: the band to transmit on 7408 7408 * @sta: optional pointer to get the station to send the frame to 7409 7409 * 7410 - * Return: %true if the skb was prepared, %false otherwise 7410 + * Return: %true if the skb was prepared, %false otherwise. 7411 + * On failure, the skb is freed by this function; callers must not 7412 + * free it again. 7411 7413 * 7412 7414 * Note: must be called under RCU lock 7413 7415 */
+2 -4
include/net/netfilter/nf_tables.h
··· 277 277 unsigned char data[]; 278 278 }; 279 279 280 - #define NFT_SET_ELEM_INTERNAL_LAST 0x1 281 - 282 280 /* placeholder structure for opaque set element backend representation. */ 283 281 struct nft_elem_priv { }; 284 282 ··· 286 288 * @key: element key 287 289 * @key_end: closing element key 288 290 * @data: element data 289 - * @flags: flags 290 291 * @priv: element private data and extensions 291 292 */ 292 293 struct nft_set_elem { ··· 301 304 u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)]; 302 305 struct nft_data val; 303 306 } data; 304 - u32 flags; 305 307 struct nft_elem_priv *priv; 306 308 }; 307 309 ··· 874 878 u64 timeout, u64 expiration, gfp_t gfp); 875 879 int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set, 876 880 struct nft_expr *expr_array[]); 881 + void nft_set_elem_expr_destroy(const struct nft_ctx *ctx, 882 + struct nft_set_elem_expr *elem_expr); 877 883 void nft_set_elem_destroy(const struct nft_set *set, 878 884 const struct nft_elem_priv *elem_priv, 879 885 bool destroy_expr);
+1 -1
include/net/page_pool/types.h
··· 247 247 /* User-facing fields, protected by page_pools_lock */ 248 248 struct { 249 249 struct hlist_node list; 250 - u64 detach_time; 250 + ktime_t detach_time; 251 251 u32 id; 252 252 } user; 253 253 };
+33
include/net/sch_generic.h
··· 716 716 void qdisc_put(struct Qdisc *qdisc); 717 717 void qdisc_put_unlocked(struct Qdisc *qdisc); 718 718 void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len); 719 + 720 + static inline void dev_reset_queue(struct net_device *dev, 721 + struct netdev_queue *dev_queue, 722 + void *_unused) 723 + { 724 + struct Qdisc *qdisc; 725 + bool nolock; 726 + 727 + qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 728 + if (!qdisc) 729 + return; 730 + 731 + nolock = qdisc->flags & TCQ_F_NOLOCK; 732 + 733 + if (nolock) 734 + spin_lock_bh(&qdisc->seqlock); 735 + spin_lock_bh(qdisc_lock(qdisc)); 736 + 737 + qdisc_reset(qdisc); 738 + 739 + spin_unlock_bh(qdisc_lock(qdisc)); 740 + if (nolock) { 741 + clear_bit(__QDISC_STATE_MISSED, &qdisc->state); 742 + clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); 743 + spin_unlock_bh(&qdisc->seqlock); 744 + } 745 + } 746 + 719 747 #ifdef CONFIG_NET_SCHED 720 748 int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type, 721 749 void *type_data); ··· 1456 1428 struct mini_Qdisc __rcu **p_miniq); 1457 1429 void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, 1458 1430 struct tcf_block *block); 1431 + 1432 + static inline bool mini_qdisc_pair_inited(struct mini_Qdisc_pair *miniqp) 1433 + { 1434 + return !!miniqp->p_miniq; 1435 + } 1459 1436 1460 1437 void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx); 1461 1438
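dev_reset_queue()'s (dev, queue, arg) shape matches the callback type of netdev_for_each_tx_queue(), so a deactivation path can reset every TX qdisc of a device in one sweep, plus the ingress queue if present; a sketch of the presumed usage:

/* The unused third argument exists only to satisfy the callback type. */
netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
if (dev_ingress_queue(dev))
        dev_reset_queue(dev, dev_ingress_queue(dev), NULL);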
+1 -1
include/net/udp_tunnel.h
··· 52 52 static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, 53 53 struct socket **sockp) 54 54 { 55 - return 0; 55 + return -EPFNOSUPPORT; 56 56 } 57 57 #endif 58 58
+5 -2
include/trace/events/task.h
··· 38 38 TP_ARGS(task, comm), 39 39 40 40 TP_STRUCT__entry( 41 + __field( pid_t, pid) 41 42 __array( char, oldcomm, TASK_COMM_LEN) 42 43 __array( char, newcomm, TASK_COMM_LEN) 43 44 __field( short, oom_score_adj) 44 45 ), 45 46 46 47 TP_fast_assign( 48 + __entry->pid = task->pid; 47 49 memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN); 48 50 strscpy(entry->newcomm, comm, TASK_COMM_LEN); 49 51 __entry->oom_score_adj = task->signal->oom_score_adj; 50 52 ), 51 53 52 - TP_printk("oldcomm=%s newcomm=%s oom_score_adj=%hd", 53 - __entry->oldcomm, __entry->newcomm, __entry->oom_score_adj) 54 + TP_printk("pid=%d oldcomm=%s newcomm=%s oom_score_adj=%hd", 55 + __entry->pid, __entry->oldcomm, 56 + __entry->newcomm, __entry->oom_score_adj) 54 57 ); 55 58 56 59 /**
+8
include/uapi/linux/kvm.h
··· 14 14 #include <linux/ioctl.h> 15 15 #include <asm/kvm.h> 16 16 17 + #ifdef __KERNEL__ 18 + #include <linux/kvm_types.h> 19 + #endif 20 + 17 21 #define KVM_API_VERSION 12 18 22 19 23 /* ··· 1605 1601 __u16 size; 1606 1602 __u32 offset; 1607 1603 __u32 bucket_size; 1604 + #ifdef __KERNEL__ 1605 + char name[KVM_STATS_NAME_SIZE]; 1606 + #else 1608 1607 char name[]; 1608 + #endif 1609 1609 }; 1610 1610 1611 1611 #define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
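The #ifdef keeps the userspace ABI untouched: there, name[] stays flexible and each descriptor occupies sizeof(struct kvm_stats_desc) + header.name_size bytes. A hedged userspace sketch of walking the descriptors behind a KVM stats fd (error handling elided):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/kvm.h>

void list_stat_names(int fd)    /* fd from ioctl(vm_fd, KVM_GET_STATS_FD, NULL) */
{
        struct kvm_stats_header hdr;
        size_t dsz;
        char *descs;

        pread(fd, &hdr, sizeof(hdr), 0);
        dsz = sizeof(struct kvm_stats_desc) + hdr.name_size;
        descs = malloc(hdr.num_desc * dsz);
        pread(fd, descs, hdr.num_desc * dsz, hdr.desc_offset);

        for (unsigned int i = 0; i < hdr.num_desc; i++) {
                struct kvm_stats_desc *d = (void *)(descs + i * dsz);
                printf("%s\n", d->name);        /* name[] is name_size bytes */
        }
        free(descs);
}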
+1 -1
io_uring/bpf_filter.c
··· 85 85 do { 86 86 if (filter == &dummy_filter) 87 87 return -EACCES; 88 - ret = bpf_prog_run(filter->prog, &bpf_ctx); 88 + ret = bpf_prog_run_pin_on_cpu(filter->prog, &bpf_ctx); 89 89 if (!ret) 90 90 return -EACCES; 91 91 filter = filter->next;
+7 -3
io_uring/eventfd.c
··· 76 76 { 77 77 bool skip = false; 78 78 struct io_ev_fd *ev_fd; 79 - 80 - if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) 81 - return; 79 + struct io_rings *rings; 82 80 83 81 guard(rcu)(); 82 + 83 + rings = rcu_dereference(ctx->rings_rcu); 84 + if (!rings) 85 + return; 86 + if (READ_ONCE(rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED) 87 + return; 84 88 ev_fd = rcu_dereference(ctx->io_ev_fd); 85 89 /* 86 90 * Check again if ev_fd exists in case an io_eventfd_unregister call
+3 -1
io_uring/io_uring.c
··· 1745 1745 * well as 2 contiguous entries. 1746 1746 */ 1747 1747 if (!(ctx->flags & IORING_SETUP_SQE_MIXED) || *left < 2 || 1748 - !(ctx->cached_sq_head & (ctx->sq_entries - 1))) 1748 + (unsigned)(sqe - ctx->sq_sqes) >= ctx->sq_entries - 1) 1749 1749 return io_init_fail_req(req, -EINVAL); 1750 1750 /* 1751 1751 * A 128b operation on a mixed SQ uses two entries, so we have ··· 2066 2066 io_free_region(ctx->user, &ctx->sq_region); 2067 2067 io_free_region(ctx->user, &ctx->ring_region); 2068 2068 ctx->rings = NULL; 2069 + RCU_INIT_POINTER(ctx->rings_rcu, NULL); 2069 2070 ctx->sq_sqes = NULL; 2070 2071 } 2071 2072 ··· 2704 2703 if (ret) 2705 2704 return ret; 2706 2705 ctx->rings = rings = io_region_get_ptr(&ctx->ring_region); 2706 + rcu_assign_pointer(ctx->rings_rcu, rings); 2707 2707 if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) 2708 2708 ctx->sq_array = (u32 *)((char *)rings + rl->sq_array_offset); 2709 2709
+22 -5
io_uring/kbuf.c
··· 34 34 35 35 static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len) 36 36 { 37 + /* No data consumed, return false early to avoid consuming the buffer */ 38 + if (!len) 39 + return false; 40 + 37 41 while (len) { 38 42 struct io_uring_buf *buf; 39 43 u32 buf_len, this_len; ··· 115 111 116 112 buf = req->kbuf; 117 113 bl = io_buffer_get_list(ctx, buf->bgid); 118 - list_add(&buf->list, &bl->buf_list); 119 - bl->nbufs++; 114 + /* 115 + * If the buffer list was upgraded to a ring-based one, or removed, 116 + * while the request was in-flight in io-wq, drop it. 117 + */ 118 + if (bl && !(bl->flags & IOBL_BUF_RING)) { 119 + list_add(&buf->list, &bl->buf_list); 120 + bl->nbufs++; 121 + } else { 122 + kfree(buf); 123 + } 120 124 req->flags &= ~REQ_F_BUFFER_SELECTED; 125 + req->kbuf = NULL; 121 126 122 127 io_ring_submit_unlock(ctx, issue_flags); 123 128 return true; ··· 216 203 sel.addr = u64_to_user_ptr(READ_ONCE(buf->addr)); 217 204 218 205 if (io_should_commit(req, issue_flags)) { 219 - io_kbuf_commit(req, sel.buf_list, *len, 1); 206 + if (!io_kbuf_commit(req, sel.buf_list, *len, 1)) 207 + req->flags |= REQ_F_BUF_MORE; 220 208 sel.buf_list = NULL; 221 209 } 222 210 return sel; ··· 350 336 */ 351 337 if (ret > 0) { 352 338 req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE; 353 - io_kbuf_commit(req, sel->buf_list, arg->out_len, ret); 339 + if (!io_kbuf_commit(req, sel->buf_list, arg->out_len, ret)) 340 + req->flags |= REQ_F_BUF_MORE; 354 341 } 355 342 } else { 356 343 ret = io_provided_buffers_select(req, &arg->out_len, sel->buf_list, arg->iovs); ··· 397 382 398 383 if (bl) 399 384 ret = io_kbuf_commit(req, bl, len, nr); 385 + if (ret && (req->flags & REQ_F_BUF_MORE)) 386 + ret = false; 400 387 401 - req->flags &= ~REQ_F_BUFFER_RING; 388 + req->flags &= ~(REQ_F_BUFFER_RING | REQ_F_BUF_MORE); 402 389 return ret; 403 390 } 404 391
+7 -2
io_uring/poll.c
··· 272 272 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); 273 273 v &= ~IO_POLL_RETRY_FLAG; 274 274 } 275 + v &= IO_POLL_REF_MASK; 275 276 } 276 277 277 278 /* the mask was stashed in __io_poll_execute */ ··· 305 304 return IOU_POLL_REMOVE_POLL_USE_RES; 306 305 } 307 306 } else { 308 - int ret = io_poll_issue(req, tw); 307 + int ret; 309 308 309 + /* multiple refs and HUP, ensure we loop once more */ 310 + if ((req->cqe.res & (POLLHUP | POLLRDHUP)) && v != 1) 311 + v--; 312 + 313 + ret = io_poll_issue(req, tw); 310 314 if (ret == IOU_COMPLETE) 311 315 return IOU_POLL_REMOVE_POLL_USE_RES; 312 316 else if (ret == IOU_REQUEUE) ··· 327 321 * Release all references, retry if someone tried to restart 328 322 * task_work while we were executing it. 329 323 */ 330 - v &= IO_POLL_REF_MASK; 331 324 } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK); 332 325 333 326 io_napi_add(req);
+13 -2
io_uring/register.c
··· 202 202 return -EPERM; 203 203 /* 204 204 * Similar to seccomp, disallow setting a filter if task_no_new_privs 205 - * is true and we're not CAP_SYS_ADMIN. 205 + * is false and we're not CAP_SYS_ADMIN. 206 206 */ 207 207 if (!task_no_new_privs(current) && 208 208 !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN)) ··· 238 238 239 239 /* 240 240 * Similar to seccomp, disallow setting a filter if task_no_new_privs 241 - * is true and we're not CAP_SYS_ADMIN. 241 + * is false and we're not CAP_SYS_ADMIN. 242 242 */ 243 243 if (!task_no_new_privs(current) && 244 244 !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN)) ··· 633 633 ctx->sq_entries = p->sq_entries; 634 634 ctx->cq_entries = p->cq_entries; 635 635 636 + /* 637 + * Just mark any flag we may have missed and that the application 638 + * should act on unconditionally. Worst case it'll be an extra 639 + * syscall. 640 + */ 641 + atomic_or(IORING_SQ_TASKRUN | IORING_SQ_NEED_WAKEUP, &n.rings->sq_flags); 636 642 ctx->rings = n.rings; 643 + rcu_assign_pointer(ctx->rings_rcu, n.rings); 644 + 637 645 ctx->sq_sqes = n.sq_sqes; 638 646 swap_old(ctx, o, n, ring_region); 639 647 swap_old(ctx, o, n, sq_region); ··· 650 642 out: 651 643 spin_unlock(&ctx->completion_lock); 652 644 mutex_unlock(&ctx->mmap_lock); 645 + /* Wait for concurrent io_ctx_mark_taskrun() */ 646 + if (to_free == &o) 647 + synchronize_rcu_expedited(); 653 648 io_register_free_rings(ctx, to_free); 654 649 655 650 if (ctx->sq_data)
+20 -2
io_uring/tw.c
··· 152 152 WARN_ON_ONCE(ret); 153 153 } 154 154 155 + /* 156 + * Sets IORING_SQ_TASKRUN in the sq_flags shared with userspace, using the 157 + * RCU protected rings pointer to be safe against concurrent ring resizing. 158 + */ 159 + static void io_ctx_mark_taskrun(struct io_ring_ctx *ctx) 160 + { 161 + lockdep_assert_in_rcu_read_lock(); 162 + 163 + if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) { 164 + struct io_rings *rings = rcu_dereference(ctx->rings_rcu); 165 + 166 + atomic_or(IORING_SQ_TASKRUN, &rings->sq_flags); 167 + } 168 + } 169 + 155 170 void io_req_local_work_add(struct io_kiocb *req, unsigned flags) 156 171 { 157 172 struct io_ring_ctx *ctx = req->ctx; ··· 221 206 */ 222 207 223 208 if (!head) { 224 - if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) 225 - atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); 209 + io_ctx_mark_taskrun(ctx); 226 210 if (ctx->has_evfd) 227 211 io_eventfd_signal(ctx, false); 228 212 } ··· 245 231 if (!llist_add(&req->io_task_work.node, &tctx->task_list)) 246 232 return; 247 233 234 + /* 235 + * Doesn't need to use ->rings_rcu, as resizing isn't supported for 236 + * !DEFER_TASKRUN. 237 + */ 248 238 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) 249 239 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); 250 240
+20 -4
kernel/bpf/btf.c
··· 1787 1787 * of the _bh() version. 1788 1788 */ 1789 1789 spin_lock_irqsave(&btf_idr_lock, flags); 1790 - idr_remove(&btf_idr, btf->id); 1790 + if (btf->id) { 1791 + idr_remove(&btf_idr, btf->id); 1792 + /* 1793 + * Clear the id here to make this function idempotent, since it will get 1794 + * called a couple of times for module BTFs: on module unload, and then 1795 + * the final btf_put(). btf_alloc_id() starts IDs at 1, so we can use 1796 + * 0 as a sentinel value. 1797 + */ 1798 + WRITE_ONCE(btf->id, 0); 1799 + } 1791 1800 spin_unlock_irqrestore(&btf_idr_lock, flags); 1792 1801 } ··· 8124 8115 { 8125 8116 const struct btf *btf = filp->private_data; 8126 8117 8127 - seq_printf(m, "btf_id:\t%u\n", btf->id); 8118 + seq_printf(m, "btf_id:\t%u\n", READ_ONCE(btf->id)); 8128 8119 } 8129 8120 #endif 8130 8121 ··· 8206 8197 if (copy_from_user(&info, uinfo, info_copy)) 8207 8198 return -EFAULT; 8208 8199 8209 - info.id = btf->id; 8200 + info.id = READ_ONCE(btf->id); 8210 8201 ubtf = u64_to_user_ptr(info.btf); 8211 8202 btf_copy = min_t(u32, btf->data_size, info.btf_size); 8212 8203 if (copy_to_user(ubtf, btf->data, btf_copy)) ··· 8269 8260 8270 8261 u32 btf_obj_id(const struct btf *btf) 8271 8262 { 8272 - return btf->id; 8263 + return READ_ONCE(btf->id); 8273 8264 } 8274 8265 8275 8266 bool btf_is_kernel(const struct btf *btf) ··· 8391 8382 if (btf_mod->module != module) 8392 8383 continue; 8393 8384 8385 + /* 8386 + * For modules, we do the freeing of BTF IDR as soon as 8387 + * the module goes away to disable BTF discovery, since the 8388 + * btf_try_get_module() on such BTFs will fail. This may 8389 + * be called again on btf_put(), but it's ok to do so. 8390 + */ 8391 + btf_free_id(btf_mod->btf); 8394 8392 list_del(&btf_mod->list); 8395 8393 if (btf_mod->sysfs_attr) 8396 8394 sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
+35 -8
kernel/bpf/core.c
··· 1422 1422 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1423 1423 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); 1424 1424 break; 1425 + 1426 + case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: 1427 + case BPF_ST | BPF_PROBE_MEM32 | BPF_W: 1428 + case BPF_ST | BPF_PROBE_MEM32 | BPF_H: 1429 + case BPF_ST | BPF_PROBE_MEM32 | BPF_B: 1430 + *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 1431 + from->imm); 1432 + *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1433 + /* 1434 + * Cannot use BPF_STX_MEM() macro here as it 1435 + * hardcodes BPF_MEM mode, losing PROBE_MEM32 1436 + * and breaking arena addressing in the JIT. 1437 + */ 1438 + *to++ = (struct bpf_insn) { 1439 + .code = BPF_STX | BPF_PROBE_MEM32 | 1440 + BPF_SIZE(from->code), 1441 + .dst_reg = from->dst_reg, 1442 + .src_reg = BPF_REG_AX, 1443 + .off = from->off, 1444 + }; 1445 + break; 1425 1446 } 1426 1447 out: 1427 1448 return to - to_buff; ··· 1757 1736 } 1758 1737 1759 1738 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1739 + /* Absolute value of s32 without undefined behavior for S32_MIN */ 1740 + static u32 abs_s32(s32 x) 1741 + { 1742 + return x >= 0 ? (u32)x : -(u32)x; 1743 + } 1744 + 1760 1745 /** 1761 1746 * ___bpf_prog_run - run eBPF program on a given context 1762 1747 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers ··· 1927 1900 DST = do_div(AX, (u32) SRC); 1928 1901 break; 1929 1902 case 1: 1930 - AX = abs((s32)DST); 1931 - AX = do_div(AX, abs((s32)SRC)); 1903 + AX = abs_s32((s32)DST); 1904 + AX = do_div(AX, abs_s32((s32)SRC)); 1932 1905 if ((s32)DST < 0) 1933 1906 DST = (u32)-AX; 1934 1907 else ··· 1955 1928 DST = do_div(AX, (u32) IMM); 1956 1929 break; 1957 1930 case 1: 1958 - AX = abs((s32)DST); 1959 - AX = do_div(AX, abs((s32)IMM)); 1931 + AX = abs_s32((s32)DST); 1932 + AX = do_div(AX, abs_s32((s32)IMM)); 1960 1933 if ((s32)DST < 0) 1961 1934 DST = (u32)-AX; 1962 1935 else ··· 1982 1955 DST = (u32) AX; 1983 1956 break; 1984 1957 case 1: 1985 - AX = abs((s32)DST); 1986 - do_div(AX, abs((s32)SRC)); 1958 + AX = abs_s32((s32)DST); 1959 + do_div(AX, abs_s32((s32)SRC)); 1987 1960 if (((s32)DST < 0) == ((s32)SRC < 0)) 1988 1961 DST = (u32)AX; 1989 1962 else ··· 2009 1982 DST = (u32) AX; 2010 1983 break; 2011 1984 case 1: 2012 - AX = abs((s32)DST); 2013 - do_div(AX, abs((s32)IMM)); 1985 + AX = abs_s32((s32)DST); 1986 + do_div(AX, abs_s32((s32)IMM)); 2014 1987 if (((s32)DST < 0) == ((s32)IMM < 0)) 2015 1988 DST = (u32)AX; 2016 1989 else
+25 -8
kernel/bpf/verifier.c
··· 15910 15910 /* Apply bswap if alu64 or switch between big-endian and little-endian machines */ 15911 15911 bool need_bswap = alu64 || (to_le == is_big_endian); 15912 15912 15913 + /* 15914 + * If the register is mutated, manually reset its scalar ID to break 15915 + * any existing ties and avoid incorrect bounds propagation. 15916 + */ 15917 + if (need_bswap || insn->imm == 16 || insn->imm == 32) 15918 + dst_reg->id = 0; 15919 + 15913 15920 if (need_bswap) { 15914 15921 if (insn->imm == 16) 15915 15922 dst_reg->var_off = tnum_bswap16(dst_reg->var_off); ··· 15999 15992 else 16000 15993 return 0; 16001 15994 16002 - branch = push_stack(env, env->insn_idx + 1, env->insn_idx, false); 15995 + branch = push_stack(env, env->insn_idx, env->insn_idx, false); 16003 15996 if (IS_ERR(branch)) 16004 15997 return PTR_ERR(branch); 16005 15998 ··· 17415 17408 continue; 17416 17409 if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST)) 17417 17410 continue; 17411 + /* 17412 + * Skip mixed 32/64-bit links: the delta relationship doesn't 17413 + * hold across different ALU widths. 17414 + */ 17415 + if (((reg->id ^ known_reg->id) & BPF_ADD_CONST) == BPF_ADD_CONST) 17416 + continue; 17418 17417 if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) || 17419 17418 reg->off == known_reg->off) { 17420 17419 s32 saved_subreg_def = reg->subreg_def; ··· 17448 17435 scalar32_min_max_add(reg, &fake_reg); 17449 17436 scalar_min_max_add(reg, &fake_reg); 17450 17437 reg->var_off = tnum_add(reg->var_off, fake_reg.var_off); 17451 - if (known_reg->id & BPF_ADD_CONST32) 17438 + if ((reg->id | known_reg->id) & BPF_ADD_CONST32) 17452 17439 zext_32_to_64(reg); 17453 17440 reg_bounds_sync(reg); 17454 17441 } ··· 19876 19863 * Also verify that new value satisfies old value range knowledge. 19877 19864 */ 19878 19865 19879 - /* ADD_CONST mismatch: different linking semantics */ 19880 - if ((rold->id & BPF_ADD_CONST) && !(rcur->id & BPF_ADD_CONST)) 19881 - return false; 19882 - 19883 - if (rold->id && !(rold->id & BPF_ADD_CONST) && (rcur->id & BPF_ADD_CONST)) 19866 + /* 19867 + * ADD_CONST flags must match exactly: BPF_ADD_CONST32 and 19868 + * BPF_ADD_CONST64 have different linking semantics in 19869 + * sync_linked_regs() (alu32 zero-extends, alu64 does not), 19870 + * so pruning across different flag types is unsafe. 19871 + */ 19872 + if (rold->id && 19873 + (rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST)) 19884 19874 return false; 19885 19875 19886 19876 /* Both have offset linkage: offsets must match */ ··· 20920 20904 * state when it exits. 20921 20905 */ 20922 20906 int err = check_resource_leak(env, exception_exit, 20923 - !env->cur_state->curframe, 20907 + exception_exit || !env->cur_state->curframe, 20908 + exception_exit ? "bpf_throw" : 20924 20909 "BPF_EXIT instruction in main prog"); 20925 20910 if (err) 20926 20911 return err;
+6
kernel/cgroup/cgroup.c
··· 5109 5109 return; 5110 5110 5111 5111 task = list_entry(it->task_pos, struct task_struct, cg_list); 5112 + /* 5113 + * Hide tasks that are exiting but not yet removed. Keep zombie 5114 + * leaders with live threads visible. 5115 + */ 5116 + if ((task->flags & PF_EXITING) && !atomic_read(&task->signal->live)) 5117 + goto repeat; 5112 5118 5113 5119 if (it->flags & CSS_TASK_ITER_PROCS) { 5114 5120 /* if PROCS, skip over tasks which aren't group leaders */
+31 -28
kernel/cgroup/cpuset.c
··· 879 879 /* 880 880 * Cgroup v2 doesn't support domain attributes, just set all of them 881 881 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a 882 - * subset of HK_TYPE_DOMAIN housekeeping CPUs. 882 + * subset of HK_TYPE_DOMAIN_BOOT housekeeping CPUs. 883 883 */ 884 884 for (i = 0; i < ndoms; i++) { 885 885 /* ··· 888 888 */ 889 889 if (!csa || csa[i] == &top_cpuset) 890 890 cpumask_and(doms[i], top_cpuset.effective_cpus, 891 - housekeeping_cpumask(HK_TYPE_DOMAIN)); 891 + housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT)); 892 892 else 893 893 cpumask_copy(doms[i], csa[i]->effective_cpus); 894 894 if (dattr) ··· 1329 1329 } 1330 1330 1331 1331 /* 1332 - * update_hk_sched_domains - Update HK cpumasks & rebuild sched domains 1332 + * cpuset_update_sd_hk_unlock - Rebuild sched domains, update HK & unlock 1333 1333 * 1334 - * Update housekeeping cpumasks and rebuild sched domains if necessary. 1335 - * This should be called at the end of cpuset or hotplug actions. 1334 + * Update housekeeping cpumasks and rebuild sched domains if necessary and 1335 + * then do a cpuset_full_unlock(). 1336 + * This should be called at the end of a cpuset operation. 1336 1337 */ 1337 - static void update_hk_sched_domains(void) 1338 + static void cpuset_update_sd_hk_unlock(void) 1339 + __releases(&cpuset_mutex) 1340 + __releases(&cpuset_top_mutex) 1338 1341 { 1342 + /* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */ 1343 + if (force_sd_rebuild) 1344 + rebuild_sched_domains_locked(); 1345 + 1339 1346 if (update_housekeeping) { 1340 - /* Updating HK cpumasks implies rebuild sched domains */ 1341 1347 update_housekeeping = false; 1342 - force_sd_rebuild = true; 1343 1348 cpumask_copy(isolated_hk_cpus, isolated_cpus); 1344 1349 1345 1350 /* ··· 1355 1350 mutex_unlock(&cpuset_mutex); 1356 1351 cpus_read_unlock(); 1357 1352 WARN_ON_ONCE(housekeeping_update(isolated_hk_cpus)); 1358 - cpus_read_lock(); 1359 - mutex_lock(&cpuset_mutex); 1353 + mutex_unlock(&cpuset_top_mutex); 1354 + } else { 1355 + cpuset_full_unlock(); 1360 1356 } 1361 - /* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */ 1362 - if (force_sd_rebuild) 1363 - rebuild_sched_domains_locked(); 1364 1357 } 1365 1358 1366 1359 /* 1367 - * Work function to invoke update_hk_sched_domains() 1360 + * Work function to invoke cpuset_update_sd_hk_unlock() 1368 1361 */ 1369 1362 static void hk_sd_workfn(struct work_struct *work) 1370 1363 { 1371 1364 cpuset_full_lock(); 1372 - update_hk_sched_domains(); 1373 - cpuset_full_unlock(); 1365 + cpuset_update_sd_hk_unlock(); 1374 1366 } 1375 1367 1376 1368 /** ··· 3232 3230 3233 3231 free_cpuset(trialcs); 3234 3232 out_unlock: 3235 - update_hk_sched_domains(); 3236 - cpuset_full_unlock(); 3233 + cpuset_update_sd_hk_unlock(); 3237 3234 if (of_cft(of)->private == FILE_MEMLIST) 3238 3235 schedule_flush_migrate_mm(); 3239 3236 return retval ?: nbytes; ··· 3339 3338 cpuset_full_lock(); 3340 3339 if (is_cpuset_online(cs)) 3341 3340 retval = update_prstate(cs, val); 3342 - update_hk_sched_domains(); 3343 - cpuset_full_unlock(); 3341 + cpuset_update_sd_hk_unlock(); 3344 3342 return retval ?: nbytes; 3345 3343 } ··· 3513 3513 /* Reset valid partition back to member */ 3514 3514 if (is_partition_valid(cs)) 3515 3515 update_prstate(cs, PRS_MEMBER); 3516 - update_hk_sched_domains(); 3517 - cpuset_full_unlock(); 3516 + cpuset_update_sd_hk_unlock(); 3518 3517 } 3519 3518 3520 3519 static void cpuset_css_free(struct cgroup_subsys_state *css) ··· 3922 3923 rcu_read_unlock(); 3923 3924 } 3924 3925 3925 - 3926 3926 /* 3927 - * Queue a work to call housekeeping_update() & rebuild_sched_domains() 3928 - * There will be a slight delay before the HK_TYPE_DOMAIN housekeeping 3929 - * cpumask can correctly reflect what is in isolated_cpus. 3927 + * rebuild_sched_domains() will always be called directly if needed 3928 + * to make sure that a newly added or removed CPU will be reflected in 3929 + * the sched domains. However, if isolated partition invalidation 3930 + * or recreation is being done (update_housekeeping set), a work item 3931 + * will be queued to call housekeeping_update() to update the 3932 + * corresponding housekeeping cpumasks after some slight delay. 3930 3933 * 3931 3934 * We rely on WORK_STRUCT_PENDING_BIT to not requeue a work item that 3932 3935 * is still pending. Before the pending bit is cleared, the work data ··· 3937 3936 * previously queued work. Since hk_sd_workfn() doesn't use the work 3938 3937 * item at all, this is not a problem. 3939 3938 */ 3940 - if (update_housekeeping || force_sd_rebuild) 3941 - queue_work(system_unbound_wq, &hk_sd_work); 3939 + if (force_sd_rebuild) 3940 + rebuild_sched_domains_cpuslocked(); 3941 + if (update_housekeeping) 3942 + queue_work(system_dfl_wq, &hk_sd_work); 3942 3943 3943 3944 free_tmpmasks(ptmp); 3944 3945 }
+2 -2
kernel/crash_dump_dm_crypt.c
··· 168 168 169 169 memcpy(dm_key->data, ukp->data, ukp->datalen); 170 170 dm_key->key_size = ukp->datalen; 171 - kexec_dprintk("Get dm crypt key (size=%u) %s: %8ph\n", dm_key->key_size, 172 - dm_key->key_desc, dm_key->data); 171 + kexec_dprintk("Get dm crypt key (size=%u) %s\n", dm_key->key_size, 172 + dm_key->key_desc); 173 173 174 174 out: 175 175 up_read(&key->sem);
+8 -11
kernel/events/core.c
··· 4813 4813 struct perf_event *sub, *event = data->event; 4814 4814 struct perf_event_context *ctx = event->ctx; 4815 4815 struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context); 4816 - struct pmu *pmu = event->pmu; 4816 + struct pmu *pmu; 4817 4817 4818 4818 /* 4819 4819 * If this is a task context, we need to check whether it is ··· 4825 4825 if (ctx->task && cpuctx->task_ctx != ctx) 4826 4826 return; 4827 4827 4828 - raw_spin_lock(&ctx->lock); 4828 + guard(raw_spinlock)(&ctx->lock); 4829 4829 ctx_time_update_event(ctx, event); 4830 4830 4831 4831 perf_event_update_time(event); ··· 4833 4833 perf_event_update_sibling_time(event); 4834 4834 4835 4835 if (event->state != PERF_EVENT_STATE_ACTIVE) 4836 - goto unlock; 4836 + return; 4837 4837 4838 4838 if (!data->group) { 4839 - pmu->read(event); 4839 + perf_pmu_read(event); 4840 4840 data->ret = 0; 4841 - goto unlock; 4841 + return; 4842 4842 } 4843 4843 4844 + pmu = event->pmu_ctx->pmu; 4844 4845 pmu->start_txn(pmu, PERF_PMU_TXN_READ); 4845 4846 4846 - pmu->read(event); 4847 - 4847 + perf_pmu_read(event); 4848 4848 for_each_sibling_event(sub, event) 4849 4849 perf_pmu_read(sub); 4850 4850 4851 4851 data->ret = pmu->commit_txn(pmu); 4852 - 4853 - unlock: 4854 - raw_spin_unlock(&ctx->lock); 4855 4852 } 4856 4853 4857 4854 static inline u64 perf_event_count(struct perf_event *event, bool self) ··· 14741 14744 get_ctx(child_ctx); 14742 14745 child_event->ctx = child_ctx; 14743 14746 14744 - pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event); 14747 + pmu_ctx = find_get_pmu_context(parent_event->pmu_ctx->pmu, child_ctx, child_event); 14745 14748 if (IS_ERR(pmu_ctx)) { 14746 14749 free_event(child_event); 14747 14750 return ERR_CAST(pmu_ctx);
+1 -2
kernel/fork.c
··· 1000 1000 #ifdef CONFIG_SCHED_MM_CID 1001 1001 tsk->mm_cid.cid = MM_CID_UNSET; 1002 1002 tsk->mm_cid.active = 0; 1003 + INIT_HLIST_NODE(&tsk->mm_cid.node); 1003 1004 #endif 1004 1005 return tsk; 1005 1006 ··· 1587 1586 1588 1587 tsk->mm = mm; 1589 1588 tsk->active_mm = mm; 1590 - sched_mm_cid_fork(tsk); 1591 1589 return 0; 1592 1590 } 1593 1591 ··· 2498 2498 exit_nsproxy_namespaces(p); 2499 2499 bad_fork_cleanup_mm: 2500 2500 if (p->mm) { 2501 - sched_mm_cid_exit(p); 2502 2501 mm_clear_owner(p->mm, p); 2503 2502 mmput(p->mm); 2504 2503 }
+6 -2
kernel/kprobes.c
··· 1144 1144 lockdep_assert_held(&kprobe_mutex); 1145 1145 1146 1146 ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0); 1147 - if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret)) 1147 + if (ret < 0) 1148 1148 return ret; 1149 1149 1150 1150 if (*cnt == 0) { 1151 1151 ret = register_ftrace_function(ops); 1152 - if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) { 1152 + if (ret < 0) { 1153 1153 /* 1154 1154 * At this point, since ops is not registered, we should be safe from 1155 1155 * registering an empty filter. ··· 1178 1178 int ret; 1179 1179 1180 1180 lockdep_assert_held(&kprobe_mutex); 1181 + if (unlikely(kprobe_ftrace_disabled)) { 1182 + /* Now ftrace is disabled forever, disarm is already done. */ 1183 + return 0; 1184 + } 1181 1185 1182 1186 if (*cnt == 1) { 1183 1187 ret = unregister_ftrace_function(ops);
+29 -52
kernel/sched/core.c
··· 4729 4729 scx_cancel_fork(p); 4730 4730 } 4731 4731 4732 + static void sched_mm_cid_fork(struct task_struct *t); 4733 + 4732 4734 void sched_post_fork(struct task_struct *p) 4733 4735 { 4736 + sched_mm_cid_fork(p); 4734 4737 uclamp_post_fork(p); 4735 4738 scx_post_fork(p); 4736 4739 } ··· 10620 10617 } 10621 10618 } 10622 10619 10623 - static bool mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm) 10620 + static void mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm) 10624 10621 { 10625 10622 /* Remote access to mm::mm_cid::pcpu requires rq_lock */ 10626 10623 guard(task_rq_lock)(t); 10627 - /* If the task is not active it is not in the users count */ 10628 - if (!t->mm_cid.active) 10629 - return false; 10630 10624 if (cid_on_task(t->mm_cid.cid)) { 10631 10625 /* If running on the CPU, put the CID in transit mode, otherwise drop it */ 10632 10626 if (task_rq(t)->curr == t) ··· 10631 10631 else 10632 10632 mm_unset_cid_on_task(t); 10633 10633 } 10634 - return true; 10635 - } 10636 - 10637 - static void mm_cid_do_fixup_tasks_to_cpus(struct mm_struct *mm) 10638 - { 10639 - struct task_struct *p, *t; 10640 - unsigned int users; 10641 - 10642 - /* 10643 - * This can obviously race with a concurrent affinity change, which 10644 - * increases the number of allowed CPUs for this mm, but that does 10645 - * not affect the mode and only changes the CID constraints. A 10646 - * possible switch back to per task mode happens either in the 10647 - * deferred handler function or in the next fork()/exit(). 10648 - * 10649 - * The caller has already transferred. The newly incoming task is 10650 - * already accounted for, but not yet visible. 10651 - */ 10652 - users = mm->mm_cid.users - 2; 10653 - if (!users) 10654 - return; 10655 - 10656 - guard(rcu)(); 10657 - for_other_threads(current, t) { 10658 - if (mm_cid_fixup_task_to_cpu(t, mm)) 10659 - users--; 10660 - } 10661 - 10662 - if (!users) 10663 - return; 10664 - 10665 - /* Happens only for VM_CLONE processes. */ 10666 - for_each_process_thread(p, t) { 10667 - if (t == current || t->mm != mm) 10668 - continue; 10669 - if (mm_cid_fixup_task_to_cpu(t, mm)) { 10670 - if (--users == 0) 10671 - return; 10672 - } 10673 - } 10674 10634 } 10675 10635 10676 10636 static void mm_cid_fixup_tasks_to_cpus(void) 10677 10637 { 10678 10638 struct mm_struct *mm = current->mm; 10639 + struct task_struct *t; 10679 10640 10680 - mm_cid_do_fixup_tasks_to_cpus(mm); 10641 + lockdep_assert_held(&mm->mm_cid.mutex); 10642 + 10643 + hlist_for_each_entry(t, &mm->mm_cid.user_list, mm_cid.node) { 10644 + /* Current has already transferred before invoking the fixup. */ 10645 + if (t != current) 10646 + mm_cid_fixup_task_to_cpu(t, mm); 10647 + } 10648 + 10681 10649 mm_cid_complete_transit(mm, MM_CID_ONCPU); 10682 10650 } 10683 10651 10684 10652 static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm) 10685 10653 { 10654 + lockdep_assert_held(&mm->mm_cid.lock); 10655 + 10686 10656 t->mm_cid.active = 1; 10657 + hlist_add_head(&t->mm_cid.node, &mm->mm_cid.user_list); 10687 10658 mm->mm_cid.users++; 10688 10659 return mm_update_max_cids(mm); 10689 10660 } 10690 10661 10691 - void sched_mm_cid_fork(struct task_struct *t) 10662 + static void sched_mm_cid_fork(struct task_struct *t) 10692 10663 { 10693 10664 struct mm_struct *mm = t->mm; 10694 10665 bool percpu; 10695 10666 10696 - WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET); 10667 + if (!mm) 10668 + return; 10669 + 10670 + WARN_ON_ONCE(t->mm_cid.cid != MM_CID_UNSET); 10698 10672 guard(mutex)(&mm->mm_cid.mutex); 10699 10673 scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) { ··· 10706 10732 10707 10733 static bool sched_mm_cid_remove_user(struct task_struct *t) 10708 10734 { 10735 + lockdep_assert_held(&t->mm->mm_cid.lock); 10736 + 10709 10737 t->mm_cid.active = 0; 10710 - scoped_guard(preempt) { 10711 - /* Clear the transition bit */ 10712 - t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid); 10713 - mm_unset_cid_on_task(t); 10714 - } 10738 + /* Clear the transition bit */ 10739 + t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid); 10740 + mm_unset_cid_on_task(t); 10741 + hlist_del_init(&t->mm_cid.node); 10715 10742 t->mm->mm_cid.users--; 10716 10743 return mm_update_max_cids(t->mm); 10717 10744 } ··· 10855 10880 mutex_init(&mm->mm_cid.mutex); 10856 10881 mm->mm_cid.irq_work = IRQ_WORK_INIT_HARD(mm_cid_irq_work); 10857 10882 INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn); 10883 + INIT_HLIST_HEAD(&mm->mm_cid.user_list); 10858 10884 cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask); 10859 10885 bitmap_zero(mm_cidmask(mm), num_possible_cpus()); 10860 10886 } 10861 10887 #else /* CONFIG_SCHED_MM_CID */ 10862 10888 static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { } 10889 + static inline void sched_mm_cid_fork(struct task_struct *t) { } 10863 10890 #endif /* !CONFIG_SCHED_MM_CID */ 10864 10891 10865 10892 static DEFINE_PER_CPU(struct sched_change_ctx, sched_change_ctx);
+11 -11
kernel/sched/ext.c
··· 1103 1103 } 1104 1104 1105 1105 /* seq records the order tasks are queued, used by BPF DSQ iterator */ 1106 - dsq->seq++; 1106 + WRITE_ONCE(dsq->seq, dsq->seq + 1); 1107 1107 p->scx.dsq_seq = dsq->seq; 1108 1108 1109 1109 dsq_mod_nr(dsq, 1); ··· 1470 1470 p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT; 1471 1471 } 1472 1472 1473 - static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags) 1473 + static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int core_enq_flags) 1474 1474 { 1475 1475 struct scx_sched *sch = scx_root; 1476 1476 int sticky_cpu = p->scx.sticky_cpu; 1477 + u64 enq_flags = core_enq_flags | rq->scx.extra_enq_flags; 1477 1478 1478 1479 if (enq_flags & ENQUEUE_WAKEUP) 1479 1480 rq->scx.flags |= SCX_RQ_IN_WAKEUP; 1480 - 1481 - enq_flags |= rq->scx.extra_enq_flags; 1482 1481 1483 1482 if (sticky_cpu >= 0) 1484 1483 p->scx.sticky_cpu = -1; ··· 3907 3908 * consider offloading iff the total queued duration is over the 3908 3909 * threshold. 3909 3910 */ 3910 - min_delta_us = scx_bypass_lb_intv_us / SCX_BYPASS_LB_MIN_DELTA_DIV; 3911 - if (delta < DIV_ROUND_UP(min_delta_us, scx_slice_bypass_us)) 3911 + min_delta_us = READ_ONCE(scx_bypass_lb_intv_us) / SCX_BYPASS_LB_MIN_DELTA_DIV; 3912 + if (delta < DIV_ROUND_UP(min_delta_us, READ_ONCE(scx_slice_bypass_us))) 3912 3913 return 0; 3913 3914 3914 3915 raw_spin_rq_lock_irq(rq); ··· 4136 4137 WARN_ON_ONCE(scx_bypass_depth <= 0); 4137 4138 if (scx_bypass_depth != 1) 4138 4139 goto unlock; 4139 - WRITE_ONCE(scx_slice_dfl, scx_slice_bypass_us * NSEC_PER_USEC); 4140 + WRITE_ONCE(scx_slice_dfl, READ_ONCE(scx_slice_bypass_us) * NSEC_PER_USEC); 4140 4141 bypass_timestamp = ktime_get_ns(); 4141 4142 if (sch) 4142 4143 scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1); ··· 5258 5259 if (!READ_ONCE(helper)) { 5259 5260 mutex_lock(&helper_mutex); 5260 5261 if (!helper) { 5261 - helper = kthread_run_worker(0, "scx_enable_helper"); 5262 - if (IS_ERR_OR_NULL(helper)) { 5263 - helper = NULL; 5262 + struct kthread_worker *w = 5263 + kthread_run_worker(0, "scx_enable_helper"); 5264 + if (IS_ERR_OR_NULL(w)) { 5264 5265 mutex_unlock(&helper_mutex); 5265 5266 return -ENOMEM; 5266 5267 } 5267 - sched_set_fifo(helper->task); 5268 + sched_set_fifo(w->task); 5269 + WRITE_ONCE(helper, w); 5268 5270 } 5269 5271 mutex_unlock(&helper_mutex); 5270 5272 }
+98 -16
kernel/sched/ext_internal.h
··· 1035 1035 }; 1036 1036 1037 1037 /* 1038 - * sched_ext_entity->ops_state 1038 + * Task Ownership State Machine (sched_ext_entity->ops_state) 1039 1039 * 1040 - * Used to track the task ownership between the SCX core and the BPF scheduler. 1041 - * State transitions look as follows: 1040 + * The sched_ext core uses this state machine to track task ownership 1041 + * between the SCX core and the BPF scheduler. This allows the BPF 1042 + * scheduler to dispatch tasks without strict ordering requirements, while 1043 + * the SCX core safely rejects invalid dispatches. 1042 1044 * 1043 - * NONE -> QUEUEING -> QUEUED -> DISPATCHING 1044 - * ^ | | 1045 - * | v v 1046 - * \-------------------------------/ 1045 + * State Transitions 1047 1046 * 1048 - * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call 1049 - * sites for explanations on the conditions being waited upon and why they are 1050 - * safe. Transitions out of them into NONE or QUEUED must store_release and the 1051 - * waiters should load_acquire. 1047 + * .------------> NONE (owned by SCX core) 1048 + * | | ^ 1049 + * | enqueue | | direct dispatch 1050 + * | v | 1051 + * | QUEUEING -------' 1052 + * | | 1053 + * | enqueue | 1054 + * | completes | 1055 + * | v 1056 + * | QUEUED (owned by BPF scheduler) 1057 + * | | 1058 + * | dispatch | 1059 + * | | 1060 + * | v 1061 + * | DISPATCHING 1062 + * | | 1063 + * | dispatch | 1064 + * | completes | 1065 + * `---------------' 1052 1066 * 1053 - * Tracking scx_ops_state enables sched_ext core to reliably determine whether 1054 - * any given task can be dispatched by the BPF scheduler at all times and thus 1055 - * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler 1056 - * to try to dispatch any task anytime regardless of its state as the SCX core 1057 - * can safely reject invalid dispatches. 1067 + * State Descriptions 1068 + * 1069 + * - %SCX_OPSS_NONE: 1070 + * Task is owned by the SCX core. It's either on a run queue, running, 1071 + * or being manipulated by the core scheduler. The BPF scheduler has no 1072 + * claim on this task. 1073 + * 1074 + * - %SCX_OPSS_QUEUEING: 1075 + * Transitional state while transferring a task from the SCX core to 1076 + * the BPF scheduler. The task's rq lock is held during this state. 1077 + * Since QUEUEING is both entered and exited under the rq lock, dequeue 1078 + * can never observe this state (it would be a BUG). When finishing a 1079 + * dispatch, if the task is still in %SCX_OPSS_QUEUEING the completion 1080 + * path busy-waits for it to leave this state (via wait_ops_state()) 1081 + * before retrying. 1082 + * 1083 + * - %SCX_OPSS_QUEUED: 1084 + * Task is owned by the BPF scheduler. It's on a DSQ (dispatch queue) 1085 + * and the BPF scheduler is responsible for dispatching it. A QSEQ 1086 + * (queue sequence number) is embedded in this state to detect 1087 + * dispatch/dequeue races: if a task is dequeued and re-enqueued, the 1088 + * QSEQ changes and any in-flight dispatch operations targeting the old 1089 + * QSEQ are safely ignored. 1090 + * 1091 + * - %SCX_OPSS_DISPATCHING: 1092 + * Transitional state while transferring a task from the BPF scheduler 1093 + * back to the SCX core. This state indicates the BPF scheduler has 1094 + * selected the task for execution. When dequeue needs to take the task 1095 + * off a DSQ and it is still in %SCX_OPSS_DISPATCHING, the dequeue path 1096 + * busy-waits for it to leave this state (via wait_ops_state()) before 1097 + * proceeding. Exits to %SCX_OPSS_NONE when dispatch completes. 1098 + * 1099 + * Memory Ordering 1100 + * 1101 + * Transitions out of %SCX_OPSS_QUEUEING and %SCX_OPSS_DISPATCHING into 1102 + * %SCX_OPSS_NONE or %SCX_OPSS_QUEUED must use atomic_long_set_release() 1103 + * and waiters must use atomic_long_read_acquire(). This ensures proper 1104 + * synchronization between concurrent operations. 1105 + * 1106 + * Cross-CPU Task Migration 1107 + * 1108 + * When moving a task in the %SCX_OPSS_DISPATCHING state, we can't simply 1109 + * grab the target CPU's rq lock because a concurrent dequeue might be 1110 + * waiting on %SCX_OPSS_DISPATCHING while holding the source rq lock 1111 + * (deadlock). 1112 + * 1113 + * The sched_ext core uses a "lock dancing" protocol coordinated by 1114 + * p->scx.holding_cpu. When moving a task to a different rq: 1115 + * 1116 + * 1. Verify task can be moved (CPU affinity, migration_disabled, etc.) 1117 + * 2. Set p->scx.holding_cpu to the current CPU 1118 + * 3. Set task state to %SCX_OPSS_NONE; dequeue waits while DISPATCHING 1119 + * is set, so clearing DISPATCHING first prevents the circular wait 1120 + * (safe to lock the rq we need) 1121 + * 4. Unlock the current CPU's rq 1122 + * 5. Lock src_rq (where the task currently lives) 1123 + * 6. Verify p->scx.holding_cpu == current CPU; if not, dequeue won the 1124 + * race (dequeue clears holding_cpu to -1 when it takes the task), in 1125 + * this case migration is aborted 1126 + * 7. If src_rq == dst_rq: clear holding_cpu and enqueue directly 1127 + * into dst_rq's local DSQ (no lock swap needed) 1128 + * 8. Otherwise: call move_remote_task_to_local_dsq(), which releases 1129 + * src_rq, locks dst_rq, and performs the deactivate/activate 1130 + * migration cycle (dst_rq is held on return) 1131 + * 9. Unlock dst_rq and re-lock the current CPU's rq to restore 1132 + * the lock state expected by the caller 1133 + * 1134 + * If any verification fails, abort the migration. 1135 + * 1136 + * This state tracking allows the BPF scheduler to try to dispatch any task 1137 + * at any time regardless of its state. The SCX core can safely 1138 + * reject/ignore invalid dispatches, simplifying the BPF scheduler 1139 + * implementation. 1058 1140 */ 1059 1141 enum scx_ops_state { 1060 1142 SCX_OPSS_NONE, /* owned by the SCX core */
+30 -9
kernel/sched/idle.c
··· 161 161 return cpuidle_enter(drv, dev, next_state); 162 162 } 163 163 164 + static void idle_call_stop_or_retain_tick(bool stop_tick) 165 + { 166 + if (stop_tick || tick_nohz_tick_stopped()) 167 + tick_nohz_idle_stop_tick(); 168 + else 169 + tick_nohz_idle_retain_tick(); 170 + } 171 + 164 172 /** 165 173 * cpuidle_idle_call - the main idle function 166 174 * ··· 178 170 * set, and it returns with polling set. If it ever stops polling, it 179 171 * must clear the polling bit. 180 172 */ 181 - static void cpuidle_idle_call(void) 173 + static void cpuidle_idle_call(bool stop_tick) 182 174 { 183 175 struct cpuidle_device *dev = cpuidle_get_device(); 184 176 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); ··· 194 186 } 195 187 196 188 if (cpuidle_not_available(drv, dev)) { 197 - tick_nohz_idle_stop_tick(); 189 + idle_call_stop_or_retain_tick(stop_tick); 198 190 199 191 default_idle_call(); 200 192 goto exit_idle; ··· 229 221 230 222 next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns); 231 223 call_cpuidle(drv, dev, next_state); 232 - } else { 233 - bool stop_tick = true; 224 + } else if (drv->state_count > 1) { 225 + /* 226 + * stop_tick is expected to be true by default by cpuidle 227 + * governors, which allows them to select idle states with 228 + * target residency above the tick period length. 229 + */ 230 + stop_tick = true; 234 231 235 232 /* 236 233 * Ask the cpuidle framework to choose a convenient idle state. 237 234 */ 238 235 next_state = cpuidle_select(drv, dev, &stop_tick); 239 236 240 - if (stop_tick || tick_nohz_tick_stopped()) 241 - tick_nohz_idle_stop_tick(); 242 - else 243 - tick_nohz_idle_retain_tick(); 237 + idle_call_stop_or_retain_tick(stop_tick); 244 238 245 239 entered_state = call_cpuidle(drv, dev, next_state); 246 240 /* 247 241 * Give the governor an opportunity to reflect on the outcome 248 242 */ 249 243 cpuidle_reflect(dev, entered_state); 244 + } else { 245 + idle_call_stop_or_retain_tick(stop_tick); 246 + 247 + /* 248 + * If there is only a single idle state (or none), there is 249 + * nothing meaningful for the governor to choose. Skip the 250 + * governor and always use state 0. 251 + */ 252 + call_cpuidle(drv, dev, 0); 250 253 } 251 254 252 255 exit_idle: ··· 278 259 static void do_idle(void) 279 260 { 280 261 int cpu = smp_processor_id(); 262 + bool got_tick = false; 281 263 282 264 /* 283 265 * Check if we need to update blocked load ··· 349 329 tick_nohz_idle_restart_tick(); 350 330 cpu_idle_poll(); 351 331 } else { 352 - cpuidle_idle_call(); 332 + cpuidle_idle_call(got_tick); 353 333 } 334 + got_tick = tick_nohz_idle_got_tick(); 354 335 arch_cpu_idle_exit(); 355 336 } 356 337
+1 -1
kernel/time/time.c
··· 697 697 * 698 698 * Return: jiffies_64 value converted to 64-bit "clock_t" (CLOCKS_PER_SEC) 699 699 */ 700 - u64 jiffies_64_to_clock_t(u64 x) 700 + notrace u64 jiffies_64_to_clock_t(u64 x) 701 701 { 702 702 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 703 703 # if HZ < USER_HZ
+2 -2
kernel/trace/ftrace.c
··· 6606 6606 if (!orig_hash) 6607 6607 goto unlock; 6608 6608 6609 - /* Enable the tmp_ops to have the same functions as the direct ops */ 6609 + /* Enable the tmp_ops to have the same functions as the hash object. */ 6610 6610 ftrace_ops_init(&tmp_ops); 6611 - tmp_ops.func_hash = ops->func_hash; 6611 + tmp_ops.func_hash->filter_hash = hash; 6612 6612 6613 6613 err = register_ftrace_function_nolock(&tmp_ops); 6614 6614 if (err)
+1 -1
kernel/trace/ring_buffer.c
··· 2053 2053 2054 2054 entries += ret; 2055 2055 entry_bytes += local_read(&head_page->page->commit); 2056 - local_set(&cpu_buffer->head_page->entries, ret); 2056 + local_set(&head_page->entries, ret); 2057 2057 2058 2058 if (head_page == cpu_buffer->commit_page) 2059 2059 break;
+27 -9
kernel/trace/trace.c
··· 555 555 lockdep_assert_held(&event_mutex); 556 556 557 557 if (enabled) { 558 - if (!list_empty(&tr->marker_list)) 558 + if (tr->trace_flags & TRACE_ITER(COPY_MARKER)) 559 559 return false; 560 560 561 561 list_add_rcu(&tr->marker_list, &marker_copies); ··· 563 563 return true; 564 564 } 565 565 566 - if (list_empty(&tr->marker_list)) 566 + if (!(tr->trace_flags & TRACE_ITER(COPY_MARKER))) 567 567 return false; 568 568 569 - list_del_init(&tr->marker_list); 569 + list_del_rcu(&tr->marker_list); 570 570 tr->trace_flags &= ~TRACE_ITER(COPY_MARKER); 571 571 return true; 572 572 } ··· 6784 6784 6785 6785 do { 6786 6786 /* 6787 + * It is possible that something is trying to migrate this 6788 + * task. What happens then is, when preemption is enabled, 6789 + * the migration thread will preempt this task, try to 6790 + * migrate it, fail, then let it run again. That will 6791 + * cause this to loop again and never succeed. 6792 + * On failures, enable and disable preemption with 6793 + * migration enabled, to allow the migration thread to 6794 + * migrate this task. 6795 + */ 6796 + if (trys) { 6797 + preempt_enable_notrace(); 6798 + preempt_disable_notrace(); 6799 + cpu = smp_processor_id(); 6800 + buffer = per_cpu_ptr(tinfo->tbuf, cpu)->buf; 6801 + } 6802 + 6803 + /* 6787 6804 * If for some reason, copy_from_user() always causes a context 6788 6805 * switch, this would then cause an infinite loop. 6789 6806 * If this task is preempted by another user space task, it ··· 9761 9744 9762 9745 list_del(&tr->list); 9763 9746 9747 + if (printk_trace == tr) 9748 + update_printk_trace(&global_trace); 9749 + 9750 + /* Must be done before disabling all the flags */ 9751 + if (update_marker_trace(tr, 0)) 9752 + synchronize_rcu(); 9753 + 9764 9754 /* Disable all the flags that were enabled coming in */ 9765 9755 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { 9766 9756 if ((1ULL << i) & ZEROED_TRACE_FLAGS) 9767 9757 set_tracer_flag(tr, 1ULL << i, 0); 9768 9758 } 9769 - 9770 - if (printk_trace == tr) 9771 - update_printk_trace(&global_trace); 9772 - 9773 - if (update_marker_trace(tr, 0)) 9774 - synchronize_rcu(); 9775 9759 9776 9760 tracing_set_nop(tr); 9777 9761 clear_ftrace_function_probes(tr);
+28 -27
kernel/workqueue.c
··· 190 190 int id; /* I: pool ID */ 191 191 unsigned int flags; /* L: flags */ 192 192 193 - unsigned long watchdog_ts; /* L: watchdog timestamp */ 193 + unsigned long last_progress_ts; /* L: last forward progress timestamp */ 194 194 bool cpu_stall; /* WD: stalled cpu bound pool */ 195 195 196 196 /* ··· 1697 1697 WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE)); 1698 1698 trace_workqueue_activate_work(work); 1699 1699 if (list_empty(&pwq->pool->worklist)) 1700 - pwq->pool->watchdog_ts = jiffies; 1700 + pwq->pool->last_progress_ts = jiffies; 1701 1701 move_linked_works(work, &pwq->pool->worklist, NULL); 1702 1702 __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb); 1703 1703 } ··· 2348 2348 */ 2349 2349 if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) { 2350 2350 if (list_empty(&pool->worklist)) 2351 - pool->watchdog_ts = jiffies; 2351 + pool->last_progress_ts = jiffies; 2352 2352 2353 2353 trace_workqueue_activate_work(work); 2354 2354 insert_work(pwq, work, &pool->worklist, work_flags); ··· 3204 3204 worker->current_pwq = pwq; 3205 3205 if (worker->task) 3206 3206 worker->current_at = worker->task->se.sum_exec_runtime; 3207 + worker->current_start = jiffies; 3207 3208 work_data = *work_data_bits(work); 3208 3209 worker->current_color = get_work_color(work_data); 3209 3210 ··· 3353 3352 while ((work = list_first_entry_or_null(&worker->scheduled, 3354 3353 struct work_struct, entry))) { 3355 3354 if (first) { 3356 - worker->pool->watchdog_ts = jiffies; 3355 + worker->pool->last_progress_ts = jiffies; 3357 3356 first = false; 3358 3357 } 3359 3358 process_one_work(worker, work); ··· 4851 4850 pool->cpu = -1; 4852 4851 pool->node = NUMA_NO_NODE; 4853 4852 pool->flags |= POOL_DISASSOCIATED; 4854 - pool->watchdog_ts = jiffies; 4853 + pool->last_progress_ts = jiffies; 4855 4854 INIT_LIST_HEAD(&pool->worklist); 4856 4855 INIT_LIST_HEAD(&pool->idle_list); 4857 4856 hash_init(pool->busy_hash); ··· 6275 6274 { 6276 6275 struct worker_pool *pool = worker->pool; 6277 6276 6278 - if (pool->flags & WQ_BH) 6277 + if (pool->flags & POOL_BH) 6279 6278 pr_cont("bh%s", 6280 6279 pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : ""); 6281 6280 else ··· 6360 6359 pr_cont(" %s", comma ? "," : ""); 6361 6360 pr_cont_worker_id(worker); 6362 6361 pr_cont(":%ps", worker->current_func); 6362 + pr_cont(" for %us", 6363 + jiffies_to_msecs(jiffies - worker->current_start) / 1000); 6363 6364 list_for_each_entry(work, &worker->scheduled, entry) 6364 6365 pr_cont_work(false, work, &pcws); 6365 6366 pr_cont_work_flush(comma, (work_func_t)-1L, &pcws); ··· 6465 6462 6466 6463 /* How long the first pending work is waiting for a worker. */ 6467 6464 if (!list_empty(&pool->worklist)) 6468 - hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000; 6465 + hung = jiffies_to_msecs(jiffies - pool->last_progress_ts) / 1000; 6469 6466 6470 6467 /* 6471 6468 * Defer printing to avoid deadlocks in console drivers that ··· 7583 7580 7584 7581 /* 7585 7582 * Show workers that might prevent the processing of pending work items. 7586 - * The only candidates are CPU-bound workers in the running state. 7587 - * Pending work items should be handled by another idle worker 7588 - * in all other situations. 7583 + * A busy worker that is not running on the CPU (e.g. sleeping in 7584 + * wait_event_idle() with PF_WQ_WORKER cleared) can stall the pool just as 7585 + * effectively as a CPU-bound one, so dump every in-flight worker. 7589 7586 */ 7590 - static void show_cpu_pool_hog(struct worker_pool *pool) 7587 + static void show_cpu_pool_busy_workers(struct worker_pool *pool) 7591 7588 { 7592 7589 struct worker *worker; 7593 7590 unsigned long irq_flags; ··· 7596 7593 raw_spin_lock_irqsave(&pool->lock, irq_flags); 7597 7594 7598 7595 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 7599 7596 - if (task_is_running(worker->task)) { 7600 - /* 7601 - * Defer printing to avoid deadlocks in console 7602 - * drivers that queue work while holding locks 7603 - * also taken in their write paths. 7604 - */ 7605 - printk_deferred_enter(); 7596 + /* 7597 + * Defer printing to avoid deadlocks in console 7598 + * drivers that queue work while holding locks 7599 + * also taken in their write paths. 7600 + */ 7601 + printk_deferred_enter(); 7606 7602 7607 - pr_info("pool %d:\n", pool->id); 7608 - sched_show_task(worker->task); 7603 + pr_info("pool %d:\n", pool->id); 7604 + sched_show_task(worker->task); 7609 7605 7610 - printk_deferred_exit(); 7611 - } 7606 + printk_deferred_exit(); 7612 7607 } 7613 7608 7614 7609 raw_spin_unlock_irqrestore(&pool->lock, irq_flags); 7615 7610 } 7616 7611 7617 - static void show_cpu_pools_hogs(void) 7612 + static void show_cpu_pools_busy_workers(void) 7618 7613 { 7619 7614 struct worker_pool *pool; 7620 7615 int pi; 7621 7616 7622 - pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n"); 7617 + pr_info("Showing backtraces of busy workers in stalled worker pools:\n"); 7623 7618 7624 7619 rcu_read_lock(); 7625 7620 7626 7621 for_each_pool(pool, pi) { 7627 7622 if (pool->cpu_stall) 7628 - show_cpu_pool_hog(pool); 7623 + show_cpu_pool_busy_workers(pool); 7629 7624 7630 7625 } 7631 7626 ··· 7692 7691 touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu)); 7693 7692 else 7694 7693 touched = READ_ONCE(wq_watchdog_touched); 7695 - pool_ts = READ_ONCE(pool->watchdog_ts); 7694 + pool_ts = READ_ONCE(pool->last_progress_ts); 7696 7695 7697 7696 if (time_after(pool_ts, touched)) 7698 7697 ts = pool_ts; ··· 7720 7719 show_all_workqueues(); 7721 7720 7722 7721 if (cpu_pool_stall) 7723 - show_cpu_pools_hogs(); 7722 + show_cpu_pools_busy_workers(); 7724 7723 7725 7724 if (lockup_detected) 7726 7725 panic_on_wq_watchdog(max_stall_time);
+1
kernel/workqueue_internal.h
··· 32 32 work_func_t current_func; /* K: function */ 33 33 struct pool_workqueue *current_pwq; /* K: pwq */ 34 34 u64 current_at; /* K: runtime at start or last wakeup */ 35 + unsigned long current_start; /* K: start time of current work item */ 35 36 unsigned int current_color; /* K: color */ 36 37 37 38 int sleeping; /* S: is worker sleeping? */
+5 -4
lib/bootconfig.c
··· 316 316 depth ? "." : ""); 317 317 if (ret < 0) 318 318 return ret; 319 - if (ret > size) { 319 + if (ret >= size) { 320 320 size = 0; 321 321 } else { 322 322 size -= ret; ··· 532 532 static int __init __xbc_open_brace(char *p) 533 533 { 534 534 /* Push the last key as open brace */ 535 - open_brace[brace_index++] = xbc_node_index(last_parent); 536 535 if (brace_index >= XBC_DEPTH_MAX) 537 536 return xbc_parse_error("Exceed max depth of braces", p); 537 + open_brace[brace_index++] = xbc_node_index(last_parent); 538 538 539 539 return 0; 540 540 } ··· 723 723 if (op == ':') { 724 724 unsigned short nidx = child->next; 725 725 726 - xbc_init_node(child, v, XBC_VALUE); 726 + if (xbc_init_node(child, v, XBC_VALUE) < 0) 727 + return xbc_parse_error("Failed to override value", v); 727 728 child->next = nidx; /* keep subkeys */ 728 729 goto array; 729 730 } ··· 803 802 804 803 /* Brace closing */ 805 804 if (brace_index) { 806 - n = &xbc_nodes[open_brace[brace_index]]; 805 + n = &xbc_nodes[open_brace[brace_index - 1]]; 807 806 return xbc_parse_error("Brace is not closed", 808 807 xbc_node_get_data(n)); 809 808 }
+3
lib/crypto/Makefile
··· 55 55 libaes-$(CONFIG_X86) += x86/aes-aesni.o 56 56 endif # CONFIG_CRYPTO_LIB_AES_ARCH 57 57 58 + # clean-files must be defined unconditionally 59 + clean-files += powerpc/aesp8-ppc.S 60 + 58 61 ################################################################################ 59 62 60 63 obj-$(CONFIG_CRYPTO_LIB_AESCFB) += libaescfb.o
+4 -1
mm/cma.c
··· 1013 1013 unsigned long count) 1014 1014 { 1015 1015 struct cma_memrange *cmr; 1016 + unsigned long ret = 0; 1016 1017 unsigned long i, pfn; 1017 1018 1018 1019 cmr = find_cma_memrange(cma, pages, count); ··· 1022 1021 1023 1022 pfn = page_to_pfn(pages); 1024 1023 for (i = 0; i < count; i++, pfn++) 1025 - VM_WARN_ON(!put_page_testzero(pfn_to_page(pfn))); 1024 + ret += !put_page_testzero(pfn_to_page(pfn)); 1025 + 1026 + WARN(ret, "%lu pages are still in use!\n", ret); 1026 1027 1027 1028 __cma_release_frozen(cma, cmr, pages, count); 1028 1029
+6 -1
mm/damon/core.c
··· 1562 1562 } 1563 1563 ctx->walk_control = control; 1564 1564 mutex_unlock(&ctx->walk_control_lock); 1565 - if (!damon_is_running(ctx)) 1565 + if (!damon_is_running(ctx)) { 1566 + mutex_lock(&ctx->walk_control_lock); 1567 + if (ctx->walk_control == control) 1568 + ctx->walk_control = NULL; 1569 + mutex_unlock(&ctx->walk_control_lock); 1566 1570 return -EINVAL; 1571 + } 1567 1572 wait_for_completion(&control->completion); 1568 1573 if (control->canceled) 1569 1574 return -ECANCELED;
+11 -5
mm/huge_memory.c
··· 2797 2797 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma); 2798 2798 } else { 2799 2799 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd); 2800 - _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot); 2800 + _dst_pmd = move_soft_dirty_pmd(src_pmdval); 2801 + _dst_pmd = clear_uffd_wp_pmd(_dst_pmd); 2801 2802 } 2802 2803 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd); 2803 2804 ··· 3632 3631 const bool is_anon = folio_test_anon(folio); 3633 3632 int old_order = folio_order(folio); 3634 3633 int start_order = split_type == SPLIT_TYPE_UNIFORM ? new_order : old_order - 1; 3634 + struct folio *old_folio = folio; 3635 3635 int split_order; 3636 3636 3637 3637 /* ··· 3653 3651 * uniform split has xas_split_alloc() called before 3654 3652 * irq is disabled to allocate enough memory, whereas 3655 3653 * non-uniform split can handle ENOMEM. 3654 + * Use the to-be-split folio, so that a parallel 3655 + * folio_try_get() waits on it until xarray is updated 3656 + * with after-split folios and the original one is 3657 + * unfrozen. 3656 3658 */ 3657 - if (split_type == SPLIT_TYPE_UNIFORM) 3658 - xas_split(xas, folio, old_order); 3659 - else { 3659 + if (split_type == SPLIT_TYPE_UNIFORM) { 3660 + xas_split(xas, old_folio, old_order); 3661 + } else { 3660 3662 xas_set_order(xas, folio->index, split_order); 3661 - xas_try_split(xas, folio, old_order); 3663 + xas_try_split(xas, old_folio, old_order); 3662 3664 if (xas_error(xas)) 3663 3665 return xas_error(xas); 3664 3666 }
+2 -2
mm/hugetlb.c
··· 3101 3101 * extract the actual node first. 3102 3102 */ 3103 3103 if (m) 3104 - listnode = early_pfn_to_nid(PHYS_PFN(virt_to_phys(m))); 3104 + listnode = early_pfn_to_nid(PHYS_PFN(__pa(m))); 3105 3105 } 3106 3106 3107 3107 if (m) { ··· 3160 3160 * The head struct page is used to get folio information by the HugeTLB 3161 3161 * subsystem like zone id and node id. 3162 3162 */ 3163 - memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE), 3163 + memblock_reserved_mark_noinit(__pa((void *)m + PAGE_SIZE), 3164 3164 huge_page_size(h) - PAGE_SIZE); 3165 3165 3166 3166 return 1;
+1 -1
mm/memcontrol.c
··· 3086 3086 3087 3087 if (!local_trylock(&obj_stock.lock)) { 3088 3088 if (pgdat) 3089 - mod_objcg_mlstate(objcg, pgdat, idx, nr_bytes); 3089 + mod_objcg_mlstate(objcg, pgdat, idx, nr_acct); 3090 3090 nr_pages = nr_bytes >> PAGE_SHIFT; 3091 3091 nr_bytes = nr_bytes & (PAGE_SIZE - 1); 3092 3092 atomic_add(nr_bytes, &objcg->nr_charged_bytes);
+43 -6
mm/memfd_luo.c
··· 146 146 for (i = 0; i < nr_folios; i++) { 147 147 struct memfd_luo_folio_ser *pfolio = &folios_ser[i]; 148 148 struct folio *folio = folios[i]; 149 - unsigned int flags = 0; 150 149 151 150 err = kho_preserve_folio(folio); 152 151 if (err) 153 152 goto err_unpreserve; 154 153 155 - if (folio_test_dirty(folio)) 156 - flags |= MEMFD_LUO_FOLIO_DIRTY; 157 - if (folio_test_uptodate(folio)) 158 - flags |= MEMFD_LUO_FOLIO_UPTODATE; 154 + folio_lock(folio); 155 + 156 + /* 157 + * A dirty folio is one which has been written to. A clean folio 158 + * is its opposite. Since a clean folio does not carry user 159 + * data, it can be freed by page reclaim under memory pressure. 160 + * 161 + * Saving the dirty flag at prepare() time doesn't work since it 162 + * can change later. Saving it at freeze() also won't work 163 + * because the dirty bit is normally synced at unmap and there 164 + * might still be a mapping of the file at freeze(). 165 + * 166 + * To see why this is a problem, say a folio is clean at 167 + * preserve, but gets dirtied later. The pfolio flags will mark 168 + * it as clean. After retrieve, the next kernel might try to 169 + * reclaim this folio under memory pressure, losing user data. 170 + * 171 + * Unconditionally mark it dirty to avoid this problem. This 172 + * comes at the cost of making clean folios un-reclaimable after 173 + * live update. 174 + */ 175 + folio_mark_dirty(folio); 176 + 177 + /* 178 + * If the folio is not uptodate, it was fallocated but never 179 + * used. Saving this flag at prepare() doesn't work since it 180 + * might change later when someone uses the folio. 181 + * 182 + * Since we have taken the performance penalty of allocating, 183 + * zeroing, and pinning all the folios in the holes, take a bit 184 + * more and zero all non-uptodate folios too. 185 + * 186 + * NOTE: For someone looking to improve preserve performance, 187 + * this is a good place to look. 188 + */ 189 + if (!folio_test_uptodate(folio)) { 190 + folio_zero_range(folio, 0, folio_size(folio)); 191 + flush_dcache_folio(folio); 192 + folio_mark_uptodate(folio); 193 + } 194 + 195 + folio_unlock(folio); 159 196 160 197 pfolio->pfn = folio_pfn(folio); 161 - pfolio->flags = flags; 198 + pfolio->flags = MEMFD_LUO_FOLIO_DIRTY | MEMFD_LUO_FOLIO_UPTODATE; 162 199 pfolio->index = folio->index; 163 200 } 164 201
+17 -4
mm/rmap.c
··· 1955 1955 if (userfaultfd_wp(vma)) 1956 1956 return 1; 1957 1957 1958 - return folio_pte_batch(folio, pvmw->pte, pte, max_nr); 1958 + /* 1959 + * If unmap fails, we need to restore the ptes. To avoid accidentally 1960 + * upgrading write permissions for ptes that were not originally 1961 + * writable, and to avoid losing the soft-dirty bit, use the 1962 + * appropriate FPB flags. 1963 + */ 1964 + return folio_pte_batch_flags(folio, vma, pvmw->pte, &pte, max_nr, 1965 + FPB_RESPECT_WRITE | FPB_RESPECT_SOFT_DIRTY); 1959 1966 } 1960 1967 1961 1968 /* ··· 2450 2443 __maybe_unused pmd_t pmdval; 2451 2444 2452 2445 if (flags & TTU_SPLIT_HUGE_PMD) { 2446 + /* 2447 + * split_huge_pmd_locked() might leave the 2448 + * folio mapped through PTEs. Retry the walk 2449 + * so we can detect this scenario and properly 2450 + * abort the walk. 2451 + */ 2453 2452 split_huge_pmd_locked(vma, pvmw.address, 2454 2453 pvmw.pmd, true); 2455 - ret = false; 2456 - page_vma_mapped_walk_done(&pvmw); 2457 - break; 2454 + flags &= ~TTU_SPLIT_HUGE_PMD; 2455 + page_vma_mapped_walk_restart(&pvmw); 2456 + continue; 2458 2457 } 2459 2458 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION 2460 2459 pmdval = pmdp_get(pvmw.pmd);
+4 -7
mm/slub.c
··· 2119 2119 size_t sz = sizeof(struct slabobj_ext) * slab->objects; 2120 2120 struct kmem_cache *obj_exts_cache; 2121 2121 2122 - /* 2123 - * slabobj_ext array for KMALLOC_CGROUP allocations 2124 - * are served from KMALLOC_NORMAL caches. 2125 - */ 2126 - if (!mem_alloc_profiling_enabled()) 2127 - return sz; 2128 - 2129 2122 if (sz > KMALLOC_MAX_CACHE_SIZE) 2130 2123 return sz; 2131 2124 ··· 2790 2797 if (s->flags & SLAB_KMALLOC) 2791 2798 mark_obj_codetag_empty(sheaf); 2792 2799 2800 + VM_WARN_ON_ONCE(sheaf->size > 0); 2793 2801 kfree(sheaf); 2794 2802 2795 2803 stat(s, SHEAF_FREE); ··· 2822 2828 return 0; 2823 2829 } 2824 2830 2831 + static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf); 2825 2832 2826 2833 static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp) 2827 2834 { ··· 2832 2837 return NULL; 2833 2838 2834 2839 if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) { 2840 + sheaf_flush_unused(s, sheaf); 2835 2841 free_empty_sheaf(s, sheaf); 2836 2842 return NULL; 2837 2843 } ··· 4619 4623 * we must be very low on memory so don't bother 4620 4624 * with the barn 4621 4625 */ 4626 + sheaf_flush_unused(s, empty); 4622 4627 free_empty_sheaf(s, empty); 4623 4628 } 4624 4629 } else {
+47 -25
net/atm/lec.c
··· 154 154 /* 0x01 is topology change */ 155 155 156 156 priv = netdev_priv(dev); 157 - atm_force_charge(priv->lecd, skb2->truesize); 158 - sk = sk_atm(priv->lecd); 159 - skb_queue_tail(&sk->sk_receive_queue, skb2); 160 - sk->sk_data_ready(sk); 157 + struct atm_vcc *vcc; 158 + 159 + rcu_read_lock(); 160 + vcc = rcu_dereference(priv->lecd); 161 + if (vcc) { 162 + atm_force_charge(vcc, skb2->truesize); 163 + sk = sk_atm(vcc); 164 + skb_queue_tail(&sk->sk_receive_queue, skb2); 165 + sk->sk_data_ready(sk); 166 + } else { 167 + dev_kfree_skb(skb2); 168 + } 169 + rcu_read_unlock(); 161 170 } 162 171 } 163 172 #endif /* IS_ENABLED(CONFIG_BRIDGE) */ ··· 225 216 int is_rdesc; 226 217 227 218 pr_debug("called\n"); 228 - if (!priv->lecd) { 219 + if (!rcu_access_pointer(priv->lecd)) { 229 220 pr_info("%s:No lecd attached\n", dev->name); 230 221 dev->stats.tx_errors++; 231 222 netif_stop_queue(dev); ··· 458 449 break; 459 450 skb2->len = sizeof(struct atmlec_msg); 460 451 skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg)); 461 - atm_force_charge(priv->lecd, skb2->truesize); 462 - sk = sk_atm(priv->lecd); 463 - skb_queue_tail(&sk->sk_receive_queue, skb2); 464 - sk->sk_data_ready(sk); 452 + struct atm_vcc *vcc; 453 + 454 + rcu_read_lock(); 455 + vcc = rcu_dereference(priv->lecd); 456 + if (vcc) { 457 + atm_force_charge(vcc, skb2->truesize); 458 + sk = sk_atm(vcc); 459 + skb_queue_tail(&sk->sk_receive_queue, skb2); 460 + sk->sk_data_ready(sk); 461 + } else { 462 + dev_kfree_skb(skb2); 463 + } 464 + rcu_read_unlock(); 465 465 } 466 466 } 467 467 #endif /* IS_ENABLED(CONFIG_BRIDGE) */ ··· 486 468 487 469 static void lec_atm_close(struct atm_vcc *vcc) 488 470 { 489 - struct sk_buff *skb; 490 471 struct net_device *dev = (struct net_device *)vcc->proto_data; 491 472 struct lec_priv *priv = netdev_priv(dev); 492 473 493 - priv->lecd = NULL; 474 + rcu_assign_pointer(priv->lecd, NULL); 475 + synchronize_rcu(); 494 476 /* Do something needful? */ 495 477 496 478 netif_stop_queue(dev); 497 479 lec_arp_destroy(priv); 498 - 499 - if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) 500 - pr_info("%s closing with messages pending\n", dev->name); 501 - while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) { 502 - atm_return(vcc, skb->truesize); 503 - dev_kfree_skb(skb); 504 - } 505 480 506 481 pr_info("%s: Shut down!\n", dev->name); 507 482 module_put(THIS_MODULE); ··· 521 510 const unsigned char *mac_addr, const unsigned char *atm_addr, 522 511 struct sk_buff *data) 523 512 { 513 + struct atm_vcc *vcc; 524 514 struct sock *sk; 525 515 struct sk_buff *skb; 526 516 struct atmlec_msg *mesg; 527 517 528 - if (!priv || !priv->lecd) 518 + if (!priv || !rcu_access_pointer(priv->lecd)) 529 519 return -1; 520 + 530 521 skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC); 531 522 if (!skb) 532 523 return -1; ··· 545 532 if (atm_addr) 546 533 memcpy(&mesg->content.normal.atm_addr, atm_addr, ATM_ESA_LEN); 547 534 548 - atm_force_charge(priv->lecd, skb->truesize); 549 - sk = sk_atm(priv->lecd); 535 + rcu_read_lock(); 536 + vcc = rcu_dereference(priv->lecd); 537 + if (!vcc) { 538 + rcu_read_unlock(); 539 + kfree_skb(skb); 540 + return -1; 541 + } 542 + 543 + atm_force_charge(vcc, skb->truesize); 544 + sk = sk_atm(vcc); 550 545 skb_queue_tail(&sk->sk_receive_queue, skb); 551 546 sk->sk_data_ready(sk); 552 547 553 548 if (data != NULL) { 554 549 pr_debug("about to send %d bytes of data\n", data->len); 555 - atm_force_charge(priv->lecd, data->truesize); 550 + atm_force_charge(vcc, data->truesize); 556 551 skb_queue_tail(&sk->sk_receive_queue, data); 557 552 sk->sk_data_ready(sk); 558 553 } 559 554 555 + rcu_read_unlock(); 560 556 return 0; 561 557 } ··· 640 618 641 619 atm_return(vcc, skb->truesize); 642 620 if (*(__be16 *) skb->data == htons(priv->lecid) || 643 - !priv->lecd || !(dev->flags & IFF_UP)) { 621 + !rcu_access_pointer(priv->lecd) || !(dev->flags & IFF_UP)) { 644 622 /* 645 623 * Probably looping back, or if lecd is missing, 646 624 * lecd has gone down ··· 775 753 priv = netdev_priv(dev_lec[i]); 776 754 } else { 777 755 priv = netdev_priv(dev_lec[i]); 778 - if (priv->lecd) 756 + if (rcu_access_pointer(priv->lecd)) 779 757 return -EADDRINUSE; 780 758 } 781 759 lec_arp_init(priv); 782 760 priv->itfnum = i; /* LANE2 addition */ 783 - priv->lecd = vcc; 761 + rcu_assign_pointer(priv->lecd, vcc); 784 762 vcc->dev = &lecatm_dev; 785 763 vcc_insert_socket(sk_atm(vcc)); 786 764
+1 -1
net/atm/lec.h
··· 91 91 */ 92 92 spinlock_t lec_arp_lock; 93 93 struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ 94 - struct atm_vcc *lecd; 94 + struct atm_vcc __rcu *lecd; 95 95 struct delayed_work lec_arp_work; /* C10 */ 96 96 unsigned int maximum_unknown_frame_count; 97 97 /*
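Together, the lec.c and lec.h hunks convert priv->lecd into an __rcu pointer: readers dereference it under rcu_read_lock() and tolerate NULL, while the close path publishes NULL and waits out readers before tearing the rest down. A minimal sketch of that publish/retire pattern using the real RCU primitives (struct foo and the demo_* names are illustrative):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo { int val; };

	static struct foo __rcu *global_foo;

	static int demo_read(void)
	{
		struct foo *f;
		int val = -1;

		rcu_read_lock();
		f = rcu_dereference(global_foo);	/* may be NULL after retire */
		if (f)
			val = f->val;
		rcu_read_unlock();
		return val;
	}

	static void demo_retire(void)
	{
		/* "1" stands in for a check that the caller serializes teardown */
		struct foo *f = rcu_dereference_protected(global_foo, 1);

		rcu_assign_pointer(global_foo, NULL);
		synchronize_rcu();	/* readers that saw the old pointer are done */
		kfree(f);		/* now safe to free / tear down */
	}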
+3
net/batman-adv/bat_iv_ogm.c
··· 473 473 if (aggregated_bytes > max_bytes) 474 474 return false; 475 475 476 + if (skb_tailroom(forw_packet->skb) < packet_len) 477 + return false; 478 + 476 479 if (packet_num >= BATADV_MAX_AGGREGATION_PACKETS) 477 480 return false; 478 481
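The batman-adv check refuses to aggregate an OGM when the skb lacks tailroom for the next packet, since skb_put() BUGs on overflow rather than failing gracefully. A short sketch of the same guard, with a hypothetical append_packet() helper:

	#include <linux/skbuff.h>

	/* Only append when the buffer really has room for len more bytes. */
	static bool append_packet(struct sk_buff *skb, const void *pkt,
				  unsigned int len)
	{
		if (skb_tailroom(skb) < len)
			return false;
		skb_put_data(skb, pkt, len);
		return true;
	}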
+2 -2
net/bluetooth/hci_conn.c
··· 1944 1944 return false; 1945 1945 1946 1946 done: 1947 + conn->iso_qos = *qos; 1948 + 1947 1949 if (hci_cmd_sync_queue(hdev, set_cig_params_sync, 1948 1950 UINT_PTR(qos->ucast.cig), NULL) < 0) 1949 1951 return false; ··· 2015 2013 } 2016 2014 2017 2015 hci_conn_hold(cis); 2018 - 2019 - cis->iso_qos = *qos; 2020 2016 cis->state = BT_BOUND; 2021 2017 2022 2018 return cis;
+1 -1
net/bluetooth/hci_sync.c
··· 6627 6627 * state. 6628 6628 */ 6629 6629 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { 6630 - hci_scan_disable_sync(hdev); 6631 6630 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); 6631 + hci_scan_disable_sync(hdev); 6632 6632 } 6633 6633 6634 6634 /* Update random address, but set require_privacy to false so
+14 -2
net/bluetooth/hidp/core.c
··· 986 986 skb_queue_purge(&session->intr_transmit); 987 987 fput(session->intr_sock->file); 988 988 fput(session->ctrl_sock->file); 989 - l2cap_conn_put(session->conn); 989 + if (session->conn) 990 + l2cap_conn_put(session->conn); 990 991 kfree(session); 991 992 } 992 993 ··· 1165 1164 1166 1165 down_write(&hidp_session_sem); 1167 1166 1167 + /* Drop L2CAP reference immediately to indicate that 1168 + * l2cap_unregister_user() shall not be called as it is already 1169 + * considered removed. 1170 + */ 1171 + if (session->conn) { 1172 + l2cap_conn_put(session->conn); 1173 + session->conn = NULL; 1174 + } 1175 + 1168 1176 hidp_session_terminate(session); 1169 1177 1170 1178 cancel_work_sync(&session->dev_init); ··· 1311 1301 * Instead, this call has the same semantics as if user-space tried to 1312 1302 * delete the session. 1313 1303 */ 1314 - l2cap_unregister_user(session->conn, &session->user); 1304 + if (session->conn) 1305 + l2cap_unregister_user(session->conn, &session->user); 1306 + 1315 1307 hidp_session_put(session); 1316 1308 1317 1309 module_put_and_kthread_exit(0);
+31 -20
net/bluetooth/l2cap_core.c
··· 1678 1678 1679 1679 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user) 1680 1680 { 1681 - struct hci_dev *hdev = conn->hcon->hdev; 1682 1681 int ret; 1683 1682 1684 1683 /* We need to check whether l2cap_conn is registered. If it is not, we 1685 - * must not register the l2cap_user. l2cap_conn_del() is unregisters 1686 - * l2cap_conn objects, but doesn't provide its own locking. Instead, it 1687 - * relies on the parent hci_conn object to be locked. This itself relies 1688 - * on the hci_dev object to be locked. So we must lock the hci device 1689 - * here, too. */ 1684 + * must not register the l2cap_user. l2cap_conn_del() unregisters 1685 + * l2cap_conn objects under conn->lock, and we use the same lock here 1686 + * to protect access to conn->users and conn->hchan. 1687 + */ 1690 1688 1691 - hci_dev_lock(hdev); 1689 + mutex_lock(&conn->lock); 1692 1690 1693 1691 if (!list_empty(&user->list)) { 1694 1692 ret = -EINVAL; ··· 1707 1709 ret = 0; 1708 1710 1709 1711 out_unlock: 1710 - hci_dev_unlock(hdev); 1712 + mutex_unlock(&conn->lock); 1711 1713 return ret; 1712 1714 } 1713 1715 EXPORT_SYMBOL(l2cap_register_user); 1714 1716 1715 1717 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user) 1716 1718 { 1717 - struct hci_dev *hdev = conn->hcon->hdev; 1718 - 1719 - hci_dev_lock(hdev); 1719 + mutex_lock(&conn->lock); 1720 1720 1721 1721 if (list_empty(&user->list)) 1722 1722 goto out_unlock; ··· 1723 1727 user->remove(conn, user); 1724 1728 1725 1729 out_unlock: 1726 - hci_dev_unlock(hdev); 1730 + mutex_unlock(&conn->lock); 1727 1731 } 1728 1732 EXPORT_SYMBOL(l2cap_unregister_user); 1729 1733 ··· 4612 4616 4613 4617 switch (type) { 4614 4618 case L2CAP_IT_FEAT_MASK: 4615 - conn->feat_mask = get_unaligned_le32(rsp->data); 4619 + if (cmd_len >= sizeof(*rsp) + sizeof(u32)) 4620 + conn->feat_mask = get_unaligned_le32(rsp->data); 4616 4621 4617 4622 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { 4618 4623 struct l2cap_info_req req; ··· 4632 4635 break; 4633 4636 4634 4637 case L2CAP_IT_FIXED_CHAN: 4635 - conn->remote_fixed_chan = rsp->data[0]; 4638 + if (cmd_len >= sizeof(*rsp) + sizeof(rsp->data[0])) 4639 + conn->remote_fixed_chan = rsp->data[0]; 4636 4640 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; 4637 4641 conn->info_ident = 0; 4638 4642 ··· 5057 5059 u16 mtu, mps; 5058 5060 __le16 psm; 5059 5061 u8 result, rsp_len = 0; 5060 - int i, num_scid; 5062 + int i, num_scid = 0; 5061 5063 bool defer = false; 5062 5064 5063 5065 if (!enable_ecred) ··· 5066 5068 memset(pdu, 0, sizeof(*pdu)); 5067 5069 5068 5070 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) { 5071 + result = L2CAP_CR_LE_INVALID_PARAMS; 5072 + goto response; 5073 + } 5074 + 5075 + /* Check if there are no pending channels with the same ident */ 5076 + __l2cap_chan_list_id(conn, cmd->ident, l2cap_ecred_list_defer, 5077 + &num_scid); 5078 + if (num_scid) { 5069 5079 result = L2CAP_CR_LE_INVALID_PARAMS; 5070 5080 goto response; 5071 5081 } ··· 5430 5424 u8 *data) 5431 5425 { 5432 5426 struct l2cap_chan *chan, *tmp; 5433 - struct l2cap_ecred_conn_rsp *rsp = (void *) data; 5427 + struct l2cap_ecred_reconf_rsp *rsp = (void *)data; 5434 5428 u16 result; 5435 5429 5436 5430 if (cmd_len < sizeof(*rsp)) ··· 5438 5432 5439 5433 result = __le16_to_cpu(rsp->result); 5440 5434 5441 - BT_DBG("result 0x%4.4x", rsp->result); 5435 + BT_DBG("result 0x%4.4x", result); 5442 5436 5443 5437 if (!result) 5444 5438 return 0; ··· 6668 6662 return -ENOBUFS; 6669 6663 } 6670 6664 6671 - if
(chan->imtu < skb->len) { 6672 - BT_ERR("Too big LE L2CAP PDU"); 6665 + if (skb->len > chan->imtu) { 6666 + BT_ERR("Too big LE L2CAP PDU: len %u > %u", skb->len, 6667 + chan->imtu); 6668 + l2cap_send_disconn_req(chan, ECONNRESET); 6673 6669 return -ENOBUFS; 6674 6670 } 6675 6671 ··· 6697 6689 sdu_len, skb->len, chan->imtu); 6698 6690 6699 6691 if (sdu_len > chan->imtu) { 6700 - BT_ERR("Too big LE L2CAP SDU length received"); 6692 + BT_ERR("Too big LE L2CAP SDU length: len %u > %u", 6693 + skb->len, sdu_len); 6694 + l2cap_send_disconn_req(chan, ECONNRESET); 6701 6695 err = -EMSGSIZE; 6702 6696 goto failed; 6703 6697 } ··· 6735 6725 6736 6726 if (chan->sdu->len + skb->len > chan->sdu_len) { 6737 6727 BT_ERR("Too much LE L2CAP data received"); 6728 + l2cap_send_disconn_req(chan, ECONNRESET); 6738 6729 err = -EINVAL; 6739 6730 goto failed; 6740 6731 }
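Several of the l2cap_core.c fixes gate field reads on cmd_len, so a short info response can no longer pull uninitialized or out-of-bounds bytes into connection state. A hedged sketch of the idiom (struct demo_rsp and demo_parse() are hypothetical):

	#include <linux/types.h>
	#include <linux/unaligned.h>

	struct demo_rsp {
		__le16 type;
		__le16 result;
		__u8   data[];
	} __packed;

	/* Read the optional 32-bit payload only when the received length
	 * proves the bytes are actually present on the wire.
	 */
	static u32 demo_parse(const struct demo_rsp *rsp, u16 cmd_len, u32 old)
	{
		if (cmd_len >= sizeof(*rsp) + sizeof(u32))
			return get_unaligned_le32(rsp->data);
		return old;	/* keep the previous value on a short response */
	}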
+2 -5
net/bluetooth/mgmt.c
··· 2195 2195 sk = cmd->sk; 2196 2196 2197 2197 if (status) { 2198 - mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 2199 - status); 2200 - mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true, 2201 - cmd_status_rsp, &status); 2198 + mgmt_cmd_status(cmd->sk, hdev->id, cmd->opcode, status); 2202 2199 goto done; 2203 2200 } 2204 2201 ··· 5374 5377 5375 5378 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, 5376 5379 mgmt_status(status), &rp, sizeof(rp)); 5377 - mgmt_pending_remove(cmd); 5380 + mgmt_pending_free(cmd); 5378 5381 5379 5382 hci_dev_unlock(hdev); 5380 5383 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
+1 -1
net/bluetooth/smp.c
··· 2743 2743 if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) && 2744 2744 !crypto_memneq(key, smp->local_pk, 64)) { 2745 2745 bt_dev_err(hdev, "Remote and local public keys are identical"); 2746 - return SMP_UNSPECIFIED; 2746 + return SMP_DHKEY_CHECK_FAILED; 2747 2747 } 2748 2748 2749 2749 memcpy(smp->remote_pk, key, 64);
+2 -2
net/bridge/br_cfm.c
··· 576 576 577 577 /* Empty and free peer MEP list */ 578 578 hlist_for_each_entry_safe(peer_mep, n_store, &mep->peer_mep_list, head) { 579 - cancel_delayed_work_sync(&peer_mep->ccm_rx_dwork); 579 + disable_delayed_work_sync(&peer_mep->ccm_rx_dwork); 580 580 hlist_del_rcu(&peer_mep->head); 581 581 kfree_rcu(peer_mep, rcu); 582 582 } ··· 732 732 return -ENOENT; 733 733 } 734 734 735 - cc_peer_disable(peer_mep); 735 + disable_delayed_work_sync(&peer_mep->ccm_rx_dwork); 736 736 737 737 hlist_del_rcu(&peer_mep->head); 738 738 kfree_rcu(peer_mep, rcu);
+4 -2
net/ceph/auth.c
··· 205 205 s32 result; 206 206 u64 global_id; 207 207 void *payload, *payload_end; 208 - int payload_len; 208 + u32 payload_len; 209 209 char *result_msg; 210 - int result_msg_len; 210 + u32 result_msg_len; 211 211 int ret = -EINVAL; 212 212 213 213 mutex_lock(&ac->mutex); ··· 217 217 result = ceph_decode_32(&p); 218 218 global_id = ceph_decode_64(&p); 219 219 payload_len = ceph_decode_32(&p); 220 + ceph_decode_need(&p, end, payload_len, bad); 220 221 payload = p; 221 222 p += payload_len; 222 223 ceph_decode_need(&p, end, sizeof(u32), bad); 223 224 result_msg_len = ceph_decode_32(&p); 225 + ceph_decode_need(&p, end, result_msg_len, bad); 224 226 result_msg = p; 225 227 p += result_msg_len; 226 228 if (p != end)
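Both ceph hunks insert ceph_decode_need() before consuming a length-prefixed blob, so a bogus on-wire length can no longer walk the cursor past the end of the buffer. A minimal sketch of the pattern with the real helpers from <linux/ceph/decode.h> (demo_decode_blob() itself is hypothetical):

	#include <linux/ceph/decode.h>
	#include <linux/errno.h>

	/* Decode "u32 len, then len bytes"; ceph_decode_need() jumps to the
	 * 'bad' label when fewer than the requested bytes remain before end.
	 */
	static int demo_decode_blob(void **p, void *end, void **blob, u32 *blob_len)
	{
		ceph_decode_need(p, end, sizeof(u32), bad);
		*blob_len = ceph_decode_32(p);
		ceph_decode_need(p, end, *blob_len, bad);
		*blob = *p;
		*p += *blob_len;
		return 0;

	bad:
		return -EINVAL;
	}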
+21 -10
net/ceph/messenger_v2.c
··· 392 392 int head_len; 393 393 int rem_len; 394 394 395 - BUG_ON(ctrl_len < 0 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN); 395 + BUG_ON(ctrl_len < 1 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN); 396 396 397 397 if (secure) { 398 398 head_len = CEPH_PREAMBLE_SECURE_LEN; ··· 401 401 head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN; 402 402 } 403 403 } else { 404 - head_len = CEPH_PREAMBLE_PLAIN_LEN; 405 - if (ctrl_len) 406 - head_len += ctrl_len + CEPH_CRC_LEN; 404 + head_len = CEPH_PREAMBLE_PLAIN_LEN + ctrl_len + CEPH_CRC_LEN; 407 405 } 408 406 return head_len; 409 407 } ··· 526 528 desc->fd_aligns[i] = ceph_decode_16(&p); 527 529 } 528 530 529 - if (desc->fd_lens[0] < 0 || 531 + /* 532 + * This would fire for FRAME_TAG_WAIT (it has one empty 533 + * segment), but we should never get it as client. 534 + */ 535 + if (desc->fd_lens[0] < 1 || 530 536 desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) { 531 537 pr_err("bad control segment length %d\n", desc->fd_lens[0]); 532 538 return -EINVAL; 533 539 } 540 + 534 541 if (desc->fd_lens[1] < 0 || 535 542 desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) { 536 543 pr_err("bad front segment length %d\n", desc->fd_lens[1]); ··· 552 549 return -EINVAL; 553 550 } 554 551 555 - /* 556 - * This would fire for FRAME_TAG_WAIT (it has one empty 557 - * segment), but we should never get it as client. 558 - */ 559 552 if (!desc->fd_lens[desc->fd_seg_cnt - 1]) { 560 553 pr_err("last segment empty, segment count %d\n", 561 554 desc->fd_seg_cnt); ··· 2832 2833 void *p, void *end) 2833 2834 { 2834 2835 struct ceph_frame_desc *desc = &con->v2.in_desc; 2835 - struct ceph_msg_header2 *hdr2 = p; 2836 + struct ceph_msg_header2 *hdr2; 2836 2837 struct ceph_msg_header hdr; 2837 2838 int skip; 2838 2839 int ret; 2839 2840 u64 seq; 2841 + 2842 + ceph_decode_need(&p, end, sizeof(*hdr2), bad); 2843 + hdr2 = p; 2840 2844 2841 2845 /* verify seq# */ 2842 2846 seq = le64_to_cpu(hdr2->seq); ··· 2871 2869 WARN_ON(!con->in_msg); 2872 2870 WARN_ON(con->in_msg->con != con); 2873 2871 return 1; 2872 + 2873 + bad: 2874 + pr_err("failed to decode message header\n"); 2875 + return -EINVAL; 2874 2876 } 2875 2877 2876 2878 static int process_message(struct ceph_connection *con) ··· 2903 2897 2904 2898 if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE) 2905 2899 return process_control(con, p, end); 2900 + 2901 + if (con->state != CEPH_CON_S_OPEN) { 2902 + con->error_msg = "protocol error, unexpected message"; 2903 + return -EINVAL; 2904 + } 2906 2905 2907 2906 ret = process_message_header(con, p, end); 2908 2907 if (ret < 0)
+3 -3
net/ceph/mon_client.c
··· 72 72 struct ceph_monmap *monmap = NULL; 73 73 struct ceph_fsid fsid; 74 74 u32 struct_len; 75 - int blob_len; 76 - int num_mon; 75 + u32 blob_len; 76 + u32 num_mon; 77 77 u8 struct_v; 78 78 u32 epoch; 79 79 int ret; ··· 112 112 } 113 113 ceph_decode_32_safe(p, end, num_mon, e_inval); 114 114 115 - dout("%s fsid %pU epoch %u num_mon %d\n", __func__, &fsid, epoch, 115 + dout("%s fsid %pU epoch %u num_mon %u\n", __func__, &fsid, epoch, 116 116 num_mon); 117 117 if (num_mon > CEPH_MAX_MON) 118 118 goto e_inval;
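mon_client.c switches the decoded lengths from int to u32: decoded into a signed type, a crafted length with the high bit set goes negative and slips past a "too big" comparison before being used for pointer arithmetic. A tiny stand-alone illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t wire = 0xffffff00u;	/* attacker-controlled length */
		int32_t  slen = (int32_t)wire;	/* decoded into a signed int */
		uint32_t ulen = wire;		/* decoded into a u32 */

		/* Signed: -256 > 4096 is false, so the bad length is accepted. */
		printf("signed check rejects: %d\n", slen > 4096);
		/* Unsigned: 4294967040 > 4096 is true, the blob is rejected. */
		printf("unsigned check rejects: %d\n", ulen > 4096u);
		return 0;
	}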
-35
net/core/dev.h
··· 366 366 367 367 void kick_defer_list_purge(unsigned int cpu); 368 368 369 - #define XMIT_RECURSION_LIMIT 8 370 - 371 - #ifndef CONFIG_PREEMPT_RT 372 - static inline bool dev_xmit_recursion(void) 373 - { 374 - return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > 375 - XMIT_RECURSION_LIMIT); 376 - } 377 - 378 - static inline void dev_xmit_recursion_inc(void) 379 - { 380 - __this_cpu_inc(softnet_data.xmit.recursion); 381 - } 382 - 383 - static inline void dev_xmit_recursion_dec(void) 384 - { 385 - __this_cpu_dec(softnet_data.xmit.recursion); 386 - } 387 - #else 388 - static inline bool dev_xmit_recursion(void) 389 - { 390 - return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT); 391 - } 392 - 393 - static inline void dev_xmit_recursion_inc(void) 394 - { 395 - current->net_xmit.recursion++; 396 - } 397 - 398 - static inline void dev_xmit_recursion_dec(void) 399 - { 400 - current->net_xmit.recursion--; 401 - } 402 - #endif 403 - 404 369 int dev_set_hwtstamp_phylib(struct net_device *dev, 405 370 struct kernel_hwtstamp_config *cfg, 406 371 struct netlink_ext_ack *extack);
+7
net/core/filter.c
··· 2228 2228 return -ENOMEM; 2229 2229 } 2230 2230 2231 + if (unlikely(!ipv6_mod_enabled())) 2232 + goto out_drop; 2233 + 2231 2234 rcu_read_lock(); 2232 2235 if (!nh) { 2233 2236 dst = skb_dst(skb); ··· 2338 2335 2339 2336 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); 2340 2337 } else if (nh->nh_family == AF_INET6) { 2338 + if (unlikely(!ipv6_mod_enabled())) { 2339 + rcu_read_unlock(); 2340 + goto out_drop; 2341 + } 2341 2342 neigh = ip_neigh_gw6(dev, &nh->ipv6_nh); 2342 2343 is_v6gw = true; 2343 2344 } else if (nh->nh_family == AF_INET) {
+2 -1
net/core/neighbour.c
··· 820 820 update: 821 821 WRITE_ONCE(n->flags, flags); 822 822 n->permanent = permanent; 823 - WRITE_ONCE(n->protocol, protocol); 823 + if (protocol) 824 + WRITE_ONCE(n->protocol, protocol); 824 825 out: 825 826 mutex_unlock(&tbl->phash_lock); 826 827 return err;
+2 -2
net/core/page_pool_user.c
··· 245 245 goto err_cancel; 246 246 if (pool->user.detach_time && 247 247 nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME, 248 - pool->user.detach_time)) 248 + ktime_divns(pool->user.detach_time, NSEC_PER_SEC))) 249 249 goto err_cancel; 250 250 251 251 if (pool->mp_ops && pool->mp_ops->nl_fill(pool->mp_priv, rsp, NULL)) ··· 337 337 void page_pool_detached(struct page_pool *pool) 338 338 { 339 339 mutex_lock(&page_pools_lock); 340 - pool->user.detach_time = ktime_get_boottime_seconds(); 340 + pool->user.detach_time = ktime_get_boottime(); 341 341 netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF); 342 342 mutex_unlock(&page_pools_lock); 343 343 }
+3 -6
net/ethernet/eth.c
··· 193 193 } 194 194 EXPORT_SYMBOL(eth_type_trans); 195 195 196 - /** 197 - * eth_header_parse - extract hardware address from packet 198 - * @skb: packet to extract header from 199 - * @haddr: destination buffer 200 - */ 201 - int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr) 196 + int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev, 197 + unsigned char *haddr) 202 198 { 203 199 const struct ethhdr *eth = eth_hdr(skb); 200 + 204 201 memcpy(haddr, eth->h_source, ETH_ALEN); 205 202 return ETH_ALEN; 206 203 }
+6
net/ipv4/af_inet.c
··· 124 124 125 125 #include <trace/events/sock.h> 126 126 127 + /* Keep the definition of IPv6 disable here for now, to avoid annoying linker 128 + * issues in case IPv6=m 129 + */ 130 + int disable_ipv6_mod; 131 + EXPORT_SYMBOL(disable_ipv6_mod); 132 + 127 133 /* The inetsw table contains everything that inet_create needs to 128 134 * build a new socket. 129 135 */
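With disable_ipv6_mod now defined in the always-built-in IPv4 core, ipv6_mod_enabled() can presumably become a plain header inline that IPv4-side callers (such as the filter.c hunks above) can use even when IPv6 is built as a module. A hedged sketch of what that inline would look like; the header change itself is not part of this diff, so this is an assumption:

	/* Assumed shared-header form, e.g. in include/net/ipv6.h: */
	#include <linux/types.h>

	extern int disable_ipv6_mod;	/* defined in net/ipv4/af_inet.c above */

	static inline bool ipv6_mod_enabled(void)
	{
		return disable_ipv6_mod == 0;
	}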
+3 -1
net/ipv4/icmp.c
··· 1079 1079 1080 1080 static bool icmp_tag_validation(int proto) 1081 1081 { 1082 + const struct net_protocol *ipprot; 1082 1083 bool ok; 1083 1084 1084 1085 rcu_read_lock(); 1085 - ok = rcu_dereference(inet_protos[proto])->icmp_strict_tag_validation; 1086 + ipprot = rcu_dereference(inet_protos[proto]); 1087 + ok = ipprot ? ipprot->icmp_strict_tag_validation : false; 1086 1088 rcu_read_unlock(); 1087 1089 return ok; 1088 1090 }
+2 -1
net/ipv4/ip_gre.c
··· 919 919 return -(t->hlen + sizeof(*iph)); 920 920 } 921 921 922 - static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) 922 + static int ipgre_header_parse(const struct sk_buff *skb, const struct net_device *dev, 923 + unsigned char *haddr) 923 924 { 924 925 const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb); 925 926 memcpy(haddr, &iph->saddr, 4);
+15
net/ipv4/ip_tunnel_core.c
··· 58 58 struct iphdr *iph; 59 59 int err; 60 60 61 + if (unlikely(dev_recursion_level() > IP_TUNNEL_RECURSION_LIMIT)) { 62 + if (dev) { 63 + net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n", 64 + dev->name); 65 + DEV_STATS_INC(dev, tx_errors); 66 + } 67 + ip_rt_put(rt); 68 + kfree_skb(skb); 69 + return; 70 + } 71 + 72 + dev_xmit_recursion_inc(); 73 + 61 74 skb_scrub_packet(skb, xnet); 62 75 63 76 skb_clear_hash_if_not_l4(skb); ··· 101 88 pkt_len = 0; 102 89 iptunnel_xmit_stats(dev, pkt_len); 103 90 } 91 + 92 + dev_xmit_recursion_dec(); 104 93 } 105 94 EXPORT_SYMBOL_GPL(iptunnel_xmit); 106 95
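iptunnel_xmit() now brackets the transmit path with a recursion counter, cutting off tunnel-over-tunnel loops before they overflow the stack (the helpers dropped from net/core/dev.h above have evidently moved to a shared header). A condensed sketch of the guard shape, assuming the dev_xmit_recursion_*() helpers are reachable via <linux/netdevice.h>; demo_xmit() and its limit are illustrative:

	#include <linux/netdevice.h>
	#include <linux/errno.h>

	#define DEMO_RECURSION_LIMIT	4

	/* Refuse to transmit once the xmit depth exceeds the limit: each
	 * nested tunnel layer increments the counter around its own
	 * transmit, so a routing loop terminates after a few hops.
	 */
	static int demo_xmit(struct sk_buff *skb)
	{
		int ret;

		if (unlikely(dev_recursion_level() > DEMO_RECURSION_LIMIT)) {
			kfree_skb(skb);
			return -ELOOP;
		}

		dev_xmit_recursion_inc();
		ret = dev_queue_xmit(skb);
		dev_xmit_recursion_dec();
		return ret;
	}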
+11 -3
net/ipv4/nexthop.c
··· 2002 2002 } 2003 2003 2004 2004 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge, 2005 - struct nl_info *nlinfo) 2005 + struct nl_info *nlinfo, 2006 + struct list_head *deferred_free) 2006 2007 { 2007 2008 struct nh_grp_entry *nhges, *new_nhges; 2008 2009 struct nexthop *nhp = nhge->nh_parent; ··· 2063 2062 rcu_assign_pointer(nhp->nh_grp, newg); 2064 2063 2065 2064 list_del(&nhge->nh_list); 2066 - free_percpu(nhge->stats); 2067 2065 nexthop_put(nhge->nh); 2066 + list_add(&nhge->nh_list, deferred_free); 2068 2067 2069 2068 /* Removal of a NH from a resilient group is notified through 2070 2069 * bucket notifications. ··· 2084 2083 struct nl_info *nlinfo) 2085 2084 { 2086 2085 struct nh_grp_entry *nhge, *tmp; 2086 + LIST_HEAD(deferred_free); 2087 2087 2088 2088 /* If there is nothing to do, let's avoid the costly call to 2089 2089 * synchronize_net() ··· 2093 2091 return; 2094 2092 2095 2093 list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) 2096 - remove_nh_grp_entry(net, nhge, nlinfo); 2094 + remove_nh_grp_entry(net, nhge, nlinfo, &deferred_free); 2097 2095 2098 2096 /* make sure all see the newly published array before releasing rtnl */ 2099 2097 synchronize_net(); 2098 + 2099 + /* Now safe to free percpu stats — all RCU readers have finished */ 2100 + list_for_each_entry_safe(nhge, tmp, &deferred_free, nh_list) { 2101 + list_del(&nhge->nh_list); 2102 + free_percpu(nhge->stats); 2103 + } 2100 2104 } 2101 2105 2102 2106 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
-8
net/ipv6/af_inet6.c
··· 86 86 .autoconf = 1, 87 87 }; 88 88 89 - static int disable_ipv6_mod; 90 - 91 89 module_param_named(disable, disable_ipv6_mod, int, 0444); 92 90 MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional"); 93 91 ··· 94 96 95 97 module_param_named(autoconf, ipv6_defaults.autoconf, int, 0444); 96 98 MODULE_PARM_DESC(autoconf, "Enable IPv6 address autoconfiguration on all interfaces"); 97 - 98 - bool ipv6_mod_enabled(void) 99 - { 100 - return disable_ipv6_mod == 0; 101 - } 102 - EXPORT_SYMBOL_GPL(ipv6_mod_enabled); 103 99 104 100 static struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) 105 101 {
+4
net/ipv6/exthdrs.c
··· 379 379 hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); 380 380 381 381 idev = __in6_dev_get(skb->dev); 382 + if (!idev) { 383 + kfree_skb(skb); 384 + return -1; 385 + } 382 386 383 387 accept_seg6 = min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled), 384 388 READ_ONCE(idev->cnf.seg6_enabled));
+2
net/ipv6/seg6_hmac.c
··· 184 184 int require_hmac; 185 185 186 186 idev = __in6_dev_get(skb->dev); 187 + if (!idev) 188 + return false; 187 189 188 190 srh = (struct ipv6_sr_hdr *)skb_transport_header(skb); 189 191
+6 -6
net/mac80211/cfg.c
··· 1904 1904 1905 1905 __sta_info_flush(sdata, true, link_id, NULL); 1906 1906 1907 - ieee80211_remove_link_keys(link, &keys); 1908 - if (!list_empty(&keys)) { 1909 - synchronize_net(); 1910 - ieee80211_free_key_list(local, &keys); 1911 - } 1912 - 1913 1907 ieee80211_stop_mbssid(sdata); 1914 1908 RCU_INIT_POINTER(link_conf->tx_bss_conf, NULL); 1915 1909 ··· 1914 1920 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 1915 1921 ieee80211_link_info_change_notify(sdata, link, 1916 1922 BSS_CHANGED_BEACON_ENABLED); 1923 + 1924 + ieee80211_remove_link_keys(link, &keys); 1925 + if (!list_empty(&keys)) { 1926 + synchronize_net(); 1927 + ieee80211_free_key_list(local, &keys); 1928 + } 1917 1929 1918 1930 if (sdata->wdev.links[link_id].cac_started) { 1919 1931 chandef = link_conf->chanreq.oper;
+4 -2
net/mac80211/chan.c
··· 561 561 rcu_read_lock(); 562 562 list_for_each_entry_rcu(sta, &local->sta_list, 563 563 list) { 564 - struct ieee80211_sub_if_data *sdata = sta->sdata; 564 + struct ieee80211_sub_if_data *sdata; 565 565 enum ieee80211_sta_rx_bandwidth new_sta_bw; 566 566 unsigned int link_id; 567 567 568 568 if (!ieee80211_sdata_running(sta->sdata)) 569 569 continue; 570 570 571 - for (link_id = 0; link_id < ARRAY_SIZE(sta->sdata->link); link_id++) { 571 + sdata = get_bss_sdata(sta->sdata); 572 + 573 + for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { 572 574 struct ieee80211_link_data *link = 573 575 rcu_dereference(sdata->link[link_id]); 574 576 struct ieee80211_bss_conf *link_conf;
+5 -9
net/mac80211/debugfs.c
··· 320 320 static ssize_t aql_enable_write(struct file *file, const char __user *user_buf, 321 321 size_t count, loff_t *ppos) 322 322 { 323 - bool aql_disabled = static_key_false(&aql_disable.key); 324 323 char buf[3]; 325 324 size_t len; 326 325 ··· 334 335 if (len > 0 && buf[len - 1] == '\n') 335 336 buf[len - 1] = 0; 336 337 337 - if (buf[0] == '0' && buf[1] == '\0') { 338 - if (!aql_disabled) 339 - static_branch_inc(&aql_disable); 340 - } else if (buf[0] == '1' && buf[1] == '\0') { 341 - if (aql_disabled) 342 - static_branch_dec(&aql_disable); 343 - } else { 338 + if (buf[0] == '0' && buf[1] == '\0') 339 + static_branch_enable(&aql_disable); 340 + else if (buf[0] == '1' && buf[1] == '\0') 341 + static_branch_disable(&aql_disable); 342 + else 344 343 return -EINVAL; 345 - } 346 344 347 345 return count; 348 346 }
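The debugfs rewrite drops the manual inc/dec bookkeeping in favor of static_branch_enable()/static_branch_disable(), which are idempotent, so repeated writes of the same value can no longer unbalance the key. A minimal sketch of the primitive (the demo_* names are illustrative):

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(demo_feature);

	/* Idempotent: calling enable (or disable) twice leaves the key in
	 * the same state, unlike static_branch_inc()/_dec() which must
	 * stay strictly paired.
	 */
	static void demo_set(bool on)
	{
		if (on)
			static_branch_enable(&demo_feature);
		else
			static_branch_disable(&demo_feature);
	}

	static bool demo_fast_path(void)
	{
		return static_branch_unlikely(&demo_feature);
	}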
+3
net/mac80211/mesh.c
··· 79 79 * - MDA enabled 80 80 * - Power management control on fc 81 81 */ 82 + if (!ie->mesh_config) 83 + return false; 84 + 82 85 if (!(ifmsh->mesh_id_len == ie->mesh_id_len && 83 86 memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && 84 87 (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
+5 -2
net/mac80211/sta_info.c
··· 2782 2782 } 2783 2783 2784 2784 link_sinfo->inactive_time = 2785 - jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta, link_id)); 2785 + jiffies_delta_to_msecs(jiffies - 2786 + ieee80211_sta_last_active(sta, 2787 + link_id)); 2786 2788 2787 2789 if (!(link_sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | 2788 2790 BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) { ··· 3017 3015 sinfo->connected_time = ktime_get_seconds() - sta->last_connected; 3018 3016 sinfo->assoc_at = sta->assoc_at; 3019 3017 sinfo->inactive_time = 3020 - jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta, -1)); 3018 + jiffies_delta_to_msecs(jiffies - 3019 + ieee80211_sta_last_active(sta, -1)); 3021 3020 3022 3021 if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) | 3023 3022 BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
+1 -1
net/mac80211/tdls.c
··· 1449 1449 } 1450 1450 1451 1451 sta = sta_info_get(sdata, peer); 1452 - if (!sta) 1452 + if (!sta || !sta->sta.tdls) 1453 1453 return -ENOLINK; 1454 1454 1455 1455 iee80211_tdls_recalc_chanctx(sdata, sta);
+3 -1
net/mac80211/tx.c
··· 1899 1899 struct ieee80211_tx_data tx; 1900 1900 struct sk_buff *skb2; 1901 1901 1902 - if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP) 1902 + if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP) { 1903 + kfree_skb(skb); 1903 1904 return false; 1905 + } 1904 1906 1905 1907 info->band = band; 1906 1908 info->control.vif = vif;
+3 -1
net/mac802154/iface.c
··· 469 469 } 470 470 471 471 static int 472 - mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr) 472 + mac802154_header_parse(const struct sk_buff *skb, 473 + const struct net_device *dev, 474 + unsigned char *haddr) 473 475 { 474 476 struct ieee802154_hdr hdr; 475 477
+8 -5
net/mctp/route.c
··· 359 359 { 360 360 struct mctp_sk_key *key; 361 361 struct mctp_flow *flow; 362 + unsigned long flags; 362 363 363 364 flow = skb_ext_find(skb, SKB_EXT_MCTP); 364 365 if (!flow) ··· 367 366 368 367 key = flow->key; 369 368 370 - if (key->dev) { 371 - WARN_ON(key->dev != dev); 372 - return; 373 - } 369 + spin_lock_irqsave(&key->lock, flags); 374 370 375 - mctp_dev_set_key(dev, key); 371 + if (!key->dev) 372 + mctp_dev_set_key(dev, key); 373 + else 374 + WARN_ON(key->dev != dev); 375 + 376 + spin_unlock_irqrestore(&key->lock, flags); 376 377 } 377 378 #else 378 379 static void mctp_skb_set_flow(struct sk_buff *skb, struct mctp_sk_key *key) {}
+1
net/mpls/af_mpls.c
··· 2854 2854 rtnl_af_unregister(&mpls_af_ops); 2855 2855 out_unregister_dev_type: 2856 2856 dev_remove_pack(&mpls_packet_type); 2857 + unregister_netdevice_notifier(&mpls_dev_notifier); 2857 2858 out_unregister_pernet: 2858 2859 unregister_pernet_subsys(&mpls_net_ops); 2859 2860 goto out;
+1 -1
net/mptcp/pm_kernel.c
··· 838 838 static int mptcp_pm_nl_create_listen_socket(struct sock *sk, 839 839 struct mptcp_pm_addr_entry *entry) 840 840 { 841 - bool is_ipv6 = sk->sk_family == AF_INET6; 841 + bool is_ipv6 = entry->addr.family == AF_INET6; 842 842 int addrlen = sizeof(struct sockaddr_in); 843 843 struct sockaddr_storage addr; 844 844 struct sock *newsk, *ssk;
+2 -1
net/ncsi/ncsi-aen.c
··· 224 224 if (!nah) { 225 225 netdev_warn(ndp->ndev.dev, "Invalid AEN (0x%x) received\n", 226 226 h->type); 227 - return -ENOENT; 227 + ret = -ENOENT; 228 + goto out; 228 229 } 229 230 230 231 ret = ncsi_validate_aen_pkt(h, nah->payload);
+12 -4
net/ncsi/ncsi-rsp.c
··· 1176 1176 /* Find the NCSI device */ 1177 1177 nd = ncsi_find_dev(orig_dev); 1178 1178 ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL; 1179 - if (!ndp) 1180 - return -ENODEV; 1179 + if (!ndp) { 1180 + ret = -ENODEV; 1181 + goto err_free_skb; 1182 + } 1181 1183 1182 1184 /* Check if it is AEN packet */ 1183 1185 hdr = (struct ncsi_pkt_hdr *)skb_network_header(skb); ··· 1201 1199 if (!nrh) { 1202 1200 netdev_err(nd->dev, "Received unrecognized packet (0x%x)\n", 1203 1201 hdr->type); 1204 - return -ENOENT; 1202 + ret = -ENOENT; 1203 + goto err_free_skb; 1205 1204 } 1206 1205 1207 1206 /* Associate with the request */ ··· 1210 1207 nr = &ndp->requests[hdr->id]; 1211 1208 if (!nr->used) { 1212 1209 spin_unlock_irqrestore(&ndp->lock, flags); 1213 - return -ENODEV; 1210 + ret = -ENODEV; 1211 + goto err_free_skb; 1214 1212 } 1215 1213 1216 1214 nr->rsp = skb; ··· 1264 1260 1265 1261 out: 1266 1262 ncsi_free_request(nr); 1263 + return ret; 1264 + 1265 + err_free_skb: 1266 + kfree_skb(skb); 1267 1267 return ret; 1268 1268 }
+1 -1
net/netfilter/nf_bpf_link.c
··· 170 170 171 171 static const struct bpf_link_ops bpf_nf_link_lops = { 172 172 .release = bpf_nf_link_release, 173 - .dealloc = bpf_nf_link_dealloc, 173 + .dealloc_deferred = bpf_nf_link_dealloc, 174 174 .detach = bpf_nf_link_detach, 175 175 .show_fdinfo = bpf_nf_link_show_info, 176 176 .fill_link_info = bpf_nf_link_fill_link_info,
+4
net/netfilter/nf_conntrack_h323_asn1.c
··· 331 331 if (nf_h323_error_boundary(bs, 0, 2)) 332 332 return H323_ERROR_BOUND; 333 333 len = get_bits(bs, 2) + 1; 334 + if (nf_h323_error_boundary(bs, len, 0)) 335 + return H323_ERROR_BOUND; 334 336 BYTE_ALIGN(bs); 335 337 if (base && (f->attr & DECODE)) { /* timeToLive */ 336 338 unsigned int v = get_uint(bs, len) + f->lb; ··· 924 922 break; 925 923 p++; 926 924 len--; 925 + if (len <= 0) 926 + break; 927 927 return DecodeH323_UserInformation(buf, p, len, 928 928 &q931->UUIE); 929 929 }
+26 -2
net/netfilter/nf_conntrack_netlink.c
··· 3212 3212 { 3213 3213 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 3214 3214 struct nf_conn *ct = cb->data; 3215 - struct nf_conn_help *help = nfct_help(ct); 3215 + struct nf_conn_help *help; 3216 3216 u_int8_t l3proto = nfmsg->nfgen_family; 3217 3217 unsigned long last_id = cb->args[1]; 3218 3218 struct nf_conntrack_expect *exp; 3219 3219 3220 3220 if (cb->args[0]) 3221 + return 0; 3222 + 3223 + help = nfct_help(ct); 3224 + if (!help) 3221 3225 return 0; 3222 3226 3223 3227 rcu_read_lock(); ··· 3253 3249 return skb->len; 3254 3250 } 3255 3251 3252 + static int ctnetlink_dump_exp_ct_start(struct netlink_callback *cb) 3253 + { 3254 + struct nf_conn *ct = cb->data; 3255 + 3256 + if (!refcount_inc_not_zero(&ct->ct_general.use)) 3257 + return -ENOENT; 3258 + return 0; 3259 + } 3260 + 3261 + static int ctnetlink_dump_exp_ct_done(struct netlink_callback *cb) 3262 + { 3263 + struct nf_conn *ct = cb->data; 3264 + 3265 + if (ct) 3266 + nf_ct_put(ct); 3267 + return 0; 3268 + } 3269 + 3256 3270 static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl, 3257 3271 struct sk_buff *skb, 3258 3272 const struct nlmsghdr *nlh, ··· 3286 3264 struct nf_conntrack_zone zone; 3287 3265 struct netlink_dump_control c = { 3288 3266 .dump = ctnetlink_exp_ct_dump_table, 3267 + .start = ctnetlink_dump_exp_ct_start, 3268 + .done = ctnetlink_dump_exp_ct_done, 3289 3269 }; 3290 3270 3291 3271 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, ··· 3489 3465 3490 3466 #if IS_ENABLED(CONFIG_NF_NAT) 3491 3467 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = { 3492 - [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 }, 3468 + [CTA_EXPECT_NAT_DIR] = NLA_POLICY_MAX(NLA_BE32, IP_CT_DIR_REPLY), 3493 3469 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED }, 3494 3470 }; 3495 3471 #endif
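The ctnetlink change pins the conntrack entry for the whole multi-part dump through .start/.done callbacks, using refcount_inc_not_zero() so a concurrently dying entry is never resurrected. A sketch of that acquire-if-live idiom with the real refcount API (struct demo_entry is illustrative):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct demo_entry {
		refcount_t use;
		/* ... payload ... */
	};

	/* Take a temporary reference only if the object is still alive; a
	 * zero refcount means teardown already started and must not be
	 * undone by a late reader.
	 */
	static struct demo_entry *demo_get(struct demo_entry *e)
	{
		if (!refcount_inc_not_zero(&e->use))
			return NULL;
		return e;
	}

	static void demo_put(struct demo_entry *e)
	{
		if (refcount_dec_and_test(&e->use))
			kfree(e);
	}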
+2 -1
net/netfilter/nf_conntrack_proto_sctp.c
··· 582 582 } 583 583 584 584 static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = { 585 - [CTA_PROTOINFO_SCTP_STATE] = { .type = NLA_U8 }, 585 + [CTA_PROTOINFO_SCTP_STATE] = NLA_POLICY_MAX(NLA_U8, 586 + SCTP_CONNTRACK_HEARTBEAT_SENT), 586 587 [CTA_PROTOINFO_SCTP_VTAG_ORIGINAL] = { .type = NLA_U32 }, 587 588 [CTA_PROTOINFO_SCTP_VTAG_REPLY] = { .type = NLA_U32 }, 588 589 };
+5 -1
net/netfilter/nf_conntrack_sip.c
··· 1534 1534 { 1535 1535 struct tcphdr *th, _tcph; 1536 1536 unsigned int dataoff, datalen; 1537 - unsigned int matchoff, matchlen, clen; 1537 + unsigned int matchoff, matchlen; 1538 1538 unsigned int msglen, origlen; 1539 1539 const char *dptr, *end; 1540 1540 s16 diff, tdiff = 0; 1541 1541 int ret = NF_ACCEPT; 1542 + unsigned long clen; 1542 1543 bool term; 1543 1544 1544 1545 if (ctinfo != IP_CT_ESTABLISHED && ··· 1572 1571 1573 1572 clen = simple_strtoul(dptr + matchoff, (char **)&end, 10); 1574 1573 if (dptr + matchoff == end) 1574 + break; 1575 + 1576 + if (clen > datalen) 1575 1577 break; 1576 1578 1577 1579 term = false;
+1
net/netfilter/nf_flow_table_ip.c
··· 738 738 switch (tuple->encap[i].proto) { 739 739 case htons(ETH_P_8021Q): 740 740 case htons(ETH_P_8021AD): 741 + skb_reset_mac_header(skb); 741 742 if (skb_vlan_push(skb, tuple->encap[i].proto, 742 743 tuple->encap[i].id) < 0) 743 744 return -1;
+8 -22
net/netfilter/nf_tables_api.c
··· 829 829 830 830 nft_set_elem_change_active(ctx->net, set, ext); 831 831 nft_setelem_data_deactivate(ctx->net, set, catchall->elem); 832 - break; 833 832 } 834 833 } 835 834 ··· 5872 5873 5873 5874 nft_clear(ctx->net, ext); 5874 5875 nft_setelem_data_activate(ctx->net, set, catchall->elem); 5875 - break; 5876 5876 } 5877 5877 } 5878 5878 ··· 6744 6746 } 6745 6747 } 6746 6748 6747 - static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx, 6748 - struct nft_set_elem_expr *elem_expr) 6749 + void nft_set_elem_expr_destroy(const struct nft_ctx *ctx, 6750 + struct nft_set_elem_expr *elem_expr) 6749 6751 { 6750 6752 struct nft_expr *expr; 6751 6753 u32 size; ··· 7156 7158 } 7157 7159 7158 7160 static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, 7159 - const struct nlattr *attr, u32 nlmsg_flags, 7160 - bool last) 7161 + const struct nlattr *attr, u32 nlmsg_flags) 7161 7162 { 7162 7163 struct nft_expr *expr_array[NFT_SET_EXPR_MAX] = {}; 7163 7164 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; ··· 7443 7446 if (flags) 7444 7447 *nft_set_ext_flags(ext) = flags; 7445 7448 7446 - if (last) 7447 - elem.flags = NFT_SET_ELEM_INTERNAL_LAST; 7448 - else 7449 - elem.flags = 0; 7450 - 7451 7449 if (obj) 7452 7450 *nft_set_ext_obj(ext) = obj; 7453 7451 ··· 7607 7615 nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); 7608 7616 7609 7617 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 7610 - err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags, 7611 - nla_is_last(attr, rem)); 7618 + err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags); 7612 7619 if (err < 0) { 7613 7620 NL_SET_BAD_ATTR(extack, attr); 7614 7621 return err; ··· 7731 7740 } 7732 7741 7733 7742 static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, 7734 - const struct nlattr *attr, bool last) 7743 + const struct nlattr *attr) 7735 7744 { 7736 7745 struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; 7737 7746 struct nft_set_ext_tmpl tmpl; ··· 7798 7807 ext = nft_set_elem_ext(set, elem.priv); 7799 7808 if (flags) 7800 7809 *nft_set_ext_flags(ext) = flags; 7801 - 7802 - if (last) 7803 - elem.flags = NFT_SET_ELEM_INTERNAL_LAST; 7804 - else 7805 - elem.flags = 0; 7806 7810 7807 7811 trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set); 7808 7812 if (trans == NULL) ··· 7949 7963 return nft_set_flush(&ctx, set, genmask); 7950 7964 7951 7965 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 7952 - err = nft_del_setelem(&ctx, set, attr, 7953 - nla_is_last(attr, rem)); 7966 + err = nft_del_setelem(&ctx, set, attr); 7954 7967 if (err == -ENOENT && 7955 7968 NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYSETELEM) 7956 7969 continue; ··· 9203 9218 return 0; 9204 9219 9205 9220 err_flowtable_hooks: 9221 + synchronize_rcu(); 9206 9222 nft_trans_destroy(trans); 9207 9223 err_flowtable_trans: 9208 9224 nft_hooks_destroy(&flowtable->hook_list); ··· 9674 9688 break; 9675 9689 case NETDEV_REGISTER: 9676 9690 /* NOP if not matching or already registered */ 9677 - if (!match || (changename && ops)) 9691 + if (!match || ops) 9678 9692 continue; 9679 9693 9680 9694 ops = kzalloc_obj(struct nf_hook_ops,
+1 -1
net/netfilter/nft_chain_filter.c
··· 344 344 break; 345 345 case NETDEV_REGISTER: 346 346 /* NOP if not matching or already registered */ 347 - if (!match || (changename && ops)) 347 + if (!match || ops) 348 348 continue; 349 349 350 350 ops = kmemdup(&basechain->ops,
+4
net/netfilter/nft_ct.c
··· 23 23 #include <net/netfilter/nf_conntrack_l4proto.h> 24 24 #include <net/netfilter/nf_conntrack_expect.h> 25 25 #include <net/netfilter/nf_conntrack_seqadj.h> 26 + #include "nf_internals.h" 26 27 27 28 struct nft_ct_helper_obj { 28 29 struct nf_conntrack_helper *helper4; ··· 544 543 #endif 545 544 #ifdef CONFIG_NF_CONNTRACK_ZONES 546 545 case NFT_CT_ZONE: 546 + nf_queue_nf_hook_drop(ctx->net); 547 547 mutex_lock(&nft_ct_pcpu_mutex); 548 548 if (--nft_ct_pcpu_template_refcnt == 0) 549 549 nft_ct_tmpl_put_pcpu(); ··· 1017 1015 struct nft_ct_timeout_obj *priv = nft_obj_data(obj); 1018 1016 struct nf_ct_timeout *timeout = priv->timeout; 1019 1017 1018 + nf_queue_nf_hook_drop(ctx->net); 1020 1019 nf_ct_untimeout(ctx->net, timeout); 1021 1020 nf_ct_netns_put(ctx->net, ctx->family); 1022 1021 kfree(priv->timeout); ··· 1150 1147 { 1151 1148 struct nft_ct_helper_obj *priv = nft_obj_data(obj); 1152 1149 1150 + nf_queue_nf_hook_drop(ctx->net); 1153 1151 if (priv->helper4) 1154 1152 nf_conntrack_helper_put(priv->helper4); 1155 1153 if (priv->helper6)
+9 -1
net/netfilter/nft_dynset.c
··· 30 30 const struct nft_set_ext *ext) 31 31 { 32 32 struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext); 33 + struct nft_ctx ctx = { 34 + .net = read_pnet(&priv->set->net), 35 + .family = priv->set->table->family, 36 + }; 33 37 struct nft_expr *expr; 34 38 int i; 35 39 36 40 for (i = 0; i < priv->num_exprs; i++) { 37 41 expr = nft_setelem_expr_at(elem_expr, elem_expr->size); 38 42 if (nft_expr_clone(expr, priv->expr_array[i], GFP_ATOMIC) < 0) 39 - return -1; 43 + goto err_out; 40 44 41 45 elem_expr->size += priv->expr_array[i]->ops->size; 42 46 } 43 47 44 48 return 0; 49 + err_out: 50 + nft_set_elem_expr_destroy(&ctx, elem_expr); 51 + 52 + return -1; 45 53 } 46 54 47 55 struct nft_elem_priv *nft_dynset_new(struct nft_set *set,
+2 -1
net/netfilter/nft_set_pipapo.c
··· 1640 1640 int i; 1641 1641 1642 1642 nft_pipapo_for_each_field(f, i, m) { 1643 + bool last = i == m->field_count - 1; 1643 1644 int g; 1644 1645 1645 1646 for (g = 0; g < f->groups; g++) { ··· 1660 1659 } 1661 1660 1662 1661 pipapo_unmap(f->mt, f->rules, rulemap[i].to, rulemap[i].n, 1663 - rulemap[i + 1].n, i == m->field_count - 1); 1662 + last ? 0 : rulemap[i + 1].n, last); 1664 1663 if (pipapo_resize(f, f->rules, f->rules - rulemap[i].n)) { 1665 1664 /* We can ignore this, a failure to shrink tables down 1666 1665 * doesn't make tables invalid.
+10 -61
net/netfilter/nft_set_rbtree.c
··· 304 304 priv->start_rbe_cookie = (unsigned long)rbe; 305 305 } 306 306 307 - static void nft_rbtree_set_start_cookie_open(struct nft_rbtree *priv, 308 - const struct nft_rbtree_elem *rbe, 309 - unsigned long open_interval) 310 - { 311 - priv->start_rbe_cookie = (unsigned long)rbe | open_interval; 312 - } 313 - 314 - #define NFT_RBTREE_OPEN_INTERVAL 1UL 315 - 316 307 static bool nft_rbtree_cmp_start_cookie(struct nft_rbtree *priv, 317 308 const struct nft_rbtree_elem *rbe) 318 309 { 319 - return (priv->start_rbe_cookie & ~NFT_RBTREE_OPEN_INTERVAL) == (unsigned long)rbe; 310 + return priv->start_rbe_cookie == (unsigned long)rbe; 320 311 } 321 312 322 313 static bool nft_rbtree_insert_same_interval(const struct net *net, ··· 337 346 338 347 static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, 339 348 struct nft_rbtree_elem *new, 340 - struct nft_elem_priv **elem_priv, u64 tstamp, bool last) 349 + struct nft_elem_priv **elem_priv, u64 tstamp) 341 350 { 342 351 struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL, *rbe_prev; 343 352 struct rb_node *node, *next, *parent, **p, *first = NULL; 344 353 struct nft_rbtree *priv = nft_set_priv(set); 345 354 u8 cur_genmask = nft_genmask_cur(net); 346 355 u8 genmask = nft_genmask_next(net); 347 - unsigned long open_interval = 0; 348 356 int d; 349 357 350 358 /* Descend the tree to search for an existing element greater than the ··· 449 459 } 450 460 } 451 461 452 - if (nft_rbtree_interval_null(set, new)) { 462 + if (nft_rbtree_interval_null(set, new)) 453 463 priv->start_rbe_cookie = 0; 454 - } else if (nft_rbtree_interval_start(new) && priv->start_rbe_cookie) { 455 - if (nft_set_is_anonymous(set)) { 456 - priv->start_rbe_cookie = 0; 457 - } else if (priv->start_rbe_cookie & NFT_RBTREE_OPEN_INTERVAL) { 458 - /* Previous element is an open interval that partially 459 - * overlaps with an existing non-open interval. 460 - */ 461 - return -ENOTEMPTY; 462 - } 463 - } 464 + else if (nft_rbtree_interval_start(new) && priv->start_rbe_cookie) 465 + priv->start_rbe_cookie = 0; 464 466 465 467 /* - new start element matching existing start element: full overlap 466 468 * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given. ··· 460 478 if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) && 461 479 nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) { 462 480 *elem_priv = &rbe_ge->priv; 463 - 464 - /* - Corner case: new start element of open interval (which 465 - * comes as last element in the batch) overlaps the start of 466 - * an existing interval with an end element: partial overlap. 467 - */ 468 - node = rb_first(&priv->root); 469 - rbe = __nft_rbtree_next_active(node, genmask); 470 - if (rbe && nft_rbtree_interval_end(rbe)) { 471 - rbe = nft_rbtree_next_active(rbe, genmask); 472 - if (rbe && 473 - nft_rbtree_interval_start(rbe) && 474 - !nft_rbtree_cmp(set, new, rbe)) { 475 - if (last) 476 - return -ENOTEMPTY; 477 - 478 - /* Maybe open interval? */ 479 - open_interval = NFT_RBTREE_OPEN_INTERVAL; 480 - } 481 - } 482 - nft_rbtree_set_start_cookie_open(priv, rbe_ge, open_interval); 483 - 481 + nft_rbtree_set_start_cookie(priv, rbe_ge); 484 482 return -EEXIST; 485 483 } 486 484 ··· 513 551 */ 514 552 if (rbe_ge && 515 553 nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new)) 516 554 return -ENOTEMPTY; 517 555 518 - /* - start element overlaps an open interval but end element is new: 519 - * partial overlap, reported as -ENOEMPTY.
520 - */ 521 - if (!rbe_ge && priv->start_rbe_cookie && nft_rbtree_interval_end(new)) 522 554 return -ENOTEMPTY; 523 555 524 556 /* Accepted element: pick insertion point depending on key value */ ··· 624 668 struct nft_elem_priv **elem_priv) 625 669 { 626 670 struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem->priv); 627 - bool last = !!(elem->flags & NFT_SET_ELEM_INTERNAL_LAST); 628 671 struct nft_rbtree *priv = nft_set_priv(set); 629 672 u64 tstamp = nft_net_tstamp(net); 630 673 int err; ··· 640 685 cond_resched(); 641 686 642 687 write_lock_bh(&priv->lock); 643 - err = __nft_rbtree_insert(net, set, rbe, elem_priv, tstamp, last); 688 + err = __nft_rbtree_insert(net, set, rbe, elem_priv, tstamp); 644 689 write_unlock_bh(&priv->lock); 645 - 646 - if (nft_rbtree_interval_end(rbe)) 647 - priv->start_rbe_cookie = 0; 648 - 649 690 } while (err == -EAGAIN); 650 691 651 692 return err; ··· 729 778 const struct nft_set_elem *elem) 730 779 { 731 780 struct nft_rbtree_elem *rbe, *this = nft_elem_priv_cast(elem->priv); 732 - bool last = !!(elem->flags & NFT_SET_ELEM_INTERNAL_LAST); 733 781 struct nft_rbtree *priv = nft_set_priv(set); 734 782 const struct rb_node *parent = priv->root.rb_node; 735 783 u8 genmask = nft_genmask_next(net); ··· 769 819 continue; 770 820 } 771 821 772 - if (nft_rbtree_interval_start(rbe)) { 773 - if (!last) 774 - nft_rbtree_set_start_cookie(priv, rbe); 775 - } else if (!nft_rbtree_deactivate_same_interval(net, priv, rbe)) 822 + if (nft_rbtree_interval_start(rbe)) 823 + nft_rbtree_set_start_cookie(priv, rbe); 824 + else if (!nft_rbtree_deactivate_same_interval(net, priv, rbe)) 776 825 return NULL; 777 826 778 827 nft_rbtree_flush(net, set, &rbe->priv);
+4
net/netfilter/xt_CT.c
··· 16 16 #include <net/netfilter/nf_conntrack_ecache.h> 17 17 #include <net/netfilter/nf_conntrack_timeout.h> 18 18 #include <net/netfilter/nf_conntrack_zones.h> 19 + #include "nf_internals.h" 19 20 20 21 static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct) 21 22 { ··· 284 283 struct nf_conn_help *help; 285 284 286 285 if (ct) { 286 + if (info->helper[0] || info->timeout[0]) 287 + nf_queue_nf_hook_drop(par->net); 288 + 287 289 help = nfct_help(ct); 288 290 xt_ct_put_helper(help); 289 291
+6
net/netfilter/xt_IDLETIMER.c
··· 318 318 319 319 info->timer = __idletimer_tg_find_by_label(info->label); 320 320 if (info->timer) { 321 + if (info->timer->timer_type & XT_IDLETIMER_ALARM) { 322 + pr_debug("Adding/Replacing rule with same label and different timer type is not allowed\n"); 323 + mutex_unlock(&list_mutex); 324 + return -EINVAL; 325 + } 326 + 321 327 info->timer->refcnt++; 322 328 mod_timer(&info->timer->timer, 323 329 secs_to_jiffies(info->timeout) + jiffies);
+2 -2
net/netfilter/xt_dccp.c
··· 62 62 return true; 63 63 } 64 64 65 - if (op[i] < 2) 65 + if (op[i] < 2 || i == optlen - 1) 66 66 i++; 67 67 else 68 - i += op[i+1]?:1; 68 + i += op[i + 1] ? : 1; 69 69 } 70 70 71 71 spin_unlock_bh(&dccp_buflock);
+4 -2
net/netfilter/xt_tcpudp.c
··· 59 59 60 60 for (i = 0; i < optlen; ) { 61 61 if (op[i] == option) return !invert; 62 - if (op[i] < 2) i++; 63 - else i += op[i+1]?:1; 62 + if (op[i] < 2 || i == optlen - 1) 63 + i++; 64 + else 65 + i += op[i + 1] ? : 1; 64 66 } 65 67 66 68 return invert;
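The xt_dccp and xt_tcpudp loops both gain an "i == optlen - 1" guard, because reading op[i + 1] on the final option byte ran past the option area. A stand-alone sketch of the corrected TLV walk:

	#include <stdbool.h>
	#include <stddef.h>

	/* Walk type-length encoded options: single-byte options (< 2)
	 * advance by one; anything else advances by its length byte.
	 * Stop before touching op[i + 1] when i is the final byte.
	 */
	static bool tlv_has_option(const unsigned char *op, size_t optlen,
				   unsigned char option)
	{
		size_t i = 0;

		while (i < optlen) {
			if (op[i] == option)
				return true;
			if (op[i] < 2 || i == optlen - 1)
				i++;
			else
				i += op[i + 1] ? op[i + 1] : 1;
		}
		return false;
	}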
+2 -2
net/netfilter/xt_time.c
··· 223 223 224 224 localtime_2(&current_time, stamp); 225 225 226 - if (!(info->weekdays_match & (1 << current_time.weekday))) 226 + if (!(info->weekdays_match & (1U << current_time.weekday))) 227 227 return false; 228 228 229 229 /* Do not spend time computing monthday if all days match anyway */ 230 230 if (info->monthdays_match != XT_TIME_ALL_MONTHDAYS) { 231 231 localtime_3(&current_time, stamp); 232 - if (!(info->monthdays_match & (1 << current_time.monthday))) 232 + if (!(info->monthdays_match & (1U << current_time.monthday))) 233 233 return false; 234 234 } 235 235
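xt_time moves to an unsigned literal for the day bitmask tests: with a plain int, shifting 1 into bit 31 (monthday 31) is signed overflow, which is undefined behavior, while the unsigned form is well defined:

	unsigned int day31 = 1U << 31;	/* 0x80000000, well defined */
	/* int bad = 1 << 31;		   signed overflow: undefined behavior */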
+4 -1
net/phonet/af_phonet.c
··· 129 129 return 1; 130 130 } 131 131 132 - static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr) 132 + static int pn_header_parse(const struct sk_buff *skb, 133 + const struct net_device *dev, 134 + unsigned char *haddr) 133 135 { 134 136 const u8 *media = skb_mac_header(skb); 137 + 135 138 *haddr = *media; 136 139 return 1; 137 140 }
+5
net/rose/af_rose.c
··· 811 811 goto out_release; 812 812 } 813 813 814 + if (sk->sk_state == TCP_SYN_SENT) { 815 + err = -EALREADY; 816 + goto out_release; 817 + } 818 + 814 819 sk->sk_state = TCP_CLOSE; 815 820 sock->state = SS_UNCONNECTED; 816 821
+5 -3
net/rxrpc/af_rxrpc.c
··· 267 267 * Lookup or create a remote transport endpoint record for the specified 268 268 * address. 269 269 * 270 - * Return: The peer record found with a reference, %NULL if no record is found 271 - * or a negative error code if the address is invalid or unsupported. 270 + * Return: The peer record found with a reference or a negative error code if 271 + * the address is invalid or unsupported. 272 272 */ 273 273 struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock, 274 274 struct sockaddr_rxrpc *srx, gfp_t gfp) 275 275 { 276 + struct rxrpc_peer *peer; 276 277 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 277 278 int ret; 278 279 ··· 281 280 if (ret < 0) 282 281 return ERR_PTR(ret); 283 282 284 - return rxrpc_lookup_peer(rx->local, srx, gfp); 283 + peer = rxrpc_lookup_peer(rx->local, srx, gfp); 284 + return peer ?: ERR_PTR(-ENOMEM); 285 285 } 286 286 EXPORT_SYMBOL(rxrpc_kernel_lookup_peer); 287 287
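rxrpc_kernel_lookup_peer() now never returns NULL; failures always come back as an ERR_PTR, matching the corrected kernel-doc and letting callers test the result with a single IS_ERR() check. A short sketch of the convention (demo_lookup() and demo_find() are hypothetical):

	#include <linux/err.h>
	#include <linux/errno.h>

	struct demo_obj { int key; };

	/* Hypothetical underlying lookup that may return NULL on a miss. */
	static struct demo_obj *demo_find(int key)
	{
		return NULL;	/* stand-in: cache miss */
	}

	/* Return a valid pointer or an ERR_PTR(), never NULL, so every
	 * caller can handle failure uniformly via IS_ERR()/PTR_ERR().
	 */
	static struct demo_obj *demo_lookup(int key)
	{
		struct demo_obj *obj;

		if (key < 0)
			return ERR_PTR(-EINVAL);

		obj = demo_find(key);
		return obj ?: ERR_PTR(-ENOMEM);
	}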
-27
net/sched/sch_generic.c
··· 1288 1288 } 1289 1289 } 1290 1290 1291 - static void dev_reset_queue(struct net_device *dev, 1292 - struct netdev_queue *dev_queue, 1293 - void *_unused) 1294 - { 1295 - struct Qdisc *qdisc; 1296 - bool nolock; 1297 - 1298 - qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); 1299 - if (!qdisc) 1300 - return; 1301 - 1302 - nolock = qdisc->flags & TCQ_F_NOLOCK; 1303 - 1304 - if (nolock) 1305 - spin_lock_bh(&qdisc->seqlock); 1306 - spin_lock_bh(qdisc_lock(qdisc)); 1307 - 1308 - qdisc_reset(qdisc); 1309 - 1310 - spin_unlock_bh(qdisc_lock(qdisc)); 1311 - if (nolock) { 1312 - clear_bit(__QDISC_STATE_MISSED, &qdisc->state); 1313 - clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); 1314 - spin_unlock_bh(&qdisc->seqlock); 1315 - } 1316 - } 1317 - 1318 1291 static bool some_qdisc_is_busy(struct net_device *dev) 1319 1292 { 1320 1293 unsigned int i;
+8 -6
net/sched/sch_ingress.c
··· 113 113 { 114 114 struct ingress_sched_data *q = qdisc_priv(sch); 115 115 struct net_device *dev = qdisc_dev(sch); 116 - struct bpf_mprog_entry *entry = rtnl_dereference(dev->tcx_ingress); 116 + struct bpf_mprog_entry *entry; 117 117 118 118 if (sch->parent != TC_H_INGRESS) 119 119 return; 120 120 121 121 tcf_block_put_ext(q->block, sch, &q->block_info); 122 122 123 - if (entry) { 123 + if (mini_qdisc_pair_inited(&q->miniqp)) { 124 + entry = rtnl_dereference(dev->tcx_ingress); 124 125 tcx_miniq_dec(entry); 125 126 if (!tcx_entry_is_active(entry)) { 126 127 tcx_entry_update(dev, NULL, true); ··· 291 290 292 291 static void clsact_destroy(struct Qdisc *sch) 293 292 { 293 + struct bpf_mprog_entry *ingress_entry, *egress_entry; 294 294 struct clsact_sched_data *q = qdisc_priv(sch); 295 295 struct net_device *dev = qdisc_dev(sch); 296 - struct bpf_mprog_entry *ingress_entry = rtnl_dereference(dev->tcx_ingress); 297 - struct bpf_mprog_entry *egress_entry = rtnl_dereference(dev->tcx_egress); 298 296 299 297 if (sch->parent != TC_H_CLSACT) 300 298 return; ··· 301 301 tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); 302 302 tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info); 303 303 304 - if (ingress_entry) { 304 + if (mini_qdisc_pair_inited(&q->miniqp_ingress)) { 305 + ingress_entry = rtnl_dereference(dev->tcx_ingress); 305 306 tcx_miniq_dec(ingress_entry); 306 307 if (!tcx_entry_is_active(ingress_entry)) { 307 308 tcx_entry_update(dev, NULL, true); ··· 310 309 } 311 310 } 312 311 313 - if (egress_entry) { 312 + if (mini_qdisc_pair_inited(&q->miniqp_egress)) { 313 + egress_entry = rtnl_dereference(dev->tcx_egress); 314 314 tcx_miniq_dec(egress_entry); 315 315 if (!tcx_entry_is_active(egress_entry)) { 316 316 tcx_entry_update(dev, NULL, false);
+3 -5
net/sched/sch_teql.c
··· 146 146 master->slaves = NEXT_SLAVE(q); 147 147 if (q == master->slaves) { 148 148 struct netdev_queue *txq; 149 - spinlock_t *root_lock; 150 149 151 150 txq = netdev_get_tx_queue(master->dev, 0); 152 151 master->slaves = NULL; 153 152 154 - root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc)); 155 - spin_lock_bh(root_lock); 156 - qdisc_reset(rtnl_dereference(txq->qdisc)); 157 - spin_unlock_bh(root_lock); 153 + dev_reset_queue(master->dev, 154 + txq, NULL); 158 155 } 159 156 } 160 157 skb_queue_purge(&dat->q); ··· 312 315 if (__netif_tx_trylock(slave_txq)) { 313 316 unsigned int length = qdisc_pkt_len(skb); 314 317 318 + skb->dev = slave; 315 319 if (!netif_xmit_frozen_or_stopped(slave_txq) && 316 320 netdev_start_xmit(skb, slave, slave_txq, false) == 317 321 NETDEV_TX_OK) {
+96 -75
net/shaper/shaper.c
··· 36 36 return &((struct net_shaper_nl_ctx *)ctx)->binding; 37 37 } 38 38 39 - static void net_shaper_lock(struct net_shaper_binding *binding) 40 - { 41 - switch (binding->type) { 42 - case NET_SHAPER_BINDING_TYPE_NETDEV: 43 - netdev_lock(binding->netdev); 44 - break; 45 - } 46 - } 47 - 48 - static void net_shaper_unlock(struct net_shaper_binding *binding) 49 - { 50 - switch (binding->type) { 51 - case NET_SHAPER_BINDING_TYPE_NETDEV: 52 - netdev_unlock(binding->netdev); 53 - break; 54 - } 55 - } 56 - 57 39 static struct net_shaper_hierarchy * 58 40 net_shaper_hierarchy(struct net_shaper_binding *binding) 59 41 { 60 42 /* Pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. */ 61 43 if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV) 44 + return READ_ONCE(binding->netdev->net_shaper_hierarchy); 45 + 46 + /* No other type supported yet. */ 47 + return NULL; 48 + } 49 + 50 + static struct net_shaper_hierarchy * 51 + net_shaper_hierarchy_rcu(struct net_shaper_binding *binding) 52 + { 53 + /* Readers look up the device and take a ref, then take RCU lock 54 + * later at which point netdev may have been unregistered and flushed. 55 + * READ_ONCE() pairs with WRITE_ONCE() in net_shaper_hierarchy_setup. 56 + */ 57 + if (binding->type == NET_SHAPER_BINDING_TYPE_NETDEV && 58 + READ_ONCE(binding->netdev->reg_state) <= NETREG_REGISTERED) 62 59 return READ_ONCE(binding->netdev->net_shaper_hierarchy); 63 60 64 61 /* No other type supported yet. */ ··· 201 204 return 0; 202 205 } 203 206 207 + /* Like net_shaper_ctx_setup(), but for "write" handlers (never for dumps!) 208 + * Acquires the lock protecting the hierarchy (instance lock for netdev). 209 + */ 210 + static int net_shaper_ctx_setup_lock(const struct genl_info *info, int type, 211 + struct net_shaper_nl_ctx *ctx) 212 + { 213 + struct net *ns = genl_info_net(info); 214 + struct net_device *dev; 215 + int ifindex; 216 + 217 + if (GENL_REQ_ATTR_CHECK(info, type)) 218 + return -EINVAL; 219 + 220 + ifindex = nla_get_u32(info->attrs[type]); 221 + dev = netdev_get_by_index_lock(ns, ifindex); 222 + if (!dev) { 223 + NL_SET_BAD_ATTR(info->extack, info->attrs[type]); 224 + return -ENOENT; 225 + } 226 + 227 + if (!dev->netdev_ops->net_shaper_ops) { 228 + NL_SET_BAD_ATTR(info->extack, info->attrs[type]); 229 + netdev_unlock(dev); 230 + return -EOPNOTSUPP; 231 + } 232 + 233 + ctx->binding.type = NET_SHAPER_BINDING_TYPE_NETDEV; 234 + ctx->binding.netdev = dev; 235 + return 0; 236 + } 237 + 204 238 static void net_shaper_ctx_cleanup(struct net_shaper_nl_ctx *ctx) 205 239 { 206 240 if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV) 207 241 netdev_put(ctx->binding.netdev, &ctx->dev_tracker); 242 + } 243 + 244 + static void net_shaper_ctx_cleanup_unlock(struct net_shaper_nl_ctx *ctx) 245 + { 246 + if (ctx->binding.type == NET_SHAPER_BINDING_TYPE_NETDEV) 247 + netdev_unlock(ctx->binding.netdev); 208 248 } 209 249 210 250 static u32 net_shaper_handle_to_index(const struct net_shaper_handle *handle) ··· 285 251 net_shaper_lookup(struct net_shaper_binding *binding, 286 252 const struct net_shaper_handle *handle) 287 253 { 288 - struct net_shaper_hierarchy *hierarchy = net_shaper_hierarchy(binding); 289 254 u32 index = net_shaper_handle_to_index(handle); 255 + struct net_shaper_hierarchy *hierarchy; 290 256 257 + hierarchy = net_shaper_hierarchy_rcu(binding); 291 258 if (!hierarchy || xa_get_mark(&hierarchy->shapers, index, 292 259 NET_SHAPER_NOT_VALID)) 293 260 return NULL; ··· 297 262 } 298 263 299 264 /* Allocate on demand the per device shaper's
hierarchy container. 300 - * Called under the net shaper lock 265 + * Called under the lock protecting the hierarchy (instance lock for netdev) 301 266 */ 302 267 static struct net_shaper_hierarchy * 303 268 net_shaper_hierarchy_setup(struct net_shaper_binding *binding) ··· 716 681 net_shaper_generic_post(info); 717 682 } 718 683 684 + int net_shaper_nl_pre_doit_write(const struct genl_split_ops *ops, 685 + struct sk_buff *skb, struct genl_info *info) 686 + { 687 + struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)info->ctx; 688 + 689 + BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx)); 690 + 691 + return net_shaper_ctx_setup_lock(info, NET_SHAPER_A_IFINDEX, ctx); 692 + } 693 + 694 + void net_shaper_nl_post_doit_write(const struct genl_split_ops *ops, 695 + struct sk_buff *skb, struct genl_info *info) 696 + { 697 + net_shaper_ctx_cleanup_unlock((struct net_shaper_nl_ctx *)info->ctx); 698 + } 699 + 719 700 int net_shaper_nl_pre_dumpit(struct netlink_callback *cb) 720 701 { 721 702 struct net_shaper_nl_ctx *ctx = (struct net_shaper_nl_ctx *)cb->ctx; ··· 810 759 if (ret) 811 760 goto free_msg; 812 761 813 - ret = genlmsg_reply(msg, info); 814 - if (ret) 815 - goto free_msg; 816 - 817 - return 0; 762 + return genlmsg_reply(msg, info); 818 763 819 764 free_msg: 820 765 nlmsg_free(msg); ··· 829 782 830 783 /* Don't error out dumps performed before any set operation. */ 831 784 binding = net_shaper_binding_from_ctx(ctx); 832 - hierarchy = net_shaper_hierarchy(binding); 833 - if (!hierarchy) 834 - return 0; 835 785 836 786 rcu_read_lock(); 787 + hierarchy = net_shaper_hierarchy_rcu(binding); 788 + if (!hierarchy) 789 + goto out_unlock; 790 + 837 791 for (; (shaper = xa_find(&hierarchy->shapers, &ctx->start_index, 838 792 U32_MAX, XA_PRESENT)); ctx->start_index++) { 839 793 ret = net_shaper_fill_one(skb, binding, shaper, info); 840 794 if (ret) 841 795 break; 842 796 } 797 + out_unlock: 843 798 rcu_read_unlock(); 844 799 845 800 return ret; ··· 859 810 860 811 binding = net_shaper_binding_from_ctx(info->ctx); 861 812 862 - net_shaper_lock(binding); 863 813 ret = net_shaper_parse_info(binding, info->attrs, info, &shaper, 864 814 &exists); 865 815 if (ret) 866 - goto unlock; 816 + return ret; 867 817 868 818 if (!exists) 869 819 net_shaper_default_parent(&shaper.handle, &shaper.parent); 870 820 871 821 hierarchy = net_shaper_hierarchy_setup(binding); 872 - if (!hierarchy) { 873 - ret = -ENOMEM; 874 - goto unlock; 875 - } 822 + if (!hierarchy) 823 + return -ENOMEM; 876 824 877 825 /* The 'set' operation can't create node-scope shapers.
*/ 878 826 handle = shaper.handle; 879 827 if (handle.scope == NET_SHAPER_SCOPE_NODE && 880 - !net_shaper_lookup(binding, &handle)) { 881 - ret = -ENOENT; 882 - goto unlock; 883 - } 828 + !net_shaper_lookup(binding, &handle)) 829 + return -ENOENT; 884 830 885 831 ret = net_shaper_pre_insert(binding, &handle, info->extack); 886 832 if (ret) 887 - goto unlock; 833 + return ret; 888 834 889 835 ops = net_shaper_ops(binding); 890 836 ret = ops->set(binding, &shaper, info->extack); 891 837 if (ret) { 892 838 net_shaper_rollback(binding); 893 - goto unlock; 839 + return ret; 894 840 } 895 841 896 842 net_shaper_commit(binding, 1, &shaper); 897 843 898 - unlock: 899 - net_shaper_unlock(binding); 900 - return ret; 844 + return 0; 901 845 } 902 846 903 847 static int __net_shaper_delete(struct net_shaper_binding *binding, ··· 1118 1076 1119 1077 binding = net_shaper_binding_from_ctx(info->ctx); 1120 1078 1121 - net_shaper_lock(binding); 1122 1079 ret = net_shaper_parse_handle(info->attrs[NET_SHAPER_A_HANDLE], info, 1123 1080 &handle); 1124 1081 if (ret) 1125 - goto unlock; 1082 + return ret; 1126 1083 1127 1084 hierarchy = net_shaper_hierarchy(binding); 1128 - if (!hierarchy) { 1129 - ret = -ENOENT; 1130 - goto unlock; 1131 - } 1085 + if (!hierarchy) 1086 + return -ENOENT; 1132 1087 1133 1088 shaper = net_shaper_lookup(binding, &handle); 1134 - if (!shaper) { 1135 - ret = -ENOENT; 1136 - goto unlock; 1137 - } 1089 + if (!shaper) 1090 + return -ENOENT; 1138 1091 1139 1092 if (handle.scope == NET_SHAPER_SCOPE_NODE) { 1140 1093 ret = net_shaper_pre_del_node(binding, shaper, info->extack); 1141 1094 if (ret) 1142 - goto unlock; 1095 + return ret; 1143 1096 } 1144 1097 1145 - ret = __net_shaper_delete(binding, shaper, info->extack); 1146 - 1147 - unlock: 1148 - net_shaper_unlock(binding); 1149 - return ret; 1098 + return __net_shaper_delete(binding, shaper, info->extack); 1150 1099 } 1151 1100 1152 1101 static int net_shaper_group_send_reply(struct net_shaper_binding *binding, ··· 1186 1153 if (!net_shaper_ops(binding)->group) 1187 1154 return -EOPNOTSUPP; 1188 1155 1189 - net_shaper_lock(binding); 1190 1156 leaves_count = net_shaper_list_len(info, NET_SHAPER_A_LEAVES); 1191 1157 if (!leaves_count) { 1192 1158 NL_SET_BAD_ATTR(info->extack, 1193 1159 info->attrs[NET_SHAPER_A_LEAVES]); 1194 - ret = -EINVAL; 1195 - goto unlock; 1160 + return -EINVAL; 1196 1161 } 1197 1162 1198 1163 leaves = kcalloc(leaves_count, sizeof(struct net_shaper) + 1199 1164 sizeof(struct net_shaper *), GFP_KERNEL); 1200 - if (!leaves) { 1201 - ret = -ENOMEM; 1202 - goto unlock; 1203 - } 1165 + if (!leaves) 1166 + return -ENOMEM; 1204 1167 old_nodes = (void *)&leaves[leaves_count]; 1205 1168 1206 1169 ret = net_shaper_parse_node(binding, info->attrs, info, &node); ··· 1273 1244 1274 1245 free_leaves: 1275 1246 kfree(leaves); 1276 - 1277 - unlock: 1278 - net_shaper_unlock(binding); 1279 1247 return ret; 1280 1248 1281 1249 free_msg: ··· 1339 1313 if (ret) 1340 1314 goto free_msg; 1341 1315 1342 - ret = genlmsg_reply(msg, info); 1343 - if (ret) 1344 - goto free_msg; 1345 - return 0; 1316 + return genlmsg_reply(msg, info); 1346 1317 1347 1318 free_msg: 1348 1319 nlmsg_free(msg); ··· 1382 1359 if (!hierarchy) 1383 1360 return; 1384 1361 1385 - net_shaper_lock(binding); 1386 1362 xa_lock(&hierarchy->shapers); 1387 1363 xa_for_each(&hierarchy->shapers, index, cur) { 1388 1364 __xa_erase(&hierarchy->shapers, index); 1389 1365 kfree(cur); 1390 1366 } 1391 1367 xa_unlock(&hierarchy->shapers); 1392 - net_shaper_unlock(binding); 1393 1368 
1394 1369 kfree(hierarchy); 1395 1370 }
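
The shaper hunks above move write-side locking out of the individual doit
handlers: net_shaper_nl_pre_doit_write() takes the netdev instance lock via
netdev_get_by_index_lock() and net_shaper_nl_post_doit_write() drops it, so
the set/delete/group bodies run with the lock already held and lose their
local lock/unlock and goto-unlock error paths. A minimal sketch of the
resulting control flow; example_write_doit is a hypothetical handler, not
part of the patch:

	/* pre_doit_write already ran: the instance lock is held and
	 * info->ctx carries the binding. */
	static int example_write_doit(struct sk_buff *skb, struct genl_info *info)
	{
		struct net_shaper_binding *binding;

		binding = net_shaper_binding_from_ctx(info->ctx);

		/* ... parse attributes and mutate the hierarchy; plain
		 * returns are fine, post_doit_write drops the lock ... */
		return 0;
	}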
+6 -6
net/shaper/shaper_nl_gen.c
··· 99 99 }, 100 100 { 101 101 .cmd = NET_SHAPER_CMD_SET, 102 - .pre_doit = net_shaper_nl_pre_doit, 102 + .pre_doit = net_shaper_nl_pre_doit_write, 103 103 .doit = net_shaper_nl_set_doit, 104 - .post_doit = net_shaper_nl_post_doit, 104 + .post_doit = net_shaper_nl_post_doit_write, 105 105 .policy = net_shaper_set_nl_policy, 106 106 .maxattr = NET_SHAPER_A_IFINDEX, 107 107 .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, 108 108 }, 109 109 { 110 110 .cmd = NET_SHAPER_CMD_DELETE, 111 - .pre_doit = net_shaper_nl_pre_doit, 111 + .pre_doit = net_shaper_nl_pre_doit_write, 112 112 .doit = net_shaper_nl_delete_doit, 113 - .post_doit = net_shaper_nl_post_doit, 113 + .post_doit = net_shaper_nl_post_doit_write, 114 114 .policy = net_shaper_delete_nl_policy, 115 115 .maxattr = NET_SHAPER_A_IFINDEX, 116 116 .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, 117 117 }, 118 118 { 119 119 .cmd = NET_SHAPER_CMD_GROUP, 120 - .pre_doit = net_shaper_nl_pre_doit, 120 + .pre_doit = net_shaper_nl_pre_doit_write, 121 121 .doit = net_shaper_nl_group_doit, 122 - .post_doit = net_shaper_nl_post_doit, 122 + .post_doit = net_shaper_nl_post_doit_write, 123 123 .policy = net_shaper_group_nl_policy, 124 124 .maxattr = NET_SHAPER_A_LEAVES, 125 125 .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+5
net/shaper/shaper_nl_gen.h
··· 18 18 19 19 int net_shaper_nl_pre_doit(const struct genl_split_ops *ops, 20 20 struct sk_buff *skb, struct genl_info *info); 21 + int net_shaper_nl_pre_doit_write(const struct genl_split_ops *ops, 22 + struct sk_buff *skb, struct genl_info *info); 21 23 int net_shaper_nl_cap_pre_doit(const struct genl_split_ops *ops, 22 24 struct sk_buff *skb, struct genl_info *info); 23 25 void 24 26 net_shaper_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, 25 27 struct genl_info *info); 28 + void 29 + net_shaper_nl_post_doit_write(const struct genl_split_ops *ops, 30 + struct sk_buff *skb, struct genl_info *info); 26 31 void 27 32 net_shaper_nl_cap_post_doit(const struct genl_split_ops *ops, 28 33 struct sk_buff *skb, struct genl_info *info);
+17 -6
net/smc/af_smc.c
··· 131 131 struct smc_sock *smc; 132 132 struct sock *child; 133 133 134 - smc = smc_clcsock_user_data(sk); 134 + rcu_read_lock(); 135 + smc = smc_clcsock_user_data_rcu(sk); 136 + if (!smc || !refcount_inc_not_zero(&smc->sk.sk_refcnt)) { 137 + rcu_read_unlock(); 138 + smc = NULL; 139 + goto drop; 140 + } 141 + rcu_read_unlock(); 135 142 136 143 if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->queued_smc_hs) > 137 144 sk->sk_max_ack_backlog) ··· 160 153 if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops) 161 154 inet_csk(child)->icsk_af_ops = smc->ori_af_ops; 162 155 } 156 + sock_put(&smc->sk); 163 157 return child; 164 158 165 159 drop: 166 160 dst_release(dst); 167 161 tcp_listendrop(sk); 162 + if (smc) 163 + sock_put(&smc->sk); 168 164 return NULL; 169 165 } 170 166 ··· 264 254 struct sock *clcsk = smc->clcsock->sk; 265 255 266 256 write_lock_bh(&clcsk->sk_callback_lock); 267 - clcsk->sk_user_data = NULL; 257 + rcu_assign_sk_user_data(clcsk, NULL); 268 258 269 259 smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change); 270 260 smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready); ··· 912 902 struct sock *clcsk = smc->clcsock->sk; 913 903 914 904 write_lock_bh(&clcsk->sk_callback_lock); 915 - clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY); 905 + __rcu_assign_sk_user_data_with_flags(clcsk, smc, SK_USER_DATA_NOCOPY); 916 906 917 907 smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change, 918 908 &smc->clcsk_state_change); ··· 2675 2665 * smc-specific sk_data_ready function 2676 2666 */ 2677 2667 write_lock_bh(&smc->clcsock->sk->sk_callback_lock); 2678 - smc->clcsock->sk->sk_user_data = 2679 - (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY); 2668 + __rcu_assign_sk_user_data_with_flags(smc->clcsock->sk, smc, 2669 + SK_USER_DATA_NOCOPY); 2680 2670 smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready, 2681 2671 smc_clcsock_data_ready, &smc->clcsk_data_ready); 2682 2672 write_unlock_bh(&smc->clcsock->sk->sk_callback_lock); ··· 2697 2687 write_lock_bh(&smc->clcsock->sk->sk_callback_lock); 2698 2688 smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready, 2699 2689 &smc->clcsk_data_ready); 2700 - smc->clcsock->sk->sk_user_data = NULL; 2690 + rcu_assign_sk_user_data(smc->clcsock->sk, NULL); 2701 2691 write_unlock_bh(&smc->clcsock->sk->sk_callback_lock); 2702 2692 goto out; 2703 2693 } 2694 + sock_set_flag(sk, SOCK_RCU_FREE); 2704 2695 sk->sk_max_ack_backlog = backlog; 2705 2696 sk->sk_ack_backlog = 0; 2706 2697 sk->sk_state = SMC_LISTEN;
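
With sk_user_data now assigned through rcu_assign_sk_user_data() and the
listen socket flagged SOCK_RCU_FREE, the syn_recv path above can look up the
parent smc_sock locklessly. The core pattern, condensed from the first hunk
(a sketch, not the verbatim code):

	rcu_read_lock();
	smc = smc_clcsock_user_data_rcu(sk);
	if (smc && !refcount_inc_not_zero(&smc->sk.sk_refcnt))
		smc = NULL;		/* parent already being torn down */
	rcu_read_unlock();

	if (smc) {
		/* ... use the parent ... */
		sock_put(&smc->sk);	/* drop the temporary hold */
	}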
+5
net/smc/smc.h
··· 346 346 ((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY); 347 347 } 348 348 349 + static inline struct smc_sock *smc_clcsock_user_data_rcu(const struct sock *clcsk) 350 + { 351 + return (struct smc_sock *)rcu_dereference_sk_user_data(clcsk); 352 + } 353 + 349 354 /* save target_cb in saved_cb, and replace target_cb with new_cb */ 350 355 static inline void smc_clcsock_replace_cb(void (**target_cb)(struct sock *), 351 356 void (*new_cb)(struct sock *),
+1 -1
net/smc/smc_close.c
··· 218 218 write_lock_bh(&smc->clcsock->sk->sk_callback_lock); 219 219 smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready, 220 220 &smc->clcsk_data_ready); 221 - smc->clcsock->sk->sk_user_data = NULL; 221 + rcu_assign_sk_user_data(smc->clcsock->sk, NULL); 222 222 write_unlock_bh(&smc->clcsock->sk->sk_callback_lock); 223 223 rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR); 224 224 }
+21 -5
net/sunrpc/cache.c
··· 1062 1062 struct cache_reader *rp = filp->private_data; 1063 1063 1064 1064 if (rp) { 1065 + struct cache_request *rq = NULL; 1066 + 1065 1067 spin_lock(&queue_lock); 1066 1068 if (rp->offset) { 1067 1069 struct cache_queue *cq; 1068 - for (cq= &rp->q; &cq->list != &cd->queue; 1069 - cq = list_entry(cq->list.next, struct cache_queue, list)) 1070 + for (cq = &rp->q; &cq->list != &cd->queue; 1071 + cq = list_entry(cq->list.next, 1072 + struct cache_queue, list)) 1070 1073 if (!cq->reader) { 1071 - container_of(cq, struct cache_request, q) 1072 - ->readers--; 1074 + struct cache_request *cr = 1075 + container_of(cq, 1076 + struct cache_request, q); 1077 + cr->readers--; 1078 + if (cr->readers == 0 && 1079 + !test_bit(CACHE_PENDING, 1080 + &cr->item->flags)) { 1081 + list_del(&cr->q.list); 1082 + rq = cr; 1083 + } 1073 1084 break; 1074 1085 } 1075 1086 rp->offset = 0; ··· 1088 1077 list_del(&rp->q.list); 1089 1078 spin_unlock(&queue_lock); 1090 1079 1080 + if (rq) { 1081 + cache_put(rq->item, cd); 1082 + kfree(rq->buf); 1083 + kfree(rq); 1084 + } 1085 + 1091 1086 filp->private_data = NULL; 1092 1087 kfree(rp); 1093 - 1094 1088 } 1095 1089 if (filp->f_mode & FMODE_WRITE) { 1096 1090 atomic_dec(&cd->writers);
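
The cache_release() fix above follows the usual detach-then-free shape: the
request that just lost its last reader is unlinked while queue_lock is held,
and cache_put()/kfree() run only after the lock is dropped. Condensed
(names from the hunk; a sketch, not the full function):

	struct cache_request *rq = NULL;

	spin_lock(&queue_lock);
	/* ... if readers hit 0 and CACHE_PENDING is clear:
	 *         list_del(&cr->q.list); rq = cr;              */
	spin_unlock(&queue_lock);

	if (rq) {			/* free outside the spinlock */
		cache_put(rq->item, cd);
		kfree(rq->buf);
		kfree(rq);
	}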
+4 -3
net/sunrpc/xprtrdma/verbs.c
··· 1362 1362 needed += RPCRDMA_MAX_RECV_BATCH; 1363 1363 1364 1364 if (atomic_inc_return(&ep->re_receiving) > 1) 1365 - goto out; 1365 + goto out_dec; 1366 1366 1367 1367 /* fast path: all needed reps can be found on the free list */ 1368 1368 wr = NULL; ··· 1385 1385 ++count; 1386 1386 } 1387 1387 if (!wr) 1388 - goto out; 1388 + goto out_dec; 1389 1389 1390 1390 rc = ib_post_recv(ep->re_id->qp, wr, 1391 1391 (const struct ib_recv_wr **)&bad_wr); ··· 1400 1400 --count; 1401 1401 } 1402 1402 } 1403 + 1404 + out_dec: 1403 1405 if (atomic_dec_return(&ep->re_receiving) > 0) 1404 1406 complete(&ep->re_done); 1405 - 1406 1407 out: 1407 1408 trace_xprtrdma_post_recvs(r_xprt, count); 1408 1409 ep->re_receive_count += count;
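
The verbs.c rework rebalances ep->re_receiving: both early exits now jump to
out_dec, so every atomic_inc_return() is paired with an atomic_dec_return()
before the trace point, and a poster waiting on re_done is always completed.
In outline (condensed from the hunk above):

	if (atomic_inc_return(&ep->re_receiving) > 1)
		goto out_dec;		/* another poster is active */

	/* ... build the wr chain and ib_post_recv() it ... */

out_dec:
	if (atomic_dec_return(&ep->re_receiving) > 0)
		complete(&ep->re_done);	/* wake the waiting poster */
out:
	trace_xprtrdma_post_recvs(r_xprt, count);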
+2
net/tipc/socket.c
··· 2233 2233 if (skb_queue_empty(&sk->sk_write_queue)) 2234 2234 break; 2235 2235 get_random_bytes(&delay, 2); 2236 + if (tsk->conn_timeout < 4) 2237 + tsk->conn_timeout = 4; 2236 2238 delay %= (tsk->conn_timeout / 4); 2237 2239 delay = msecs_to_jiffies(delay + 100); 2238 2240 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
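
The tipc hunk closes a divide-by-zero: get_random_bytes() fills delay, but
with conn_timeout below 4 the integer division conn_timeout / 4 evaluates to
0 and "delay %= 0" is undefined behavior. Clamping the timeout keeps the
divisor at 1 or more:

	/* conn_timeout = 3: 3 / 4 == 0 -> delay %= 0 (undefined)
	 * conn_timeout = 4: 4 / 4 == 1 -> delay %= 1 (always 0, defined)
	 */
	if (tsk->conn_timeout < 4)
		tsk->conn_timeout = 4;
	delay %= tsk->conn_timeout / 4;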
+2
net/unix/af_unix.c
··· 1958 1958 static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) 1959 1959 { 1960 1960 scm->fp = scm_fp_dup(UNIXCB(skb).fp); 1961 + 1962 + unix_peek_fpl(scm->fp); 1961 1963 } 1962 1964 1963 1965 static void unix_destruct_scm(struct sk_buff *skb)
+1
net/unix/af_unix.h
··· 29 29 void unix_update_edges(struct unix_sock *receiver); 30 30 int unix_prepare_fpl(struct scm_fp_list *fpl); 31 31 void unix_destroy_fpl(struct scm_fp_list *fpl); 32 + void unix_peek_fpl(struct scm_fp_list *fpl); 32 33 void unix_schedule_gc(struct user_struct *user); 33 34 34 35 /* SOCK_DIAG */
+51 -28
net/unix/garbage.c
··· 318 318 unix_free_vertices(fpl); 319 319 } 320 320 321 + static bool gc_in_progress; 322 + static seqcount_t unix_peek_seq = SEQCNT_ZERO(unix_peek_seq); 323 + 324 + void unix_peek_fpl(struct scm_fp_list *fpl) 325 + { 326 + static DEFINE_SPINLOCK(unix_peek_lock); 327 + 328 + if (!fpl || !fpl->count_unix) 329 + return; 330 + 331 + if (!READ_ONCE(gc_in_progress)) 332 + return; 333 + 334 + /* Invalidate the final refcnt check in unix_vertex_dead(). */ 335 + spin_lock(&unix_peek_lock); 336 + raw_write_seqcount_barrier(&unix_peek_seq); 337 + spin_unlock(&unix_peek_lock); 338 + } 339 + 321 340 static bool unix_vertex_dead(struct unix_vertex *vertex) 322 341 { 323 342 struct unix_edge *edge; ··· 368 349 return false; 369 350 370 351 return true; 352 + } 353 + 354 + static LIST_HEAD(unix_visited_vertices); 355 + static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2; 356 + 357 + static bool unix_scc_dead(struct list_head *scc, bool fast) 358 + { 359 + struct unix_vertex *vertex; 360 + bool scc_dead = true; 361 + unsigned int seq; 362 + 363 + seq = read_seqcount_begin(&unix_peek_seq); 364 + 365 + list_for_each_entry_reverse(vertex, scc, scc_entry) { 366 + /* Don't restart DFS from this vertex. */ 367 + list_move_tail(&vertex->entry, &unix_visited_vertices); 368 + 369 + /* Mark vertex as off-stack for __unix_walk_scc(). */ 370 + if (!fast) 371 + vertex->index = unix_vertex_grouped_index; 372 + 373 + if (scc_dead) 374 + scc_dead = unix_vertex_dead(vertex); 375 + } 376 + 377 + /* If MSG_PEEK intervened, defer this SCC to the next round. */ 378 + if (read_seqcount_retry(&unix_peek_seq, seq)) 379 + return false; 380 + 381 + return scc_dead; 371 382 } 372 383 373 384 static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist) ··· 452 403 453 404 return false; 454 405 } 455 - 456 - static LIST_HEAD(unix_visited_vertices); 457 - static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2; 458 406 459 407 static unsigned long __unix_walk_scc(struct unix_vertex *vertex, 460 408 unsigned long *last_index, ··· 520 474 } 521 475 522 476 if (vertex->index == vertex->scc_index) { 523 - struct unix_vertex *v; 524 477 struct list_head scc; 525 - bool scc_dead = true; 526 478 527 479 /* SCC finalised. 528 480 * ··· 529 485 */ 530 486 __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry); 531 487 532 - list_for_each_entry_reverse(v, &scc, scc_entry) { 533 - /* Don't restart DFS from this vertex in unix_walk_scc(). */ 534 - list_move_tail(&v->entry, &unix_visited_vertices); 535 - 536 - /* Mark vertex as off-stack. 
*/ 537 - v->index = unix_vertex_grouped_index; 538 - 539 - if (scc_dead) 540 - scc_dead = unix_vertex_dead(v); 541 - } 542 - 543 - if (scc_dead) { 488 + if (unix_scc_dead(&scc, false)) { 544 489 unix_collect_skb(&scc, hitlist); 545 490 } else { 546 491 if (unix_vertex_max_scc_index < vertex->scc_index) ··· 583 550 while (!list_empty(&unix_unvisited_vertices)) { 584 551 struct unix_vertex *vertex; 585 552 struct list_head scc; 586 - bool scc_dead = true; 587 553 588 554 vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry); 589 555 list_add(&scc, &vertex->scc_entry); 590 556 591 - list_for_each_entry_reverse(vertex, &scc, scc_entry) { 592 - list_move_tail(&vertex->entry, &unix_visited_vertices); 593 - 594 - if (scc_dead) 595 - scc_dead = unix_vertex_dead(vertex); 596 - } 597 - 598 - if (scc_dead) { 557 + if (unix_scc_dead(&scc, true)) { 599 558 cyclic_sccs--; 600 559 unix_collect_skb(&scc, hitlist); 601 560 } ··· 601 576 WRITE_ONCE(unix_graph_state, 602 577 cyclic_sccs ? UNIX_GRAPH_CYCLIC : UNIX_GRAPH_NOT_CYCLIC); 603 578 } 604 - 605 - static bool gc_in_progress; 606 579 607 580 static void unix_gc(struct work_struct *work) 608 581 {
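
The garbage-collector hunks pair a seqcount writer on the MSG_PEEK path with
a reader in unix_scc_dead(): if fds are duplicated by a peek while an SCC's
liveness is being computed, the retry check throws the result away and the
SCC is kept until the next GC round. The pairing in miniature (symbols from
the hunks above; a sketch, not the verbatim code):

	/* MSG_PEEK side (unix_peek_fpl): bump the sequence. */
	spin_lock(&unix_peek_lock);
	raw_write_seqcount_barrier(&unix_peek_seq);
	spin_unlock(&unix_peek_lock);

	/* GC side (unix_scc_dead): recheck after computing liveness. */
	seq = read_seqcount_begin(&unix_peek_seq);
	/* ... walk the SCC, compute scc_dead ... */
	if (read_seqcount_retry(&unix_peek_seq, seq))
		scc_dead = false;	/* a peek raced in; defer the SCC */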
+1
net/wireless/pmsr.c
··· 664 664 } 665 665 spin_unlock_bh(&wdev->pmsr_lock); 666 666 667 + cancel_work_sync(&wdev->pmsr_free_wk); 667 668 if (found) 668 669 cfg80211_pmsr_process_abort(wdev); 669 670
+5 -6
rust/Makefile
··· 148 148 quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $< 149 149 cmd_rustdoc = \ 150 150 OBJTREE=$(abspath $(objtree)) \ 151 - $(RUSTDOC) $(filter-out $(skip_flags) --remap-path-prefix=%,$(if $(rustdoc_host),$(rust_common_flags),$(rust_flags))) \ 151 + $(RUSTDOC) $(filter-out $(skip_flags) --remap-path-prefix=% --remap-path-scope=%, \ 152 + $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags))) \ 152 153 $(rustc_target_flags) -L$(objtree)/$(obj) \ 153 154 -Zunstable-options --generate-link-to-definition \ 154 155 --output $(rustdoc_output) \ ··· 335 334 rm -rf $(objtree)/$(obj)/test/doctests/kernel; \ 336 335 mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \ 337 336 OBJTREE=$(abspath $(objtree)) \ 338 - $(RUSTDOC) --test $(filter-out --remap-path-prefix=%,$(rust_flags)) \ 337 + $(RUSTDOC) --test $(filter-out --remap-path-prefix=% --remap-path-scope=%,$(rust_flags)) \ 339 338 -L$(objtree)/$(obj) --extern ffi --extern pin_init \ 340 339 --extern kernel --extern build_error --extern macros \ 341 340 --extern bindings --extern uapi \ ··· 527 526 cmd_rustc_procmacrolibrary = \ 528 527 $(if $(skip_clippy),$(RUSTC),$(RUSTC_OR_CLIPPY)) \ 529 528 $(filter-out $(skip_flags),$(rust_common_flags) $(rustc_target_flags)) \ 530 - --emit=dep-info,link --crate-type rlib -O \ 529 + --emit=dep-info=$(depfile) --emit=link=$@ --crate-type rlib -O \ 531 530 --out-dir $(objtree)/$(obj) -L$(objtree)/$(obj) \ 532 - --crate-name $(patsubst lib%.rlib,%,$(notdir $@)) $<; \ 533 - mv $(objtree)/$(obj)/$(patsubst lib%.rlib,%,$(notdir $@)).d $(depfile); \ 534 - sed -i '/^\#/d' $(depfile) 531 + --crate-name $(patsubst lib%.rlib,%,$(notdir $@)) $< 535 532 536 533 $(obj)/libproc_macro2.rlib: private skip_clippy = 1 537 534 $(obj)/libproc_macro2.rlib: private rustc_target_flags = $(proc_macro2-flags)
+1
rust/kernel/cpufreq.rs
··· 401 401 /// ``` 402 402 /// use kernel::cpufreq::{DEFAULT_TRANSITION_LATENCY_NS, Policy}; 403 403 /// 404 + /// #[allow(clippy::double_parens, reason = "False positive before 1.92.0")] 404 405 /// fn update_policy(policy: &mut Policy) { 405 406 /// policy 406 407 /// .set_dvfs_possible_from_any_cpu(true)
+50 -64
rust/kernel/dma.rs
··· 461 461 self.count * core::mem::size_of::<T>() 462 462 } 463 463 464 + /// Returns the raw pointer to the allocated region in the CPU's virtual address space. 465 + #[inline] 466 + pub fn as_ptr(&self) -> *const [T] { 467 + core::ptr::slice_from_raw_parts(self.cpu_addr.as_ptr(), self.count) 468 + } 469 + 470 + /// Returns the raw pointer to the allocated region in the CPU's virtual address space as 471 + /// a mutable pointer. 472 + #[inline] 473 + pub fn as_mut_ptr(&self) -> *mut [T] { 474 + core::ptr::slice_from_raw_parts_mut(self.cpu_addr.as_ptr(), self.count) 475 + } 476 + 464 477 /// Returns the base address to the allocated region in the CPU's virtual address space. 465 478 pub fn start_ptr(&self) -> *const T { 466 479 self.cpu_addr.as_ptr() ··· 594 581 Ok(()) 595 582 } 596 583 597 - /// Returns a pointer to an element from the region with bounds checking. `offset` is in 598 - /// units of `T`, not the number of bytes. 599 - /// 600 - /// Public but hidden since it should only be used from [`dma_read`] and [`dma_write`] macros. 601 - #[doc(hidden)] 602 - pub fn item_from_index(&self, offset: usize) -> Result<*mut T> { 603 - if offset >= self.count { 604 - return Err(EINVAL); 605 - } 606 - // SAFETY: 607 - // - The pointer is valid due to type invariant on `CoherentAllocation` 608 - // and we've just checked that the range and index is within bounds. 609 - // - `offset` can't overflow since it is smaller than `self.count` and we've checked 610 - // that `self.count` won't overflow early in the constructor. 611 - Ok(unsafe { self.cpu_addr.as_ptr().add(offset) }) 612 - } 613 - 614 584 /// Reads the value of `field` and ensures that its type is [`FromBytes`]. 615 585 /// 616 586 /// # Safety ··· 666 670 667 671 /// Reads a field of an item from an allocated region of structs. 668 672 /// 673 + /// The syntax is of the form `kernel::dma_read!(dma, proj)` where `dma` is an expression evaluating 674 + /// to a [`CoherentAllocation`] and `proj` is a [projection specification](kernel::ptr::project!). 675 + /// 669 676 /// # Examples 670 677 /// 671 678 /// ``` ··· 683 684 /// unsafe impl kernel::transmute::AsBytes for MyStruct{}; 684 685 /// 685 686 /// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result { 686 - /// let whole = kernel::dma_read!(alloc[2]); 687 - /// let field = kernel::dma_read!(alloc[1].field); 687 + /// let whole = kernel::dma_read!(alloc, [2]?); 688 + /// let field = kernel::dma_read!(alloc, [1]?.field); 688 689 /// # Ok::<(), Error>(()) } 689 690 /// ``` 690 691 #[macro_export] 691 692 macro_rules! dma_read { 692 - ($dma:expr, $idx: expr, $($field:tt)*) => {{ 693 - (|| -> ::core::result::Result<_, $crate::error::Error> { 694 - let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?; 695 - // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be 696 - // dereferenced. The compiler also further validates the expression on whether `field` 697 - // is a member of `item` when expanded by the macro. 698 - unsafe { 699 - let ptr_field = ::core::ptr::addr_of!((*item) $($field)*); 700 - ::core::result::Result::Ok( 701 - $crate::dma::CoherentAllocation::field_read(&$dma, ptr_field) 702 - ) 703 - } 704 - })() 693 + ($dma:expr, $($proj:tt)*) => {{ 694 + let dma = &$dma; 695 + let ptr = $crate::ptr::project!( 696 + $crate::dma::CoherentAllocation::as_ptr(dma), $($proj)* 697 + ); 698 + // SAFETY: The pointer created by the projection is within the DMA region. 
699 + unsafe { $crate::dma::CoherentAllocation::field_read(dma, ptr) } 705 700 }}; 706 - ($dma:ident [ $idx:expr ] $($field:tt)* ) => { 707 - $crate::dma_read!($dma, $idx, $($field)*) 708 - }; 709 - ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => { 710 - $crate::dma_read!($($dma).*, $idx, $($field)*) 711 - }; 712 701 } 713 702 714 703 /// Writes to a field of an item from an allocated region of structs. 704 + /// 705 + /// The syntax is of the form `kernel::dma_write!(dma, proj, val)` where `dma` is an expression 706 + /// evaluating to a [`CoherentAllocation`], `proj` is a 707 + /// [projection specification](kernel::ptr::project!), and `val` is the value to be written to the 708 + /// projected location. 715 709 /// 716 710 /// # Examples 717 711 /// ··· 720 728 /// unsafe impl kernel::transmute::AsBytes for MyStruct{}; 721 729 /// 722 730 /// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result { 723 - /// kernel::dma_write!(alloc[2].member = 0xf); 724 - /// kernel::dma_write!(alloc[1] = MyStruct { member: 0xf }); 731 + /// kernel::dma_write!(alloc, [2]?.member, 0xf); 732 + /// kernel::dma_write!(alloc, [1]?, MyStruct { member: 0xf }); 725 733 /// # Ok::<(), Error>(()) } 726 734 /// ``` 727 735 #[macro_export] 728 736 macro_rules! dma_write { 729 - ($dma:ident [ $idx:expr ] $($field:tt)*) => {{ 730 - $crate::dma_write!($dma, $idx, $($field)*) 737 + (@parse [$dma:expr] [$($proj:tt)*] [, $val:expr]) => {{ 738 + let dma = &$dma; 739 + let ptr = $crate::ptr::project!( 740 + mut $crate::dma::CoherentAllocation::as_mut_ptr(dma), $($proj)* 741 + ); 742 + let val = $val; 743 + // SAFETY: The pointer created by the projection is within the DMA region. 744 + unsafe { $crate::dma::CoherentAllocation::field_write(dma, ptr, val) } 731 745 }}; 732 - ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {{ 733 - $crate::dma_write!($($dma).*, $idx, $($field)*) 734 - }}; 735 - ($dma:expr, $idx: expr, = $val:expr) => { 736 - (|| -> ::core::result::Result<_, $crate::error::Error> { 737 - let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?; 738 - // SAFETY: `item_from_index` ensures that `item` is always a valid item. 739 - unsafe { $crate::dma::CoherentAllocation::field_write(&$dma, item, $val) } 740 - ::core::result::Result::Ok(()) 741 - })() 746 + (@parse [$dma:expr] [$($proj:tt)*] [.$field:tt $($rest:tt)*]) => { 747 + $crate::dma_write!(@parse [$dma] [$($proj)* .$field] [$($rest)*]) 742 748 }; 743 - ($dma:expr, $idx: expr, $(.$field:ident)* = $val:expr) => { 744 - (|| -> ::core::result::Result<_, $crate::error::Error> { 745 - let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?; 746 - // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be 747 - // dereferenced. The compiler also further validates the expression on whether `field` 748 - // is a member of `item` when expanded by the macro. 749 - unsafe { 750 - let ptr_field = ::core::ptr::addr_of_mut!((*item) $(.$field)*); 751 - $crate::dma::CoherentAllocation::field_write(&$dma, ptr_field, $val) 752 - } 753 - ::core::result::Result::Ok(()) 754 - })() 749 + (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr]? $($rest:tt)*]) => { 750 + $crate::dma_write!(@parse [$dma] [$($proj)* [$index]?] 
[$($rest)*]) 751 + }; 752 + (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr] $($rest:tt)*]) => { 753 + $crate::dma_write!(@parse [$dma] [$($proj)* [$index]] [$($rest)*]) 754 + }; 755 + ($dma:expr, $($rest:tt)*) => { 756 + $crate::dma_write!(@parse [$dma] [] [$($rest)*]) 755 757 }; 756 758 }
+4
rust/kernel/lib.rs
··· 20 20 #![feature(generic_nonzero)] 21 21 #![feature(inline_const)] 22 22 #![feature(pointer_is_aligned)] 23 + #![feature(slice_ptr_len)] 23 24 // 24 25 // Stable since Rust 1.80.0. 25 26 #![feature(slice_flatten)] ··· 37 36 #![feature(const_option)] 38 37 #![feature(const_ptr_write)] 39 38 #![feature(const_refs_to_cell)] 39 + // 40 + // Stable since Rust 1.84.0. 41 + #![feature(strict_provenance)] 40 42 // 41 43 // Expected to become stable. 42 44 #![feature(arbitrary_self_types)]
+29 -1
rust/kernel/ptr.rs
··· 2 2 3 3 //! Types and functions to work with pointers and addresses. 4 4 5 - use core::mem::align_of; 5 + pub mod projection; 6 + pub use crate::project_pointer as project; 7 + 8 + use core::mem::{ 9 + align_of, 10 + size_of, // 11 + }; 6 12 use core::num::NonZero; 7 13 8 14 /// Type representing an alignment, which is always a power of two. ··· 231 225 } 232 226 233 227 impl_alignable_uint!(u8, u16, u32, u64, usize); 228 + 229 + /// Trait to represent compile-time known size information. 230 + /// 231 + /// This is a generalization of [`size_of`] that works for dynamically sized types. 232 + pub trait KnownSize { 233 + /// Get the size of an object of this type in bytes, with the metadata of the given pointer. 234 + fn size(p: *const Self) -> usize; 235 + } 236 + 237 + impl<T> KnownSize for T { 238 + #[inline(always)] 239 + fn size(_: *const Self) -> usize { 240 + size_of::<T>() 241 + } 242 + } 243 + 244 + impl<T> KnownSize for [T] { 245 + #[inline(always)] 246 + fn size(p: *const Self) -> usize { 247 + p.len() * size_of::<T>() 248 + } 249 + }
+305
rust/kernel/ptr/projection.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + //! Infrastructure for handling projections. 4 + 5 + use core::{ 6 + mem::MaybeUninit, 7 + ops::Deref, // 8 + }; 9 + 10 + use crate::prelude::*; 11 + 12 + /// Error raised when a projection is attempted on an array or slice out of bounds. 13 + pub struct OutOfBound; 14 + 15 + impl From<OutOfBound> for Error { 16 + #[inline(always)] 17 + fn from(_: OutOfBound) -> Self { 18 + ERANGE 19 + } 20 + } 21 + 22 + /// A helper trait to perform index projection. 23 + /// 24 + /// This is similar to [`core::slice::SliceIndex`], but operates on raw pointers safely and 25 + /// fallibly. 26 + /// 27 + /// # Safety 28 + /// 29 + /// The implementation of `index` and `get` (if [`Some`] is returned) must ensure that, if provided 30 + /// input pointer `slice` and returned pointer `output`, then: 31 + /// - `output` has the same provenance as `slice`; 32 + /// - `output.byte_offset_from(slice)` is between 0 to 33 + /// `KnownSize::size(slice) - KnownSize::size(output)`. 34 + /// 35 + /// This means that if the input pointer is valid, then pointer returned by `get` or `index` is 36 + /// also valid. 37 + #[diagnostic::on_unimplemented(message = "`{Self}` cannot be used to index `{T}`")] 38 + #[doc(hidden)] 39 + pub unsafe trait ProjectIndex<T: ?Sized>: Sized { 40 + type Output: ?Sized; 41 + 42 + /// Returns an index-projected pointer, if in bounds. 43 + fn get(self, slice: *mut T) -> Option<*mut Self::Output>; 44 + 45 + /// Returns an index-projected pointer; fail the build if it cannot be proved to be in bounds. 46 + #[inline(always)] 47 + fn index(self, slice: *mut T) -> *mut Self::Output { 48 + Self::get(self, slice).unwrap_or_else(|| build_error!()) 49 + } 50 + } 51 + 52 + // Forward array impl to slice impl. 53 + // 54 + // SAFETY: Safety requirement guaranteed by the forwarded impl. 55 + unsafe impl<T, I, const N: usize> ProjectIndex<[T; N]> for I 56 + where 57 + I: ProjectIndex<[T]>, 58 + { 59 + type Output = <I as ProjectIndex<[T]>>::Output; 60 + 61 + #[inline(always)] 62 + fn get(self, slice: *mut [T; N]) -> Option<*mut Self::Output> { 63 + <I as ProjectIndex<[T]>>::get(self, slice) 64 + } 65 + 66 + #[inline(always)] 67 + fn index(self, slice: *mut [T; N]) -> *mut Self::Output { 68 + <I as ProjectIndex<[T]>>::index(self, slice) 69 + } 70 + } 71 + 72 + // SAFETY: `get`-returned pointer has the same provenance as `slice` and the offset is checked to 73 + // not exceed the required bound. 74 + unsafe impl<T> ProjectIndex<[T]> for usize { 75 + type Output = T; 76 + 77 + #[inline(always)] 78 + fn get(self, slice: *mut [T]) -> Option<*mut T> { 79 + if self >= slice.len() { 80 + None 81 + } else { 82 + Some(slice.cast::<T>().wrapping_add(self)) 83 + } 84 + } 85 + } 86 + 87 + // SAFETY: `get`-returned pointer has the same provenance as `slice` and the offset is checked to 88 + // not exceed the required bound. 89 + unsafe impl<T> ProjectIndex<[T]> for core::ops::Range<usize> { 90 + type Output = [T]; 91 + 92 + #[inline(always)] 93 + fn get(self, slice: *mut [T]) -> Option<*mut [T]> { 94 + let new_len = self.end.checked_sub(self.start)?; 95 + if self.end > slice.len() { 96 + return None; 97 + } 98 + Some(core::ptr::slice_from_raw_parts_mut( 99 + slice.cast::<T>().wrapping_add(self.start), 100 + new_len, 101 + )) 102 + } 103 + } 104 + 105 + // SAFETY: Safety requirement guaranteed by the forwarded impl. 
106 + unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeTo<usize> { 107 + type Output = [T]; 108 + 109 + #[inline(always)] 110 + fn get(self, slice: *mut [T]) -> Option<*mut [T]> { 111 + (0..self.end).get(slice) 112 + } 113 + } 114 + 115 + // SAFETY: Safety requirement guaranteed by the forwarded impl. 116 + unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeFrom<usize> { 117 + type Output = [T]; 118 + 119 + #[inline(always)] 120 + fn get(self, slice: *mut [T]) -> Option<*mut [T]> { 121 + (self.start..slice.len()).get(slice) 122 + } 123 + } 124 + 125 + // SAFETY: `get` returned the pointer as is, so it always has the same provenance and offset of 0. 126 + unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeFull { 127 + type Output = [T]; 128 + 129 + #[inline(always)] 130 + fn get(self, slice: *mut [T]) -> Option<*mut [T]> { 131 + Some(slice) 132 + } 133 + } 134 + 135 + /// A helper trait to perform field projection. 136 + /// 137 + /// This trait has a `DEREF` generic parameter so it can be implemented twice for types that 138 + /// implement [`Deref`]. This will cause an ambiguity error and thus block [`Deref`] types being 139 + /// used as base of projection, as they can inject unsoundness. Users therefore must not specify 140 + /// `DEREF` and should always leave it to be inferred. 141 + /// 142 + /// # Safety 143 + /// 144 + /// `proj` may only invoke `f` with a valid allocation, as the documentation of [`Self::proj`] 145 + /// describes. 146 + #[doc(hidden)] 147 + pub unsafe trait ProjectField<const DEREF: bool> { 148 + /// Project a pointer to a type to a pointer of a field. 149 + /// 150 + /// `f` may only be invoked with a valid allocation so it can safely obtain raw pointers to 151 + /// fields using `&raw mut`. 152 + /// 153 + /// This is needed because `base` might not point to a valid allocation, while `&raw mut` 154 + /// requires pointers to be in bounds of a valid allocation. 155 + /// 156 + /// # Safety 157 + /// 158 + /// `f` must return a pointer in bounds of the provided pointer. 159 + unsafe fn proj<F>(base: *mut Self, f: impl FnOnce(*mut Self) -> *mut F) -> *mut F; 160 + } 161 + 162 + // NOTE: in theory, this API should work for `T: ?Sized` and `F: ?Sized`, too. However, we cannot 163 + // currently support that as we need to obtain a valid allocation that `&raw const` can operate on. 164 + // 165 + // SAFETY: `proj` invokes `f` with valid allocation. 166 + unsafe impl<T> ProjectField<false> for T { 167 + #[inline(always)] 168 + unsafe fn proj<F>(base: *mut Self, f: impl FnOnce(*mut Self) -> *mut F) -> *mut F { 169 + // Create a valid allocation to start projection, as `base` is not necessarily so. The 170 + // memory is never actually used so it will be optimized out, so it should work even for 171 + // very large `T` (`memoffset` crate also relies on this). To be extra certain, we also 172 + // annotate `f` closure with `#[inline(always)]` in the macro. 173 + let mut place = MaybeUninit::uninit(); 174 + let place_base = place.as_mut_ptr(); 175 + let field = f(place_base); 176 + // SAFETY: `field` is in bounds from `base` per safety requirement. 177 + let offset = unsafe { field.byte_offset_from(place_base) }; 178 + // Use `wrapping_byte_offset` as `base` does not need to be of valid allocation. 179 + base.wrapping_byte_offset(offset).cast() 180 + } 181 + } 182 + 183 + // SAFETY: Vacuously satisfied. 
184 + unsafe impl<T: Deref> ProjectField<true> for T { 185 + #[inline(always)] 186 + unsafe fn proj<F>(_: *mut Self, _: impl FnOnce(*mut Self) -> *mut F) -> *mut F { 187 + build_error!("this function is a guard against `Deref` impl and is never invoked"); 188 + } 189 + } 190 + 191 + /// Create a projection from a raw pointer. 192 + /// 193 + /// The projected pointer is within the memory region marked by the input pointer. There is no 194 + /// requirement that the input raw pointer needs to be valid, so this macro may be used for 195 + /// projecting pointers outside normal address space, e.g. I/O pointers. However, if the input 196 + /// pointer is valid, the projected pointer is also valid. 197 + /// 198 + /// Supported projections include field projections and index projections. 199 + /// It is not allowed to project into types that implement custom [`Deref`] or 200 + /// [`Index`](core::ops::Index). 201 + /// 202 + /// The macro has basic syntax of `kernel::ptr::project!(ptr, projection)`, where `ptr` is an 203 + /// expression that evaluates to a raw pointer which serves as the base of projection. `projection` 204 + /// can be a projection expression of form `.field` (normally identifier, or numeral in case of 205 + /// tuple structs) or of form `[index]`. 206 + /// 207 + /// If a mutable pointer is needed, the macro input can be prefixed with the `mut` keyword, i.e. 208 + /// `kernel::ptr::project!(mut ptr, projection)`. By default, a const pointer is created. 209 + /// 210 + /// `ptr::project!` macro can perform both fallible indexing and build-time checked indexing. 211 + /// `[index]` form performs build-time bounds checking; if compiler fails to prove `[index]` is in 212 + /// bounds, compilation will fail. `[index]?` can be used to perform runtime bounds checking; 213 + /// `OutOfBound` error is raised via `?` if the index is out of bounds. 214 + /// 215 + /// # Examples 216 + /// 217 + /// Field projections are performed with `.field_name`: 218 + /// 219 + /// ``` 220 + /// struct MyStruct { field: u32, } 221 + /// let ptr: *const MyStruct = core::ptr::dangling(); 222 + /// let field_ptr: *const u32 = kernel::ptr::project!(ptr, .field); 223 + /// 224 + /// struct MyTupleStruct(u32, u32); 225 + /// 226 + /// fn proj(ptr: *const MyTupleStruct) { 227 + /// let field_ptr: *const u32 = kernel::ptr::project!(ptr, .1); 228 + /// } 229 + /// ``` 230 + /// 231 + /// Index projections are performed with `[index]`: 232 + /// 233 + /// ``` 234 + /// fn proj(ptr: *const [u8; 32]) -> Result { 235 + /// let field_ptr: *const u8 = kernel::ptr::project!(ptr, [1]); 236 + /// // The following invocation, if uncommented, would fail the build. 237 + /// // 238 + /// // kernel::ptr::project!(ptr, [128]); 239 + /// 240 + /// // This will raise an `OutOfBound` error (which is convertible to `ERANGE`). 241 + /// kernel::ptr::project!(ptr, [128]?); 242 + /// Ok(()) 243 + /// } 244 + /// ``` 245 + /// 246 + /// If you need to match on the error instead of propagate, put the invocation inside a closure: 247 + /// 248 + /// ``` 249 + /// let ptr: *const [u8; 32] = core::ptr::dangling(); 250 + /// let field_ptr: Result<*const u8> = (|| -> Result<_> { 251 + /// Ok(kernel::ptr::project!(ptr, [128]?)) 252 + /// })(); 253 + /// assert!(field_ptr.is_err()); 254 + /// ``` 255 + /// 256 + /// For mutable pointers, put `mut` as the first token in macro invocation. 
257 + /// 258 + /// ``` 259 + /// let ptr: *mut [(u8, u16); 32] = core::ptr::dangling_mut(); 260 + /// let field_ptr: *mut u16 = kernel::ptr::project!(mut ptr, [1].1); 261 + /// ``` 262 + #[macro_export] 263 + macro_rules! project_pointer { 264 + (@gen $ptr:ident, ) => {}; 265 + // Field projection. `$field` needs to be `tt` to support tuple index like `.0`. 266 + (@gen $ptr:ident, .$field:tt $($rest:tt)*) => { 267 + // SAFETY: The provided closure always returns an in-bounds pointer. 268 + let $ptr = unsafe { 269 + $crate::ptr::projection::ProjectField::proj($ptr, #[inline(always)] |ptr| { 270 + // Check unaligned field. Not all users (e.g. DMA) can handle unaligned 271 + // projections. 272 + if false { 273 + let _ = &(*ptr).$field; 274 + } 275 + // SAFETY: `$field` is in bounds, and no implicit `Deref` is possible (if the 276 + // type implements `Deref`, Rust cannot infer the generic parameter `DEREF`). 277 + &raw mut (*ptr).$field 278 + }) 279 + }; 280 + $crate::ptr::project!(@gen $ptr, $($rest)*) 281 + }; 282 + // Fallible index projection. 283 + (@gen $ptr:ident, [$index:expr]? $($rest:tt)*) => { 284 + let $ptr = $crate::ptr::projection::ProjectIndex::get($index, $ptr) 285 + .ok_or($crate::ptr::projection::OutOfBound)?; 286 + $crate::ptr::project!(@gen $ptr, $($rest)*) 287 + }; 288 + // Build-time checked index projection. 289 + (@gen $ptr:ident, [$index:expr] $($rest:tt)*) => { 290 + let $ptr = $crate::ptr::projection::ProjectIndex::index($index, $ptr); 291 + $crate::ptr::project!(@gen $ptr, $($rest)*) 292 + }; 293 + (mut $ptr:expr, $($proj:tt)*) => {{ 294 + let ptr: *mut _ = $ptr; 295 + $crate::ptr::project!(@gen ptr, $($proj)*); 296 + ptr 297 + }}; 298 + ($ptr:expr, $($proj:tt)*) => {{ 299 + let ptr = <*const _>::cast_mut($ptr); 300 + // We currently always project using mutable pointer, as it is not decided whether `&raw 301 + // const` allows the resulting pointer to be mutated (see documentation of `addr_of!`). 302 + $crate::ptr::project!(@gen ptr, $($proj)*); 303 + ptr.cast_const() 304 + }}; 305 + }
+2 -2
rust/kernel/str.rs
··· 664 664 /// 665 665 /// * The first byte of `buffer` is always zero. 666 666 /// * The length of `buffer` is at least 1. 667 - pub(crate) struct NullTerminatedFormatter<'a> { 667 + pub struct NullTerminatedFormatter<'a> { 668 668 buffer: &'a mut [u8], 669 669 } 670 670 671 671 impl<'a> NullTerminatedFormatter<'a> { 672 672 /// Create a new [`Self`] instance. 673 - pub(crate) fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> { 673 + pub fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> { 674 674 *(buffer.first_mut()?) = 0; 675 675 676 676 // INVARIANT:
+23 -46
rust/pin-init/internal/src/init.rs
··· 62 62 63 63 enum InitializerAttribute { 64 64 DefaultError(DefaultErrorAttribute), 65 - DisableInitializedFieldAccess, 66 65 } 67 66 68 67 struct DefaultErrorAttribute { ··· 85 86 let error = error.map_or_else( 86 87 || { 87 88 if let Some(default_error) = attrs.iter().fold(None, |acc, attr| { 89 + #[expect(irrefutable_let_patterns)] 88 90 if let InitializerAttribute::DefaultError(DefaultErrorAttribute { ty }) = attr { 89 91 Some(ty.clone()) 90 92 } else { ··· 145 145 }; 146 146 // `mixed_site` ensures that the data is not accessible to the user-controlled code. 147 147 let data = Ident::new("__data", Span::mixed_site()); 148 - let init_fields = init_fields( 149 - &fields, 150 - pinned, 151 - !attrs 152 - .iter() 153 - .any(|attr| matches!(attr, InitializerAttribute::DisableInitializedFieldAccess)), 154 - &data, 155 - &slot, 156 - ); 148 + let init_fields = init_fields(&fields, pinned, &data, &slot); 157 149 let field_check = make_field_check(&fields, init_kind, &path); 158 150 Ok(quote! {{ 159 - // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return 160 - // type and shadow it later when we insert the arbitrary user code. That way there will be 161 - // no possibility of returning without `unsafe`. 162 - struct __InitOk; 163 - 164 151 // Get the data about fields from the supplied type. 165 152 // SAFETY: TODO 166 153 let #data = unsafe { ··· 157 170 #path::#get_data() 158 171 }; 159 172 // Ensure that `#data` really is of type `#data` and help with type inference: 160 - let init = ::pin_init::__internal::#data_trait::make_closure::<_, __InitOk, #error>( 173 + let init = ::pin_init::__internal::#data_trait::make_closure::<_, #error>( 161 174 #data, 162 175 move |slot| { 163 - { 164 - // Shadow the structure so it cannot be used to return early. 165 - struct __InitOk; 166 - #zeroable_check 167 - #this 168 - #init_fields 169 - #field_check 170 - } 171 - Ok(__InitOk) 176 + #zeroable_check 177 + #this 178 + #init_fields 179 + #field_check 180 + // SAFETY: we are the `init!` macro that is allowed to call this. 181 + Ok(unsafe { ::pin_init::__internal::InitOk::new() }) 172 182 } 173 183 ); 174 184 let init = move |slot| -> ::core::result::Result<(), #error> { ··· 220 236 fn init_fields( 221 237 fields: &Punctuated<InitializerField, Token![,]>, 222 238 pinned: bool, 223 - generate_initialized_accessors: bool, 224 239 data: &Ident, 225 240 slot: &Ident, 226 241 ) -> TokenStream { ··· 243 260 }); 244 261 // Again span for better diagnostics 245 262 let write = quote_spanned!(ident.span()=> ::core::ptr::write); 263 + // NOTE: the field accessor ensures that the initialized field is properly aligned. 264 + // Unaligned fields will cause the compiler to emit E0793. We do not support 265 + // unaligned fields since `Init::__init` requires an aligned pointer; the call to 266 + // `ptr::write` below has the same requirement. 246 267 let accessor = if pinned { 247 268 let project_ident = format_ident!("__project_{ident}"); 248 269 quote! { ··· 259 272 unsafe { &mut (*#slot).#ident } 260 273 } 261 274 }; 262 - let accessor = generate_initialized_accessors.then(|| { 263 - quote! { 264 - #(#cfgs)* 265 - #[allow(unused_variables)] 266 - let #ident = #accessor; 267 - } 268 - }); 269 275 quote! 
{ 270 276 #(#attrs)* 271 277 { ··· 266 286 // SAFETY: TODO 267 287 unsafe { #write(::core::ptr::addr_of_mut!((*#slot).#ident), #value_ident) }; 268 288 } 269 - #accessor 289 + #(#cfgs)* 290 + #[allow(unused_variables)] 291 + let #ident = #accessor; 270 292 } 271 293 } 272 294 InitializerKind::Init { ident, value, .. } => { 273 295 // Again span for better diagnostics 274 296 let init = format_ident!("init", span = value.span()); 297 + // NOTE: the field accessor ensures that the initialized field is properly aligned. 298 + // Unaligned fields will cause the compiler to emit E0793. We do not support 299 + // unaligned fields since `Init::__init` requires an aligned pointer; the call to 300 + // `ptr::write` below has the same requirement. 275 301 let (value_init, accessor) = if pinned { 276 302 let project_ident = format_ident!("__project_{ident}"); 277 303 ( ··· 312 326 }, 313 327 ) 314 328 }; 315 - let accessor = generate_initialized_accessors.then(|| { 316 - quote! { 317 - #(#cfgs)* 318 - #[allow(unused_variables)] 319 - let #ident = #accessor; 320 - } 321 - }); 322 329 quote! { 323 330 #(#attrs)* 324 331 { 325 332 let #init = #value; 326 333 #value_init 327 334 } 328 - #accessor 335 + #(#cfgs)* 336 + #[allow(unused_variables)] 337 + let #ident = #accessor; 329 338 } 330 339 } 331 340 InitializerKind::Code { block: value, .. } => quote! { ··· 447 466 if a.path().is_ident("default_error") { 448 467 a.parse_args::<DefaultErrorAttribute>() 449 468 .map(InitializerAttribute::DefaultError) 450 - } else if a.path().is_ident("disable_initialized_field_access") { 451 - a.meta 452 - .require_path_only() 453 - .map(|_| InitializerAttribute::DisableInitializedFieldAccess) 454 469 } else { 455 470 Err(syn::Error::new_spanned(a, "unknown initializer attribute")) 456 471 }
+24 -4
rust/pin-init/src/__internal.rs
··· 46 46 } 47 47 } 48 48 49 + /// Token type to signify successful initialization. 50 + /// 51 + /// Can only be constructed via the unsafe [`Self::new`] function. The initializer macros use this 52 + /// token type to prevent returning `Ok` from an initializer without initializing all fields. 53 + pub struct InitOk(()); 54 + 55 + impl InitOk { 56 + /// Creates a new token. 57 + /// 58 + /// # Safety 59 + /// 60 + /// This function may only be called from the `init!` macro in `../internal/src/init.rs`. 61 + #[inline(always)] 62 + pub unsafe fn new() -> Self { 63 + Self(()) 64 + } 65 + } 66 + 49 67 /// This trait is only implemented via the `#[pin_data]` proc-macro. It is used to facilitate 50 68 /// the pin projections within the initializers. 51 69 /// ··· 86 68 type Datee: ?Sized + HasPinData; 87 69 88 70 /// Type inference helper function. 89 - fn make_closure<F, O, E>(self, f: F) -> F 71 + #[inline(always)] 72 + fn make_closure<F, E>(self, f: F) -> F 90 73 where 91 - F: FnOnce(*mut Self::Datee) -> Result<O, E>, 74 + F: FnOnce(*mut Self::Datee) -> Result<InitOk, E>, 92 75 { 93 76 f 94 77 } ··· 117 98 type Datee: ?Sized + HasInitData; 118 99 119 100 /// Type inference helper function. 120 - fn make_closure<F, O, E>(self, f: F) -> F 101 + #[inline(always)] 102 + fn make_closure<F, E>(self, f: F) -> F 121 103 where 122 - F: FnOnce(*mut Self::Datee) -> Result<O, E>, 104 + F: FnOnce(*mut Self::Datee) -> Result<InitOk, E>, 123 105 { 124 106 f 125 107 }
+16 -14
samples/rust/rust_dma.rs
··· 68 68 CoherentAllocation::alloc_coherent(pdev.as_ref(), TEST_VALUES.len(), GFP_KERNEL)?; 69 69 70 70 for (i, value) in TEST_VALUES.into_iter().enumerate() { 71 - kernel::dma_write!(ca[i] = MyStruct::new(value.0, value.1))?; 71 + kernel::dma_write!(ca, [i]?, MyStruct::new(value.0, value.1)); 72 72 } 73 73 74 74 let size = 4 * page::PAGE_SIZE; ··· 85 85 } 86 86 } 87 87 88 + impl DmaSampleDriver { 89 + fn check_dma(&self) -> Result { 90 + for (i, value) in TEST_VALUES.into_iter().enumerate() { 91 + let val0 = kernel::dma_read!(self.ca, [i]?.h); 92 + let val1 = kernel::dma_read!(self.ca, [i]?.b); 93 + 94 + assert_eq!(val0, value.0); 95 + assert_eq!(val1, value.1); 96 + } 97 + 98 + Ok(()) 99 + } 100 + } 101 + 88 102 #[pinned_drop] 89 103 impl PinnedDrop for DmaSampleDriver { 90 104 fn drop(self: Pin<&mut Self>) { 91 105 dev_info!(self.pdev, "Unload DMA test driver.\n"); 92 106 93 - for (i, value) in TEST_VALUES.into_iter().enumerate() { 94 - let val0 = kernel::dma_read!(self.ca[i].h); 95 - let val1 = kernel::dma_read!(self.ca[i].b); 96 - assert!(val0.is_ok()); 97 - assert!(val1.is_ok()); 98 - 99 - if let Ok(val0) = val0 { 100 - assert_eq!(val0, value.0); 101 - } 102 - if let Ok(val1) = val1 { 103 - assert_eq!(val1, value.1); 104 - } 105 - } 107 + assert!(self.check_dma().is_ok()); 106 108 107 109 for (i, entry) in self.sgt.iter().enumerate() { 108 110 dev_info!(
+1
samples/workqueue/stall_detector/Makefile
··· 1 + obj-m += wq_stall.o
+98
samples/workqueue/stall_detector/wq_stall.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * wq_stall - Test module for the workqueue stall detector. 4 + * 5 + * Deliberately creates a workqueue stall so the watchdog fires and 6 + * prints diagnostic output. Useful for verifying that the stall 7 + * detector correctly identifies stuck workers and produces useful 8 + * backtraces. 9 + * 10 + * The stall is triggered by clearing PF_WQ_WORKER before sleeping, 11 + * which hides the worker from the concurrency manager. A second 12 + * work item queued on the same pool then sits in the worklist with 13 + * no worker available to process it. 14 + * 15 + * After ~30s the workqueue watchdog fires: 16 + * BUG: workqueue lockup - pool cpus=N ... 17 + * 18 + * Build: 19 + * make -C <kernel tree> M=samples/workqueue/stall_detector modules 20 + * 21 + * Copyright (c) 2026 Meta Platforms, Inc. and affiliates. 22 + * Copyright (c) 2026 Breno Leitao <leitao@debian.org> 23 + */ 24 + 25 + #include <linux/module.h> 26 + #include <linux/workqueue.h> 27 + #include <linux/wait.h> 28 + #include <linux/atomic.h> 29 + #include <linux/sched.h> 30 + 31 + static DECLARE_WAIT_QUEUE_HEAD(stall_wq_head); 32 + static atomic_t wake_condition = ATOMIC_INIT(0); 33 + static struct work_struct stall_work1; 34 + static struct work_struct stall_work2; 35 + 36 + static void stall_work2_fn(struct work_struct *work) 37 + { 38 + pr_info("wq_stall: second work item finally ran\n"); 39 + } 40 + 41 + static void stall_work1_fn(struct work_struct *work) 42 + { 43 + pr_info("wq_stall: first work item running on cpu %d\n", 44 + raw_smp_processor_id()); 45 + 46 + /* 47 + * Queue second item while we're still counted as running 48 + * (pool->nr_running > 0). Since schedule_work() on a per-CPU 49 + * workqueue targets raw_smp_processor_id(), item 2 lands on the 50 + * same pool. __queue_work -> kick_pool -> need_more_worker() 51 + * sees nr_running > 0 and does NOT wake a new worker. 52 + */ 53 + schedule_work(&stall_work2); 54 + 55 + /* 56 + * Hide from the workqueue concurrency manager. Without 57 + * PF_WQ_WORKER, schedule() won't call wq_worker_sleeping(), 58 + * so nr_running is never decremented and no replacement 59 + * worker is created. Item 2 stays stuck in pool->worklist. 60 + */ 61 + current->flags &= ~PF_WQ_WORKER; 62 + 63 + pr_info("wq_stall: entering wait_event_idle (PF_WQ_WORKER cleared)\n"); 64 + pr_info("wq_stall: expect 'BUG: workqueue lockup' in ~30-60s\n"); 65 + wait_event_idle(stall_wq_head, atomic_read(&wake_condition) != 0); 66 + 67 + /* Restore so process_one_work() cleanup works correctly */ 68 + current->flags |= PF_WQ_WORKER; 69 + pr_info("wq_stall: woke up, PF_WQ_WORKER restored\n"); 70 + } 71 + 72 + static int __init wq_stall_init(void) 73 + { 74 + pr_info("wq_stall: loading\n"); 75 + 76 + INIT_WORK(&stall_work1, stall_work1_fn); 77 + INIT_WORK(&stall_work2, stall_work2_fn); 78 + schedule_work(&stall_work1); 79 + 80 + return 0; 81 + } 82 + 83 + static void __exit wq_stall_exit(void) 84 + { 85 + pr_info("wq_stall: unloading\n"); 86 + atomic_set(&wake_condition, 1); 87 + wake_up(&stall_wq_head); 88 + flush_work(&stall_work1); 89 + flush_work(&stall_work2); 90 + pr_info("wq_stall: all work flushed, module unloaded\n"); 91 + } 92 + 93 + module_init(wq_stall_init); 94 + module_exit(wq_stall_exit); 95 + 96 + MODULE_LICENSE("GPL"); 97 + MODULE_DESCRIPTION("Reproduce workqueue stall caused by PF_WQ_WORKER misuse"); 98 + MODULE_AUTHOR("Breno Leitao <leitao@debian.org>");
+3 -1
scripts/Makefile.build
··· 310 310 311 311 # The features in this list are the ones allowed for non-`rust/` code. 312 312 # 313 + # - Stable since Rust 1.79.0: `feature(slice_ptr_len)`. 313 314 # - Stable since Rust 1.81.0: `feature(lint_reasons)`. 314 315 # - Stable since Rust 1.82.0: `feature(asm_const)`, 315 316 # `feature(offset_of_nested)`, `feature(raw_ref_op)`. 317 + # - Stable since Rust 1.84.0: `feature(strict_provenance)`. 316 318 # - Stable since Rust 1.87.0: `feature(asm_goto)`. 317 319 # - Expected to become stable: `feature(arbitrary_self_types)`. 318 320 # - To be determined: `feature(used_with_arg)`. 319 321 # 320 322 # Please see https://github.com/Rust-for-Linux/linux/issues/2 for details on 321 323 # the unstable features in use. 322 - rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,offset_of_nested,raw_ref_op,used_with_arg 324 + rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,offset_of_nested,raw_ref_op,slice_ptr_len,strict_provenance,used_with_arg 323 325 324 326 # `--out-dir` is required to avoid temporaries being created by `rustc` in the 325 327 # current working directory, which may be not accessible in the out-of-tree
+4 -5
scripts/livepatch/klp-build
··· 285 285 # application from appending it with '+' due to a dirty git working tree. 286 286 set_kernelversion() { 287 287 local file="$SRC/scripts/setlocalversion" 288 - local localversion 288 + local kernelrelease 289 289 290 290 stash_file "$file" 291 291 292 - localversion="$(cd "$SRC" && make --no-print-directory kernelversion)" 293 - localversion="$(cd "$SRC" && KERNELVERSION="$localversion" ./scripts/setlocalversion)" 294 - [[ -z "$localversion" ]] && die "setlocalversion failed" 292 + kernelrelease="$(cd "$SRC" && make syncconfig &>/dev/null && make -s kernelrelease)" 293 + [[ -z "$kernelrelease" ]] && die "failed to get kernel version" 295 294 296 - sed -i "2i echo $localversion; exit 0" scripts/setlocalversion 295 + sed -i "2i echo $kernelrelease; exit 0" scripts/setlocalversion 297 296 } 298 297 299 298 get_patch_files() {
+134 -91
security/apparmor/apparmorfs.c
··· 32 32 #include "include/crypto.h" 33 33 #include "include/ipc.h" 34 34 #include "include/label.h" 35 + #include "include/lib.h" 35 36 #include "include/policy.h" 36 37 #include "include/policy_ns.h" 37 38 #include "include/resource.h" ··· 63 62 * securityfs and apparmorfs filesystems. 64 63 */ 65 64 65 + #define IREF_POISON 101 66 66 67 67 /* 68 68 * support fns ··· 81 79 if (!private) 82 80 return; 83 81 84 - aa_put_loaddata(private->loaddata); 82 + aa_put_i_loaddata(private->loaddata); 85 83 kvfree(private); 86 84 } 87 85 ··· 155 153 return 0; 156 154 } 157 155 156 + static struct aa_ns *get_ns_common_ref(struct aa_common_ref *ref) 157 + { 158 + if (ref) { 159 + struct aa_label *reflabel = container_of(ref, struct aa_label, 160 + count); 161 + return aa_get_ns(labels_ns(reflabel)); 162 + } 163 + 164 + return NULL; 165 + } 166 + 167 + static struct aa_proxy *get_proxy_common_ref(struct aa_common_ref *ref) 168 + { 169 + if (ref) 170 + return aa_get_proxy(container_of(ref, struct aa_proxy, count)); 171 + 172 + return NULL; 173 + } 174 + 175 + static struct aa_loaddata *get_loaddata_common_ref(struct aa_common_ref *ref) 176 + { 177 + if (ref) 178 + return aa_get_i_loaddata(container_of(ref, struct aa_loaddata, 179 + count)); 180 + return NULL; 181 + } 182 + 183 + static void aa_put_common_ref(struct aa_common_ref *ref) 184 + { 185 + if (!ref) 186 + return; 187 + 188 + switch (ref->reftype) { 189 + case REF_RAWDATA: 190 + aa_put_i_loaddata(container_of(ref, struct aa_loaddata, 191 + count)); 192 + break; 193 + case REF_PROXY: 194 + aa_put_proxy(container_of(ref, struct aa_proxy, 195 + count)); 196 + break; 197 + case REF_NS: 198 + /* ns count is held on its unconfined label */ 199 + aa_put_ns(labels_ns(container_of(ref, struct aa_label, count))); 200 + break; 201 + default: 202 + AA_BUG(true, "unknown refcount type"); 203 + break; 204 + } 205 + } 206 + 207 + static void aa_get_common_ref(struct aa_common_ref *ref) 208 + { 209 + kref_get(&ref->count); 210 + } 211 + 212 + static void aafs_evict(struct inode *inode) 213 + { 214 + struct aa_common_ref *ref = inode->i_private; 215 + 216 + clear_inode(inode); 217 + aa_put_common_ref(ref); 218 + inode->i_private = (void *) IREF_POISON; 219 + } 220 + 158 221 static void aafs_free_inode(struct inode *inode) 159 222 { 160 223 if (S_ISLNK(inode->i_mode)) ··· 229 162 230 163 static const struct super_operations aafs_super_ops = { 231 164 .statfs = simple_statfs, 165 + .evict_inode = aafs_evict, 232 166 .free_inode = aafs_free_inode, 233 167 .show_path = aafs_show_path, 234 168 }; ··· 330 262 * aafs_remove(). Will return ERR_PTR on failure. 
331 263 */ 332 264 static struct dentry *aafs_create(const char *name, umode_t mode, 333 - struct dentry *parent, void *data, void *link, 265 + struct dentry *parent, 266 + struct aa_common_ref *data, void *link, 334 267 const struct file_operations *fops, 335 268 const struct inode_operations *iops) 336 269 { ··· 368 299 goto fail_dentry; 369 300 inode_unlock(dir); 370 301 302 + if (data) 303 + aa_get_common_ref(data); 304 + 371 305 return dentry; 372 306 373 307 fail_dentry: ··· 395 323 * see aafs_create 396 324 */ 397 325 static struct dentry *aafs_create_file(const char *name, umode_t mode, 398 - struct dentry *parent, void *data, 326 + struct dentry *parent, 327 + struct aa_common_ref *data, 399 328 const struct file_operations *fops) 400 329 { 401 330 return aafs_create(name, mode, parent, data, NULL, fops, NULL); ··· 482 409 483 410 data->size = copy_size; 484 411 if (copy_from_user(data->data, userbuf, copy_size)) { 485 - aa_put_loaddata(data); 412 + /* trigger free - don't need to put pcount */ 413 + aa_put_i_loaddata(data); 486 414 return ERR_PTR(-EFAULT); 487 415 } 488 416 ··· 491 417 } 492 418 493 419 static ssize_t policy_update(u32 mask, const char __user *buf, size_t size, 494 - loff_t *pos, struct aa_ns *ns) 420 + loff_t *pos, struct aa_ns *ns, 421 + const struct cred *ocred) 495 422 { 496 423 struct aa_loaddata *data; 497 424 struct aa_label *label; ··· 503 428 /* high level check about policy management - fine grained in 504 429 * below after unpack 505 430 */ 506 - error = aa_may_manage_policy(current_cred(), label, ns, mask); 431 + error = aa_may_manage_policy(current_cred(), label, ns, ocred, mask); 507 432 if (error) 508 433 goto end_section; 509 434 ··· 511 436 error = PTR_ERR(data); 512 437 if (!IS_ERR(data)) { 513 438 error = aa_replace_profiles(ns, label, mask, data); 514 - aa_put_loaddata(data); 439 + /* put pcount, which will put count and free if no 440 + * profiles referencing it. 
441 + */ 442 + aa_put_profile_loaddata(data); 515 443 } 516 444 end_section: 517 445 end_current_label_crit_section(label); ··· 526 448 static ssize_t profile_load(struct file *f, const char __user *buf, size_t size, 527 449 loff_t *pos) 528 450 { 529 - struct aa_ns *ns = aa_get_ns(f->f_inode->i_private); 530 - int error = policy_update(AA_MAY_LOAD_POLICY, buf, size, pos, ns); 451 + struct aa_ns *ns = get_ns_common_ref(f->f_inode->i_private); 452 + int error = policy_update(AA_MAY_LOAD_POLICY, buf, size, pos, ns, 453 + f->f_cred); 531 454 532 455 aa_put_ns(ns); 533 456 ··· 544 465 static ssize_t profile_replace(struct file *f, const char __user *buf, 545 466 size_t size, loff_t *pos) 546 467 { 547 - struct aa_ns *ns = aa_get_ns(f->f_inode->i_private); 468 + struct aa_ns *ns = get_ns_common_ref(f->f_inode->i_private); 548 469 int error = policy_update(AA_MAY_LOAD_POLICY | AA_MAY_REPLACE_POLICY, 549 - buf, size, pos, ns); 470 + buf, size, pos, ns, f->f_cred); 550 471 aa_put_ns(ns); 551 472 552 473 return error; ··· 564 485 struct aa_loaddata *data; 565 486 struct aa_label *label; 566 487 ssize_t error; 567 - struct aa_ns *ns = aa_get_ns(f->f_inode->i_private); 488 + struct aa_ns *ns = get_ns_common_ref(f->f_inode->i_private); 568 489 569 490 label = begin_current_label_crit_section(); 570 491 /* high level check about policy management - fine grained in 571 492 * below after unpack 572 493 */ 573 494 error = aa_may_manage_policy(current_cred(), label, ns, 574 - AA_MAY_REMOVE_POLICY); 495 + f->f_cred, AA_MAY_REMOVE_POLICY); 575 496 if (error) 576 497 goto out; 577 498 ··· 585 506 if (!IS_ERR(data)) { 586 507 data->data[size] = 0; 587 508 error = aa_remove_profiles(ns, label, data->data, size); 588 - aa_put_loaddata(data); 509 + aa_put_profile_loaddata(data); 589 510 } 590 511 out: 591 512 end_current_label_crit_section(label); ··· 654 575 if (!rev) 655 576 return -ENOMEM; 656 577 657 - rev->ns = aa_get_ns(inode->i_private); 578 + rev->ns = get_ns_common_ref(inode->i_private); 658 579 if (!rev->ns) 659 580 rev->ns = aa_get_current_ns(); 660 581 file->private_data = rev; ··· 1140 1061 static int seq_profile_open(struct inode *inode, struct file *file, 1141 1062 int (*show)(struct seq_file *, void *)) 1142 1063 { 1143 - struct aa_proxy *proxy = aa_get_proxy(inode->i_private); 1064 + struct aa_proxy *proxy = get_proxy_common_ref(inode->i_private); 1144 1065 int error = single_open(file, show, proxy); 1145 1066 1146 1067 if (error) { ··· 1332 1253 static int seq_rawdata_open(struct inode *inode, struct file *file, 1333 1254 int (*show)(struct seq_file *, void *)) 1334 1255 { 1335 - struct aa_loaddata *data = __aa_get_loaddata(inode->i_private); 1256 + struct aa_loaddata *data = get_loaddata_common_ref(inode->i_private); 1336 1257 int error; 1337 1258 1338 1259 if (!data) 1339 - /* lost race this ent is being reaped */ 1340 1260 return -ENOENT; 1341 1261 1342 1262 error = single_open(file, show, data); 1343 1263 if (error) { 1344 1264 AA_BUG(file->private_data && 1345 1265 ((struct seq_file *)file->private_data)->private); 1346 - aa_put_loaddata(data); 1266 + aa_put_i_loaddata(data); 1347 1267 } 1348 1268 1349 1269 return error; ··· 1353 1275 struct seq_file *seq = (struct seq_file *) file->private_data; 1354 1276 1355 1277 if (seq) 1356 - aa_put_loaddata(seq->private); 1278 + aa_put_i_loaddata(seq->private); 1357 1279 1358 1280 return single_release(inode, file); 1359 1281 } ··· 1465 1387 if (!aa_current_policy_view_capable(NULL)) 1466 1388 return -EACCES; 1467 1389 1468 - loaddata = 
__aa_get_loaddata(inode->i_private); 1390 + loaddata = get_loaddata_common_ref(inode->i_private); 1469 1391 if (!loaddata) 1470 - /* lost race: this entry is being reaped */ 1471 1392 return -ENOENT; 1472 1393 1473 1394 private = rawdata_f_data_alloc(loaddata->size); ··· 1491 1414 return error; 1492 1415 1493 1416 fail_private_alloc: 1494 - aa_put_loaddata(loaddata); 1417 + aa_put_i_loaddata(loaddata); 1495 1418 return error; 1496 1419 } 1497 1420 ··· 1508 1431 1509 1432 for (i = 0; i < AAFS_LOADDATA_NDENTS; i++) { 1510 1433 if (!IS_ERR_OR_NULL(rawdata->dents[i])) { 1511 - /* no refcounts on i_private */ 1512 1434 aafs_remove(rawdata->dents[i]); 1513 1435 rawdata->dents[i] = NULL; 1514 1436 } ··· 1550 1474 return PTR_ERR(dir); 1551 1475 rawdata->dents[AAFS_LOADDATA_DIR] = dir; 1552 1476 1553 - dent = aafs_create_file("abi", S_IFREG | 0444, dir, rawdata, 1477 + dent = aafs_create_file("abi", S_IFREG | 0444, dir, &rawdata->count, 1554 1478 &seq_rawdata_abi_fops); 1555 1479 if (IS_ERR(dent)) 1556 1480 goto fail; 1557 1481 rawdata->dents[AAFS_LOADDATA_ABI] = dent; 1558 1482 1559 - dent = aafs_create_file("revision", S_IFREG | 0444, dir, rawdata, 1560 - &seq_rawdata_revision_fops); 1483 + dent = aafs_create_file("revision", S_IFREG | 0444, dir, 1484 + &rawdata->count, 1485 + &seq_rawdata_revision_fops); 1561 1486 if (IS_ERR(dent)) 1562 1487 goto fail; 1563 1488 rawdata->dents[AAFS_LOADDATA_REVISION] = dent; 1564 1489 1565 1490 if (aa_g_hash_policy) { 1566 1491 dent = aafs_create_file("sha256", S_IFREG | 0444, dir, 1567 - rawdata, &seq_rawdata_hash_fops); 1492 + &rawdata->count, 1493 + &seq_rawdata_hash_fops); 1568 1494 if (IS_ERR(dent)) 1569 1495 goto fail; 1570 1496 rawdata->dents[AAFS_LOADDATA_HASH] = dent; 1571 1497 } 1572 1498 1573 1499 dent = aafs_create_file("compressed_size", S_IFREG | 0444, dir, 1574 - rawdata, 1500 + &rawdata->count, 1575 1501 &seq_rawdata_compressed_size_fops); 1576 1502 if (IS_ERR(dent)) 1577 1503 goto fail; 1578 1504 rawdata->dents[AAFS_LOADDATA_COMPRESSED_SIZE] = dent; 1579 1505 1580 - dent = aafs_create_file("raw_data", S_IFREG | 0444, 1581 - dir, rawdata, &rawdata_fops); 1506 + dent = aafs_create_file("raw_data", S_IFREG | 0444, dir, 1507 + &rawdata->count, &rawdata_fops); 1582 1508 if (IS_ERR(dent)) 1583 1509 goto fail; 1584 1510 rawdata->dents[AAFS_LOADDATA_DATA] = dent; ··· 1588 1510 1589 1511 rawdata->ns = aa_get_ns(ns); 1590 1512 list_add(&rawdata->list, &ns->rawdata_list); 1591 - /* no refcount on inode rawdata */ 1592 1513 1593 1514 return 0; 1594 1515 1595 1516 fail: 1596 1517 remove_rawdata_dents(rawdata); 1597 - 1598 1518 return PTR_ERR(dent); 1599 1519 } 1600 1520 #endif /* CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */ ··· 1616 1540 __aafs_profile_rmdir(child); 1617 1541 1618 1542 for (i = AAFS_PROF_SIZEOF - 1; i >= 0; --i) { 1619 - struct aa_proxy *proxy; 1620 1543 if (!profile->dents[i]) 1621 1544 continue; 1622 1545 1623 - proxy = d_inode(profile->dents[i])->i_private; 1624 1546 aafs_remove(profile->dents[i]); 1625 - aa_put_proxy(proxy); 1626 1547 profile->dents[i] = NULL; 1627 1548 } 1628 1549 } ··· 1653 1580 struct aa_profile *profile, 1654 1581 const struct file_operations *fops) 1655 1582 { 1656 - struct aa_proxy *proxy = aa_get_proxy(profile->label.proxy); 1657 - struct dentry *dent; 1658 - 1659 - dent = aafs_create_file(name, S_IFREG | 0444, dir, proxy, fops); 1660 - if (IS_ERR(dent)) 1661 - aa_put_proxy(proxy); 1662 - 1663 - return dent; 1583 + return aafs_create_file(name, S_IFREG | 0444, dir, &profile->label.proxy->count, fops); 1664 1584 } 
1665 1585 1666 1586 #ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY ··· 1703 1637 struct delayed_call *done, 1704 1638 const char *name) 1705 1639 { 1706 - struct aa_proxy *proxy = inode->i_private; 1640 + struct aa_common_ref *ref = inode->i_private; 1641 + struct aa_proxy *proxy = container_of(ref, struct aa_proxy, count); 1707 1642 struct aa_label *label; 1708 1643 struct aa_profile *profile; 1709 1644 char *target; ··· 1846 1779 if (profile->rawdata) { 1847 1780 if (aa_g_hash_policy) { 1848 1781 dent = aafs_create("raw_sha256", S_IFLNK | 0444, dir, 1849 - profile->label.proxy, NULL, NULL, 1850 - &rawdata_link_sha256_iops); 1782 + &profile->label.proxy->count, NULL, 1783 + NULL, &rawdata_link_sha256_iops); 1851 1784 if (IS_ERR(dent)) 1852 1785 goto fail; 1853 - aa_get_proxy(profile->label.proxy); 1854 1786 profile->dents[AAFS_PROF_RAW_HASH] = dent; 1855 1787 } 1856 1788 dent = aafs_create("raw_abi", S_IFLNK | 0444, dir, 1857 - profile->label.proxy, NULL, NULL, 1789 + &profile->label.proxy->count, NULL, NULL, 1858 1790 &rawdata_link_abi_iops); 1859 1791 if (IS_ERR(dent)) 1860 1792 goto fail; 1861 - aa_get_proxy(profile->label.proxy); 1862 1793 profile->dents[AAFS_PROF_RAW_ABI] = dent; 1863 1794 1864 1795 dent = aafs_create("raw_data", S_IFLNK | 0444, dir, 1865 - profile->label.proxy, NULL, NULL, 1796 + &profile->label.proxy->count, NULL, NULL, 1866 1797 &rawdata_link_data_iops); 1867 1798 if (IS_ERR(dent)) 1868 1799 goto fail; 1869 - aa_get_proxy(profile->label.proxy); 1870 1800 profile->dents[AAFS_PROF_RAW_DATA] = dent; 1871 1801 } 1872 1802 #endif /*CONFIG_SECURITY_APPARMOR_EXPORT_BINARY */ ··· 1894 1830 int error; 1895 1831 1896 1832 label = begin_current_label_crit_section(); 1897 - error = aa_may_manage_policy(current_cred(), label, NULL, 1833 + error = aa_may_manage_policy(current_cred(), label, NULL, NULL, 1898 1834 AA_MAY_LOAD_POLICY); 1899 1835 end_current_label_crit_section(label); 1900 1836 if (error) 1901 1837 return ERR_PTR(error); 1902 1838 1903 - parent = aa_get_ns(dir->i_private); 1839 + parent = get_ns_common_ref(dir->i_private); 1904 1840 AA_BUG(d_inode(ns_subns_dir(parent)) != dir); 1905 1841 1906 1842 /* we have to unlock and then relock to get locking order right ··· 1944 1880 int error; 1945 1881 1946 1882 label = begin_current_label_crit_section(); 1947 - error = aa_may_manage_policy(current_cred(), label, NULL, 1883 + error = aa_may_manage_policy(current_cred(), label, NULL, NULL, 1948 1884 AA_MAY_LOAD_POLICY); 1949 1885 end_current_label_crit_section(label); 1950 1886 if (error) 1951 1887 return error; 1952 1888 1953 - parent = aa_get_ns(dir->i_private); 1889 + parent = get_ns_common_ref(dir->i_private); 1954 1890 /* rmdir calls the generic securityfs functions to remove files 1955 1891 * from the apparmor dir. It is up to the apparmor ns locking 1956 1892 * to avoid races. 
··· 2020 1956 2021 1957 __aa_fs_list_remove_rawdata(ns); 2022 1958 2023 - if (ns_subns_dir(ns)) { 2024 - sub = d_inode(ns_subns_dir(ns))->i_private; 2025 - aa_put_ns(sub); 2026 - } 2027 - if (ns_subload(ns)) { 2028 - sub = d_inode(ns_subload(ns))->i_private; 2029 - aa_put_ns(sub); 2030 - } 2031 - if (ns_subreplace(ns)) { 2032 - sub = d_inode(ns_subreplace(ns))->i_private; 2033 - aa_put_ns(sub); 2034 - } 2035 - if (ns_subremove(ns)) { 2036 - sub = d_inode(ns_subremove(ns))->i_private; 2037 - aa_put_ns(sub); 2038 - } 2039 - if (ns_subrevision(ns)) { 2040 - sub = d_inode(ns_subrevision(ns))->i_private; 2041 - aa_put_ns(sub); 2042 - } 2043 - 2044 1959 for (i = AAFS_NS_SIZEOF - 1; i >= 0; --i) { 2045 1960 aafs_remove(ns->dents[i]); 2046 1961 ns->dents[i] = NULL; ··· 2044 2001 return PTR_ERR(dent); 2045 2002 ns_subdata_dir(ns) = dent; 2046 2003 2047 - dent = aafs_create_file("revision", 0444, dir, ns, 2004 + dent = aafs_create_file("revision", 0444, dir, 2005 + &ns->unconfined->label.count, 2048 2006 &aa_fs_ns_revision_fops); 2049 2007 if (IS_ERR(dent)) 2050 2008 return PTR_ERR(dent); 2051 - aa_get_ns(ns); 2052 2009 ns_subrevision(ns) = dent; 2053 2010 2054 - dent = aafs_create_file(".load", 0640, dir, ns, 2055 - &aa_fs_profile_load); 2011 + dent = aafs_create_file(".load", 0640, dir, 2012 + &ns->unconfined->label.count, 2013 + &aa_fs_profile_load); 2056 2014 if (IS_ERR(dent)) 2057 2015 return PTR_ERR(dent); 2058 - aa_get_ns(ns); 2059 2016 ns_subload(ns) = dent; 2060 2017 2061 - dent = aafs_create_file(".replace", 0640, dir, ns, 2062 - &aa_fs_profile_replace); 2018 + dent = aafs_create_file(".replace", 0640, dir, 2019 + &ns->unconfined->label.count, 2020 + &aa_fs_profile_replace); 2063 2021 if (IS_ERR(dent)) 2064 2022 return PTR_ERR(dent); 2065 - aa_get_ns(ns); 2066 2023 ns_subreplace(ns) = dent; 2067 2024 2068 - dent = aafs_create_file(".remove", 0640, dir, ns, 2069 - &aa_fs_profile_remove); 2025 + dent = aafs_create_file(".remove", 0640, dir, 2026 + &ns->unconfined->label.count, 2027 + &aa_fs_profile_remove); 2070 2028 if (IS_ERR(dent)) 2071 2029 return PTR_ERR(dent); 2072 - aa_get_ns(ns); 2073 2030 ns_subremove(ns) = dent; 2074 2031 2075 2032 /* use create_dentry so we can supply private data */ 2076 - dent = aafs_create("namespaces", S_IFDIR | 0755, dir, ns, NULL, NULL, 2077 - &ns_dir_inode_operations); 2033 + dent = aafs_create("namespaces", S_IFDIR | 0755, dir, 2034 + &ns->unconfined->label.count, 2035 + NULL, NULL, &ns_dir_inode_operations); 2078 2036 if (IS_ERR(dent)) 2079 2037 return PTR_ERR(dent); 2080 - aa_get_ns(ns); 2081 2038 ns_subns_dir(ns) = dent; 2082 2039 2083 2040 return 0;
+8 -8
security/apparmor/include/label.h
··· 102 102 103 103 struct aa_label; 104 104 struct aa_proxy { 105 - struct kref count; 105 + struct aa_common_ref count; 106 106 struct aa_label __rcu *label; 107 107 }; 108 108 ··· 125 125 * vec: vector of profiles comprising the compound label 126 126 */ 127 127 struct aa_label { 128 - struct kref count; 128 + struct aa_common_ref count; 129 129 struct rb_node node; 130 130 struct rcu_head rcu; 131 131 struct aa_proxy *proxy; ··· 357 357 */ 358 358 static inline struct aa_label *__aa_get_label(struct aa_label *l) 359 359 { 360 - if (l && kref_get_unless_zero(&l->count)) 360 + if (l && kref_get_unless_zero(&l->count.count)) 361 361 return l; 362 362 363 363 return NULL; ··· 366 366 static inline struct aa_label *aa_get_label(struct aa_label *l) 367 367 { 368 368 if (l) 369 - kref_get(&(l->count)); 369 + kref_get(&(l->count.count)); 370 370 371 371 return l; 372 372 } ··· 386 386 rcu_read_lock(); 387 387 do { 388 388 c = rcu_dereference(*l); 389 - } while (c && !kref_get_unless_zero(&c->count)); 389 + } while (c && !kref_get_unless_zero(&c->count.count)); 390 390 rcu_read_unlock(); 391 391 392 392 return c; ··· 426 426 static inline void aa_put_label(struct aa_label *l) 427 427 { 428 428 if (l) 429 - kref_put(&l->count, aa_label_kref); 429 + kref_put(&l->count.count, aa_label_kref); 430 430 } 431 431 432 432 /* wrapper fn to indicate semantics of the check */ ··· 443 443 static inline struct aa_proxy *aa_get_proxy(struct aa_proxy *proxy) 444 444 { 445 445 if (proxy) 446 - kref_get(&(proxy->count)); 446 + kref_get(&(proxy->count.count)); 447 447 448 448 return proxy; 449 449 } ··· 451 451 static inline void aa_put_proxy(struct aa_proxy *proxy) 452 452 { 453 453 if (proxy) 454 - kref_put(&proxy->count, aa_proxy_kref); 454 + kref_put(&proxy->count.count, aa_proxy_kref); 455 455 } 456 456 457 457 void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new);
+12
security/apparmor/include/lib.h
··· 102 102 /* Security blob offsets */
103 103 extern struct lsm_blob_sizes apparmor_blob_sizes;
104 104
105 + enum reftype {
106 + REF_NS,
107 + REF_PROXY,
108 + REF_RAWDATA,
109 + };
110 +
111 + /* common reference count used by data that shows up in aafs */
112 + struct aa_common_ref {
113 + struct kref count;
114 + enum reftype reftype;
115 + };
116 +
105 117 /**
106 118 * aa_strneq - compare null terminated @str to a non null terminated substring
107 119 * @str: a null terminated string
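The new aa_common_ref only works because it is embedded in each object that can land in an aafs inode's i_private: given just the common ref pointer, the reftype tag tells the reader which container_of() to apply. A minimal userspace sketch of that pattern (all names here are illustrative stand-ins, not the kernel's types):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    enum reftype { REF_NS, REF_PROXY, REF_RAWDATA };

    struct common_ref { int count; enum reftype reftype; };

    /* two unrelated owners, each embedding the same ref type */
    struct proxy    { const char *name; struct common_ref ref; };
    struct loaddata { struct common_ref ref; size_t size; };

    static void describe(struct common_ref *r)
    {
        switch (r->reftype) {
        case REF_PROXY:
            printf("proxy %s\n", container_of(r, struct proxy, ref)->name);
            break;
        case REF_RAWDATA:
            printf("loaddata of %zu bytes\n",
                   container_of(r, struct loaddata, ref)->size);
            break;
        default:
            printf("ns label\n");
        }
    }

    int main(void)
    {
        struct proxy p = { .name = "unconfined", .ref = { 1, REF_PROXY } };
        struct loaddata d = { .ref = { 1, REF_RAWDATA }, .size = 4096 };

        describe(&p.ref);   /* proxy unconfined */
        describe(&d.ref);   /* loaddata of 4096 bytes */
        return 0;
    }

This is what lets aafs_evict() above drop the right owner's reference from a single i_private pointer.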
+1
security/apparmor/include/match.h
··· 185 185 #define MATCH_FLAG_DIFF_ENCODE 0x80000000 186 186 #define MARK_DIFF_ENCODE 0x40000000 187 187 #define MATCH_FLAG_OOB_TRANSITION 0x20000000 188 + #define MARK_DIFF_ENCODE_VERIFIED 0x10000000 188 189 #define MATCH_FLAGS_MASK 0xff000000 189 190 #define MATCH_FLAGS_VALID (MATCH_FLAG_DIFF_ENCODE | MATCH_FLAG_OOB_TRANSITION) 190 191 #define MATCH_FLAGS_INVALID (MATCH_FLAGS_MASK & ~MATCH_FLAGS_VALID)
+5 -5
security/apparmor/include/policy.h
··· 379 379 static inline struct aa_profile *aa_get_profile(struct aa_profile *p) 380 380 { 381 381 if (p) 382 - kref_get(&(p->label.count)); 382 + kref_get(&(p->label.count.count)); 383 383 384 384 return p; 385 385 } ··· 393 393 */ 394 394 static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p) 395 395 { 396 - if (p && kref_get_unless_zero(&p->label.count)) 396 + if (p && kref_get_unless_zero(&p->label.count.count)) 397 397 return p; 398 398 399 399 return NULL; ··· 413 413 rcu_read_lock(); 414 414 do { 415 415 c = rcu_dereference(*p); 416 - } while (c && !kref_get_unless_zero(&c->label.count)); 416 + } while (c && !kref_get_unless_zero(&c->label.count.count)); 417 417 rcu_read_unlock(); 418 418 419 419 return c; ··· 426 426 static inline void aa_put_profile(struct aa_profile *p) 427 427 { 428 428 if (p) 429 - kref_put(&p->label.count, aa_label_kref); 429 + kref_put(&p->label.count.count, aa_label_kref); 430 430 } 431 431 432 432 static inline int AUDIT_MODE(struct aa_profile *profile) ··· 443 443 struct aa_label *label, struct aa_ns *ns); 444 444 int aa_may_manage_policy(const struct cred *subj_cred, 445 445 struct aa_label *label, struct aa_ns *ns, 446 - u32 mask); 446 + const struct cred *ocred, u32 mask); 447 447 bool aa_current_policy_view_capable(struct aa_ns *ns); 448 448 bool aa_current_policy_admin_capable(struct aa_ns *ns); 449 449
+2
security/apparmor/include/policy_ns.h
··· 18 18 #include "label.h" 19 19 #include "policy.h" 20 20 21 + /* Match max depth of user namespaces */ 22 + #define MAX_NS_DEPTH 32 21 23 22 24 /* struct aa_ns_acct - accounting of profiles in namespace 23 25 * @max_size: maximum space allowed for all profiles in namespace
+49 -34
security/apparmor/include/policy_unpack.h
··· 87 87 u32 version; 88 88 }; 89 89 90 - /* 91 - * struct aa_loaddata - buffer of policy raw_data set 90 + /* struct aa_loaddata - buffer of policy raw_data set 91 + * @count: inode/filesystem refcount - use aa_get_i_loaddata() 92 + * @pcount: profile refcount - use aa_get_profile_loaddata() 93 + * @list: list the loaddata is on 94 + * @work: used to do a delayed cleanup 95 + * @dents: refs to dents created in aafs 96 + * @ns: the namespace this loaddata was loaded into 97 + * @name: 98 + * @size: the size of the data that was loaded 99 + * @compressed_size: the size of the data when it is compressed 100 + * @revision: unique revision count that this data was loaded as 101 + * @abi: the abi number the loaddata uses 102 + * @hash: a hash of the loaddata, used to help dedup data 92 103 * 93 - * there is no loaddata ref for being on ns list, nor a ref from 94 - * d_inode(@dentry) when grab a ref from these, @ns->lock must be held 95 - * && __aa_get_loaddata() needs to be used, and the return value 96 - * checked, if NULL the loaddata is already being reaped and should be 97 - * considered dead. 104 + * There is no loaddata ref for being on ns->rawdata_list, so 105 + * @ns->lock must be held when walking the list. Dentries and 106 + * inode opens hold refs on @count; profiles hold refs on @pcount. 107 + * When the last @pcount drops, do_ploaddata_rmfs() removes the 108 + * fs entries and drops the associated @count ref. 98 109 */ 99 110 struct aa_loaddata { 100 - struct kref count; 111 + struct aa_common_ref count; 112 + struct kref pcount; 101 113 struct list_head list; 102 114 struct work_struct work; 103 115 struct dentry *dents[AAFS_LOADDATA_NDENTS]; ··· 131 119 int aa_unpack(struct aa_loaddata *udata, struct list_head *lh, const char **ns); 132 120 133 121 /** 134 - * __aa_get_loaddata - get a reference count to uncounted data reference 135 - * @data: reference to get a count on 136 - * 137 - * Returns: pointer to reference OR NULL if race is lost and reference is 138 - * being repeated. 139 - * Requires: @data->ns->lock held, and the return code MUST be checked 140 - * 141 - * Use only from inode->i_private and @data->list found references 142 - */ 143 - static inline struct aa_loaddata * 144 - __aa_get_loaddata(struct aa_loaddata *data) 145 - { 146 - if (data && kref_get_unless_zero(&(data->count))) 147 - return data; 148 - 149 - return NULL; 150 - } 151 - 152 - /** 153 122 * aa_get_loaddata - get a reference count from a counted data reference 154 123 * @data: reference to get a count on 155 124 * 156 - * Returns: point to reference 125 + * Returns: pointer to reference 157 126 * Requires: @data to have a valid reference count on it. It is a bug 158 127 * if the race to reap can be encountered when it is used. 159 128 */ 160 129 static inline struct aa_loaddata * 161 - aa_get_loaddata(struct aa_loaddata *data) 130 + aa_get_i_loaddata(struct aa_loaddata *data) 162 131 { 163 - struct aa_loaddata *tmp = __aa_get_loaddata(data); 164 132 165 - AA_BUG(data && !tmp); 133 + if (data) 134 + kref_get(&(data->count.count)); 135 + return data; 136 + } 166 137 167 - return tmp; 138 + 139 + /** 140 + * aa_get_profile_loaddata - get a profile reference count on loaddata 141 + * @data: reference to get a count on 142 + * 143 + * Returns: pointer to reference 144 + * Requires: @data to have a valid reference count on it. 
145 + */ 146 + static inline struct aa_loaddata * 147 + aa_get_profile_loaddata(struct aa_loaddata *data) 148 + { 149 + if (data) 150 + kref_get(&(data->pcount)); 151 + return data; 168 152 } 169 153 170 154 void __aa_loaddata_update(struct aa_loaddata *data, long revision); 171 155 bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r); 172 156 void aa_loaddata_kref(struct kref *kref); 157 + void aa_ploaddata_kref(struct kref *kref); 173 158 struct aa_loaddata *aa_loaddata_alloc(size_t size); 174 - static inline void aa_put_loaddata(struct aa_loaddata *data) 159 + static inline void aa_put_i_loaddata(struct aa_loaddata *data) 175 160 { 176 161 if (data) 177 - kref_put(&data->count, aa_loaddata_kref); 162 + kref_put(&data->count.count, aa_loaddata_kref); 163 + } 164 + 165 + static inline void aa_put_profile_loaddata(struct aa_loaddata *data) 166 + { 167 + if (data) 168 + kref_put(&data->pcount, aa_ploaddata_kref); 178 169 } 179 170 180 171 #if IS_ENABLED(CONFIG_KUNIT)
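The split into @count and @pcount is easiest to read as two nested lifetimes: profile references (pcount) keep the aafs entries alive, and each aafs dentry or open inode in turn holds an inode reference (count) that keeps the data itself alive. A toy userspace model of the teardown order described in the struct comment (hypothetical names, plain ints instead of kref, no locking):

    #include <stdio.h>

    struct loaddata {
        int icount;   /* models aa_loaddata.count */
        int pcount;   /* models aa_loaddata.pcount */
    };

    static void put_i(struct loaddata *d)
    {
        if (--d->icount == 0)
            printf("icount 0: free the data\n");
    }

    static void put_p(struct loaddata *d)
    {
        if (--d->pcount == 0) {
            printf("pcount 0: remove the aafs entries\n");
            put_i(d);   /* drop the icount ref paired with pcount */
        }
    }

    int main(void)
    {
        /* one icount ref paired with pcount, one held by an open file */
        struct loaddata d = { .icount = 2, .pcount = 1 };

        put_p(&d);  /* last profile ref: fs entries go away */
        put_i(&d);  /* last reader closes: data is freed */
        return 0;
    }

The data can therefore outlive the last profile reference for as long as someone still has a raw_data file open, which is exactly the race the old __aa_get_loaddata() dance existed to paper over.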
+8 -4
security/apparmor/label.c
··· 52 52 53 53 void aa_proxy_kref(struct kref *kref) 54 54 { 55 - struct aa_proxy *proxy = container_of(kref, struct aa_proxy, count); 55 + struct aa_proxy *proxy = container_of(kref, struct aa_proxy, 56 + count.count); 56 57 57 58 free_proxy(proxy); 58 59 } ··· 64 63 65 64 new = kzalloc_obj(struct aa_proxy, gfp); 66 65 if (new) { 67 - kref_init(&new->count); 66 + kref_init(&new->count.count); 67 + new->count.reftype = REF_PROXY; 68 68 rcu_assign_pointer(new->label, aa_get_label(label)); 69 69 } 70 70 return new; ··· 377 375 378 376 void aa_label_kref(struct kref *kref) 379 377 { 380 - struct aa_label *label = container_of(kref, struct aa_label, count); 378 + struct aa_label *label = container_of(kref, struct aa_label, 379 + count.count); 381 380 struct aa_ns *ns = labels_ns(label); 382 381 383 382 if (!ns) { ··· 415 412 416 413 label->size = size; /* doesn't include null */ 417 414 label->vec[size] = NULL; /* null terminate */ 418 - kref_init(&label->count); 415 + kref_init(&label->count.count); 416 + label->count.reftype = REF_NS; /* for aafs purposes */ 419 417 RB_CLEAR_NODE(&label->node); 420 418 421 419 return true;
+42 -16
security/apparmor/match.c
··· 160 160 if (state_count == 0) 161 161 goto out; 162 162 for (i = 0; i < state_count; i++) { 163 - if (!(BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE) && 164 - (DEFAULT_TABLE(dfa)[i] >= state_count)) 163 + if (DEFAULT_TABLE(dfa)[i] >= state_count) { 164 + pr_err("AppArmor DFA default state out of bounds"); 165 165 goto out; 166 + } 166 167 if (BASE_TABLE(dfa)[i] & MATCH_FLAGS_INVALID) { 167 168 pr_err("AppArmor DFA state with invalid match flags"); 168 169 goto out; ··· 202 201 size_t j, k; 203 202 204 203 for (j = i; 205 - (BASE_TABLE(dfa)[j] & MATCH_FLAG_DIFF_ENCODE) && 206 - !(BASE_TABLE(dfa)[j] & MARK_DIFF_ENCODE); 204 + ((BASE_TABLE(dfa)[j] & MATCH_FLAG_DIFF_ENCODE) && 205 + !(BASE_TABLE(dfa)[j] & MARK_DIFF_ENCODE_VERIFIED)); 207 206 j = k) { 207 + if (BASE_TABLE(dfa)[j] & MARK_DIFF_ENCODE) 208 + /* loop in current chain */ 209 + goto out; 208 210 k = DEFAULT_TABLE(dfa)[j]; 209 211 if (j == k) 212 + /* self loop */ 210 213 goto out; 211 - if (k < j) 212 - break; /* already verified */ 213 214 BASE_TABLE(dfa)[j] |= MARK_DIFF_ENCODE; 215 + } 216 + /* move mark to verified */ 217 + for (j = i; 218 + (BASE_TABLE(dfa)[j] & MATCH_FLAG_DIFF_ENCODE); 219 + j = k) { 220 + k = DEFAULT_TABLE(dfa)[j]; 221 + if (j < i) 222 + /* jumps to state/chain that has been 223 + * verified 224 + */ 225 + break; 226 + BASE_TABLE(dfa)[j] &= ~MARK_DIFF_ENCODE; 227 + BASE_TABLE(dfa)[j] |= MARK_DIFF_ENCODE_VERIFIED; 214 228 } 215 229 } 216 230 error = 0; ··· 479 463 if (dfa->tables[YYTD_ID_EC]) { 480 464 /* Equivalence class table defined */ 481 465 u8 *equiv = EQUIV_TABLE(dfa); 482 - for (; len; len--) 483 - match_char(state, def, base, next, check, 484 - equiv[(u8) *str++]); 466 + for (; len; len--) { 467 + u8 c = equiv[(u8) *str]; 468 + 469 + match_char(state, def, base, next, check, c); 470 + str++; 471 + } 485 472 } else { 486 473 /* default is direct to next state */ 487 - for (; len; len--) 488 - match_char(state, def, base, next, check, (u8) *str++); 474 + for (; len; len--) { 475 + match_char(state, def, base, next, check, (u8) *str); 476 + str++; 477 + } 489 478 } 490 479 491 480 return state; ··· 524 503 /* Equivalence class table defined */ 525 504 u8 *equiv = EQUIV_TABLE(dfa); 526 505 /* default is direct to next state */ 527 - while (*str) 528 - match_char(state, def, base, next, check, 529 - equiv[(u8) *str++]); 506 + while (*str) { 507 + u8 c = equiv[(u8) *str]; 508 + 509 + match_char(state, def, base, next, check, c); 510 + str++; 511 + } 530 512 } else { 531 513 /* default is direct to next state */ 532 - while (*str) 533 - match_char(state, def, base, next, check, (u8) *str++); 514 + while (*str) { 515 + match_char(state, def, base, next, check, (u8) *str); 516 + str++; 517 + } 534 518 } 535 519 536 520 return state;
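The reworked verification walks each chain of diff-encoded default transitions twice: a first pass marks states as it goes and bails out on a self loop or on re-entering a marked state, and a second pass converts the marks of a proven chain into a separate "verified" bit so later chains can terminate early when they merge into it. A rough userspace analogue of the two-pass walk (hypothetical flag values; the kernel code exits through its own error path rather than returning a bool):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define FLAG_DIFF   0x1  /* state is diff-encoded */
    #define FLAG_MARK   0x2  /* on the chain currently being walked */
    #define FLAG_VERIF  0x4  /* chain through this state proven acyclic */

    static bool verify_chains(uint32_t *flags, const uint32_t *def, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            size_t j, k;

            /* pass 1: walk until a non-diff or already-verified state */
            for (j = i; (flags[j] & FLAG_DIFF) && !(flags[j] & FLAG_VERIF);
                 j = k) {
                if (flags[j] & FLAG_MARK)
                    return false;   /* revisited a state: loop */
                k = def[j];
                if (j == k)
                    return false;   /* self loop */
                flags[j] |= FLAG_MARK;
            }
            /* pass 2: promote marks to verified along the same chain */
            for (j = i; flags[j] & FLAG_MARK; j = def[j]) {
                flags[j] &= ~FLAG_MARK;
                flags[j] |= FLAG_VERIF;
            }
        }
        return true;
    }

    int main(void)
    {
        uint32_t def[] = {1, 2, 2};   /* 0 -> 1 -> 2, state 2 is plain */
        uint32_t ok[]  = {FLAG_DIFF, FLAG_DIFF, 0};
        printf("acyclic chain: %d\n", verify_chains(ok, def, 3));

        uint32_t cyc_def[] = {1, 0};  /* 0 -> 1 -> 0: cycle */
        uint32_t cyc[] = {FLAG_DIFF, FLAG_DIFF};
        printf("cyclic chain:  %d\n", verify_chains(cyc, cyc_def, 2));
        return 0;
    }

The old single-mark scheme could be tricked by forward jumps; keeping "on current chain" and "verified" as distinct bits is what makes the loop detection sound.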
+67 -10
security/apparmor/policy.c
··· 191 191 } 192 192 193 193 /** 194 - * __remove_profile - remove old profile, and children 195 - * @profile: profile to be replaced (NOT NULL) 194 + * __remove_profile - remove profile, and children 195 + * @profile: profile to be removed (NOT NULL) 196 196 * 197 197 * Requires: namespace list lock be held, or list not be shared 198 198 */ 199 199 static void __remove_profile(struct aa_profile *profile) 200 200 { 201 + struct aa_profile *curr, *to_remove; 202 + 201 203 AA_BUG(!profile); 202 204 AA_BUG(!profile->ns); 203 205 AA_BUG(!mutex_is_locked(&profile->ns->lock)); 204 206 205 207 /* release any children lists first */ 206 - __aa_profile_list_release(&profile->base.profiles); 208 + if (!list_empty(&profile->base.profiles)) { 209 + curr = list_first_entry(&profile->base.profiles, struct aa_profile, base.list); 210 + 211 + while (curr != profile) { 212 + 213 + while (!list_empty(&curr->base.profiles)) 214 + curr = list_first_entry(&curr->base.profiles, 215 + struct aa_profile, base.list); 216 + 217 + to_remove = curr; 218 + if (!list_is_last(&to_remove->base.list, 219 + &aa_deref_parent(curr)->base.profiles)) 220 + curr = list_next_entry(to_remove, base.list); 221 + else 222 + curr = aa_deref_parent(curr); 223 + 224 + /* released by free_profile */ 225 + aa_label_remove(&to_remove->label); 226 + __aafs_profile_rmdir(to_remove); 227 + __list_remove_profile(to_remove); 228 + } 229 + } 230 + 207 231 /* released by free_profile */ 208 232 aa_label_remove(&profile->label); 209 233 __aafs_profile_rmdir(profile); ··· 350 326 } 351 327 352 328 kfree_sensitive(profile->hash); 353 - aa_put_loaddata(profile->rawdata); 329 + aa_put_profile_loaddata(profile->rawdata); 354 330 aa_label_destroy(&profile->label); 355 331 356 332 kfree_sensitive(profile); ··· 942 918 return res; 943 919 } 944 920 921 + static bool is_subset_of_obj_privilege(const struct cred *cred, 922 + struct aa_label *label, 923 + const struct cred *ocred) 924 + { 925 + if (cred == ocred) 926 + return true; 927 + 928 + if (!aa_label_is_subset(label, cred_label(ocred))) 929 + return false; 930 + /* don't allow crossing userns for now */ 931 + if (cred->user_ns != ocred->user_ns) 932 + return false; 933 + if (!cap_issubset(cred->cap_inheritable, ocred->cap_inheritable)) 934 + return false; 935 + if (!cap_issubset(cred->cap_permitted, ocred->cap_permitted)) 936 + return false; 937 + if (!cap_issubset(cred->cap_effective, ocred->cap_effective)) 938 + return false; 939 + if (!cap_issubset(cred->cap_bset, ocred->cap_bset)) 940 + return false; 941 + if (!cap_issubset(cred->cap_ambient, ocred->cap_ambient)) 942 + return false; 943 + return true; 944 + } 945 + 946 + 945 947 /** 946 948 * aa_may_manage_policy - can the current task manage policy 947 949 * @subj_cred: subjects cred 948 950 * @label: label to check if it can manage policy 949 951 * @ns: namespace being managed by @label (may be NULL if @label's ns) 952 + * @ocred: object cred if request is coming from an open object 950 953 * @mask: contains the policy manipulation operation being done 951 954 * 952 955 * Returns: 0 if the task is allowed to manipulate policy else error 953 956 */ 954 957 int aa_may_manage_policy(const struct cred *subj_cred, struct aa_label *label, 955 - struct aa_ns *ns, u32 mask) 958 + struct aa_ns *ns, const struct cred *ocred, u32 mask) 956 959 { 957 960 const char *op; 958 961 ··· 993 942 /* check if loading policy is locked out */ 994 943 if (aa_g_lock_policy) 995 944 return audit_policy(label, op, NULL, NULL, "policy_locked", 945 + -EACCES); 946 + 947 
+ if (ocred && !is_subset_of_obj_privilege(subj_cred, label, ocred)) 948 + return audit_policy(label, op, NULL, NULL, 949 + "not privileged for target profile", 996 950 -EACCES); 997 951 998 952 if (!aa_policy_admin_capable(subj_cred, label, ns)) ··· 1171 1115 LIST_HEAD(lh); 1172 1116 1173 1117 op = mask & AA_MAY_REPLACE_POLICY ? OP_PROF_REPL : OP_PROF_LOAD; 1174 - aa_get_loaddata(udata); 1118 + aa_get_profile_loaddata(udata); 1175 1119 /* released below */ 1176 1120 error = aa_unpack(udata, &lh, &ns_name); 1177 1121 if (error) ··· 1198 1142 goto fail; 1199 1143 } 1200 1144 ns_name = ent->ns_name; 1145 + ent->ns_name = NULL; 1201 1146 } else 1202 1147 count++; 1203 1148 } ··· 1223 1166 if (aa_rawdata_eq(rawdata_ent, udata)) { 1224 1167 struct aa_loaddata *tmp; 1225 1168 1226 - tmp = __aa_get_loaddata(rawdata_ent); 1169 + tmp = aa_get_profile_loaddata(rawdata_ent); 1227 1170 /* check we didn't fail the race */ 1228 1171 if (tmp) { 1229 - aa_put_loaddata(udata); 1172 + aa_put_profile_loaddata(udata); 1230 1173 udata = tmp; 1231 1174 break; 1232 1175 } ··· 1239 1182 struct aa_profile *p; 1240 1183 1241 1184 if (aa_g_export_binary) 1242 - ent->new->rawdata = aa_get_loaddata(udata); 1185 + ent->new->rawdata = aa_get_profile_loaddata(udata); 1243 1186 error = __lookup_replace(ns, ent->new->base.hname, 1244 1187 !(mask & AA_MAY_REPLACE_POLICY), 1245 1188 &ent->old, &info); ··· 1372 1315 1373 1316 out: 1374 1317 aa_put_ns(ns); 1375 - aa_put_loaddata(udata); 1318 + aa_put_profile_loaddata(udata); 1376 1319 kfree(ns_name); 1377 1320 1378 1321 if (error)
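The new __remove_profile() body replaces recursion over child profiles with an explicit descend-to-leaf walk: drop to the deepest first child, remove it, then continue with its next sibling or climb back to the parent. A standalone sketch of the same traversal, using plain pointers instead of the kernel's list_head (structure names are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        const char *name;
        struct node *parent, *first_child, *next_sibling;
    };

    /* remove @root and all descendants, deepest-first, without recursion */
    static void remove_subtree(struct node *root)
    {
        struct node *curr = root->first_child, *victim;

        while (curr && curr != root) {
            while (curr->first_child)      /* descend to the deepest child */
                curr = curr->first_child;

            victim = curr;
            if (victim->next_sibling)      /* continue with the next sibling */
                curr = victim->next_sibling;
            else                           /* or climb back to the parent */
                curr = victim->parent;

            /* victim is always its parent's current first child here */
            victim->parent->first_child = victim->next_sibling;
            printf("removed %s\n", victim->name);
            free(victim);
        }
        printf("removed %s\n", root->name);
        free(root);
    }

    static struct node *mknode(const char *name, struct node *parent)
    {
        struct node *n = calloc(1, sizeof(*n));

        n->name = name;
        n->parent = parent;
        if (parent) {                      /* push onto the child list */
            n->next_sibling = parent->first_child;
            parent->first_child = n;
        }
        return n;
    }

    int main(void)
    {
        struct node *a = mknode("a", NULL);
        struct node *b = mknode("b", a);
        mknode("c", a);
        mknode("d", b);

        remove_subtree(a);   /* children before parents: c, d, b, a */
        return 0;
    }

Bounding the stack this way matters once namespace/profile trees can be deeply nested, which is also why policy_ns.c below gains a MAX_NS_DEPTH check.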
+2
security/apparmor/policy_ns.c
··· 223 223 AA_BUG(!name); 224 224 AA_BUG(!mutex_is_locked(&parent->lock)); 225 225 226 + if (parent->level > MAX_NS_DEPTH) 227 + return ERR_PTR(-ENOSPC); 226 228 ns = alloc_ns(parent->base.hname, name); 227 229 if (!ns) 228 230 return ERR_PTR(-ENOMEM);
+45 -20
security/apparmor/policy_unpack.c
··· 109 109 return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0; 110 110 } 111 111 112 - /* 113 - * need to take the ns mutex lock which is NOT safe most places that 114 - * put_loaddata is called, so we have to delay freeing it 115 - */ 116 - static void do_loaddata_free(struct work_struct *work) 112 + static void do_loaddata_free(struct aa_loaddata *d) 117 113 { 118 - struct aa_loaddata *d = container_of(work, struct aa_loaddata, work); 119 - struct aa_ns *ns = aa_get_ns(d->ns); 120 - 121 - if (ns) { 122 - mutex_lock_nested(&ns->lock, ns->level); 123 - __aa_fs_remove_rawdata(d); 124 - mutex_unlock(&ns->lock); 125 - aa_put_ns(ns); 126 - } 127 - 128 114 kfree_sensitive(d->hash); 129 115 kfree_sensitive(d->name); 130 116 kvfree(d->data); ··· 119 133 120 134 void aa_loaddata_kref(struct kref *kref) 121 135 { 122 - struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count); 136 + struct aa_loaddata *d = container_of(kref, struct aa_loaddata, 137 + count.count); 138 + 139 + do_loaddata_free(d); 140 + } 141 + 142 + /* 143 + * need to take the ns mutex lock which is NOT safe most places that 144 + * put_loaddata is called, so we have to delay freeing it 145 + */ 146 + static void do_ploaddata_rmfs(struct work_struct *work) 147 + { 148 + struct aa_loaddata *d = container_of(work, struct aa_loaddata, work); 149 + struct aa_ns *ns = aa_get_ns(d->ns); 150 + 151 + if (ns) { 152 + mutex_lock_nested(&ns->lock, ns->level); 153 + /* remove fs ref to loaddata */ 154 + __aa_fs_remove_rawdata(d); 155 + mutex_unlock(&ns->lock); 156 + aa_put_ns(ns); 157 + } 158 + /* called by dropping last pcount, so drop its associated icount */ 159 + aa_put_i_loaddata(d); 160 + } 161 + 162 + void aa_ploaddata_kref(struct kref *kref) 163 + { 164 + struct aa_loaddata *d = container_of(kref, struct aa_loaddata, pcount); 123 165 124 166 if (d) { 125 - INIT_WORK(&d->work, do_loaddata_free); 167 + INIT_WORK(&d->work, do_ploaddata_rmfs); 126 168 schedule_work(&d->work); 127 169 } 128 170 } ··· 167 153 kfree(d); 168 154 return ERR_PTR(-ENOMEM); 169 155 } 170 - kref_init(&d->count); 156 + kref_init(&d->count.count); 157 + d->count.reftype = REF_RAWDATA; 158 + kref_init(&d->pcount); 171 159 INIT_LIST_HEAD(&d->list); 172 160 173 161 return d; ··· 1026 1010 if (!aa_unpack_u32(e, &pdb->start[AA_CLASS_FILE], "dfa_start")) { 1027 1011 /* default start state for xmatch and file dfa */ 1028 1012 pdb->start[AA_CLASS_FILE] = DFA_START; 1029 - } /* setup class index */ 1013 + } 1014 + 1015 + size_t state_count = pdb->dfa->tables[YYTD_ID_BASE]->td_lolen; 1016 + 1017 + if (pdb->start[0] >= state_count || 1018 + pdb->start[AA_CLASS_FILE] >= state_count) { 1019 + *info = "invalid dfa start state"; 1020 + goto fail; 1021 + } 1022 + 1023 + /* setup class index */ 1030 1024 for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) { 1031 1025 pdb->start[i] = aa_dfa_next(pdb->dfa, pdb->start[0], 1032 1026 i); ··· 1435 1409 { 1436 1410 int error = -EPROTONOSUPPORT; 1437 1411 const char *name = NULL; 1438 - *ns = NULL; 1439 1412 1440 1413 /* get the interface version */ 1441 1414 if (!aa_unpack_u32(e, &e->version, "version")) {
+16 -3
sound/core/pcm_native.c
··· 2144 2144 for (;;) { 2145 2145 long tout; 2146 2146 struct snd_pcm_runtime *to_check; 2147 + unsigned int drain_rate; 2148 + snd_pcm_uframes_t drain_bufsz; 2149 + bool drain_no_period_wakeup; 2150 + 2147 2151 if (signal_pending(current)) { 2148 2152 result = -ERESTARTSYS; 2149 2153 break; ··· 2167 2163 snd_pcm_group_unref(group, substream); 2168 2164 if (!to_check) 2169 2165 break; /* all drained */ 2166 + /* 2167 + * Cache the runtime fields needed after unlock. 2168 + * A concurrent close() on the linked stream may free 2169 + * its runtime via snd_pcm_detach_substream() once we 2170 + * release the stream lock below. 2171 + */ 2172 + drain_no_period_wakeup = to_check->no_period_wakeup; 2173 + drain_rate = to_check->rate; 2174 + drain_bufsz = to_check->buffer_size; 2170 2175 init_waitqueue_entry(&wait, current); 2171 2176 set_current_state(TASK_INTERRUPTIBLE); 2172 2177 add_wait_queue(&to_check->sleep, &wait); 2173 2178 snd_pcm_stream_unlock_irq(substream); 2174 - if (runtime->no_period_wakeup) 2179 + if (drain_no_period_wakeup) 2175 2180 tout = MAX_SCHEDULE_TIMEOUT; 2176 2181 else { 2177 2182 tout = 100; 2178 - if (runtime->rate) { 2179 - long t = runtime->buffer_size * 1100 / runtime->rate; 2183 + if (drain_rate) { 2184 + long t = drain_bufsz * 1100 / drain_rate; 2180 2185 tout = max(t, tout); 2181 2186 } 2182 2187 tout = msecs_to_jiffies(tout);
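The cached values feed the same timeout formula as before: each wait round is 110% of the buffer duration expressed in milliseconds (buffer_size * 1100 / rate), with a 100 ms floor. For example, a 16384-frame buffer at 48 kHz gives 16384 * 1100 / 48000, roughly 375 ms, per round; the fix only changes where those operands are read from, not the arithmetic.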
+3
sound/hda/codecs/realtek/alc269.c
··· 6940 6940 SND_PCI_QUIRK(0x103c, 0x89da, "HP Spectre x360 14t-ea100", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX), 6941 6941 SND_PCI_QUIRK(0x103c, 0x89e7, "HP Elite x2 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 6942 6942 SND_PCI_QUIRK(0x103c, 0x8a0f, "HP Pavilion 14-ec1xxx", ALC287_FIXUP_HP_GPIO_LED), 6943 + SND_PCI_QUIRK(0x103c, 0x8a1f, "HP Laptop 14s-dr5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), 6943 6944 SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), 6944 6945 SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), 6945 6946 SND_PCI_QUIRK(0x103c, 0x8a26, "HP Victus 16-d1xxx (MB 8A26)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT), ··· 7274 7273 SND_PCI_QUIRK(0x1043, 0x1e93, "ASUS ExpertBook B9403CVAR", ALC294_FIXUP_ASUS_HPE), 7275 7274 SND_PCI_QUIRK(0x1043, 0x1eb3, "ASUS Ally RCLA72", ALC287_FIXUP_TAS2781_I2C), 7276 7275 SND_PCI_QUIRK(0x1043, 0x1ed3, "ASUS HN7306W", ALC287_FIXUP_CS35L41_I2C_2), 7276 + HDA_CODEC_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1), 7277 7277 SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2), 7278 7278 SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401), 7279 7279 SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), ··· 7495 7493 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 7496 7494 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 7497 7495 SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 7496 + SND_PCI_QUIRK(0x17aa, 0x2288, "Thinkpad X390", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK), 7498 7497 SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), 7499 7498 SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), 7500 7499 SND_PCI_QUIRK(0x17aa, 0x22c1, "Thinkpad P1 Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
+9
sound/hda/codecs/realtek/alc662.c
··· 313 313 ALC897_FIXUP_HEADSET_MIC_PIN2, 314 314 ALC897_FIXUP_UNIS_H3C_X500S, 315 315 ALC897_FIXUP_HEADSET_MIC_PIN3, 316 + ALC897_FIXUP_H610M_HP_PIN, 316 317 }; 317 318 318 319 static const struct hda_fixup alc662_fixups[] = { ··· 767 766 { } 768 767 }, 769 768 }, 769 + [ALC897_FIXUP_H610M_HP_PIN] = { 770 + .type = HDA_FIXUP_PINS, 771 + .v.pins = (const struct hda_pintbl[]) { 772 + { 0x19, 0x0321403f }, /* HP out */ 773 + { } 774 + }, 775 + }, 770 776 }; 771 777 772 778 static const struct hda_quirk alc662_fixup_tbl[] = { ··· 823 815 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT), 824 816 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2), 825 817 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), 818 + SND_PCI_QUIRK(0x1458, 0xa194, "H610M H V2 DDR4", ALC897_FIXUP_H610M_HP_PIN), 826 819 SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE), 827 820 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS), 828 821 SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
+14 -4
sound/soc/amd/acp/acp-mach-common.c
··· 127 127 if (drvdata->hs_codec_id != RT5682) 128 128 return -EINVAL; 129 129 130 - drvdata->wclk = clk_get(component->dev, "rt5682-dai-wclk"); 131 - drvdata->bclk = clk_get(component->dev, "rt5682-dai-bclk"); 130 + drvdata->wclk = devm_clk_get(component->dev, "rt5682-dai-wclk"); 131 + if (IS_ERR(drvdata->wclk)) 132 + return PTR_ERR(drvdata->wclk); 133 + 134 + drvdata->bclk = devm_clk_get(component->dev, "rt5682-dai-bclk"); 135 + if (IS_ERR(drvdata->bclk)) 136 + return PTR_ERR(drvdata->bclk); 132 137 133 138 ret = snd_soc_dapm_new_controls(dapm, rt5682_widgets, 134 139 ARRAY_SIZE(rt5682_widgets)); ··· 375 370 return -EINVAL; 376 371 377 372 if (!drvdata->soc_mclk) { 378 - drvdata->wclk = clk_get(component->dev, "rt5682-dai-wclk"); 379 - drvdata->bclk = clk_get(component->dev, "rt5682-dai-bclk"); 373 + drvdata->wclk = devm_clk_get(component->dev, "rt5682-dai-wclk"); 374 + if (IS_ERR(drvdata->wclk)) 375 + return PTR_ERR(drvdata->wclk); 376 + 377 + drvdata->bclk = devm_clk_get(component->dev, "rt5682-dai-bclk"); 378 + if (IS_ERR(drvdata->bclk)) 379 + return PTR_ERR(drvdata->bclk); 380 380 } 381 381 382 382 ret = snd_soc_dapm_new_controls(dapm, rt5682s_widgets,
+7 -2
sound/soc/amd/acp3x-rt5682-max9836.c
··· 94 94 return ret; 95 95 } 96 96 97 - rt5682_dai_wclk = clk_get(component->dev, "rt5682-dai-wclk"); 98 - rt5682_dai_bclk = clk_get(component->dev, "rt5682-dai-bclk"); 97 + rt5682_dai_wclk = devm_clk_get(component->dev, "rt5682-dai-wclk"); 98 + if (IS_ERR(rt5682_dai_wclk)) 99 + return PTR_ERR(rt5682_dai_wclk); 100 + 101 + rt5682_dai_bclk = devm_clk_get(component->dev, "rt5682-dai-bclk"); 102 + if (IS_ERR(rt5682_dai_bclk)) 103 + return PTR_ERR(rt5682_dai_bclk); 99 104 100 105 ret = snd_soc_card_jack_new_pins(card, "Headset Jack", 101 106 SND_JACK_HEADSET |
+1 -1
sound/soc/codecs/rt1011.c
··· 1047 1047 struct snd_ctl_elem_value *ucontrol) 1048 1048 { 1049 1049 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); 1050 - struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_to_dapm(kcontrol); 1050 + struct snd_soc_dapm_context *dapm = snd_soc_component_to_dapm(component); 1051 1051 struct rt1011_priv *rt1011 = 1052 1052 snd_soc_component_get_drvdata(component); 1053 1053
+8 -4
sound/soc/generic/simple-card-utils.c
··· 1038 1038 else 1039 1039 port = np; 1040 1040 1041 - struct device_node *ports __free(device_node) = of_get_parent(port); 1042 - struct device_node *top __free(device_node) = of_get_parent(ports); 1043 - struct device_node *ports0 __free(device_node) = of_get_child_by_name(top, "ports"); 1041 + struct device_node *ports __free(device_node) = of_get_parent(port); 1042 + const char *at = strchr(kbasename(ports->full_name), '@'); 1044 1043 1045 - return ports0 == ports; 1044 + /* 1045 + * Since child iteration order may differ 1046 + * between a base DT and DT overlays, 1047 + * string match "ports" or "ports@0" in the node name instead. 1048 + */ 1049 + return !at || !strcmp(at, "@0"); 1046 1050 } 1047 1051 EXPORT_SYMBOL_GPL(graph_util_is_ports0); 1048 1052
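The string match the comment describes boils down to "the node's basename is either 'ports' or 'ports@0'". A standalone sketch of that check, with a simplified stand-in for the kernel's kbasename():

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    /* simplified kbasename(): last path component */
    static const char *kbasename(const char *path)
    {
        const char *tail = strrchr(path, '/');
        return tail ? tail + 1 : path;
    }

    /* mirrors the new check: a node named "ports" or "ports@0" qualifies */
    static bool is_ports0_name(const char *full_name)
    {
        const char *at = strchr(kbasename(full_name), '@');
        return !at || !strcmp(at, "@0");
    }

    int main(void)
    {
        printf("%d\n", is_ports0_name("/sound/ports"));    /* 1 */
        printf("%d\n", is_ports0_name("/sound/ports@0"));  /* 1 */
        printf("%d\n", is_ports0_name("/sound/ports@1"));  /* 0 */
        return 0;
    }

Matching on the unit address sidesteps the old assumption that of_get_child_by_name() returns children in base-DT order, which DT overlays can violate.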
+1
sound/soc/qcom/qdsp6/q6apm-dai.c
··· 838 838 .ack = q6apm_dai_ack, 839 839 .compress_ops = &q6apm_dai_compress_ops, 840 840 .use_dai_pcm_id = true, 841 + .remove_order = SND_SOC_COMP_ORDER_EARLY, 841 842 }; 842 843 843 844 static int q6apm_dai_probe(struct platform_device *pdev)
+1
sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
··· 278 278 .of_xlate_dai_name = q6dsp_audio_ports_of_xlate_dai_name, 279 279 .be_pcm_base = AUDIOREACH_BE_PCM_BASE, 280 280 .use_dai_pcm_id = true, 281 + .remove_order = SND_SOC_COMP_ORDER_FIRST, 281 282 }; 282 283 283 284 static int q6apm_lpass_dai_dev_probe(struct platform_device *pdev)
+1
sound/soc/qcom/qdsp6/q6apm.c
··· 715 715 .name = APM_AUDIO_DRV_NAME, 716 716 .probe = q6apm_audio_probe, 717 717 .remove = q6apm_audio_remove, 718 + .remove_order = SND_SOC_COMP_ORDER_LAST, 718 719 }; 719 720 720 721 static int apm_probe(gpr_device_t *gdev)
+3 -3
sound/soc/samsung/i2s.c
··· 1360 1360 if (!pdev_sec) 1361 1361 return -ENOMEM; 1362 1362 1363 - pdev_sec->driver_override = kstrdup("samsung-i2s", GFP_KERNEL); 1364 - if (!pdev_sec->driver_override) { 1363 + ret = device_set_driver_override(&pdev_sec->dev, "samsung-i2s"); 1364 + if (ret) { 1365 1365 platform_device_put(pdev_sec); 1366 - return -ENOMEM; 1366 + return ret; 1367 1367 } 1368 1368 1369 1369 ret = platform_device_add(pdev_sec);
+8 -3
sound/soc/soc-core.c
··· 462 462 463 463 list_del(&rtd->list); 464 464 465 - if (delayed_work_pending(&rtd->delayed_work)) 466 - flush_delayed_work(&rtd->delayed_work); 465 + flush_delayed_work(&rtd->delayed_work); 467 466 snd_soc_pcm_component_free(rtd); 468 467 469 468 /* ··· 1863 1864 1864 1865 /* 1865 1866 * Check if a DMI field is valid, i.e. not containing any string 1866 - * in the black list. 1867 + * in the black list and not the empty string. 1867 1868 */ 1868 1869 static int is_dmi_valid(const char *field) 1869 1870 { 1870 1871 int i = 0; 1872 + 1873 + if (!field[0]) 1874 + return 0; 1871 1875 1872 1876 while (dmi_blacklist[i]) { 1873 1877 if (strstr(field, dmi_blacklist[i])) ··· 2124 2122 for_each_card_rtds(card, rtd) 2125 2123 if (rtd->initialized) 2126 2124 snd_soc_link_exit(rtd); 2125 + /* flush delayed work before removing DAIs and DAPM widgets */ 2126 + snd_soc_flush_all_delayed_work(card); 2127 + 2127 2128 /* remove and free each DAI */ 2128 2129 soc_remove_link_dais(card); 2129 2130 soc_remove_link_components(card);
+11
sound/soc/tegra/tegra_audio_graph_card.c
··· 231 231 .plla_out0_rates[x11_RATE] = 45158400, 232 232 }; 233 233 234 + static const struct tegra_audio_cdata tegra238_data = { 235 + /* PLLA */ 236 + .plla_rates[x8_RATE] = 1277952000, 237 + .plla_rates[x11_RATE] = 1264435200, 238 + /* PLLA_OUT0 */ 239 + .plla_out0_rates[x8_RATE] = 49152000, 240 + .plla_out0_rates[x11_RATE] = 45158400, 241 + }; 242 + 234 243 static const struct tegra_audio_cdata tegra264_data = { 235 244 /* PLLA1 */ 236 245 .plla_rates[x8_RATE] = 983040000, ··· 254 245 .data = &tegra210_data }, 255 246 { .compatible = "nvidia,tegra186-audio-graph-card", 256 247 .data = &tegra186_data }, 248 + { .compatible = "nvidia,tegra238-audio-graph-card", 249 + .data = &tegra238_data }, 257 250 { .compatible = "nvidia,tegra264-audio-graph-card", 258 251 .data = &tegra264_data }, 259 252 {},
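For context on the new tegra238 entry: the x8 and x11 slots appear to cover the two standard sample-rate families (multiples of 8 kHz and of 11.025 kHz), and the numbers line up as exact multiples. The plla_out0 rates are 49,152,000 Hz = 1024 x 48 kHz and 45,158,400 Hz = 1024 x 44.1 kHz, and the PLLA rates are in turn 26 x 49,152,000 = 1,277,952,000 Hz and 28 x 45,158,400 = 1,264,435,200 Hz respectively.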
+2
sound/usb/mixer_scarlett2.c
··· 8251 8251 8252 8252 if (desc->bInterfaceClass != 255) 8253 8253 continue; 8254 + if (desc->bNumEndpoints < 1) 8255 + continue; 8254 8256 8255 8257 epd = get_endpoint(intf->altsetting, 0); 8256 8258 private->bInterfaceNumber = desc->bInterfaceNumber;
+2
sound/usb/quirks.c
··· 2243 2243 QUIRK_FLAG_IFACE_DELAY | QUIRK_FLAG_FORCE_IFACE_RESET), 2244 2244 DEVICE_FLG(0x0661, 0x0883, /* iBasso DC04 Ultra */ 2245 2245 QUIRK_FLAG_DSD_RAW), 2246 + DEVICE_FLG(0x0666, 0x0880, /* SPACETOUCH USB Audio */ 2247 + QUIRK_FLAG_FORCE_IFACE_RESET | QUIRK_FLAG_IFACE_DELAY), 2246 2248 DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */ 2247 2249 QUIRK_FLAG_IGNORE_CTL_ERROR), 2248 2250 DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
+1 -1
tools/arch/x86/include/asm/amd/ibs.h
··· 110 110 __u64 ld_op:1, /* 0: load op */ 111 111 st_op:1, /* 1: store op */ 112 112 dc_l1tlb_miss:1, /* 2: data cache L1TLB miss */ 113 - dc_l2tlb_miss:1, /* 3: data cache L2TLB hit in 2M page */ 113 + dc_l2tlb_miss:1, /* 3: data cache L2TLB miss in 2M page */ 114 114 dc_l1tlb_hit_2m:1, /* 4: data cache L1TLB hit in 2M page */ 115 115 dc_l1tlb_hit_1g:1, /* 5: data cache L1TLB hit in 1G page */ 116 116 dc_l2tlb_hit_2m:1, /* 6: data cache L2TLB hit in 2M page */
+3 -1
tools/arch/x86/include/asm/cpufeatures.h
··· 84 84 #define X86_FEATURE_PEBS ( 3*32+12) /* "pebs" Precise-Event Based Sampling */ 85 85 #define X86_FEATURE_BTS ( 3*32+13) /* "bts" Branch Trace Store */ 86 86 #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* syscall in IA32 userspace */ 87 - #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* sysenter in IA32 userspace */ 87 + #define X86_FEATURE_SYSFAST32 ( 3*32+15) /* sysenter/syscall in IA32 userspace */ 88 88 #define X86_FEATURE_REP_GOOD ( 3*32+16) /* "rep_good" REP microcode works well */ 89 89 #define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */ 90 90 #define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* Clear CPU buffers using VERW */ ··· 326 326 #define X86_FEATURE_AMX_FP16 (12*32+21) /* AMX fp16 Support */ 327 327 #define X86_FEATURE_AVX_IFMA (12*32+23) /* Support for VPMADD52[H,L]UQ */ 328 328 #define X86_FEATURE_LAM (12*32+26) /* "lam" Linear Address Masking */ 329 + #define X86_FEATURE_MOVRS (12*32+31) /* MOVRS instructions */ 329 330 330 331 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ 331 332 #define X86_FEATURE_CLZERO (13*32+ 0) /* "clzero" CLZERO instruction */ ··· 473 472 #define X86_FEATURE_GP_ON_USER_CPUID (20*32+17) /* User CPUID faulting */ 474 473 475 474 #define X86_FEATURE_PREFETCHI (20*32+20) /* Prefetch Data/Instruction to Cache Level */ 475 + #define X86_FEATURE_ERAPS (20*32+24) /* Enhanced Return Address Predictor Security */ 476 476 #define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */ 477 477 #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */ 478 478 #define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
+6
tools/arch/x86/include/asm/msr-index.h
··· 263 263 #define MSR_SNOOP_RSP_0 0x00001328 264 264 #define MSR_SNOOP_RSP_1 0x00001329 265 265 266 + #define MSR_OMR_0 0x000003e0 267 + #define MSR_OMR_1 0x000003e1 268 + #define MSR_OMR_2 0x000003e2 269 + #define MSR_OMR_3 0x000003e3 270 + 266 271 #define MSR_LBR_SELECT 0x000001c8 267 272 #define MSR_LBR_TOS 0x000001c9 268 273 ··· 1224 1219 #define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e 1225 1220 #define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f 1226 1221 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390 1222 + #define MSR_CORE_PERF_GLOBAL_STATUS_SET 0x00000391 1227 1223 1228 1224 #define MSR_PERF_METRICS 0x00000329 1229 1225
+6 -2
tools/arch/x86/include/uapi/asm/kvm.h
··· 503 503 #define KVM_X86_GRP_SEV 1 504 504 # define KVM_X86_SEV_VMSA_FEATURES 0 505 505 # define KVM_X86_SNP_POLICY_BITS 1 506 + # define KVM_X86_SEV_SNP_REQ_CERTS 2 506 507 507 508 struct kvm_vmx_nested_state_data { 508 509 __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; ··· 744 743 KVM_SEV_SNP_LAUNCH_START = 100, 745 744 KVM_SEV_SNP_LAUNCH_UPDATE, 746 745 KVM_SEV_SNP_LAUNCH_FINISH, 746 + KVM_SEV_SNP_ENABLE_REQ_CERTS, 747 747 748 748 KVM_SEV_NR_MAX, 749 749 }; ··· 916 914 __u64 pad1[4]; 917 915 }; 918 916 919 - #define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0) 920 - #define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1) 917 + #define KVM_X2APIC_API_USE_32BIT_IDS _BITULL(0) 918 + #define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK _BITULL(1) 919 + #define KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST _BITULL(2) 920 + #define KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST _BITULL(3) 921 921 922 922 struct kvm_hyperv_eventfd { 923 923 __u32 conn_id;
+5 -2
tools/bootconfig/main.c
··· 162 162 if (fd < 0) 163 163 return -errno; 164 164 ret = fstat(fd, &stat); 165 - if (ret < 0) 166 - return -errno; 165 + if (ret < 0) { 166 + ret = -errno; 167 + close(fd); 168 + return ret; 169 + } 167 170 168 171 ret = load_xbc_fd(fd, buf, stat.st_size); 169 172
+4
tools/bootconfig/samples/bad-non-closed-brace.bconf
··· 1 + foo { 2 + bar { 3 + buz 4 + }
+19
tools/bootconfig/samples/bad-over-max-brace.bconf
··· 1 + key1 { 2 + key2 { 3 + key3 { 4 + key4 { 5 + key5 { 6 + key6 { 7 + key7 { 8 + key8 { 9 + key9 { 10 + key10 { 11 + key11 { 12 + key12 { 13 + key13 { 14 + key14 { 15 + key15 { 16 + key16 { 17 + key17 { 18 + }}}}}}}}}}}}}}}}} 19 +
+1
tools/bootconfig/samples/exp-good-nested-brace.bconf
··· 1 + key1.key2.key3.key4.key5.key6.key7.key8.key9.key10.key11.key12.key13.key14.key15.key16;
+18
tools/bootconfig/samples/good-nested-brace.bconf
··· 1 + key1 { 2 + key2 { 3 + key3 { 4 + key4 { 5 + key5 { 6 + key6 { 7 + key7 { 8 + key8 { 9 + key9 { 10 + key10 { 11 + key11 { 12 + key12 { 13 + key13 { 14 + key14 { 15 + key15 { 16 + key16 { 17 + }}}}}}}}}}}}}}}} 18 +
+9
tools/bootconfig/test-bootconfig.sh
··· 171 171 xfail grep -q 'val[[:space:]]' $OUTFILE
172 172 xpass grep -q 'val2[[:space:]]' $OUTFILE
173 173
174 + echo "Showing correct line:column for a missing closing brace"
175 + cat > $TEMPCONF << EOF
176 + foo {
177 + bar {
178 + }
179 + EOF
180 + $BOOTCONF -a $TEMPCONF $INITRD 2> $OUTFILE
181 + xpass grep -q "1:1" $OUTFILE
182 +
174 183 echo "=== expected failure cases ==="
175 184 for i in samples/bad-* ; do
176 185 xfail $BOOTCONF -a $i $INITRD
+9
tools/build/Build.include
··· 99 99 cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXXFLAGS_$(basetarget).o) $(CXXFLAGS_$(obj)) 100 100 101 101 ### 102 + # Rust flags to be used on rule definition, includes: 103 + # - global $(RUST_FLAGS) 104 + # - per target Rust flags 105 + # - per object Rust flags 106 + rust_flags_1 = $(RUST_FLAGS) $(RUST_FLAGS_$(basetarget).o) $(RUST_FLAGS_$(obj)) 107 + rust_flags_2 = $(filter-out $(RUST_FLAGS_REMOVE_$(basetarget).o), $(rust_flags_1)) 108 + rust_flags = $(filter-out $(RUST_FLAGS_REMOVE_$(obj)), $(rust_flags_2)) 109 + 110 + ### 102 111 ## HOSTCC C flags 103 112 104 113 host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
+4 -2
tools/build/Makefile.build
··· 70 70 # If there's nothing to link, create empty $@ object. 71 71 quiet_cmd_ld_multi = LD $@ 72 72 cmd_ld_multi = $(if $(strip $(obj-y)),\ 73 - $(LD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(AR) rcs $@) 73 + printf "$(objprefix)%s " $(patsubst $(objprefix)%,%,$(filter $(obj-y),$^)) | \ 74 + xargs $(LD) -r -o $@,rm -f $@; $(AR) rcs $@) 74 75 75 76 quiet_cmd_host_ld_multi = HOSTLD $@ 76 77 cmd_host_ld_multi = $(if $(strip $(obj-y)),\ 77 - $(HOSTLD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(HOSTAR) rcs $@) 78 + printf "$(objprefix)%s " $(patsubst $(objprefix)%,%,$(filter $(obj-y),$^)) | \ 79 + xargs $(HOSTLD) -r -o $@,rm -f $@; $(HOSTAR) rcs $@) 78 80 79 81 rust_common_cmd = \ 80 82 $(RUSTC) $(rust_flags) \
-24
tools/include/linux/coresight-pmu.h
··· 22 22 #define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2)) 23 23 24 24 /* 25 - * Below are the definition of bit offsets for perf option, and works as 26 - * arbitrary values for all ETM versions. 27 - * 28 - * Most of them are orignally from ETMv3.5/PTM's ETMCR config, therefore, 29 - * ETMv3.5/PTM doesn't define ETMCR config bits with prefix "ETM3_" and 30 - * directly use below macros as config bits. 31 - */ 32 - #define ETM_OPT_BRANCH_BROADCAST 8 33 - #define ETM_OPT_CYCACC 12 34 - #define ETM_OPT_CTXTID 14 35 - #define ETM_OPT_CTXTID2 15 36 - #define ETM_OPT_TS 28 37 - #define ETM_OPT_RETSTK 29 38 - 39 - /* ETMv4 CONFIGR programming bits for the ETM OPTs */ 40 - #define ETM4_CFG_BIT_BB 3 41 - #define ETM4_CFG_BIT_CYCACC 4 42 - #define ETM4_CFG_BIT_CTXTID 6 43 - #define ETM4_CFG_BIT_VMID 7 44 - #define ETM4_CFG_BIT_TS 11 45 - #define ETM4_CFG_BIT_RETSTK 12 46 - #define ETM4_CFG_BIT_VMID_OPT 15 47 - 48 - /* 49 25 * Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload. 50 26 * Used to associate a CPU with the CoreSight Trace ID. 51 27 * [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
+4
tools/include/linux/gfp.h
··· 5 5 #include <linux/types.h>
6 6 #include <linux/gfp_types.h>
7 7
8 + /* Helper macro that defaults the gfp flags argument to GFP_KERNEL when omitted */
9 + #define __default_gfp(a,...) a
10 + #define default_gfp(...) __default_gfp(__VA_ARGS__ __VA_OPT__(,) GFP_KERNEL)
11 +
8 12 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
9 13 {
10 14 return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
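The trick here is that __VA_OPT__(,) only emits the comma when arguments were passed, so GFP_KERNEL is either the only argument or a trailing one that __default_gfp() discards. A standalone demo, compilable with C23 or recent GCC/Clang (the flag values are stand-ins, not the kernel's):

    #include <stdio.h>

    #define GFP_KERNEL 1
    #define GFP_ATOMIC 2

    #define __default_gfp(a, ...) a
    #define default_gfp(...) __default_gfp(__VA_ARGS__ __VA_OPT__(,) GFP_KERNEL)

    int main(void)
    {
        printf("%d\n", default_gfp());           /* 1: falls back to GFP_KERNEL */
        printf("%d\n", default_gfp(GFP_ATOMIC)); /* 2: explicit flag wins */
        return 0;
    }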
+7 -2
tools/include/linux/gfp_types.h
··· 139 139 * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. 140 140 * 141 141 * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension. 142 + * mark_obj_codetag_empty() should be called upon freeing for objects allocated 143 + * with this flag to indicate that their NULL tags are expected and normal. 142 144 */ 143 145 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) 144 146 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) ··· 311 309 * 312 310 * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower 313 311 * watermark is applied to allow access to "atomic reserves". 314 - * The current implementation doesn't support NMI and few other strict 315 - * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT. 312 + * The current implementation doesn't support NMI, nor contexts that disable 313 + * preemption under PREEMPT_RT. This includes raw_spin_lock() and plain 314 + * preempt_disable() - see "Memory allocation" in 315 + * Documentation/core-api/real-time/differences.rst for more info. 316 316 * 317 317 * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires 318 318 * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim. ··· 325 321 * %GFP_NOWAIT is for kernel allocations that should not stall for direct 326 322 * reclaim, start physical IO or use any filesystem callback. It is very 327 323 * likely to fail to allocate memory, even for very small allocations. 324 + * The same restrictions on calling contexts apply as for %GFP_ATOMIC. 328 325 * 329 326 * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages 330 327 * that do not require the starting of any physical IO.
+19
tools/include/linux/overflow.h
··· 69 69 }) 70 70 71 71 /** 72 + * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX 73 + * @factor1: first factor 74 + * @factor2: second factor 75 + * 76 + * Returns: calculate @factor1 * @factor2, both promoted to size_t, 77 + * with any overflow causing the return value to be SIZE_MAX. The 78 + * lvalue must be size_t to avoid implicit type conversion. 79 + */ 80 + static inline size_t __must_check size_mul(size_t factor1, size_t factor2) 81 + { 82 + size_t bytes; 83 + 84 + if (check_mul_overflow(factor1, factor2, &bytes)) 85 + return SIZE_MAX; 86 + 87 + return bytes; 88 + } 89 + 90 + /** 72 91 * array_size() - Calculate size of 2-dimensional array. 73 92 * 74 93 * @a: dimension one
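Saturating to SIZE_MAX means a later allocation of the product is guaranteed to fail cleanly instead of silently allocating a wrapped-around, too-small buffer. A standalone sketch of the same function, with the compiler builtin that check_mul_overflow() maps to:

    #include <stdio.h>
    #include <stdint.h>

    static size_t size_mul(size_t factor1, size_t factor2)
    {
        size_t bytes;

        if (__builtin_mul_overflow(factor1, factor2, &bytes))
            return SIZE_MAX;   /* saturate instead of wrapping */
        return bytes;
    }

    int main(void)
    {
        printf("%zu\n", size_mul(16, 4));                   /* 64 */
        printf("%d\n", size_mul(SIZE_MAX, 2) == SIZE_MAX);  /* 1: saturated */
        return 0;
    }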
+9
tools/include/linux/slab.h
··· 202 202 return sheaf->size; 203 203 } 204 204 205 + #define __alloc_objs(KMALLOC, GFP, TYPE, COUNT) \ 206 + ({ \ 207 + const size_t __obj_size = size_mul(sizeof(TYPE), COUNT); \ 208 + (TYPE *)KMALLOC(__obj_size, GFP); \ 209 + }) 210 + 211 + #define kzalloc_obj(P, ...) \ 212 + __alloc_objs(kzalloc, default_gfp(__VA_ARGS__), typeof(P), 1) 213 + 205 214 #endif /* _TOOLS_SLAB_H */
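Reading the macro as written: the first argument supplies the object type through typeof(), size_mul() guards the sizeof-times-count product, and any trailing argument overrides the GFP_KERNEL default injected by default_gfp(). A hypothetical usage sketch against this shim (struct widget is invented):

    struct widget {
            int id;
            char name[32];
    };

    static struct widget *make_widget(void)
    {
            /* typeof(*w) names the full struct, so the right size is
             * allocated and the result is already (struct widget *).
             * No gfp argument: default_gfp() falls back to GFP_KERNEL;
             * kzalloc_obj(*w, GFP_ATOMIC) would override the default. */
            struct widget *w = kzalloc_obj(*w);

            return w;
    }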
+4 -1
tools/include/uapi/asm-generic/unistd.h
··· 860 860 #define __NR_listns 470 861 861 __SYSCALL(__NR_listns, sys_listns) 862 862 863 + #define __NR_rseq_slice_yield 471 864 + __SYSCALL(__NR_rseq_slice_yield, sys_rseq_slice_yield) 865 + 863 866 #undef __NR_syscalls 864 - #define __NR_syscalls 471 867 + #define __NR_syscalls 472 865 868 866 869 /* 867 870 * 32 bit systems traditionally used different
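Until a libc wrapper exists, the new syscall can only be reached via syscall(2). A hedged sketch: the fallback define mirrors the asm-generic number added above, and the yield semantics (give up the CPU once the kernel has granted an rseq time-slice extension) come from that series rather than this hunk:

    #define _GNU_SOURCE
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_rseq_slice_yield
    #define __NR_rseq_slice_yield 471   /* asm-generic number from this patch */
    #endif

    static inline long rseq_slice_yield(void)
    {
            return syscall(__NR_rseq_slice_yield);
    }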
+23 -1
tools/include/uapi/linux/kvm.h
··· 135 135 } u; 136 136 }; 137 137 138 + struct kvm_exit_snp_req_certs { 139 + __u64 gpa; 140 + __u64 npages; 141 + __u64 ret; 142 + }; 143 + 138 144 #define KVM_S390_GET_SKEYS_NONE 1 139 145 #define KVM_S390_SKEYS_MAX 1048576 140 146 ··· 186 180 #define KVM_EXIT_MEMORY_FAULT 39 187 181 #define KVM_EXIT_TDX 40 188 182 #define KVM_EXIT_ARM_SEA 41 183 + #define KVM_EXIT_ARM_LDST64B 42 184 + #define KVM_EXIT_SNP_REQ_CERTS 43 189 185 190 186 /* For KVM_EXIT_INTERNAL_ERROR */ 191 187 /* Emulate instruction failed. */ ··· 410 402 } eoi; 411 403 /* KVM_EXIT_HYPERV */ 412 404 struct kvm_hyperv_exit hyperv; 413 - /* KVM_EXIT_ARM_NISV */ 405 + /* KVM_EXIT_ARM_NISV / KVM_EXIT_ARM_LDST64B */ 414 406 struct { 415 407 __u64 esr_iss; 416 408 __u64 fault_ipa; ··· 490 482 __u64 gva; 491 483 __u64 gpa; 492 484 } arm_sea; 485 + /* KVM_EXIT_SNP_REQ_CERTS */ 486 + struct kvm_exit_snp_req_certs snp_req_certs; 493 487 /* Fix the size of the union. */ 494 488 char padding[256]; 495 489 }; ··· 984 974 #define KVM_CAP_GUEST_MEMFD_FLAGS 244 985 975 #define KVM_CAP_ARM_SEA_TO_USER 245 986 976 #define KVM_CAP_S390_USER_OPEREXEC 246 977 + #define KVM_CAP_S390_KEYOP 247 987 978 988 979 struct kvm_irq_routing_irqchip { 989 980 __u32 irqchip; ··· 1230 1219 __s32 tablefd; 1231 1220 }; 1232 1221 1222 + #define KVM_S390_KEYOP_ISKE 0x01 1223 + #define KVM_S390_KEYOP_RRBE 0x02 1224 + #define KVM_S390_KEYOP_SSKE 0x03 1225 + struct kvm_s390_keyop { 1226 + __u64 guest_addr; 1227 + __u8 key; 1228 + __u8 operation; 1229 + __u8 pad[6]; 1230 + }; 1231 + 1233 1232 /* 1234 1233 * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns 1235 1234 * a vcpu fd. ··· 1259 1238 #define KVM_S390_UCAS_MAP _IOW(KVMIO, 0x50, struct kvm_s390_ucas_mapping) 1260 1239 #define KVM_S390_UCAS_UNMAP _IOW(KVMIO, 0x51, struct kvm_s390_ucas_mapping) 1261 1240 #define KVM_S390_VCPU_FAULT _IOW(KVMIO, 0x52, unsigned long) 1241 + #define KVM_S390_KEYOP _IOWR(KVMIO, 0x53, struct kvm_s390_keyop) 1262 1242 1263 1243 /* Device model IOC */ 1264 1244 #define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
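A sketch of driving the new s390 storage-key ioctl from a VMM, assuming a <linux/kvm.h> with this patch applied; following the neighbouring UCAS ioctls it is issued here on the vCPU fd, though that is an assumption, and error handling is elided:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_storage_key(int vcpu_fd, uint64_t guest_addr, uint8_t key)
    {
            struct kvm_s390_keyop op;

            memset(&op, 0, sizeof(op));           /* keeps op.pad[] zeroed */
            op.guest_addr = guest_addr;
            op.key = key;
            op.operation = KVM_S390_KEYOP_SSKE;   /* set storage key */

            return ioctl(vcpu_fd, KVM_S390_KEYOP, &op);
    }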
+1 -1
tools/include/uapi/linux/perf_event.h
··· 1396 1396 #define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */ 1397 1397 #define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */ 1398 1398 #define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */ 1399 - #define PERF_MEM_LVLNUM_L0 0x0007 /* L0 */ 1399 + #define PERF_MEM_LVLNUM_L0 0x0007 /* L0 */ 1400 1400 #define PERF_MEM_LVLNUM_UNC 0x0008 /* Uncached */ 1401 1401 #define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */ 1402 1402 #define PERF_MEM_LVLNUM_IO 0x000a /* I/O */
+1 -1
tools/objtool/Makefile
··· 13 13 14 14 ifeq ($(ARCH_HAS_KLP),y) 15 15 HAVE_XXHASH = $(shell printf "$(pound)include <xxhash.h>\nXXH3_state_t *state;int main() {}" | \ 16 - $(HOSTCC) -xc - -o /dev/null -lxxhash 2> /dev/null && echo y || echo n) 16 + $(HOSTCC) $(HOSTCFLAGS) -xc - -o /dev/null -lxxhash 2> /dev/null && echo y || echo n) 17 17 ifeq ($(HAVE_XXHASH),y) 18 18 BUILD_KLP := y 19 19 LIBXXHASH_CFLAGS := $(shell $(HOSTPKG_CONFIG) libxxhash --cflags 2>/dev/null) \
+26 -42
tools/objtool/arch/x86/decode.c
··· 395 395 if (!rex_w) 396 396 break; 397 397 398 - if (modrm_reg == CFI_SP) { 399 - 400 - if (mod_is_reg()) { 401 - /* mov %rsp, reg */ 402 - ADD_OP(op) { 403 - op->src.type = OP_SRC_REG; 404 - op->src.reg = CFI_SP; 405 - op->dest.type = OP_DEST_REG; 406 - op->dest.reg = modrm_rm; 407 - } 408 - break; 409 - 410 - } else { 411 - /* skip RIP relative displacement */ 412 - if (is_RIP()) 413 - break; 414 - 415 - /* skip nontrivial SIB */ 416 - if (have_SIB()) { 417 - modrm_rm = sib_base; 418 - if (sib_index != CFI_SP) 419 - break; 420 - } 421 - 422 - /* mov %rsp, disp(%reg) */ 423 - ADD_OP(op) { 424 - op->src.type = OP_SRC_REG; 425 - op->src.reg = CFI_SP; 426 - op->dest.type = OP_DEST_REG_INDIRECT; 427 - op->dest.reg = modrm_rm; 428 - op->dest.offset = ins.displacement.value; 429 - } 430 - break; 431 - } 432 - 433 - break; 434 - } 435 - 436 - if (rm_is_reg(CFI_SP)) { 437 - 438 - /* mov reg, %rsp */ 398 + if (mod_is_reg()) { 399 + /* mov reg, reg */ 439 400 ADD_OP(op) { 440 401 op->src.type = OP_SRC_REG; 441 402 op->src.reg = modrm_reg; 442 403 op->dest.type = OP_DEST_REG; 443 - op->dest.reg = CFI_SP; 404 + op->dest.reg = modrm_rm; 405 + } 406 + break; 407 + } 408 + 409 + /* skip RIP relative displacement */ 410 + if (is_RIP()) 411 + break; 412 + 413 + /* skip nontrivial SIB */ 414 + if (have_SIB()) { 415 + modrm_rm = sib_base; 416 + if (sib_index != CFI_SP) 417 + break; 418 + } 419 + 420 + /* mov %rsp, disp(%reg) */ 421 + if (modrm_reg == CFI_SP) { 422 + ADD_OP(op) { 423 + op->src.type = OP_SRC_REG; 424 + op->src.reg = CFI_SP; 425 + op->dest.type = OP_DEST_REG_INDIRECT; 426 + op->dest.reg = modrm_rm; 427 + op->dest.offset = ins.displacement.value; 444 428 } 445 429 break; 446 430 }
+21 -8
tools/objtool/check.c
··· 2184 2184 last = insn; 2185 2185 2186 2186 /* 2187 - * Store back-pointers for unconditional forward jumps such 2187 + * Store back-pointers for forward jumps such 2188 2188 * that find_jump_table() can back-track using those and 2189 2189 * avoid some potentially confusing code. 2190 2190 */ 2191 - if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && 2192 - insn->offset > last->offset && 2191 + if (insn->jump_dest && 2193 2192 insn->jump_dest->offset > insn->offset && 2194 2193 !insn->jump_dest->first_jump_src) { 2195 2194 ··· 2999 3000 cfi->stack_size += 8; 3000 3001 } 3001 3002 3003 + else if (cfi->vals[op->src.reg].base == CFI_CFA) { 3004 + /* 3005 + * Clang RSP musical chairs: 3006 + * 3007 + * mov %rsp, %rdx [handled above] 3008 + * ... 3009 + * mov %rdx, %rbx [handled here] 3010 + * ... 3011 + * mov %rbx, %rsp [handled above] 3012 + */ 3013 + cfi->vals[op->dest.reg].base = CFI_CFA; 3014 + cfi->vals[op->dest.reg].offset = cfi->vals[op->src.reg].offset; 3015 + } 3016 + 3002 3017 3003 3018 break; 3004 3019 ··· 3747 3734 static int validate_branch(struct objtool_file *file, struct symbol *func, 3748 3735 struct instruction *insn, struct insn_state state); 3749 3736 static int do_validate_branch(struct objtool_file *file, struct symbol *func, 3750 - struct instruction *insn, struct insn_state state); 3737 + struct instruction *insn, struct insn_state *state); 3751 3738 3752 3739 static int validate_insn(struct objtool_file *file, struct symbol *func, 3753 3740 struct instruction *insn, struct insn_state *statep, ··· 4012 3999 * tools/objtool/Documentation/objtool.txt. 4013 4000 */ 4014 4001 static int do_validate_branch(struct objtool_file *file, struct symbol *func, 4015 - struct instruction *insn, struct insn_state state) 4002 + struct instruction *insn, struct insn_state *state) 4016 4003 { 4017 4004 struct instruction *next_insn, *prev_insn = NULL; 4018 4005 bool dead_end; ··· 4043 4030 return 1; 4044 4031 } 4045 4032 4046 - ret = validate_insn(file, func, insn, &state, prev_insn, next_insn, 4033 + ret = validate_insn(file, func, insn, state, prev_insn, next_insn, 4047 4034 &dead_end); 4048 4035 4049 4036 if (!insn->trace) { ··· 4054 4041 } 4055 4042 4056 4043 if (!dead_end && !next_insn) { 4057 - if (state.cfi.cfa.base == CFI_UNDEFINED) 4044 + if (state->cfi.cfa.base == CFI_UNDEFINED) 4058 4045 return 0; 4059 4046 if (file->ignore_unreachables) 4060 4047 return 0; ··· 4079 4066 int ret; 4080 4067 4081 4068 trace_depth_inc(); 4082 - ret = do_validate_branch(file, func, insn, state); 4069 + ret = do_validate_branch(file, func, insn, &state); 4083 4070 trace_depth_dec(); 4084 4071 4085 4072 return ret;
+4 -21
tools/objtool/elf.c
··· 16 16 #include <string.h> 17 17 #include <unistd.h> 18 18 #include <errno.h> 19 - #include <libgen.h> 20 19 #include <ctype.h> 21 20 #include <linux/align.h> 22 21 #include <linux/kernel.h> ··· 1188 1189 struct elf *elf_create_file(GElf_Ehdr *ehdr, const char *name) 1189 1190 { 1190 1191 struct section *null, *symtab, *strtab, *shstrtab; 1191 - char *dir, *base, *tmp_name; 1192 + char *tmp_name; 1192 1193 struct symbol *sym; 1193 1194 struct elf *elf; 1194 1195 ··· 1202 1203 1203 1204 INIT_LIST_HEAD(&elf->sections); 1204 1205 1205 - dir = strdup(name); 1206 - if (!dir) { 1207 - ERROR_GLIBC("strdup"); 1208 - return NULL; 1209 - } 1210 - 1211 - dir = dirname(dir); 1212 - 1213 - base = strdup(name); 1214 - if (!base) { 1215 - ERROR_GLIBC("strdup"); 1216 - return NULL; 1217 - } 1218 - 1219 - base = basename(base); 1220 - 1221 - tmp_name = malloc(256); 1206 + tmp_name = malloc(strlen(name) + 8); 1222 1207 if (!tmp_name) { 1223 1208 ERROR_GLIBC("malloc"); 1224 1209 return NULL; 1225 1210 } 1226 1211 1227 - snprintf(tmp_name, 256, "%s/%s.XXXXXX", dir, base); 1212 + sprintf(tmp_name, "%s.XXXXXX", name); 1228 1213 1229 1214 elf->fd = mkstemp(tmp_name); 1230 1215 if (elf->fd == -1) { ··· 1358 1375 memcpy(sec->data->d_buf, data, size); 1359 1376 1360 1377 sec->data->d_size = size; 1361 - sec->data->d_align = 1; 1378 + sec->data->d_align = sec->sh.sh_addralign; 1362 1379 1363 1380 offset = ALIGN(sec->sh.sh_size, sec->sh.sh_addralign); 1364 1381 sec->sh.sh_size = offset + size;
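The simplification drops the dirname()/basename() round trip: since the temp file is named after the output path itself plus a ".XXXXXX" suffix, it always lands in the destination directory, so the later rename() cannot cross filesystems and the buffer can be sized exactly. A standalone sketch of the same pattern:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Returns an open fd for "<name>.XXXXXX" in name's own directory,
     * or -1 on failure; mkstemp() replaces the X's in place. */
    static int make_temp_for(const char *name)
    {
            char *tmp = malloc(strlen(name) + 8);   /* ".XXXXXX" + NUL */
            int fd = -1;

            if (tmp) {
                    sprintf(tmp, "%s.XXXXXX", name);
                    fd = mkstemp(tmp);
                    free(tmp);
            }
            return fd;
    }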
+1 -1
tools/objtool/include/objtool/warn.h
··· 107 107 #define ERROR_ELF(format, ...) __WARN_ELF(ERROR_STR, format, ##__VA_ARGS__) 108 108 #define ERROR_GLIBC(format, ...) __WARN_GLIBC(ERROR_STR, format, ##__VA_ARGS__) 109 109 #define ERROR_FUNC(sec, offset, format, ...) __WARN_FUNC(ERROR_STR, sec, offset, format, ##__VA_ARGS__) 110 - #define ERROR_INSN(insn, format, ...) WARN_FUNC(insn->sec, insn->offset, format, ##__VA_ARGS__) 110 + #define ERROR_INSN(insn, format, ...) ERROR_FUNC(insn->sec, insn->offset, format, ##__VA_ARGS__) 111 111 112 112 extern bool debug; 113 113 extern int indent;
+28 -14
tools/objtool/klp-diff.c
··· 14 14 #include <objtool/util.h> 15 15 #include <arch/special.h> 16 16 17 + #include <linux/align.h> 17 18 #include <linux/objtool_types.h> 18 19 #include <linux/livepatch_external.h> 19 20 #include <linux/stringify.h> ··· 561 560 } 562 561 563 562 if (!is_sec_sym(patched_sym)) 564 - offset = sec_size(out_sec); 563 + offset = ALIGN(sec_size(out_sec), out_sec->sh.sh_addralign); 565 564 566 565 if (patched_sym->len || is_sec_sym(patched_sym)) { 567 566 void *data = NULL; ··· 1335 1334 * be applied after static branch/call init, resulting in code corruption. 1336 1335 * 1337 1336 * Validate a special section entry to avoid that. Note that an inert 1338 - * tracepoint is harmless enough, in that case just skip the entry and print a 1339 - * warning. Otherwise, return an error. 1337 + * tracepoint or pr_debug() is harmless enough, in that case just skip the 1338 + * entry and print a warning. Otherwise, return an error. 1340 1339 * 1341 - * This is only a temporary limitation which will be fixed when livepatch adds 1342 - * support for submodules: fully self-contained modules which are embedded in 1343 - * the top-level livepatch module's data and which can be loaded on demand when 1344 - * their corresponding to-be-patched module gets loaded. Then klp relocs can 1345 - * be retired. 1340 + * TODO: This is only a temporary limitation which will be fixed when livepatch 1341 + * adds support for submodules: fully self-contained modules which are embedded 1342 + * in the top-level livepatch module's data and which can be loaded on demand 1343 + * when their corresponding to-be-patched module gets loaded. Then klp relocs 1344 + * can be retired. 1346 1345 * 1347 1346 * Return: 1348 1347 * -1: error: validation failed 1349 - * 1: warning: tracepoint skipped 1348 + * 1: warning: disabled tracepoint or pr_debug() 1350 1349 * 0: success 1351 1350 */ 1352 1351 static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym) 1353 1352 { 1354 1353 bool static_branch = !strcmp(sym->sec->name, "__jump_table"); 1355 1354 bool static_call = !strcmp(sym->sec->name, ".static_call_sites"); 1356 - struct symbol *code_sym = NULL; 1355 + const char *code_sym = NULL; 1357 1356 unsigned long code_offset = 0; 1358 1357 struct reloc *reloc; 1359 1358 int ret = 0; ··· 1365 1364 const char *sym_modname; 1366 1365 struct export *export; 1367 1366 1367 + if (convert_reloc_sym(e->patched, reloc)) 1368 + continue; 1369 + 1368 1370 /* Static branch/call keys are always STT_OBJECT */ 1369 1371 if (reloc->sym->type != STT_OBJECT) { 1370 1372 1371 1373 /* Save code location which can be printed below */ 1372 1374 if (reloc->sym->type == STT_FUNC && !code_sym) { 1373 - code_sym = reloc->sym; 1375 + code_sym = reloc->sym->name; 1374 1376 code_offset = reloc_addend(reloc); 1375 1377 } 1376 1378 ··· 1396 1392 if (!strcmp(sym_modname, "vmlinux")) 1397 1393 continue; 1398 1394 1395 + if (!code_sym) 1396 + code_sym = "<unknown>"; 1397 + 1399 1398 if (static_branch) { 1400 1399 if (strstarts(reloc->sym->name, "__tracepoint_")) { 1401 1400 WARN("%s: disabling unsupported tracepoint %s", 1402 - code_sym->name, reloc->sym->name + 13); 1401 + code_sym, reloc->sym->name + 13); 1402 + ret = 1; 1403 + continue; 1404 + } 1405 + 1406 + if (strstr(reloc->sym->name, "__UNIQUE_ID_ddebug_")) { 1407 + WARN("%s: disabling unsupported pr_debug()", 1408 + code_sym); 1403 1409 ret = 1; 1404 1410 continue; 1405 1411 } 1406 1412 1407 1413 ERROR("%s+0x%lx: unsupported static branch key %s. 
Use static_key_enabled() instead", 1408 - code_sym->name, code_offset, reloc->sym->name); 1414 + code_sym, code_offset, reloc->sym->name); 1409 1415 return -1; 1410 1416 } 1411 1417 ··· 1426 1412 } 1427 1413 1428 1414 ERROR("%s()+0x%lx: unsupported static call key %s. Use KLP_STATIC_CALL() instead", 1429 - code_sym->name, code_offset, reloc->sym->name); 1415 + code_sym, code_offset, reloc->sym->name); 1430 1416 return -1; 1431 1417 } 1432 1418
+18
tools/perf/Makefile.config
··· 1163 1163 CFLAGS += -DHAVE_RUST_SUPPORT 1164 1164 $(call detected,CONFIG_RUST_SUPPORT) 1165 1165 endif 1166 + 1167 + ifneq ($(CROSS_COMPILE),) 1168 + RUST_TARGET_FLAGS_arm := arm-unknown-linux-gnueabi 1169 + RUST_TARGET_FLAGS_arm64 := aarch64-unknown-linux-gnu 1170 + RUST_TARGET_FLAGS_m68k := m68k-unknown-linux-gnu 1171 + RUST_TARGET_FLAGS_mips := mipsel-unknown-linux-gnu 1172 + RUST_TARGET_FLAGS_powerpc := powerpc64le-unknown-linux-gnu 1173 + RUST_TARGET_FLAGS_riscv := riscv64gc-unknown-linux-gnu 1174 + RUST_TARGET_FLAGS_s390 := s390x-unknown-linux-gnu 1175 + RUST_TARGET_FLAGS_x86 := x86_64-unknown-linux-gnu 1176 + RUST_TARGET_FLAGS_x86_64 := x86_64-unknown-linux-gnu 1177 + 1178 + ifeq ($(RUST_TARGET_FLAGS_$(ARCH)),) 1179 + $(error Unknown rust cross compilation architecture $(ARCH)) 1180 + endif 1181 + 1182 + RUST_FLAGS += --target=$(RUST_TARGET_FLAGS_$(ARCH)) 1183 + endif 1166 1184 endif 1167 1185 1168 1186 # Among the variables below, these:
+1 -1
tools/perf/Makefile.perf
··· 274 274 PYLINT := $(shell which pylint 2> /dev/null) 275 275 endif 276 276 277 - export srctree OUTPUT RM CC CXX RUSTC LD AR CFLAGS CXXFLAGS V BISON FLEX AWK 277 + export srctree OUTPUT RM CC CXX RUSTC LD AR CFLAGS CXXFLAGS RUST_FLAGS V BISON FLEX AWK 278 278 export HOSTCC HOSTLD HOSTAR HOSTCFLAGS SHELLCHECK MYPY PYLINT 279 279 280 280 include $(srctree)/tools/build/Makefile.include
+1
tools/perf/arch/arm/entry/syscalls/syscall.tbl
··· 485 485 468 common file_getattr sys_file_getattr 486 486 469 common file_setattr sys_file_setattr 487 487 470 common listns sys_listns 488 + 471 common rseq_slice_yield sys_rseq_slice_yield
-14
tools/perf/arch/arm/util/cs-etm.c
··· 68 68 69 69 enum cs_etm_version { CS_NOT_PRESENT, CS_ETMV3, CS_ETMV4, CS_ETE }; 70 70 71 - /* ETMv4 CONFIGR register bits */ 72 - #define TRCCONFIGR_BB BIT(3) 73 - #define TRCCONFIGR_CCI BIT(4) 74 - #define TRCCONFIGR_CID BIT(6) 75 - #define TRCCONFIGR_VMID BIT(7) 76 - #define TRCCONFIGR_TS BIT(11) 77 - #define TRCCONFIGR_RS BIT(12) 78 - #define TRCCONFIGR_VMIDOPT BIT(15) 79 - 80 - /* ETMv3 ETMCR register bits */ 81 - #define ETMCR_CYC_ACC BIT(12) 82 - #define ETMCR_TIMESTAMP_EN BIT(28) 83 - #define ETMCR_RETURN_STACK BIT(29) 84 - 85 71 static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu); 86 72 static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val); 87 73 static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path);
+1
tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
··· 385 385 468 n64 file_getattr sys_file_getattr 386 386 469 n64 file_setattr sys_file_setattr 387 387 470 n64 listns sys_listns 388 + 471 n64 rseq_slice_yield sys_rseq_slice_yield
+1
tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
··· 561 561 468 common file_getattr sys_file_getattr 562 562 469 common file_setattr sys_file_setattr 563 563 470 common listns sys_listns 564 + 471 nospu rseq_slice_yield sys_rseq_slice_yield
+392 -467
tools/perf/arch/s390/entry/syscalls/syscall.tbl
··· 3 3 # System call table for s390 4 4 # 5 5 # Format: 6 + # <nr> <abi> <syscall> <entry> 6 7 # 7 - # <nr> <abi> <syscall> <entry-64bit> <compat-entry> 8 - # 9 - # where <abi> can be common, 64, or 32 8 + # <abi> is always common. 10 9 11 - 1 common exit sys_exit sys_exit 12 - 2 common fork sys_fork sys_fork 13 - 3 common read sys_read compat_sys_s390_read 14 - 4 common write sys_write compat_sys_s390_write 15 - 5 common open sys_open compat_sys_open 16 - 6 common close sys_close sys_close 17 - 7 common restart_syscall sys_restart_syscall sys_restart_syscall 18 - 8 common creat sys_creat sys_creat 19 - 9 common link sys_link sys_link 20 - 10 common unlink sys_unlink sys_unlink 21 - 11 common execve sys_execve compat_sys_execve 22 - 12 common chdir sys_chdir sys_chdir 23 - 13 32 time - sys_time32 24 - 14 common mknod sys_mknod sys_mknod 25 - 15 common chmod sys_chmod sys_chmod 26 - 16 32 lchown - sys_lchown16 27 - 19 common lseek sys_lseek compat_sys_lseek 28 - 20 common getpid sys_getpid sys_getpid 29 - 21 common mount sys_mount sys_mount 30 - 22 common umount sys_oldumount sys_oldumount 31 - 23 32 setuid - sys_setuid16 32 - 24 32 getuid - sys_getuid16 33 - 25 32 stime - sys_stime32 34 - 26 common ptrace sys_ptrace compat_sys_ptrace 35 - 27 common alarm sys_alarm sys_alarm 36 - 29 common pause sys_pause sys_pause 37 - 30 common utime sys_utime sys_utime32 38 - 33 common access sys_access sys_access 39 - 34 common nice sys_nice sys_nice 40 - 36 common sync sys_sync sys_sync 41 - 37 common kill sys_kill sys_kill 42 - 38 common rename sys_rename sys_rename 43 - 39 common mkdir sys_mkdir sys_mkdir 44 - 40 common rmdir sys_rmdir sys_rmdir 45 - 41 common dup sys_dup sys_dup 46 - 42 common pipe sys_pipe sys_pipe 47 - 43 common times sys_times compat_sys_times 48 - 45 common brk sys_brk sys_brk 49 - 46 32 setgid - sys_setgid16 50 - 47 32 getgid - sys_getgid16 51 - 48 common signal sys_signal sys_signal 52 - 49 32 geteuid - sys_geteuid16 53 - 50 32 getegid - sys_getegid16 54 - 51 common acct sys_acct sys_acct 55 - 52 common umount2 sys_umount sys_umount 56 - 54 common ioctl sys_ioctl compat_sys_ioctl 57 - 55 common fcntl sys_fcntl compat_sys_fcntl 58 - 57 common setpgid sys_setpgid sys_setpgid 59 - 60 common umask sys_umask sys_umask 60 - 61 common chroot sys_chroot sys_chroot 61 - 62 common ustat sys_ustat compat_sys_ustat 62 - 63 common dup2 sys_dup2 sys_dup2 63 - 64 common getppid sys_getppid sys_getppid 64 - 65 common getpgrp sys_getpgrp sys_getpgrp 65 - 66 common setsid sys_setsid sys_setsid 66 - 67 common sigaction sys_sigaction compat_sys_sigaction 67 - 70 32 setreuid - sys_setreuid16 68 - 71 32 setregid - sys_setregid16 69 - 72 common sigsuspend sys_sigsuspend sys_sigsuspend 70 - 73 common sigpending sys_sigpending compat_sys_sigpending 71 - 74 common sethostname sys_sethostname sys_sethostname 72 - 75 common setrlimit sys_setrlimit compat_sys_setrlimit 73 - 76 32 getrlimit - compat_sys_old_getrlimit 74 - 77 common getrusage sys_getrusage compat_sys_getrusage 75 - 78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday 76 - 79 common settimeofday sys_settimeofday compat_sys_settimeofday 77 - 80 32 getgroups - sys_getgroups16 78 - 81 32 setgroups - sys_setgroups16 79 - 83 common symlink sys_symlink sys_symlink 80 - 85 common readlink sys_readlink sys_readlink 81 - 86 common uselib sys_uselib sys_uselib 82 - 87 common swapon sys_swapon sys_swapon 83 - 88 common reboot sys_reboot sys_reboot 84 - 89 common readdir - compat_sys_old_readdir 85 - 90 common mmap sys_old_mmap 
compat_sys_s390_old_mmap 86 - 91 common munmap sys_munmap sys_munmap 87 - 92 common truncate sys_truncate compat_sys_truncate 88 - 93 common ftruncate sys_ftruncate compat_sys_ftruncate 89 - 94 common fchmod sys_fchmod sys_fchmod 90 - 95 32 fchown - sys_fchown16 91 - 96 common getpriority sys_getpriority sys_getpriority 92 - 97 common setpriority sys_setpriority sys_setpriority 93 - 99 common statfs sys_statfs compat_sys_statfs 94 - 100 common fstatfs sys_fstatfs compat_sys_fstatfs 95 - 101 32 ioperm - - 96 - 102 common socketcall sys_socketcall compat_sys_socketcall 97 - 103 common syslog sys_syslog sys_syslog 98 - 104 common setitimer sys_setitimer compat_sys_setitimer 99 - 105 common getitimer sys_getitimer compat_sys_getitimer 100 - 106 common stat sys_newstat compat_sys_newstat 101 - 107 common lstat sys_newlstat compat_sys_newlstat 102 - 108 common fstat sys_newfstat compat_sys_newfstat 103 - 110 common lookup_dcookie - - 104 - 111 common vhangup sys_vhangup sys_vhangup 105 - 112 common idle - - 106 - 114 common wait4 sys_wait4 compat_sys_wait4 107 - 115 common swapoff sys_swapoff sys_swapoff 108 - 116 common sysinfo sys_sysinfo compat_sys_sysinfo 109 - 117 common ipc sys_s390_ipc compat_sys_s390_ipc 110 - 118 common fsync sys_fsync sys_fsync 111 - 119 common sigreturn sys_sigreturn compat_sys_sigreturn 112 - 120 common clone sys_clone sys_clone 113 - 121 common setdomainname sys_setdomainname sys_setdomainname 114 - 122 common uname sys_newuname sys_newuname 115 - 124 common adjtimex sys_adjtimex sys_adjtimex_time32 116 - 125 common mprotect sys_mprotect sys_mprotect 117 - 126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask 118 - 127 common create_module - - 119 - 128 common init_module sys_init_module sys_init_module 120 - 129 common delete_module sys_delete_module sys_delete_module 121 - 130 common get_kernel_syms - - 122 - 131 common quotactl sys_quotactl sys_quotactl 123 - 132 common getpgid sys_getpgid sys_getpgid 124 - 133 common fchdir sys_fchdir sys_fchdir 125 - 134 common bdflush sys_ni_syscall sys_ni_syscall 126 - 135 common sysfs sys_sysfs sys_sysfs 127 - 136 common personality sys_s390_personality sys_s390_personality 128 - 137 common afs_syscall - - 129 - 138 32 setfsuid - sys_setfsuid16 130 - 139 32 setfsgid - sys_setfsgid16 131 - 140 32 _llseek - sys_llseek 132 - 141 common getdents sys_getdents compat_sys_getdents 133 - 142 32 _newselect - compat_sys_select 134 - 142 64 select sys_select - 135 - 143 common flock sys_flock sys_flock 136 - 144 common msync sys_msync sys_msync 137 - 145 common readv sys_readv sys_readv 138 - 146 common writev sys_writev sys_writev 139 - 147 common getsid sys_getsid sys_getsid 140 - 148 common fdatasync sys_fdatasync sys_fdatasync 141 - 149 common _sysctl - - 142 - 150 common mlock sys_mlock sys_mlock 143 - 151 common munlock sys_munlock sys_munlock 144 - 152 common mlockall sys_mlockall sys_mlockall 145 - 153 common munlockall sys_munlockall sys_munlockall 146 - 154 common sched_setparam sys_sched_setparam sys_sched_setparam 147 - 155 common sched_getparam sys_sched_getparam sys_sched_getparam 148 - 156 common sched_setscheduler sys_sched_setscheduler sys_sched_setscheduler 149 - 157 common sched_getscheduler sys_sched_getscheduler sys_sched_getscheduler 150 - 158 common sched_yield sys_sched_yield sys_sched_yield 151 - 159 common sched_get_priority_max sys_sched_get_priority_max sys_sched_get_priority_max 152 - 160 common sched_get_priority_min sys_sched_get_priority_min sys_sched_get_priority_min 153 - 161 common 
sched_rr_get_interval sys_sched_rr_get_interval sys_sched_rr_get_interval_time32 154 - 162 common nanosleep sys_nanosleep sys_nanosleep_time32 155 - 163 common mremap sys_mremap sys_mremap 156 - 164 32 setresuid - sys_setresuid16 157 - 165 32 getresuid - sys_getresuid16 158 - 167 common query_module - - 159 - 168 common poll sys_poll sys_poll 160 - 169 common nfsservctl - - 161 - 170 32 setresgid - sys_setresgid16 162 - 171 32 getresgid - sys_getresgid16 163 - 172 common prctl sys_prctl sys_prctl 164 - 173 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn 165 - 174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction 166 - 175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask 167 - 176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending 168 - 177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time32 169 - 178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 170 - 179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 171 - 180 common pread64 sys_pread64 compat_sys_s390_pread64 172 - 181 common pwrite64 sys_pwrite64 compat_sys_s390_pwrite64 173 - 182 32 chown - sys_chown16 174 - 183 common getcwd sys_getcwd sys_getcwd 175 - 184 common capget sys_capget sys_capget 176 - 185 common capset sys_capset sys_capset 177 - 186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack 178 - 187 common sendfile sys_sendfile64 compat_sys_sendfile 179 - 188 common getpmsg - - 180 - 189 common putpmsg - - 181 - 190 common vfork sys_vfork sys_vfork 182 - 191 32 ugetrlimit - compat_sys_getrlimit 183 - 191 64 getrlimit sys_getrlimit - 184 - 192 32 mmap2 - compat_sys_s390_mmap2 185 - 193 32 truncate64 - compat_sys_s390_truncate64 186 - 194 32 ftruncate64 - compat_sys_s390_ftruncate64 187 - 195 32 stat64 - compat_sys_s390_stat64 188 - 196 32 lstat64 - compat_sys_s390_lstat64 189 - 197 32 fstat64 - compat_sys_s390_fstat64 190 - 198 32 lchown32 - sys_lchown 191 - 198 64 lchown sys_lchown - 192 - 199 32 getuid32 - sys_getuid 193 - 199 64 getuid sys_getuid - 194 - 200 32 getgid32 - sys_getgid 195 - 200 64 getgid sys_getgid - 196 - 201 32 geteuid32 - sys_geteuid 197 - 201 64 geteuid sys_geteuid - 198 - 202 32 getegid32 - sys_getegid 199 - 202 64 getegid sys_getegid - 200 - 203 32 setreuid32 - sys_setreuid 201 - 203 64 setreuid sys_setreuid - 202 - 204 32 setregid32 - sys_setregid 203 - 204 64 setregid sys_setregid - 204 - 205 32 getgroups32 - sys_getgroups 205 - 205 64 getgroups sys_getgroups - 206 - 206 32 setgroups32 - sys_setgroups 207 - 206 64 setgroups sys_setgroups - 208 - 207 32 fchown32 - sys_fchown 209 - 207 64 fchown sys_fchown - 210 - 208 32 setresuid32 - sys_setresuid 211 - 208 64 setresuid sys_setresuid - 212 - 209 32 getresuid32 - sys_getresuid 213 - 209 64 getresuid sys_getresuid - 214 - 210 32 setresgid32 - sys_setresgid 215 - 210 64 setresgid sys_setresgid - 216 - 211 32 getresgid32 - sys_getresgid 217 - 211 64 getresgid sys_getresgid - 218 - 212 32 chown32 - sys_chown 219 - 212 64 chown sys_chown - 220 - 213 32 setuid32 - sys_setuid 221 - 213 64 setuid sys_setuid - 222 - 214 32 setgid32 - sys_setgid 223 - 214 64 setgid sys_setgid - 224 - 215 32 setfsuid32 - sys_setfsuid 225 - 215 64 setfsuid sys_setfsuid - 226 - 216 32 setfsgid32 - sys_setfsgid 227 - 216 64 setfsgid sys_setfsgid - 228 - 217 common pivot_root sys_pivot_root sys_pivot_root 229 - 218 common mincore sys_mincore sys_mincore 230 - 219 common madvise sys_madvise sys_madvise 231 - 220 common getdents64 sys_getdents64 sys_getdents64 
232 - 221 32 fcntl64 - compat_sys_fcntl64 233 - 222 common readahead sys_readahead compat_sys_s390_readahead 234 - 223 32 sendfile64 - compat_sys_sendfile64 235 - 224 common setxattr sys_setxattr sys_setxattr 236 - 225 common lsetxattr sys_lsetxattr sys_lsetxattr 237 - 226 common fsetxattr sys_fsetxattr sys_fsetxattr 238 - 227 common getxattr sys_getxattr sys_getxattr 239 - 228 common lgetxattr sys_lgetxattr sys_lgetxattr 240 - 229 common fgetxattr sys_fgetxattr sys_fgetxattr 241 - 230 common listxattr sys_listxattr sys_listxattr 242 - 231 common llistxattr sys_llistxattr sys_llistxattr 243 - 232 common flistxattr sys_flistxattr sys_flistxattr 244 - 233 common removexattr sys_removexattr sys_removexattr 245 - 234 common lremovexattr sys_lremovexattr sys_lremovexattr 246 - 235 common fremovexattr sys_fremovexattr sys_fremovexattr 247 - 236 common gettid sys_gettid sys_gettid 248 - 237 common tkill sys_tkill sys_tkill 249 - 238 common futex sys_futex sys_futex_time32 250 - 239 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity 251 - 240 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity 252 - 241 common tgkill sys_tgkill sys_tgkill 253 - 243 common io_setup sys_io_setup compat_sys_io_setup 254 - 244 common io_destroy sys_io_destroy sys_io_destroy 255 - 245 common io_getevents sys_io_getevents sys_io_getevents_time32 256 - 246 common io_submit sys_io_submit compat_sys_io_submit 257 - 247 common io_cancel sys_io_cancel sys_io_cancel 258 - 248 common exit_group sys_exit_group sys_exit_group 259 - 249 common epoll_create sys_epoll_create sys_epoll_create 260 - 250 common epoll_ctl sys_epoll_ctl sys_epoll_ctl 261 - 251 common epoll_wait sys_epoll_wait sys_epoll_wait 262 - 252 common set_tid_address sys_set_tid_address sys_set_tid_address 263 - 253 common fadvise64 sys_fadvise64_64 compat_sys_s390_fadvise64 264 - 254 common timer_create sys_timer_create compat_sys_timer_create 265 - 255 common timer_settime sys_timer_settime sys_timer_settime32 266 - 256 common timer_gettime sys_timer_gettime sys_timer_gettime32 267 - 257 common timer_getoverrun sys_timer_getoverrun sys_timer_getoverrun 268 - 258 common timer_delete sys_timer_delete sys_timer_delete 269 - 259 common clock_settime sys_clock_settime sys_clock_settime32 270 - 260 common clock_gettime sys_clock_gettime sys_clock_gettime32 271 - 261 common clock_getres sys_clock_getres sys_clock_getres_time32 272 - 262 common clock_nanosleep sys_clock_nanosleep sys_clock_nanosleep_time32 273 - 264 32 fadvise64_64 - compat_sys_s390_fadvise64_64 274 - 265 common statfs64 sys_statfs64 compat_sys_statfs64 275 - 266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 276 - 267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages 277 - 268 common mbind sys_mbind sys_mbind 278 - 269 common get_mempolicy sys_get_mempolicy sys_get_mempolicy 279 - 270 common set_mempolicy sys_set_mempolicy sys_set_mempolicy 280 - 271 common mq_open sys_mq_open compat_sys_mq_open 281 - 272 common mq_unlink sys_mq_unlink sys_mq_unlink 282 - 273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32 283 - 274 common mq_timedreceive sys_mq_timedreceive sys_mq_timedreceive_time32 284 - 275 common mq_notify sys_mq_notify compat_sys_mq_notify 285 - 276 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr 286 - 277 common kexec_load sys_kexec_load compat_sys_kexec_load 287 - 278 common add_key sys_add_key sys_add_key 288 - 279 common request_key sys_request_key sys_request_key 289 - 280 common keyctl 
sys_keyctl compat_sys_keyctl 290 - 281 common waitid sys_waitid compat_sys_waitid 291 - 282 common ioprio_set sys_ioprio_set sys_ioprio_set 292 - 283 common ioprio_get sys_ioprio_get sys_ioprio_get 293 - 284 common inotify_init sys_inotify_init sys_inotify_init 294 - 285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch 295 - 286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch 296 - 287 common migrate_pages sys_migrate_pages sys_migrate_pages 297 - 288 common openat sys_openat compat_sys_openat 298 - 289 common mkdirat sys_mkdirat sys_mkdirat 299 - 290 common mknodat sys_mknodat sys_mknodat 300 - 291 common fchownat sys_fchownat sys_fchownat 301 - 292 common futimesat sys_futimesat sys_futimesat_time32 302 - 293 32 fstatat64 - compat_sys_s390_fstatat64 303 - 293 64 newfstatat sys_newfstatat - 304 - 294 common unlinkat sys_unlinkat sys_unlinkat 305 - 295 common renameat sys_renameat sys_renameat 306 - 296 common linkat sys_linkat sys_linkat 307 - 297 common symlinkat sys_symlinkat sys_symlinkat 308 - 298 common readlinkat sys_readlinkat sys_readlinkat 309 - 299 common fchmodat sys_fchmodat sys_fchmodat 310 - 300 common faccessat sys_faccessat sys_faccessat 311 - 301 common pselect6 sys_pselect6 compat_sys_pselect6_time32 312 - 302 common ppoll sys_ppoll compat_sys_ppoll_time32 313 - 303 common unshare sys_unshare sys_unshare 314 - 304 common set_robust_list sys_set_robust_list compat_sys_set_robust_list 315 - 305 common get_robust_list sys_get_robust_list compat_sys_get_robust_list 316 - 306 common splice sys_splice sys_splice 317 - 307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range 318 - 308 common tee sys_tee sys_tee 319 - 309 common vmsplice sys_vmsplice sys_vmsplice 320 - 310 common move_pages sys_move_pages sys_move_pages 321 - 311 common getcpu sys_getcpu sys_getcpu 322 - 312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 323 - 313 common utimes sys_utimes sys_utimes_time32 324 - 314 common fallocate sys_fallocate compat_sys_s390_fallocate 325 - 315 common utimensat sys_utimensat sys_utimensat_time32 326 - 316 common signalfd sys_signalfd compat_sys_signalfd 327 - 317 common timerfd - - 328 - 318 common eventfd sys_eventfd sys_eventfd 329 - 319 common timerfd_create sys_timerfd_create sys_timerfd_create 330 - 320 common timerfd_settime sys_timerfd_settime sys_timerfd_settime32 331 - 321 common timerfd_gettime sys_timerfd_gettime sys_timerfd_gettime32 332 - 322 common signalfd4 sys_signalfd4 compat_sys_signalfd4 333 - 323 common eventfd2 sys_eventfd2 sys_eventfd2 334 - 324 common inotify_init1 sys_inotify_init1 sys_inotify_init1 335 - 325 common pipe2 sys_pipe2 sys_pipe2 336 - 326 common dup3 sys_dup3 sys_dup3 337 - 327 common epoll_create1 sys_epoll_create1 sys_epoll_create1 338 - 328 common preadv sys_preadv compat_sys_preadv 339 - 329 common pwritev sys_pwritev compat_sys_pwritev 340 - 330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 341 - 331 common perf_event_open sys_perf_event_open sys_perf_event_open 342 - 332 common fanotify_init sys_fanotify_init sys_fanotify_init 343 - 333 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark 344 - 334 common prlimit64 sys_prlimit64 sys_prlimit64 345 - 335 common name_to_handle_at sys_name_to_handle_at sys_name_to_handle_at 346 - 336 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at 347 - 337 common clock_adjtime sys_clock_adjtime sys_clock_adjtime32 348 - 338 common syncfs sys_syncfs sys_syncfs 349 - 
339 common setns sys_setns sys_setns 350 - 340 common process_vm_readv sys_process_vm_readv sys_process_vm_readv 351 - 341 common process_vm_writev sys_process_vm_writev sys_process_vm_writev 352 - 342 common s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr 353 - 343 common kcmp sys_kcmp sys_kcmp 354 - 344 common finit_module sys_finit_module sys_finit_module 355 - 345 common sched_setattr sys_sched_setattr sys_sched_setattr 356 - 346 common sched_getattr sys_sched_getattr sys_sched_getattr 357 - 347 common renameat2 sys_renameat2 sys_renameat2 358 - 348 common seccomp sys_seccomp sys_seccomp 359 - 349 common getrandom sys_getrandom sys_getrandom 360 - 350 common memfd_create sys_memfd_create sys_memfd_create 361 - 351 common bpf sys_bpf sys_bpf 362 - 352 common s390_pci_mmio_write sys_s390_pci_mmio_write sys_s390_pci_mmio_write 363 - 353 common s390_pci_mmio_read sys_s390_pci_mmio_read sys_s390_pci_mmio_read 364 - 354 common execveat sys_execveat compat_sys_execveat 365 - 355 common userfaultfd sys_userfaultfd sys_userfaultfd 366 - 356 common membarrier sys_membarrier sys_membarrier 367 - 357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg_time32 368 - 358 common sendmmsg sys_sendmmsg compat_sys_sendmmsg 369 - 359 common socket sys_socket sys_socket 370 - 360 common socketpair sys_socketpair sys_socketpair 371 - 361 common bind sys_bind sys_bind 372 - 362 common connect sys_connect sys_connect 373 - 363 common listen sys_listen sys_listen 374 - 364 common accept4 sys_accept4 sys_accept4 375 - 365 common getsockopt sys_getsockopt sys_getsockopt 376 - 366 common setsockopt sys_setsockopt sys_setsockopt 377 - 367 common getsockname sys_getsockname sys_getsockname 378 - 368 common getpeername sys_getpeername sys_getpeername 379 - 369 common sendto sys_sendto sys_sendto 380 - 370 common sendmsg sys_sendmsg compat_sys_sendmsg 381 - 371 common recvfrom sys_recvfrom compat_sys_recvfrom 382 - 372 common recvmsg sys_recvmsg compat_sys_recvmsg 383 - 373 common shutdown sys_shutdown sys_shutdown 384 - 374 common mlock2 sys_mlock2 sys_mlock2 385 - 375 common copy_file_range sys_copy_file_range sys_copy_file_range 386 - 376 common preadv2 sys_preadv2 compat_sys_preadv2 387 - 377 common pwritev2 sys_pwritev2 compat_sys_pwritev2 388 - 378 common s390_guarded_storage sys_s390_guarded_storage sys_s390_guarded_storage 389 - 379 common statx sys_statx sys_statx 390 - 380 common s390_sthyi sys_s390_sthyi sys_s390_sthyi 391 - 381 common kexec_file_load sys_kexec_file_load sys_kexec_file_load 392 - 382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents 393 - 383 common rseq sys_rseq sys_rseq 394 - 384 common pkey_mprotect sys_pkey_mprotect sys_pkey_mprotect 395 - 385 common pkey_alloc sys_pkey_alloc sys_pkey_alloc 396 - 386 common pkey_free sys_pkey_free sys_pkey_free 10 + 1 common exit sys_exit 11 + 2 common fork sys_fork 12 + 3 common read sys_read 13 + 4 common write sys_write 14 + 5 common open sys_open 15 + 6 common close sys_close 16 + 7 common restart_syscall sys_restart_syscall 17 + 8 common creat sys_creat 18 + 9 common link sys_link 19 + 10 common unlink sys_unlink 20 + 11 common execve sys_execve 21 + 12 common chdir sys_chdir 22 + 14 common mknod sys_mknod 23 + 15 common chmod sys_chmod 24 + 19 common lseek sys_lseek 25 + 20 common getpid sys_getpid 26 + 21 common mount sys_mount 27 + 22 common umount sys_oldumount 28 + 26 common ptrace sys_ptrace 29 + 27 common alarm sys_alarm 30 + 29 common pause sys_pause 31 + 30 common utime sys_utime 32 + 33 common access 
sys_access 33 + 34 common nice sys_nice 34 + 36 common sync sys_sync 35 + 37 common kill sys_kill 36 + 38 common rename sys_rename 37 + 39 common mkdir sys_mkdir 38 + 40 common rmdir sys_rmdir 39 + 41 common dup sys_dup 40 + 42 common pipe sys_pipe 41 + 43 common times sys_times 42 + 45 common brk sys_brk 43 + 48 common signal sys_signal 44 + 51 common acct sys_acct 45 + 52 common umount2 sys_umount 46 + 54 common ioctl sys_ioctl 47 + 55 common fcntl sys_fcntl 48 + 57 common setpgid sys_setpgid 49 + 60 common umask sys_umask 50 + 61 common chroot sys_chroot 51 + 62 common ustat sys_ustat 52 + 63 common dup2 sys_dup2 53 + 64 common getppid sys_getppid 54 + 65 common getpgrp sys_getpgrp 55 + 66 common setsid sys_setsid 56 + 67 common sigaction sys_sigaction 57 + 72 common sigsuspend sys_sigsuspend 58 + 73 common sigpending sys_sigpending 59 + 74 common sethostname sys_sethostname 60 + 75 common setrlimit sys_setrlimit 61 + 77 common getrusage sys_getrusage 62 + 78 common gettimeofday sys_gettimeofday 63 + 79 common settimeofday sys_settimeofday 64 + 83 common symlink sys_symlink 65 + 85 common readlink sys_readlink 66 + 86 common uselib sys_uselib 67 + 87 common swapon sys_swapon 68 + 88 common reboot sys_reboot 69 + 89 common readdir sys_ni_syscall 70 + 90 common mmap sys_old_mmap 71 + 91 common munmap sys_munmap 72 + 92 common truncate sys_truncate 73 + 93 common ftruncate sys_ftruncate 74 + 94 common fchmod sys_fchmod 75 + 96 common getpriority sys_getpriority 76 + 97 common setpriority sys_setpriority 77 + 99 common statfs sys_statfs 78 + 100 common fstatfs sys_fstatfs 79 + 102 common socketcall sys_socketcall 80 + 103 common syslog sys_syslog 81 + 104 common setitimer sys_setitimer 82 + 105 common getitimer sys_getitimer 83 + 106 common stat sys_newstat 84 + 107 common lstat sys_newlstat 85 + 108 common fstat sys_newfstat 86 + 110 common lookup_dcookie sys_ni_syscall 87 + 111 common vhangup sys_vhangup 88 + 112 common idle sys_ni_syscall 89 + 114 common wait4 sys_wait4 90 + 115 common swapoff sys_swapoff 91 + 116 common sysinfo sys_sysinfo 92 + 117 common ipc sys_s390_ipc 93 + 118 common fsync sys_fsync 94 + 119 common sigreturn sys_sigreturn 95 + 120 common clone sys_clone 96 + 121 common setdomainname sys_setdomainname 97 + 122 common uname sys_newuname 98 + 124 common adjtimex sys_adjtimex 99 + 125 common mprotect sys_mprotect 100 + 126 common sigprocmask sys_sigprocmask 101 + 127 common create_module sys_ni_syscall 102 + 128 common init_module sys_init_module 103 + 129 common delete_module sys_delete_module 104 + 130 common get_kernel_syms sys_ni_syscall 105 + 131 common quotactl sys_quotactl 106 + 132 common getpgid sys_getpgid 107 + 133 common fchdir sys_fchdir 108 + 134 common bdflush sys_ni_syscall 109 + 135 common sysfs sys_sysfs 110 + 136 common personality sys_s390_personality 111 + 137 common afs_syscall sys_ni_syscall 112 + 141 common getdents sys_getdents 113 + 142 common select sys_select 114 + 143 common flock sys_flock 115 + 144 common msync sys_msync 116 + 145 common readv sys_readv 117 + 146 common writev sys_writev 118 + 147 common getsid sys_getsid 119 + 148 common fdatasync sys_fdatasync 120 + 149 common _sysctl sys_ni_syscall 121 + 150 common mlock sys_mlock 122 + 151 common munlock sys_munlock 123 + 152 common mlockall sys_mlockall 124 + 153 common munlockall sys_munlockall 125 + 154 common sched_setparam sys_sched_setparam 126 + 155 common sched_getparam sys_sched_getparam 127 + 156 common sched_setscheduler sys_sched_setscheduler 128 + 157 common 
sched_getscheduler sys_sched_getscheduler 129 + 158 common sched_yield sys_sched_yield 130 + 159 common sched_get_priority_max sys_sched_get_priority_max 131 + 160 common sched_get_priority_min sys_sched_get_priority_min 132 + 161 common sched_rr_get_interval sys_sched_rr_get_interval 133 + 162 common nanosleep sys_nanosleep 134 + 163 common mremap sys_mremap 135 + 167 common query_module sys_ni_syscall 136 + 168 common poll sys_poll 137 + 169 common nfsservctl sys_ni_syscall 138 + 172 common prctl sys_prctl 139 + 173 common rt_sigreturn sys_rt_sigreturn 140 + 174 common rt_sigaction sys_rt_sigaction 141 + 175 common rt_sigprocmask sys_rt_sigprocmask 142 + 176 common rt_sigpending sys_rt_sigpending 143 + 177 common rt_sigtimedwait sys_rt_sigtimedwait 144 + 178 common rt_sigqueueinfo sys_rt_sigqueueinfo 145 + 179 common rt_sigsuspend sys_rt_sigsuspend 146 + 180 common pread64 sys_pread64 147 + 181 common pwrite64 sys_pwrite64 148 + 183 common getcwd sys_getcwd 149 + 184 common capget sys_capget 150 + 185 common capset sys_capset 151 + 186 common sigaltstack sys_sigaltstack 152 + 187 common sendfile sys_sendfile64 153 + 188 common getpmsg sys_ni_syscall 154 + 189 common putpmsg sys_ni_syscall 155 + 190 common vfork sys_vfork 156 + 191 common getrlimit sys_getrlimit 157 + 198 common lchown sys_lchown 158 + 199 common getuid sys_getuid 159 + 200 common getgid sys_getgid 160 + 201 common geteuid sys_geteuid 161 + 202 common getegid sys_getegid 162 + 203 common setreuid sys_setreuid 163 + 204 common setregid sys_setregid 164 + 205 common getgroups sys_getgroups 165 + 206 common setgroups sys_setgroups 166 + 207 common fchown sys_fchown 167 + 208 common setresuid sys_setresuid 168 + 209 common getresuid sys_getresuid 169 + 210 common setresgid sys_setresgid 170 + 211 common getresgid sys_getresgid 171 + 212 common chown sys_chown 172 + 213 common setuid sys_setuid 173 + 214 common setgid sys_setgid 174 + 215 common setfsuid sys_setfsuid 175 + 216 common setfsgid sys_setfsgid 176 + 217 common pivot_root sys_pivot_root 177 + 218 common mincore sys_mincore 178 + 219 common madvise sys_madvise 179 + 220 common getdents64 sys_getdents64 180 + 222 common readahead sys_readahead 181 + 224 common setxattr sys_setxattr 182 + 225 common lsetxattr sys_lsetxattr 183 + 226 common fsetxattr sys_fsetxattr 184 + 227 common getxattr sys_getxattr 185 + 228 common lgetxattr sys_lgetxattr 186 + 229 common fgetxattr sys_fgetxattr 187 + 230 common listxattr sys_listxattr 188 + 231 common llistxattr sys_llistxattr 189 + 232 common flistxattr sys_flistxattr 190 + 233 common removexattr sys_removexattr 191 + 234 common lremovexattr sys_lremovexattr 192 + 235 common fremovexattr sys_fremovexattr 193 + 236 common gettid sys_gettid 194 + 237 common tkill sys_tkill 195 + 238 common futex sys_futex 196 + 239 common sched_setaffinity sys_sched_setaffinity 197 + 240 common sched_getaffinity sys_sched_getaffinity 198 + 241 common tgkill sys_tgkill 199 + 243 common io_setup sys_io_setup 200 + 244 common io_destroy sys_io_destroy 201 + 245 common io_getevents sys_io_getevents 202 + 246 common io_submit sys_io_submit 203 + 247 common io_cancel sys_io_cancel 204 + 248 common exit_group sys_exit_group 205 + 249 common epoll_create sys_epoll_create 206 + 250 common epoll_ctl sys_epoll_ctl 207 + 251 common epoll_wait sys_epoll_wait 208 + 252 common set_tid_address sys_set_tid_address 209 + 253 common fadvise64 sys_fadvise64_64 210 + 254 common timer_create sys_timer_create 211 + 255 common timer_settime sys_timer_settime 212 + 256 
common timer_gettime sys_timer_gettime 213 + 257 common timer_getoverrun sys_timer_getoverrun 214 + 258 common timer_delete sys_timer_delete 215 + 259 common clock_settime sys_clock_settime 216 + 260 common clock_gettime sys_clock_gettime 217 + 261 common clock_getres sys_clock_getres 218 + 262 common clock_nanosleep sys_clock_nanosleep 219 + 265 common statfs64 sys_statfs64 220 + 266 common fstatfs64 sys_fstatfs64 221 + 267 common remap_file_pages sys_remap_file_pages 222 + 268 common mbind sys_mbind 223 + 269 common get_mempolicy sys_get_mempolicy 224 + 270 common set_mempolicy sys_set_mempolicy 225 + 271 common mq_open sys_mq_open 226 + 272 common mq_unlink sys_mq_unlink 227 + 273 common mq_timedsend sys_mq_timedsend 228 + 274 common mq_timedreceive sys_mq_timedreceive 229 + 275 common mq_notify sys_mq_notify 230 + 276 common mq_getsetattr sys_mq_getsetattr 231 + 277 common kexec_load sys_kexec_load 232 + 278 common add_key sys_add_key 233 + 279 common request_key sys_request_key 234 + 280 common keyctl sys_keyctl 235 + 281 common waitid sys_waitid 236 + 282 common ioprio_set sys_ioprio_set 237 + 283 common ioprio_get sys_ioprio_get 238 + 284 common inotify_init sys_inotify_init 239 + 285 common inotify_add_watch sys_inotify_add_watch 240 + 286 common inotify_rm_watch sys_inotify_rm_watch 241 + 287 common migrate_pages sys_migrate_pages 242 + 288 common openat sys_openat 243 + 289 common mkdirat sys_mkdirat 244 + 290 common mknodat sys_mknodat 245 + 291 common fchownat sys_fchownat 246 + 292 common futimesat sys_futimesat 247 + 293 common newfstatat sys_newfstatat 248 + 294 common unlinkat sys_unlinkat 249 + 295 common renameat sys_renameat 250 + 296 common linkat sys_linkat 251 + 297 common symlinkat sys_symlinkat 252 + 298 common readlinkat sys_readlinkat 253 + 299 common fchmodat sys_fchmodat 254 + 300 common faccessat sys_faccessat 255 + 301 common pselect6 sys_pselect6 256 + 302 common ppoll sys_ppoll 257 + 303 common unshare sys_unshare 258 + 304 common set_robust_list sys_set_robust_list 259 + 305 common get_robust_list sys_get_robust_list 260 + 306 common splice sys_splice 261 + 307 common sync_file_range sys_sync_file_range 262 + 308 common tee sys_tee 263 + 309 common vmsplice sys_vmsplice 264 + 310 common move_pages sys_move_pages 265 + 311 common getcpu sys_getcpu 266 + 312 common epoll_pwait sys_epoll_pwait 267 + 313 common utimes sys_utimes 268 + 314 common fallocate sys_fallocate 269 + 315 common utimensat sys_utimensat 270 + 316 common signalfd sys_signalfd 271 + 317 common timerfd sys_ni_syscall 272 + 318 common eventfd sys_eventfd 273 + 319 common timerfd_create sys_timerfd_create 274 + 320 common timerfd_settime sys_timerfd_settime 275 + 321 common timerfd_gettime sys_timerfd_gettime 276 + 322 common signalfd4 sys_signalfd4 277 + 323 common eventfd2 sys_eventfd2 278 + 324 common inotify_init1 sys_inotify_init1 279 + 325 common pipe2 sys_pipe2 280 + 326 common dup3 sys_dup3 281 + 327 common epoll_create1 sys_epoll_create1 282 + 328 common preadv sys_preadv 283 + 329 common pwritev sys_pwritev 284 + 330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo 285 + 331 common perf_event_open sys_perf_event_open 286 + 332 common fanotify_init sys_fanotify_init 287 + 333 common fanotify_mark sys_fanotify_mark 288 + 334 common prlimit64 sys_prlimit64 289 + 335 common name_to_handle_at sys_name_to_handle_at 290 + 336 common open_by_handle_at sys_open_by_handle_at 291 + 337 common clock_adjtime sys_clock_adjtime 292 + 338 common syncfs sys_syncfs 293 + 339 common setns sys_setns 294 + 
340 common process_vm_readv sys_process_vm_readv 295 + 341 common process_vm_writev sys_process_vm_writev 296 + 342 common s390_runtime_instr sys_s390_runtime_instr 297 + 343 common kcmp sys_kcmp 298 + 344 common finit_module sys_finit_module 299 + 345 common sched_setattr sys_sched_setattr 300 + 346 common sched_getattr sys_sched_getattr 301 + 347 common renameat2 sys_renameat2 302 + 348 common seccomp sys_seccomp 303 + 349 common getrandom sys_getrandom 304 + 350 common memfd_create sys_memfd_create 305 + 351 common bpf sys_bpf 306 + 352 common s390_pci_mmio_write sys_s390_pci_mmio_write 307 + 353 common s390_pci_mmio_read sys_s390_pci_mmio_read 308 + 354 common execveat sys_execveat 309 + 355 common userfaultfd sys_userfaultfd 310 + 356 common membarrier sys_membarrier 311 + 357 common recvmmsg sys_recvmmsg 312 + 358 common sendmmsg sys_sendmmsg 313 + 359 common socket sys_socket 314 + 360 common socketpair sys_socketpair 315 + 361 common bind sys_bind 316 + 362 common connect sys_connect 317 + 363 common listen sys_listen 318 + 364 common accept4 sys_accept4 319 + 365 common getsockopt sys_getsockopt 320 + 366 common setsockopt sys_setsockopt 321 + 367 common getsockname sys_getsockname 322 + 368 common getpeername sys_getpeername 323 + 369 common sendto sys_sendto 324 + 370 common sendmsg sys_sendmsg 325 + 371 common recvfrom sys_recvfrom 326 + 372 common recvmsg sys_recvmsg 327 + 373 common shutdown sys_shutdown 328 + 374 common mlock2 sys_mlock2 329 + 375 common copy_file_range sys_copy_file_range 330 + 376 common preadv2 sys_preadv2 331 + 377 common pwritev2 sys_pwritev2 332 + 378 common s390_guarded_storage sys_s390_guarded_storage 333 + 379 common statx sys_statx 334 + 380 common s390_sthyi sys_s390_sthyi 335 + 381 common kexec_file_load sys_kexec_file_load 336 + 382 common io_pgetevents sys_io_pgetevents 337 + 383 common rseq sys_rseq 338 + 384 common pkey_mprotect sys_pkey_mprotect 339 + 385 common pkey_alloc sys_pkey_alloc 340 + 386 common pkey_free sys_pkey_free 397 341 # room for arch specific syscalls 398 - 392 64 semtimedop sys_semtimedop - 399 - 393 common semget sys_semget sys_semget 400 - 394 common semctl sys_semctl compat_sys_semctl 401 - 395 common shmget sys_shmget sys_shmget 402 - 396 common shmctl sys_shmctl compat_sys_shmctl 403 - 397 common shmat sys_shmat compat_sys_shmat 404 - 398 common shmdt sys_shmdt sys_shmdt 405 - 399 common msgget sys_msgget sys_msgget 406 - 400 common msgsnd sys_msgsnd compat_sys_msgsnd 407 - 401 common msgrcv sys_msgrcv compat_sys_msgrcv 408 - 402 common msgctl sys_msgctl compat_sys_msgctl 409 - 403 32 clock_gettime64 - sys_clock_gettime 410 - 404 32 clock_settime64 - sys_clock_settime 411 - 405 32 clock_adjtime64 - sys_clock_adjtime 412 - 406 32 clock_getres_time64 - sys_clock_getres 413 - 407 32 clock_nanosleep_time64 - sys_clock_nanosleep 414 - 408 32 timer_gettime64 - sys_timer_gettime 415 - 409 32 timer_settime64 - sys_timer_settime 416 - 410 32 timerfd_gettime64 - sys_timerfd_gettime 417 - 411 32 timerfd_settime64 - sys_timerfd_settime 418 - 412 32 utimensat_time64 - sys_utimensat 419 - 413 32 pselect6_time64 - compat_sys_pselect6_time64 420 - 414 32 ppoll_time64 - compat_sys_ppoll_time64 421 - 416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64 422 - 417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64 423 - 418 32 mq_timedsend_time64 - sys_mq_timedsend 424 - 419 32 mq_timedreceive_time64 - sys_mq_timedreceive 425 - 420 32 semtimedop_time64 - sys_semtimedop 426 - 421 32 rt_sigtimedwait_time64 - 
compat_sys_rt_sigtimedwait_time64 427 - 422 32 futex_time64 - sys_futex 428 - 423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval 429 - 424 common pidfd_send_signal sys_pidfd_send_signal sys_pidfd_send_signal 430 - 425 common io_uring_setup sys_io_uring_setup sys_io_uring_setup 431 - 426 common io_uring_enter sys_io_uring_enter sys_io_uring_enter 432 - 427 common io_uring_register sys_io_uring_register sys_io_uring_register 433 - 428 common open_tree sys_open_tree sys_open_tree 434 - 429 common move_mount sys_move_mount sys_move_mount 435 - 430 common fsopen sys_fsopen sys_fsopen 436 - 431 common fsconfig sys_fsconfig sys_fsconfig 437 - 432 common fsmount sys_fsmount sys_fsmount 438 - 433 common fspick sys_fspick sys_fspick 439 - 434 common pidfd_open sys_pidfd_open sys_pidfd_open 440 - 435 common clone3 sys_clone3 sys_clone3 441 - 436 common close_range sys_close_range sys_close_range 442 - 437 common openat2 sys_openat2 sys_openat2 443 - 438 common pidfd_getfd sys_pidfd_getfd sys_pidfd_getfd 444 - 439 common faccessat2 sys_faccessat2 sys_faccessat2 445 - 440 common process_madvise sys_process_madvise sys_process_madvise 446 - 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 447 - 442 common mount_setattr sys_mount_setattr sys_mount_setattr 448 - 443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd 449 - 444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset 450 - 445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule 451 - 446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self 452 - 447 common memfd_secret sys_memfd_secret sys_memfd_secret 453 - 448 common process_mrelease sys_process_mrelease sys_process_mrelease 454 - 449 common futex_waitv sys_futex_waitv sys_futex_waitv 455 - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node 456 - 451 common cachestat sys_cachestat sys_cachestat 457 - 452 common fchmodat2 sys_fchmodat2 sys_fchmodat2 458 - 453 common map_shadow_stack sys_map_shadow_stack sys_map_shadow_stack 459 - 454 common futex_wake sys_futex_wake sys_futex_wake 460 - 455 common futex_wait sys_futex_wait sys_futex_wait 461 - 456 common futex_requeue sys_futex_requeue sys_futex_requeue 462 - 457 common statmount sys_statmount sys_statmount 463 - 458 common listmount sys_listmount sys_listmount 464 - 459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr 465 - 460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr 466 - 461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules 467 - 462 common mseal sys_mseal sys_mseal 468 - 463 common setxattrat sys_setxattrat sys_setxattrat 469 - 464 common getxattrat sys_getxattrat sys_getxattrat 470 - 465 common listxattrat sys_listxattrat sys_listxattrat 471 - 466 common removexattrat sys_removexattrat sys_removexattrat 472 - 467 common open_tree_attr sys_open_tree_attr sys_open_tree_attr 473 - 468 common file_getattr sys_file_getattr sys_file_getattr 474 - 469 common file_setattr sys_file_setattr sys_file_setattr 475 - 470 common listns sys_listns sys_listns 342 + 392 common semtimedop sys_semtimedop 343 + 393 common semget sys_semget 344 + 394 common semctl sys_semctl 345 + 395 common shmget sys_shmget 346 + 396 common shmctl sys_shmctl 347 + 397 common shmat sys_shmat 348 + 398 common shmdt sys_shmdt 349 + 399 common msgget sys_msgget 350 + 400 common msgsnd sys_msgsnd 351 + 401 common msgrcv sys_msgrcv 352 + 402 common msgctl sys_msgctl 
353 + 424 common pidfd_send_signal sys_pidfd_send_signal 354 + 425 common io_uring_setup sys_io_uring_setup 355 + 426 common io_uring_enter sys_io_uring_enter 356 + 427 common io_uring_register sys_io_uring_register 357 + 428 common open_tree sys_open_tree 358 + 429 common move_mount sys_move_mount 359 + 430 common fsopen sys_fsopen 360 + 431 common fsconfig sys_fsconfig 361 + 432 common fsmount sys_fsmount 362 + 433 common fspick sys_fspick 363 + 434 common pidfd_open sys_pidfd_open 364 + 435 common clone3 sys_clone3 365 + 436 common close_range sys_close_range 366 + 437 common openat2 sys_openat2 367 + 438 common pidfd_getfd sys_pidfd_getfd 368 + 439 common faccessat2 sys_faccessat2 369 + 440 common process_madvise sys_process_madvise 370 + 441 common epoll_pwait2 sys_epoll_pwait2 371 + 442 common mount_setattr sys_mount_setattr 372 + 443 common quotactl_fd sys_quotactl_fd 373 + 444 common landlock_create_ruleset sys_landlock_create_ruleset 374 + 445 common landlock_add_rule sys_landlock_add_rule 375 + 446 common landlock_restrict_self sys_landlock_restrict_self 376 + 447 common memfd_secret sys_memfd_secret 377 + 448 common process_mrelease sys_process_mrelease 378 + 449 common futex_waitv sys_futex_waitv 379 + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node 380 + 451 common cachestat sys_cachestat 381 + 452 common fchmodat2 sys_fchmodat2 382 + 453 common map_shadow_stack sys_map_shadow_stack 383 + 454 common futex_wake sys_futex_wake 384 + 455 common futex_wait sys_futex_wait 385 + 456 common futex_requeue sys_futex_requeue 386 + 457 common statmount sys_statmount 387 + 458 common listmount sys_listmount 388 + 459 common lsm_get_self_attr sys_lsm_get_self_attr 389 + 460 common lsm_set_self_attr sys_lsm_set_self_attr 390 + 461 common lsm_list_modules sys_lsm_list_modules 391 + 462 common mseal sys_mseal 392 + 463 common setxattrat sys_setxattrat 393 + 464 common getxattrat sys_getxattrat 394 + 465 common listxattrat sys_listxattrat 395 + 466 common removexattrat sys_removexattrat 396 + 467 common open_tree_attr sys_open_tree_attr 397 + 468 common file_getattr sys_file_getattr 398 + 469 common file_setattr sys_file_setattr 399 + 470 common listns sys_listns 400 + 471 common rseq_slice_yield sys_rseq_slice_yield
+1
tools/perf/arch/sh/entry/syscalls/syscall.tbl
··· 474 474 468 common file_getattr sys_file_getattr 475 475 469 common file_setattr sys_file_setattr 476 476 470 common listns sys_listns 477 + 471 common rseq_slice_yield sys_rseq_slice_yield
+2 -1
tools/perf/arch/sparc/entry/syscalls/syscall.tbl
··· 480 480 432 common fsmount sys_fsmount 481 481 433 common fspick sys_fspick 482 482 434 common pidfd_open sys_pidfd_open 483 - # 435 reserved for clone3 483 + 435 common clone3 __sys_clone3 484 484 436 common close_range sys_close_range 485 485 437 common openat2 sys_openat2 486 486 438 common pidfd_getfd sys_pidfd_getfd ··· 516 516 468 common file_getattr sys_file_getattr 517 517 469 common file_setattr sys_file_setattr 518 518 470 common listns sys_listns 519 + 471 common rseq_slice_yield sys_rseq_slice_yield
+1
tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
··· 476 476 468 i386 file_getattr sys_file_getattr 477 477 469 i386 file_setattr sys_file_setattr 478 478 470 i386 listns sys_listns 479 + 471 i386 rseq_slice_yield sys_rseq_slice_yield
+1
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
··· 395 395 468 common file_getattr sys_file_getattr 396 396 469 common file_setattr sys_file_setattr 397 397 470 common listns sys_listns 398 + 471 common rseq_slice_yield sys_rseq_slice_yield 398 399 399 400 # 400 401 # Due to a historical design error, certain syscalls are numbered differently
+1
tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
··· 441 441 468 common file_getattr sys_file_getattr 442 442 469 common file_setattr sys_file_setattr 443 443 470 common listns sys_listns 444 + 471 common rseq_slice_yield sys_rseq_slice_yield
+7 -2
tools/perf/builtin-ftrace.c
··· 18 18 #include <poll.h> 19 19 #include <ctype.h> 20 20 #include <linux/capability.h> 21 + #include <linux/err.h> 21 22 #include <linux/string.h> 22 23 #include <sys/stat.h> 23 24 ··· 1210 1209 ftrace->graph_verbose = 0; 1211 1210 1212 1211 ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL); 1213 - if (ftrace->profile_hash == NULL) 1214 - return -ENOMEM; 1212 + if (IS_ERR(ftrace->profile_hash)) { 1213 + int err = PTR_ERR(ftrace->profile_hash); 1214 + 1215 + ftrace->profile_hash = NULL; 1216 + return err; 1217 + } 1215 1218 1216 1219 return 0; 1217 1220 }
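The fix above matters because hashmap__new() (perf's copy of the libbpf hashmap) reports allocation failure through an error-encoded pointer rather than NULL, so a NULL check lets a bad pointer through. A minimal standalone sketch of the ERR_PTR/IS_ERR/PTR_ERR convention the fix relies on (simplified from include/linux/err.h; alloc_thing() is a hypothetical stand-in for hashmap__new()):

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

/* Hypothetical allocator: encodes -ENOMEM in the returned pointer. */
static void *alloc_thing(int fail)
{
        return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)0x1000;
}

int main(void)
{
        void *p = alloc_thing(1);

        if (IS_ERR(p)) {        /* a plain NULL check would miss this */
                printf("error: %ld\n", PTR_ERR(p));
                p = NULL;
        }
        return 0;
}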
+2 -1
tools/perf/pmu-events/Build
··· 214 214 quiet_cmd_rm = RM $^ 215 215 216 216 prune_orphans: $(ORPHAN_FILES) 217 - $(Q)$(call echo-cmd,rm)rm -f $^ 217 + # The list of files can be long; feed it to rm via xargs to avoid exceeding the argument-length limit (ARG_MAX). 218 + $(Q)$(call echo-cmd,rm)echo "$^" | xargs rm -f 218 219 219 220 JEVENTS_DEPS += prune_orphans 220 221 endif
+1
tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h
··· 77 77 */ 78 78 #define IRQ_WORK_VECTOR 0xf6 79 79 80 + /* IRQ vector for PMIs when running a guest with a mediated PMU. */ 80 81 #define PERF_GUEST_MEDIATED_PMI_VECTOR 0xf5 81 82 82 83 #define DEFERRED_ERROR_VECTOR 0xf4
+1
tools/perf/trace/beauty/include/uapi/linux/fs.h
··· 253 253 #define FS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */ 254 254 #define FS_XFLAG_DAX 0x00008000 /* use DAX for IO */ 255 255 #define FS_XFLAG_COWEXTSIZE 0x00010000 /* CoW extent size allocator hint */ 256 + #define FS_XFLAG_VERITY 0x00020000 /* fs-verity enabled */ 256 257 #define FS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ 257 258 258 259 /* the read-only stuff doesn't really belong here, but any other place is
+11 -2
tools/perf/trace/beauty/include/uapi/linux/mount.h
··· 61 61 /* 62 62 * open_tree() flags. 63 63 */ 64 - #define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */ 64 + #define OPEN_TREE_CLONE (1 << 0) /* Clone the target tree and attach the clone */ 65 + #define OPEN_TREE_NAMESPACE (1 << 1) /* Clone the target tree into a new mount namespace */ 65 66 #define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */ 66 67 67 68 /* ··· 198 197 */ 199 198 struct mnt_id_req { 200 199 __u32 size; 201 - __u32 mnt_ns_fd; 200 + union { 201 + __u32 mnt_ns_fd; 202 + __u32 mnt_fd; 203 + }; 202 204 __u64 mnt_id; 203 205 __u64 param; 204 206 __u64 mnt_ns_id; ··· 235 231 */ 236 232 #define LSMT_ROOT 0xffffffffffffffff /* root mount */ 237 233 #define LISTMOUNT_REVERSE (1 << 0) /* List later mounts first */ 234 + 235 + /* 236 + * @flag bits for statmount(2) 237 + */ 238 + #define STATMOUNT_BY_FD 0x00000001U /* want mountinfo for given fd */ 238 239 239 240 #endif /* _UAPI_LINUX_MOUNT_H */
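For context on how these flags are consumed, open_tree(2) is usually reached via syscall(2). A minimal sketch using the long-standing OPEN_TREE_CLONE bit (the new OPEN_TREE_NAMESPACE bit would be OR'd in the same way; SYS_open_tree assumes reasonably recent libc headers, and the call needs CAP_SYS_ADMIN):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef OPEN_TREE_CLONE
#define OPEN_TREE_CLONE		(1 << 0)
#define OPEN_TREE_CLOEXEC	O_CLOEXEC
#endif

int main(void)
{
        /* Detached clone of the mount tree rooted at /mnt. */
        int fd = syscall(SYS_open_tree, AT_FDCWD, "/mnt",
                         OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);

        if (fd < 0)
                perror("open_tree");
        else
                close(fd);
        return 0;
}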
+37
tools/perf/trace/beauty/include/uapi/linux/prctl.h
··· 386 386 # define PR_FUTEX_HASH_SET_SLOTS 1 387 387 # define PR_FUTEX_HASH_GET_SLOTS 2 388 388 389 + /* RSEQ time slice extensions */ 390 + #define PR_RSEQ_SLICE_EXTENSION 79 391 + # define PR_RSEQ_SLICE_EXTENSION_GET 1 392 + # define PR_RSEQ_SLICE_EXTENSION_SET 2 393 + /* 394 + * Bits for RSEQ_SLICE_EXTENSION_GET/SET 395 + * PR_RSEQ_SLICE_EXT_ENABLE: Enable 396 + */ 397 + # define PR_RSEQ_SLICE_EXT_ENABLE 0x01 398 + 399 + /* 400 + * Get the current indirect branch tracking configuration for the current 401 + * thread, this will be the value configured via PR_SET_INDIR_BR_LP_STATUS. 402 + */ 403 + #define PR_GET_INDIR_BR_LP_STATUS 80 404 + 405 + /* 406 + * Set the indirect branch tracking configuration. PR_INDIR_BR_LP_ENABLE will 407 + * enable cpu feature for user thread, to track all indirect branches and ensure 408 + * they land on arch defined landing pad instruction. 409 + * x86 - If enabled, an indirect branch must land on an ENDBRANCH instruction. 410 + * arch64 - If enabled, an indirect branch must land on a BTI instruction. 411 + * riscv - If enabled, an indirect branch must land on an lpad instruction. 412 + * PR_INDIR_BR_LP_DISABLE will disable feature for user thread and indirect 413 + * branches will no more be tracked by cpu to land on arch defined landing pad 414 + * instruction. 415 + */ 416 + #define PR_SET_INDIR_BR_LP_STATUS 81 417 + # define PR_INDIR_BR_LP_ENABLE (1UL << 0) 418 + 419 + /* 420 + * Prevent further changes to the specified indirect branch tracking 421 + * configuration. All bits may be locked via this call, including 422 + * undefined bits. 423 + */ 424 + #define PR_LOCK_INDIR_BR_LP_STATUS 82 425 + 389 426 #endif /* _LINUX_PRCTL_H */
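A minimal userspace sketch of turning the new RSEQ slice extension on via prctl(2). The constants mirror the header above; the exact argument convention (arg2 as the GET/SET selector, arg3 carrying the enable bit) is an assumption based on those definitions, and a kernel without the feature is expected to fail the call:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_RSEQ_SLICE_EXTENSION
#define PR_RSEQ_SLICE_EXTENSION		79
#define PR_RSEQ_SLICE_EXTENSION_GET	1
#define PR_RSEQ_SLICE_EXTENSION_SET	2
#define PR_RSEQ_SLICE_EXT_ENABLE	0x01
#endif

int main(void)
{
        /* Assumed convention: select SET, pass the enable bit. */
        if (prctl(PR_RSEQ_SLICE_EXTENSION, PR_RSEQ_SLICE_EXTENSION_SET,
                  PR_RSEQ_SLICE_EXT_ENABLE, 0, 0))
                perror("PR_RSEQ_SLICE_EXTENSION");
        else
                printf("rseq slice extension enabled\n");
        return 0;
}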
+1 -1
tools/perf/util/annotate-arch/annotate-loongarch.c
··· 93 93 start = map__unmap_ip(map, sym->start); 94 94 end = map__unmap_ip(map, sym->end); 95 95 96 - ops->target.outside = target.addr < start || target.addr > end; 96 + ops->target.outside = target.addr < start || target.addr >= end; 97 97 98 98 if (maps__find_ams(thread__maps(ms->thread), &target) == 0 && 99 99 map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
+4 -1
tools/perf/util/annotate.c
··· 44 44 #include "strbuf.h" 45 45 #include <regex.h> 46 46 #include <linux/bitops.h> 47 + #include <linux/err.h> 47 48 #include <linux/kernel.h> 48 49 #include <linux/string.h> 49 50 #include <linux/zalloc.h> ··· 138 137 return -1; 139 138 140 139 src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL); 141 - if (src->samples == NULL) 140 + if (IS_ERR(src->samples)) { 142 141 zfree(&src->histograms); 142 + src->samples = NULL; 143 + } 143 144 144 145 return src->histograms ? 0 : -1; 145 146 }
+1 -1
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
··· 549 549 /* 550 550 * Process the PE_CONTEXT packets if we have a valid contextID or VMID. 551 551 * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2 552 - * as VMID, Bit ETM_OPT_CTXTID2 is set in this case. 552 + * as VMID, Format attribute 'contextid2' is set in this case. 553 553 */ 554 554 switch (cs_etm__get_pid_fmt(etmq)) { 555 555 case CS_ETM_PIDFMT_CTXTID:
+13 -23
tools/perf/util/cs-etm.c
··· 194 194 * CS_ETM_PIDFMT_CTXTID2: CONTEXTIDR_EL2 is traced. 195 195 * CS_ETM_PIDFMT_NONE: No context IDs 196 196 * 197 - * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2 197 + * It's possible that the two format attributes 'contextid1' and 'contextid2' 198 198 * are enabled at the same time when the session runs on an EL2 kernel. 199 199 * This means the CONTEXTIDR_EL1 and CONTEXTIDR_EL2 both will be 200 200 * recorded in the trace data, the tool will selectively use ··· 210 210 if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) { 211 211 val = metadata[CS_ETM_ETMCR]; 212 212 /* CONTEXTIDR is traced */ 213 - if (val & BIT(ETM_OPT_CTXTID)) 213 + if (val & ETMCR_CTXTID) 214 214 return CS_ETM_PIDFMT_CTXTID; 215 215 } else { 216 216 val = metadata[CS_ETMV4_TRCCONFIGR]; 217 217 /* CONTEXTIDR_EL2 is traced */ 218 - if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT))) 218 + if (val & (TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT)) 219 219 return CS_ETM_PIDFMT_CTXTID2; 220 220 /* CONTEXTIDR_EL1 is traced */ 221 - else if (val & BIT(ETM4_CFG_BIT_CTXTID)) 221 + else if (val & TRCCONFIGR_CID) 222 222 return CS_ETM_PIDFMT_CTXTID; 223 223 } 224 224 ··· 2914 2914 return 0; 2915 2915 } 2916 2916 2917 - static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm) 2917 + static void cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm) 2918 2918 { 2919 - struct evsel *evsel; 2920 - struct evlist *evlist = etm->session->evlist; 2919 + /* Take first ETM as all options will be the same for all ETMs */ 2920 + u64 *metadata = etm->metadata[0]; 2921 2921 2922 2922 /* Override timeless mode with user input from --itrace=Z */ 2923 2923 if (etm->synth_opts.timeless_decoding) { 2924 2924 etm->timeless_decoding = true; 2925 - return 0; 2925 + return; 2926 2926 } 2927 2927 2928 - /* 2929 - * Find the cs_etm evsel and look at what its timestamp setting was 2930 - */ 2931 - evlist__for_each_entry(evlist, evsel) 2932 - if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) { 2933 - etm->timeless_decoding = 2934 - !(evsel->core.attr.config & BIT(ETM_OPT_TS)); 2935 - return 0; 2936 - } 2937 - 2938 - pr_err("CS ETM: Couldn't find ETM evsel\n"); 2939 - return -EINVAL; 2928 + if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) 2929 + etm->timeless_decoding = !(metadata[CS_ETM_ETMCR] & ETMCR_TIMESTAMP_EN); 2930 + else 2931 + etm->timeless_decoding = !(metadata[CS_ETMV4_TRCCONFIGR] & TRCCONFIGR_TS); 2940 2932 } 2941 2933 2942 2934 /* ··· 3491 3499 etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace; 3492 3500 session->auxtrace = &etm->auxtrace; 3493 3501 3494 - err = cs_etm__setup_timeless_decoding(etm); 3495 - if (err) 3496 - return err; 3502 + cs_etm__setup_timeless_decoding(etm); 3497 3503 3498 3504 etm->tc.time_shift = tc->time_shift; 3499 3505 etm->tc.time_mult = tc->time_mult;
+15
tools/perf/util/cs-etm.h
··· 230 230 /* CoreSight trace ID is currently the bottom 7 bits of the value */ 231 231 #define CORESIGHT_TRACE_ID_VAL_MASK GENMASK(6, 0) 232 232 233 + /* ETMv4 CONFIGR register bits */ 234 + #define TRCCONFIGR_BB BIT(3) 235 + #define TRCCONFIGR_CCI BIT(4) 236 + #define TRCCONFIGR_CID BIT(6) 237 + #define TRCCONFIGR_VMID BIT(7) 238 + #define TRCCONFIGR_TS BIT(11) 239 + #define TRCCONFIGR_RS BIT(12) 240 + #define TRCCONFIGR_VMIDOPT BIT(15) 241 + 242 + /* ETMv3 ETMCR register bits */ 243 + #define ETMCR_CYC_ACC BIT(12) 244 + #define ETMCR_CTXTID BIT(14) 245 + #define ETMCR_TIMESTAMP_EN BIT(28) 246 + #define ETMCR_RETURN_STACK BIT(29) 247 + 233 248 int cs_etm__process_auxtrace_info(union perf_event *event, 234 249 struct perf_session *session); 235 250 void cs_etm_get_default_config(const struct perf_pmu *pmu, struct perf_event_attr *attr);
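A small standalone sketch (not part of the patch) showing how the named TRCCONFIGR bits read in practice, mirroring the cs_etm__get_pid_fmt() logic rewritten above:

#include <stdio.h>

#define BIT(n)			(1ULL << (n))
#define TRCCONFIGR_CID		BIT(6)
#define TRCCONFIGR_VMID		BIT(7)
#define TRCCONFIGR_VMIDOPT	BIT(15)

static const char *pid_fmt(unsigned long long trcconfigr)
{
        if (trcconfigr & (TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT))
                return "CONTEXTIDR_EL2 (contextid2)";
        if (trcconfigr & TRCCONFIGR_CID)
                return "CONTEXTIDR_EL1 (contextid1)";
        return "none";
}

int main(void)
{
        printf("%s\n", pid_fmt(TRCCONFIGR_CID));	/* contextid1 */
        printf("%s\n", pid_fmt(TRCCONFIGR_VMID));	/* contextid2 */
        return 0;
}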
+1 -1
tools/perf/util/disasm.c
··· 384 384 start = map__unmap_ip(map, sym->start); 385 385 end = map__unmap_ip(map, sym->end); 386 386 387 - ops->target.outside = target.addr < start || target.addr > end; 387 + ops->target.outside = target.addr < start || target.addr >= end; 388 388 389 389 /* 390 390 * FIXME: things like this in _cpp_lex_token (gcc's cc1 program):
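Here, as in the loongarch annotate change above, the point is that sym->end is exclusive, so an address equal to end lies outside the symbol. A trivial illustration of the half-open check (standalone, not perf code):

#include <stdio.h>

/* A symbol spans [start, end): 'end' itself belongs to the next symbol. */
static int outside(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr < start || addr >= end;
}

int main(void)
{
        printf("%d\n", outside(0x1000, 0x1000, 0x1040));	/* 0: first byte */
        printf("%d\n", outside(0x1040, 0x1000, 0x1040));	/* 1: one past end */
        return 0;
}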
+5
tools/perf/util/synthetic-events.c
··· 703 703 704 704 memcpy(event->mmap2.filename, dso__long_name(dso), dso__long_name_len(dso) + 1); 705 705 706 + /* Clear stale build ID from previous module iteration */ 707 + event->mmap2.header.misc &= ~PERF_RECORD_MISC_MMAP_BUILD_ID; 708 + memset(event->mmap2.build_id, 0, sizeof(event->mmap2.build_id)); 709 + event->mmap2.build_id_size = 0; 710 + 706 711 perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false); 707 712 } else { 708 713 size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64));
+5
tools/power/cpupower/cpupower-service.conf
··· 30 30 # its policy for the relative importance of performance versus energy savings to 31 31 # the processor. See man CPUPOWER-SET(1) for additional details 32 32 #PERF_BIAS= 33 + 34 + # Set the Energy Performance Preference 35 + # Available options can be read from 36 + # /sys/devices/system/cpu/cpufreq/policy0/energy_performance_available_preferences 37 + #EPP=
+6
tools/power/cpupower/cpupower.sh
··· 23 23 cpupower set -b "$PERF_BIAS" > /dev/null || ESTATUS=1 24 24 fi 25 25 26 + # apply Energy Performance Preference 27 + if test -n "$EPP" 28 + then 29 + cpupower set -e "$EPP" > /dev/null || ESTATUS=1 30 + fi 31 + 26 32 exit $ESTATUS
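Under the hood, `cpupower set -e` lands in cpufreq sysfs. A minimal C sketch of the equivalent write; the policy0 path and the value balance_performance are illustrative, and the valid strings come from energy_performance_available_preferences:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/devices/system/cpu/cpufreq/policy0/"
                        "energy_performance_preference", "w");

        if (!f) {
                perror("EPP not available");
                return 1;
        }
        fputs("balance_performance", f);
        return fclose(f) ? 1 : 0;
}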
+5 -1
tools/power/cpupower/utils/cpupower-set.c
··· 124 124 } 125 125 126 126 if (params.turbo_boost) { 127 - ret = cpupower_set_turbo_boost(turbo_boost); 127 + if (cpupower_cpu_info.vendor == X86_VENDOR_INTEL) 128 + ret = cpupower_set_intel_turbo_boost(turbo_boost); 129 + else 130 + ret = cpupower_set_generic_turbo_boost(turbo_boost); 131 + 128 132 if (ret) 129 133 fprintf(stderr, "Error setting turbo-boost\n"); 130 134 }
+4 -1
tools/power/cpupower/utils/helpers/helpers.h
··· 104 104 /* cpuid and cpuinfo helpers **************************/ 105 105 106 106 int cpufreq_has_generic_boost_support(bool *active); 107 - int cpupower_set_turbo_boost(int turbo_boost); 107 + int cpupower_set_generic_turbo_boost(int turbo_boost); 108 108 109 109 /* X86 ONLY ****************************************/ 110 110 #if defined(__i386__) || defined(__x86_64__) ··· 143 143 144 144 int cpufreq_has_x86_boost_support(unsigned int cpu, int *support, 145 145 int *active, int *states); 146 + int cpupower_set_intel_turbo_boost(int turbo_boost); 146 147 147 148 /* AMD P-State stuff **************************/ 148 149 bool cpupower_amd_pstate_enabled(void); ··· 189 188 190 189 static inline int cpufreq_has_x86_boost_support(unsigned int cpu, int *support, 191 190 int *active, int *states) 191 + { return -1; } 192 + static inline int cpupower_set_intel_turbo_boost(int turbo_boost) 192 193 { return -1; } 193 194 194 195 static inline bool cpupower_amd_pstate_enabled(void)
+39 -2
tools/power/cpupower/utils/helpers/misc.c
··· 19 19 { 20 20 int ret; 21 21 unsigned long long val; 22 + char linebuf[MAX_LINE_LEN]; 23 + char path[SYSFS_PATH_MAX]; 24 + char *endp; 22 25 23 26 *support = *active = *states = 0; 24 27 ··· 45 42 } 46 43 } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATE) { 47 44 amd_pstate_boost_init(cpu, support, active); 48 - } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_INTEL_IDA) 45 + } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_INTEL_IDA) { 49 46 *support = *active = 1; 47 + 48 + snprintf(path, sizeof(path), PATH_TO_CPU "intel_pstate/no_turbo"); 49 + 50 + if (!is_valid_path(path)) 51 + return 0; 52 + 53 + if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0) 54 + return -1; 55 + 56 + val = strtol(linebuf, &endp, 0); 57 + if (endp == linebuf || errno == ERANGE) 58 + return -1; 59 + 60 + *active = !val; 61 + } 62 + return 0; 63 + } 64 + 65 + int cpupower_set_intel_turbo_boost(int turbo_boost) 66 + { 67 + char path[SYSFS_PATH_MAX]; 68 + char linebuf[2] = {}; 69 + 70 + snprintf(path, sizeof(path), PATH_TO_CPU "intel_pstate/no_turbo"); 71 + 72 + /* Fallback to generic solution when intel_pstate driver not running */ 73 + if (!is_valid_path(path)) 74 + return cpupower_set_generic_turbo_boost(turbo_boost); 75 + 76 + snprintf(linebuf, sizeof(linebuf), "%d", !turbo_boost); 77 + 78 + if (cpupower_write_sysfs(path, linebuf, 2) <= 0) 79 + return -1; 80 + 50 81 return 0; 51 82 } 52 83 ··· 311 274 } 312 275 } 313 276 314 - int cpupower_set_turbo_boost(int turbo_boost) 277 + int cpupower_set_generic_turbo_boost(int turbo_boost) 315 278 { 316 279 char path[SYSFS_PATH_MAX]; 317 280 char linebuf[2] = {};
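The Intel-specific path works because intel_pstate exposes turbo through an inverted no_turbo attribute. A minimal standalone sketch of that inversion (error handling trimmed; assumes the intel_pstate driver is loaded):

#include <stdio.h>

static int set_turbo(int enable)
{
        FILE *f = fopen("/sys/devices/system/cpu/intel_pstate/no_turbo", "w");

        if (!f)
                return -1;
        fprintf(f, "%d", !enable);	/* no_turbo is the negation of turbo */
        return fclose(f);
}

int main(void)
{
        return set_turbo(1) ? 1 : 0;
}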
+2 -2
tools/power/cpupower/utils/powercap-info.c
··· 38 38 printf(" (%s)\n", mode ? "enabled" : "disabled"); 39 39 40 40 if (zone->has_power_uw) 41 - printf(_("%sPower can be monitored in micro Jules\n"), 41 + printf(_("%sPower can be monitored in micro Watts\n"), 42 42 pr_prefix); 43 43 44 44 if (zone->has_energy_uj) 45 - printf(_("%sPower can be monitored in micro Watts\n"), 45 + printf(_("%sEnergy can be monitored in micro Joules\n"), 46 46 pr_prefix); 47 47 48 48 printf("\n");
+1
tools/scripts/syscall.tbl
··· 411 411 468 common file_getattr sys_file_getattr 412 412 469 common file_setattr sys_file_setattr 413 413 470 common listns sys_listns 414 + 471 common rseq_slice_yield sys_rseq_slice_yield
+1 -1
tools/testing/selftests/bpf/Makefile
··· 409 409 CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \ 410 410 LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \ 411 411 EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \ 412 - HOSTPKG_CONFIG=$(PKG_CONFIG) \ 412 + HOSTPKG_CONFIG='$(PKG_CONFIG)' \ 413 413 OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ) 414 414 415 415 # Get Clang's default includes on this system, as opposed to those seen by
+53 -3
tools/testing/selftests/bpf/progs/exceptions_fail.c
··· 8 8 #include "bpf_experimental.h" 9 9 10 10 extern void bpf_rcu_read_lock(void) __ksym; 11 + extern void bpf_rcu_read_unlock(void) __ksym; 12 + extern void bpf_preempt_disable(void) __ksym; 13 + extern void bpf_preempt_enable(void) __ksym; 14 + extern void bpf_local_irq_save(unsigned long *) __ksym; 15 + extern void bpf_local_irq_restore(unsigned long *) __ksym; 11 16 12 17 #define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8))) 13 18 ··· 136 131 } 137 132 138 133 SEC("?tc") 139 - __failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region") 134 + __failure __msg("bpf_throw cannot be used inside bpf_rcu_read_lock-ed region") 140 135 int reject_with_rcu_read_lock(void *ctx) 141 136 { 142 137 bpf_rcu_read_lock(); ··· 152 147 } 153 148 154 149 SEC("?tc") 155 - __failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region") 150 + __failure __msg("bpf_throw cannot be used inside bpf_rcu_read_lock-ed region") 156 151 int reject_subprog_with_rcu_read_lock(void *ctx) 157 152 { 158 153 bpf_rcu_read_lock(); 159 - return throwing_subprog(ctx); 154 + throwing_subprog(ctx); 155 + bpf_rcu_read_unlock(); 156 + return 0; 160 157 } 161 158 162 159 static bool rbless(struct bpf_rb_node *n1, const struct bpf_rb_node *n2) ··· 350 343 bpf_loop(5, loop_cb1, NULL, 0); 351 344 else 352 345 bpf_loop(5, loop_cb2, NULL, 0); 346 + return 0; 347 + } 348 + 349 + __noinline static int always_throws(void) 350 + { 351 + bpf_throw(0); 352 + return 0; 353 + } 354 + 355 + __noinline static int rcu_lock_then_throw(void) 356 + { 357 + bpf_rcu_read_lock(); 358 + bpf_throw(0); 359 + return 0; 360 + } 361 + 362 + SEC("?tc") 363 + __failure __msg("bpf_throw cannot be used inside bpf_rcu_read_lock-ed region") 364 + int reject_subprog_rcu_lock_throw(void *ctx) 365 + { 366 + rcu_lock_then_throw(); 367 + return 0; 368 + } 369 + 370 + SEC("?tc") 371 + __failure __msg("bpf_throw cannot be used inside bpf_preempt_disable-ed region") 372 + int reject_subprog_throw_preempt_lock(void *ctx) 373 + { 374 + bpf_preempt_disable(); 375 + always_throws(); 376 + bpf_preempt_enable(); 377 + return 0; 378 + } 379 + 380 + SEC("?tc") 381 + __failure __msg("bpf_throw cannot be used inside bpf_local_irq_save-ed region") 382 + int reject_subprog_throw_irq_lock(void *ctx) 383 + { 384 + unsigned long flags; 385 + 386 + bpf_local_irq_save(&flags); 387 + always_throws(); 388 + bpf_local_irq_restore(&flags); 353 389 return 0; 354 390 } 355 391
+94
tools/testing/selftests/bpf/progs/verifier_bounds.c
··· 2037 2037 : __clobber_all); 2038 2038 } 2039 2039 2040 + SEC("socket") 2041 + __description("maybe_fork_scalars: OR with constant rejects OOB") 2042 + __failure __msg("invalid access to map value") 2043 + __naked void or_scalar_fork_rejects_oob(void) 2044 + { 2045 + asm volatile (" \ 2046 + r1 = 0; \ 2047 + *(u64*)(r10 - 8) = r1; \ 2048 + r2 = r10; \ 2049 + r2 += -8; \ 2050 + r1 = %[map_hash_8b] ll; \ 2051 + call %[bpf_map_lookup_elem]; \ 2052 + if r0 == 0 goto l0_%=; \ 2053 + r9 = r0; \ 2054 + r6 = *(u64*)(r9 + 0); \ 2055 + r6 s>>= 63; \ 2056 + r6 |= 8; \ 2057 + /* r6 is -1 (current) or 8 (pushed) */ \ 2058 + if r6 s< 0 goto l0_%=; \ 2059 + /* pushed path: r6 = 8, OOB for value_size=8 */ \ 2060 + r9 += r6; \ 2061 + r0 = *(u8*)(r9 + 0); \ 2062 + l0_%=: r0 = 0; \ 2063 + exit; \ 2064 + " : 2065 + : __imm(bpf_map_lookup_elem), 2066 + __imm_addr(map_hash_8b) 2067 + : __clobber_all); 2068 + } 2069 + 2070 + SEC("socket") 2071 + __description("maybe_fork_scalars: AND with constant still works") 2072 + __success __retval(0) 2073 + __naked void and_scalar_fork_still_works(void) 2074 + { 2075 + asm volatile (" \ 2076 + r1 = 0; \ 2077 + *(u64*)(r10 - 8) = r1; \ 2078 + r2 = r10; \ 2079 + r2 += -8; \ 2080 + r1 = %[map_hash_8b] ll; \ 2081 + call %[bpf_map_lookup_elem]; \ 2082 + if r0 == 0 goto l0_%=; \ 2083 + r9 = r0; \ 2084 + r6 = *(u64*)(r9 + 0); \ 2085 + r6 s>>= 63; \ 2086 + r6 &= 4; \ 2087 + /* \ 2088 + * r6 is 0 (pushed, 0&4==0) or 4 (current) \ 2089 + * both within value_size=8 \ 2090 + */ \ 2091 + if r6 s< 0 goto l0_%=; \ 2092 + r9 += r6; \ 2093 + r0 = *(u8*)(r9 + 0); \ 2094 + l0_%=: r0 = 0; \ 2095 + exit; \ 2096 + " : 2097 + : __imm(bpf_map_lookup_elem), 2098 + __imm_addr(map_hash_8b) 2099 + : __clobber_all); 2100 + } 2101 + 2102 + SEC("socket") 2103 + __description("maybe_fork_scalars: OR with constant allows in-bounds") 2104 + __success __retval(0) 2105 + __naked void or_scalar_fork_allows_inbounds(void) 2106 + { 2107 + asm volatile (" \ 2108 + r1 = 0; \ 2109 + *(u64*)(r10 - 8) = r1; \ 2110 + r2 = r10; \ 2111 + r2 += -8; \ 2112 + r1 = %[map_hash_8b] ll; \ 2113 + call %[bpf_map_lookup_elem]; \ 2114 + if r0 == 0 goto l0_%=; \ 2115 + r9 = r0; \ 2116 + r6 = *(u64*)(r9 + 0); \ 2117 + r6 s>>= 63; \ 2118 + r6 |= 4; \ 2119 + /* \ 2120 + * r6 is -1 (current) or 4 (pushed) \ 2121 + * pushed path: r6 = 4, within value_size=8 \ 2122 + */ \ 2123 + if r6 s< 0 goto l0_%=; \ 2124 + r9 += r6; \ 2125 + r0 = *(u8*)(r9 + 0); \ 2126 + l0_%=: r0 = 0; \ 2127 + exit; \ 2128 + " : 2129 + : __imm(bpf_map_lookup_elem), 2130 + __imm_addr(map_hash_8b) 2131 + : __clobber_all); 2132 + } 2133 + 2040 2134 char _license[] SEC("license") = "GPL";
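The arithmetic these tests lean on is easy to confirm in plain C: after an arithmetic right shift by 63 the value is 0 or -1, and OR'ing in a constant produces two concrete results rather than one tight interval, so the verifier has to explore both. A small illustration (ordinary C, not BPF):

#include <stdio.h>

int main(void)
{
        long long v[2] = { 0, -1 };	/* the two possible s>>63 outcomes */

        for (int i = 0; i < 2; i++)
                printf("%lld | 8 = %lld, %lld & 4 = %lld\n",
                       v[i], v[i] | 8, v[i], v[i] & 4);
        /* OR: 8 or -1 (8 is out of bounds for an 8-byte value, -1 is
         * rejected by the sign check); AND: 0 or 4, both in bounds. */
        return 0;
}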
+22
tools/testing/selftests/bpf/progs/verifier_bswap.c
··· 91 91 BSWAP_RANGE_TEST(le64_range, "le64", 0x3f00, 0x3f000000000000) 92 92 #endif 93 93 94 + SEC("socket") 95 + __description("BSWAP, reset reg id") 96 + __failure __msg("math between fp pointer and register with unbounded min value is not allowed") 97 + __naked void bswap_reset_reg_id(void) 98 + { 99 + asm volatile (" \ 100 + call %[bpf_ktime_get_ns]; \ 101 + r1 = r0; \ 102 + r0 = be16 r0; \ 103 + if r0 != 1 goto l0_%=; \ 104 + r2 = r10; \ 105 + r2 += -512; \ 106 + r2 += r1; \ 107 + *(u8 *)(r2 + 0) = 0; \ 108 + l0_%=: \ 109 + r0 = 0; \ 110 + exit; \ 111 + " : 112 + : __imm(bpf_ktime_get_ns) 113 + : __clobber_all); 114 + } 115 + 94 116 #else 95 117 96 118 SEC("socket")
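The id reset is needed because be16 is lossy: very different 64-bit inputs collapse to the same swapped 16-bit result, so a bound learned on the swapped register says nothing about the register it was copied from. A tiny illustration in plain C via the compiler builtin:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Two very different 64-bit values... */
        uint64_t a = 0x0000000000000100ULL;
        uint64_t b = 0xffffffffffff0100ULL;

        /* ...collapse to the same be16 result (1 on little-endian). */
        printf("%#x %#x\n",
               (unsigned)__builtin_bswap16((uint16_t)a),
               (unsigned)__builtin_bswap16((uint16_t)b));
        return 0;
}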
+108
tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
··· 348 348 : __clobber_all); 349 349 } 350 350 351 + /* 352 + * Test that sync_linked_regs() checks reg->id (the linked target register) 353 + * for BPF_ADD_CONST32 rather than known_reg->id (the branch register). 354 + */ 355 + SEC("socket") 356 + __success 357 + __naked void scalars_alu32_zext_linked_reg(void) 358 + { 359 + asm volatile (" \ 360 + call %[bpf_get_prandom_u32]; \ 361 + w6 = w0; /* r6 in [0, 0xFFFFFFFF] */ \ 362 + r7 = r6; /* linked: same id as r6 */ \ 363 + w7 += 1; /* alu32: r7.id |= BPF_ADD_CONST32 */ \ 364 + r8 = 0xFFFFffff ll; \ 365 + if r6 < r8 goto l0_%=; \ 366 + /* r6 in [0xFFFFFFFF, 0xFFFFFFFF] */ \ 367 + /* sync_linked_regs: known_reg=r6, reg=r7 */ \ 368 + /* CPU: w7 = (u32)(0xFFFFFFFF + 1) = 0, zext -> r7 = 0 */ \ 369 + /* With fix: r7 64-bit = [0, 0] (zext applied) */ \ 370 + /* Without fix: r7 64-bit = [0x100000000] (no zext) */ \ 371 + r7 >>= 32; \ 372 + if r7 == 0 goto l0_%=; \ 373 + r0 /= 0; /* unreachable with fix */ \ 374 + l0_%=: \ 375 + r0 = 0; \ 376 + exit; \ 377 + " : 378 + : __imm(bpf_get_prandom_u32) 379 + : __clobber_all); 380 + } 381 + 382 + /* 383 + * Test that sync_linked_regs() skips propagation when one register used 384 + * alu32 (BPF_ADD_CONST32) and the other used alu64 (BPF_ADD_CONST64). 385 + * The delta relationship doesn't hold across different ALU widths. 386 + */ 387 + SEC("socket") 388 + __failure __msg("div by zero") 389 + __naked void scalars_alu32_alu64_cross_type(void) 390 + { 391 + asm volatile (" \ 392 + call %[bpf_get_prandom_u32]; \ 393 + w6 = w0; /* r6 in [0, 0xFFFFFFFF] */ \ 394 + r7 = r6; /* linked: same id as r6 */ \ 395 + w7 += 1; /* alu32: BPF_ADD_CONST32, delta = 1 */ \ 396 + r8 = r6; /* linked: same id as r6 */ \ 397 + r8 += 2; /* alu64: BPF_ADD_CONST64, delta = 2 */ \ 398 + r9 = 0xFFFFffff ll; \ 399 + if r7 < r9 goto l0_%=; \ 400 + /* r7 = 0xFFFFFFFF */ \ 401 + /* sync: known_reg=r7 (ADD_CONST32), reg=r8 (ADD_CONST64) */ \ 402 + /* Without fix: r8 = zext(0xFFFFFFFF + 1) = 0 */ \ 403 + /* With fix: r8 stays [2, 0x100000001] (r8 >= 2) */ \ 404 + if r8 > 0 goto l1_%=; \ 405 + goto l0_%=; \ 406 + l1_%=: \ 407 + r0 /= 0; /* div by zero */ \ 408 + l0_%=: \ 409 + r0 = 0; \ 410 + exit; \ 411 + " : 412 + : __imm(bpf_get_prandom_u32) 413 + : __clobber_all); 414 + } 415 + 416 + /* 417 + * Test that regsafe() prevents pruning when two paths reach the same program 418 + * point with linked registers carrying different ADD_CONST flags (one 419 + * BPF_ADD_CONST32 from alu32, another BPF_ADD_CONST64 from alu64). 420 + */ 421 + SEC("socket") 422 + __failure __msg("div by zero") 423 + __flag(BPF_F_TEST_STATE_FREQ) 424 + __naked void scalars_alu32_alu64_regsafe_pruning(void) 425 + { 426 + asm volatile (" \ 427 + call %[bpf_get_prandom_u32]; \ 428 + w6 = w0; /* r6 in [0, 0xFFFFFFFF] */ \ 429 + r7 = r6; /* linked: same id as r6 */ \ 430 + /* Get another random value for the path branch */ \ 431 + call %[bpf_get_prandom_u32]; \ 432 + if r0 > 0 goto l_pathb_%=; \ 433 + /* Path A: alu32 */ \ 434 + w7 += 1; /* BPF_ADD_CONST32, delta = 1 */\ 435 + goto l_merge_%=; \ 436 + l_pathb_%=: \ 437 + /* Path B: alu64 */ \ 438 + r7 += 1; /* BPF_ADD_CONST64, delta = 1 */\ 439 + l_merge_%=: \ 440 + /* Merge point: regsafe() compares path B against cached path A. */ \ 441 + /* Narrow r6 to trigger sync_linked_regs for r7 */ \ 442 + r9 = 0xFFFFffff ll; \ 443 + if r6 < r9 goto l0_%=; \ 444 + /* r6 = 0xFFFFFFFF */ \ 445 + /* sync: r7 = 0xFFFFFFFF + 1 = 0x100000000 */ \ 446 + /* Path A: zext -> r7 = 0 */ \ 447 + /* Path B: no zext -> r7 = 0x100000000 */ \ 448 + r7 >>= 32; \ 449 + if r7 == 0 goto l0_%=; \ 450 + r0 /= 0; /* div by zero on path B */ \ 451 + l0_%=: \ 452 + r0 = 0; \ 453 + exit; \ 454 + " : 455 + : __imm(bpf_get_prandom_u32) 456 + : __clobber_all); 457 + } 458 + 351 459 SEC("socket") 352 460 __success 353 461 void alu32_negative_offset(void)
+58
tools/testing/selftests/bpf/progs/verifier_sdiv.c
··· 1209 1209 : __clobber_all); 1210 1210 } 1211 1211 1212 + SEC("socket") 1213 + __description("SDIV32, INT_MIN divided by 2, imm") 1214 + __success __success_unpriv __retval(-1073741824) 1215 + __naked void sdiv32_int_min_div_2_imm(void) 1216 + { 1217 + asm volatile (" \ 1218 + w0 = %[int_min]; \ 1219 + w0 s/= 2; \ 1220 + exit; \ 1221 + " : 1222 + : __imm_const(int_min, INT_MIN) 1223 + : __clobber_all); 1224 + } 1225 + 1226 + SEC("socket") 1227 + __description("SDIV32, INT_MIN divided by 2, reg") 1228 + __success __success_unpriv __retval(-1073741824) 1229 + __naked void sdiv32_int_min_div_2_reg(void) 1230 + { 1231 + asm volatile (" \ 1232 + w0 = %[int_min]; \ 1233 + w1 = 2; \ 1234 + w0 s/= w1; \ 1235 + exit; \ 1236 + " : 1237 + : __imm_const(int_min, INT_MIN) 1238 + : __clobber_all); 1239 + } 1240 + 1241 + SEC("socket") 1242 + __description("SMOD32, INT_MIN modulo 2, imm") 1243 + __success __success_unpriv __retval(0) 1244 + __naked void smod32_int_min_mod_2_imm(void) 1245 + { 1246 + asm volatile (" \ 1247 + w0 = %[int_min]; \ 1248 + w0 s%%= 2; \ 1249 + exit; \ 1250 + " : 1251 + : __imm_const(int_min, INT_MIN) 1252 + : __clobber_all); 1253 + } 1254 + 1255 + SEC("socket") 1256 + __description("SMOD32, INT_MIN modulo -2, imm") 1257 + __success __success_unpriv __retval(0) 1258 + __naked void smod32_int_min_mod_neg2_imm(void) 1259 + { 1260 + asm volatile (" \ 1261 + w0 = %[int_min]; \ 1262 + w0 s%%= -2; \ 1263 + exit; \ 1264 + " : 1265 + : __imm_const(int_min, INT_MIN) 1266 + : __clobber_all); 1267 + } 1268 + 1269 + 1212 1270 #else 1213 1271 1214 1272 SEC("socket")
+12
tools/testing/selftests/hid/progs/hid_bpf_helpers.h
··· 6 6 #define __HID_BPF_HELPERS_H 7 7 8 8 /* "undefine" structs and enums in vmlinux.h, because we "override" them below */ 9 + #define bpf_wq bpf_wq___not_used 9 10 #define hid_bpf_ctx hid_bpf_ctx___not_used 10 11 #define hid_bpf_ops hid_bpf_ops___not_used 12 + #define hid_device hid_device___not_used 11 13 #define hid_report_type hid_report_type___not_used 12 14 #define hid_class_request hid_class_request___not_used 13 15 #define hid_bpf_attach_flags hid_bpf_attach_flags___not_used ··· 29 27 30 28 #include "vmlinux.h" 31 29 30 + #undef bpf_wq 32 31 #undef hid_bpf_ctx 33 32 #undef hid_bpf_ops 33 + #undef hid_device 34 34 #undef hid_report_type 35 35 #undef hid_class_request 36 36 #undef hid_bpf_attach_flags ··· 57 53 HID_FEATURE_REPORT = 2, 58 54 59 55 HID_REPORT_TYPES, 56 + }; 57 + 58 + struct hid_device { 59 + unsigned int id; 60 + } __attribute__((preserve_access_index)); 61 + 62 + struct bpf_wq { 63 + __u64 __opaque[2]; 60 64 }; 61 65 62 66 struct hid_bpf_ctx {
+1
tools/testing/selftests/kvm/Makefile.kvm
··· 71 71 TEST_GEN_PROGS_x86 += x86/cr4_cpuid_sync_test 72 72 TEST_GEN_PROGS_x86 += x86/dirty_log_page_splitting_test 73 73 TEST_GEN_PROGS_x86 += x86/feature_msrs_test 74 + TEST_GEN_PROGS_x86 += x86/evmcs_smm_controls_test 74 75 TEST_GEN_PROGS_x86 += x86/exit_on_emulation_failure_test 75 76 TEST_GEN_PROGS_x86 += x86/fastops_test 76 77 TEST_GEN_PROGS_x86 += x86/fix_hypercall_test
+1 -1
tools/testing/selftests/kvm/guest_memfd_test.c
··· 80 80 { 81 81 const unsigned long nodemask_0 = 1; /* nid: 0 */ 82 82 unsigned long nodemask = 0; 83 - unsigned long maxnode = 8; 83 + unsigned long maxnode = BITS_PER_TYPE(nodemask); 84 84 int policy; 85 85 char *mem; 86 86 int ret;
+23
tools/testing/selftests/kvm/include/x86/processor.h
··· 557 557 return cr0; 558 558 } 559 559 560 + static inline void set_cr0(uint64_t val) 561 + { 562 + __asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory"); 563 + } 564 + 560 565 static inline uint64_t get_cr3(void) 561 566 { 562 567 uint64_t cr3; ··· 569 564 __asm__ __volatile__("mov %%cr3, %[cr3]" 570 565 : /* output */ [cr3]"=r"(cr3)); 571 566 return cr3; 567 + } 568 + 569 + static inline void set_cr3(uint64_t val) 570 + { 571 + __asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory"); 572 572 } 573 573 574 574 static inline uint64_t get_cr4(void) ··· 588 578 static inline void set_cr4(uint64_t val) 589 579 { 590 580 __asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory"); 581 + } 582 + 583 + static inline uint64_t get_cr8(void) 584 + { 585 + uint64_t cr8; 586 + 587 + __asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8)); 588 + return cr8; 589 + } 590 + 591 + static inline void set_cr8(uint64_t val) 592 + { 593 + __asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory"); 591 594 } 592 595 593 596 static inline void set_idt(const struct desc_ptr *idt_desc)
+17
tools/testing/selftests/kvm/include/x86/smm.h
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #ifndef SELFTEST_KVM_SMM_H 3 + #define SELFTEST_KVM_SMM_H 4 + 5 + #include "kvm_util.h" 6 + 7 + #define SMRAM_SIZE 65536 8 + #define SMRAM_MEMSLOT ((1 << 16) | 1) 9 + #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE) 10 + 11 + void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, 12 + uint64_t smram_gpa, 13 + const void *smi_handler, size_t handler_size); 14 + 15 + void inject_smi(struct kvm_vcpu *vcpu); 16 + 17 + #endif /* SELFTEST_KVM_SMM_H */
+26
tools/testing/selftests/kvm/lib/x86/processor.c
··· 8 8 #include "kvm_util.h" 9 9 #include "pmu.h" 10 10 #include "processor.h" 11 + #include "smm.h" 11 12 #include "svm_util.h" 12 13 #include "sev.h" 13 14 #include "vmx.h" ··· 1444 1443 bool kvm_arch_has_default_irqchip(void) 1445 1444 { 1446 1445 return true; 1446 + } 1447 + 1448 + void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu, 1449 + uint64_t smram_gpa, 1450 + const void *smi_handler, size_t handler_size) 1451 + { 1452 + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa, 1453 + SMRAM_MEMSLOT, SMRAM_PAGES, 0); 1454 + TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, smram_gpa, 1455 + SMRAM_MEMSLOT) == smram_gpa, 1456 + "Could not allocate guest physical addresses for SMRAM"); 1457 + 1458 + memset(addr_gpa2hva(vm, smram_gpa), 0x0, SMRAM_SIZE); 1459 + memcpy(addr_gpa2hva(vm, smram_gpa) + 0x8000, smi_handler, handler_size); 1460 + vcpu_set_msr(vcpu, MSR_IA32_SMBASE, smram_gpa); 1461 + } 1462 + 1463 + void inject_smi(struct kvm_vcpu *vcpu) 1464 + { 1465 + struct kvm_vcpu_events events; 1466 + 1467 + vcpu_events_get(vcpu, &events); 1468 + events.smi.pending = 1; 1469 + events.flags |= KVM_VCPUEVENT_VALID_SMM; 1470 + vcpu_events_set(vcpu, &events); 1447 1471 }
+150
tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2026, Red Hat, Inc. 4 + * 5 + * Test that vmx_leave_smm() validates vmcs12 controls before re-entering 6 + * nested guest mode on RSM. 7 + */ 8 + #include <fcntl.h> 9 + #include <stdio.h> 10 + #include <stdlib.h> 11 + #include <string.h> 12 + #include <sys/ioctl.h> 13 + 14 + #include "test_util.h" 15 + #include "kvm_util.h" 16 + #include "smm.h" 17 + #include "hyperv.h" 18 + #include "vmx.h" 19 + 20 + #define SMRAM_GPA 0x1000000 21 + #define SMRAM_STAGE 0xfe 22 + 23 + #define SYNC_PORT 0xe 24 + 25 + #define STR(x) #x 26 + #define XSTR(s) STR(s) 27 + 28 + /* 29 + * SMI handler: runs in real-address mode. 30 + * Reports SMRAM_STAGE via port IO, then does RSM. 31 + */ 32 + static uint8_t smi_handler[] = { 33 + 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */ 34 + 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */ 35 + 0x0f, 0xaa, /* rsm */ 36 + }; 37 + 38 + static inline void sync_with_host(uint64_t phase) 39 + { 40 + asm volatile("in $" XSTR(SYNC_PORT) ", %%al \n" 41 + : "+a" (phase)); 42 + } 43 + 44 + static void l2_guest_code(void) 45 + { 46 + sync_with_host(1); 47 + 48 + /* After SMI+RSM with invalid controls, we should not reach here. */ 49 + vmcall(); 50 + } 51 + 52 + static void guest_code(struct vmx_pages *vmx_pages, 53 + struct hyperv_test_pages *hv_pages) 54 + { 55 + #define L2_GUEST_STACK_SIZE 64 56 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 57 + 58 + /* Set up Hyper-V enlightenments and eVMCS */ 59 + wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID); 60 + enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist); 61 + evmcs_enable(); 62 + 63 + GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); 64 + GUEST_ASSERT(load_evmcs(hv_pages)); 65 + prepare_vmcs(vmx_pages, l2_guest_code, 66 + &l2_guest_stack[L2_GUEST_STACK_SIZE]); 67 + 68 + GUEST_ASSERT(!vmlaunch()); 69 + 70 + /* L2 exits via vmcall if test fails */ 71 + sync_with_host(2); 72 + } 73 + 74 + int main(int argc, char *argv[]) 75 + { 76 + vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0; 77 + struct hyperv_test_pages *hv; 78 + struct hv_enlightened_vmcs *evmcs; 79 + struct kvm_vcpu *vcpu; 80 + struct kvm_vm *vm; 81 + struct kvm_regs regs; 82 + int stage_reported; 83 + 84 + TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX)); 85 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE)); 86 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)); 87 + TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM)); 88 + 89 + vm = vm_create_with_one_vcpu(&vcpu, guest_code); 90 + 91 + setup_smram(vm, vcpu, SMRAM_GPA, smi_handler, sizeof(smi_handler)); 92 + 93 + vcpu_set_hv_cpuid(vcpu); 94 + vcpu_enable_evmcs(vcpu); 95 + vcpu_alloc_vmx(vm, &vmx_pages_gva); 96 + hv = vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva); 97 + vcpu_args_set(vcpu, 2, vmx_pages_gva, hv_pages_gva); 98 + 99 + vcpu_run(vcpu); 100 + 101 + /* L2 is running and syncs with host. */ 102 + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); 103 + vcpu_regs_get(vcpu, &regs); 104 + stage_reported = regs.rax & 0xff; 105 + TEST_ASSERT(stage_reported == 1, 106 + "Expected stage 1, got %d", stage_reported); 107 + 108 + /* Inject SMI while L2 is running. */ 109 + inject_smi(vcpu); 110 + vcpu_run(vcpu); 111 + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); 112 + vcpu_regs_get(vcpu, &regs); 113 + stage_reported = regs.rax & 0xff; 114 + TEST_ASSERT(stage_reported == SMRAM_STAGE, 115 + "Expected SMM handler stage %#x, got %#x", 116 + SMRAM_STAGE, stage_reported); 117 + 118 + /* 119 + * Guest is now paused in the SMI handler, about to execute RSM. 120 + * Hack the eVMCS page to set up an invalid pin-based execution 121 + * control (PIN_BASED_VIRTUAL_NMIS without PIN_BASED_NMI_EXITING). 122 + */ 123 + evmcs = hv->enlightened_vmcs_hva; 124 + evmcs->pin_based_vm_exec_control |= PIN_BASED_VIRTUAL_NMIS; 125 + evmcs->hv_clean_fields = 0; 126 + 127 + /* 128 + * Trigger copy_enlightened_to_vmcs12() via KVM_GET_NESTED_STATE, 129 + * copying the invalid pin_based_vm_exec_control into cached_vmcs12. 130 + */ 131 + union { 132 + struct kvm_nested_state state; 133 + char state_[16384]; 134 + } nested_state_buf; 135 + 136 + memset(&nested_state_buf, 0, sizeof(nested_state_buf)); 137 + nested_state_buf.state.size = sizeof(nested_state_buf); 138 + vcpu_nested_state_get(vcpu, &nested_state_buf.state); 139 + 140 + /* 141 + * Resume the guest. The SMI handler executes RSM, which calls 142 + * vmx_leave_smm(). nested_vmx_check_controls() should detect 143 + * VIRTUAL_NMIS without NMI_EXITING and cause a triple fault. 144 + */ 145 + vcpu_run(vcpu); 146 + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN); 147 + 148 + kvm_vm_free(vm); 149 + return 0; 150 + }
+30
tools/testing/selftests/kvm/x86/sev_smoke_test.c
··· 13 13 #include "linux/psp-sev.h" 14 14 #include "sev.h" 15 15 16 + static void guest_sev_test_msr(uint32_t msr) 17 + { 18 + uint64_t val = rdmsr(msr); 19 + 20 + wrmsr(msr, val); 21 + GUEST_ASSERT(val == rdmsr(msr)); 22 + } 23 + 24 + #define guest_sev_test_reg(reg) \ 25 + do { \ 26 + uint64_t val = get_##reg(); \ 27 + \ 28 + set_##reg(val); \ 29 + GUEST_ASSERT(val == get_##reg()); \ 30 + } while (0) 31 + 32 + static void guest_sev_test_regs(void) 33 + { 34 + guest_sev_test_msr(MSR_EFER); 35 + guest_sev_test_reg(cr0); 36 + guest_sev_test_reg(cr3); 37 + guest_sev_test_reg(cr4); 38 + guest_sev_test_reg(cr8); 39 + } 16 40 17 41 #define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM) 18 42 ··· 48 24 GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED); 49 25 GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_SNP_ENABLED); 50 26 27 + guest_sev_test_regs(); 28 + 51 29 wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ); 52 30 vmgexit(); 53 31 } ··· 59 33 /* TODO: Check CPUID after GHCB-based hypercall support is added. */ 60 34 GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED); 61 35 GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED); 36 + 37 + guest_sev_test_regs(); 62 38 63 39 /* 64 40 * TODO: Add GHCB and ucall support for SEV-ES guests. For now, simply ··· 74 46 { 75 47 GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV)); 76 48 GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED); 49 + 50 + guest_sev_test_regs(); 77 51 78 52 GUEST_DONE(); 79 53 }
+2 -25
tools/testing/selftests/kvm/x86/smm_test.c
··· 14 14 #include "test_util.h" 15 15 16 16 #include "kvm_util.h" 17 + #include "smm.h" 17 18 18 19 #include "vmx.h" 19 20 #include "svm_util.h" 20 21 21 - #define SMRAM_SIZE 65536 22 - #define SMRAM_MEMSLOT ((1 << 16) | 1) 23 - #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE) 24 22 #define SMRAM_GPA 0x1000000 25 23 #define SMRAM_STAGE 0xfe 26 24 ··· 111 113 sync_with_host(DONE); 112 114 } 113 115 114 - void inject_smi(struct kvm_vcpu *vcpu) 115 - { 116 - struct kvm_vcpu_events events; 117 - 118 - vcpu_events_get(vcpu, &events); 119 - 120 - events.smi.pending = 1; 121 - events.flags |= KVM_VCPUEVENT_VALID_SMM; 122 - 123 - vcpu_events_set(vcpu, &events); 124 - } 125 - 126 116 int main(int argc, char *argv[]) 127 117 { 128 118 vm_vaddr_t nested_gva = 0; ··· 126 140 /* Create VM */ 127 141 vm = vm_create_with_one_vcpu(&vcpu, guest_code); 128 142 129 - vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA, 130 - SMRAM_MEMSLOT, SMRAM_PAGES, 0); 131 - TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT) 132 - == SMRAM_GPA, "could not allocate guest physical addresses?"); 133 - 134 - memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE); 135 - memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler, 136 - sizeof(smi_handler)); 137 - 138 - vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA); 143 + setup_smram(vm, vcpu, SMRAM_GPA, smi_handler, sizeof(smi_handler)); 139 144 140 145 if (kvm_has_cap(KVM_CAP_NESTED_STATE)) { 141 146 if (kvm_cpu_has(X86_FEATURE_SVM))
+55
tools/testing/selftests/net/rtnetlink.sh
··· 28 28 kci_test_fdb_get 29 29 kci_test_fdb_del 30 30 kci_test_neigh_get 31 + kci_test_neigh_update 31 32 kci_test_bridge_parent_id 32 33 kci_test_address_proto 33 34 kci_test_enslave_bonding ··· 1159 1158 fi 1160 1159 1161 1160 end_test "PASS: neigh get" 1161 + } 1162 + 1163 + kci_test_neigh_update() 1164 + { 1165 + dstip=10.0.2.4 1166 + dstmac=de:ad:be:ef:13:37 1167 + local ret=0 1168 + 1169 + for proxy in "" "proxy" ; do 1170 + # add a neighbour entry without any flags 1171 + run_cmd ip neigh add $proxy $dstip dev "$devdummy" lladdr $dstmac nud permanent 1172 + run_cmd_grep $dstip ip neigh show $proxy 1173 + run_cmd_grep_fail "$dstip dev $devdummy .*\(managed\|use\|router\|extern\)" ip neigh show $proxy 1174 + 1175 + # set the extern_learn flag, but no other 1176 + run_cmd ip neigh change $proxy $dstip dev "$devdummy" extern_learn 1177 + run_cmd_grep "$dstip dev $devdummy .* extern_learn" ip neigh show $proxy 1178 + run_cmd_grep_fail "$dstip dev $devdummy .* \(managed\|use\|router\)" ip neigh show $proxy 1179 + 1180 + # flags are reset when not provided 1181 + run_cmd ip neigh change $proxy $dstip dev "$devdummy" 1182 + run_cmd_grep $dstip ip neigh show $proxy 1183 + run_cmd_grep_fail "$dstip dev $devdummy .* extern_learn" ip neigh show $proxy 1184 + 1185 + # add a protocol 1186 + run_cmd ip neigh change $proxy $dstip dev "$devdummy" protocol boot 1187 + run_cmd_grep "$dstip dev $devdummy .* proto boot" ip neigh show $proxy 1188 + 1189 + # protocol is retained when not provided 1190 + run_cmd ip neigh change $proxy $dstip dev "$devdummy" 1191 + run_cmd_grep "$dstip dev $devdummy .* proto boot" ip neigh show $proxy 1192 + 1193 + # change protocol 1194 + run_cmd ip neigh change $proxy $dstip dev "$devdummy" protocol static 1195 + run_cmd_grep "$dstip dev $devdummy .* proto static" ip neigh show $proxy 1196 + 1197 + # also check an extended flag for non-proxy neighs 1198 + if [ "$proxy" = "" ]; then 1199 + run_cmd ip neigh change $proxy $dstip dev "$devdummy" managed 1200 + run_cmd_grep "$dstip dev $devdummy managed" ip neigh show $proxy 1201 + 1202 + run_cmd ip neigh change $proxy $dstip dev "$devdummy" lladdr $dstmac 1203 + run_cmd_grep_fail "$dstip dev $devdummy managed" ip neigh show $proxy 1204 + fi 1205 + 1206 + run_cmd ip neigh del $proxy $dstip dev "$devdummy" 1207 + done 1208 + 1209 + if [ $ret -ne 0 ];then 1210 + end_test "FAIL: neigh update" 1211 + return 1 1212 + fi 1213 + 1214 + end_test "PASS: neigh update" 1162 1215 } 1163 1216 1164 1217 kci_test_bridge_parent_id()
+2 -2
tools/testing/selftests/powerpc/copyloops/.gitignore
··· 2 2 copyuser_64_t0 3 3 copyuser_64_t1 4 4 copyuser_64_t2 5 - copyuser_p7_t0 6 - copyuser_p7_t1 5 + copyuser_p7 6 + copyuser_p7_vmx 7 7 memcpy_64_t0 8 8 memcpy_64_t1 9 9 memcpy_64_t2
+8 -3
tools/testing/selftests/powerpc/copyloops/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \ 3 - copyuser_p7_t0 copyuser_p7_t1 \ 3 + copyuser_p7 copyuser_p7_vmx \ 4 4 memcpy_64_t0 memcpy_64_t1 memcpy_64_t2 \ 5 5 memcpy_p7_t0 memcpy_p7_t1 copy_mc_64 \ 6 6 copyuser_64_exc_t0 copyuser_64_exc_t1 copyuser_64_exc_t2 \ ··· 28 28 -D SELFTEST_CASE=$(subst copyuser_64_t,,$(notdir $@)) \ 29 29 -o $@ $^ 30 30 31 - $(OUTPUT)/copyuser_p7_t%: copyuser_power7.S $(EXTRA_SOURCES) 31 + $(OUTPUT)/copyuser_p7: copyuser_power7.S $(EXTRA_SOURCES) 32 32 $(CC) $(CPPFLAGS) $(CFLAGS) \ 33 33 -D COPY_LOOP=test___copy_tofrom_user_power7 \ 34 - -D SELFTEST_CASE=$(subst copyuser_p7_t,,$(notdir $@)) \ 34 + -o $@ $^ 35 + 36 + $(OUTPUT)/copyuser_p7_vmx: copyuser_power7.S $(EXTRA_SOURCES) ../utils.c 37 + $(CC) $(CPPFLAGS) $(CFLAGS) \ 38 + -D COPY_LOOP=test___copy_tofrom_user_power7_vmx \ 39 + -D VMX_TEST \ 35 40 -o $@ $^ 36 41 37 42 # Strictly speaking, we only need the memcpy_64 test cases for big-endian
-8
tools/testing/selftests/powerpc/copyloops/stubs.S
··· 1 1 #include <asm/ppc_asm.h> 2 2 3 - FUNC_START(enter_vmx_usercopy) 4 - li r3,1 5 - blr 6 - 7 - FUNC_START(exit_vmx_usercopy) 8 - li r3,0 9 - blr 10 - 11 3 FUNC_START(enter_vmx_ops) 12 4 li r3,1 13 5 blr
+14 -1
tools/testing/selftests/powerpc/copyloops/validate.c
··· 12 12 #define BUFLEN (MAX_LEN+MAX_OFFSET+2*MIN_REDZONE) 13 13 #define POISON 0xa5 14 14 15 + #ifdef VMX_TEST 16 + #define VMX_COPY_THRESHOLD 3328 17 + #endif 18 + 15 19 unsigned long COPY_LOOP(void *to, const void *from, unsigned long size); 16 20 17 21 static void do_one(char *src, char *dst, unsigned long src_off, ··· 85 81 /* Fill with sequential bytes */ 86 82 for (i = 0; i < BUFLEN; i++) 87 83 fill[i] = i & 0xff; 88 - 84 + #ifdef VMX_TEST 85 + /* Force sizes above kernel VMX threshold (3328) */ 86 + for (len = VMX_COPY_THRESHOLD + 1; len < MAX_LEN; len++) { 87 + #else 89 88 for (len = 1; len < MAX_LEN; len++) { 89 + #endif 90 90 for (src_off = 0; src_off < MAX_OFFSET; src_off++) { 91 91 for (dst_off = 0; dst_off < MAX_OFFSET; dst_off++) { 92 92 do_one(src, dst, src_off, dst_off, len, ··· 104 96 105 97 int main(void) 106 98 { 99 + #ifdef VMX_TEST 100 + /* Skip if Altivec not present */ 101 + SKIP_IF_MSG(!have_hwcap(PPC_FEATURE_HAS_ALTIVEC), "ALTIVEC not supported"); 102 + #endif 103 + 107 104 return test_harness(test_copy_loop, str(COPY_LOOP)); 108 105 }
+2 -2
tools/testing/selftests/sched_ext/util.c
··· 60 60 char buf[64]; 61 61 int ret; 62 62 63 - ret = sprintf(buf, "%lu", val); 63 + ret = sprintf(buf, "%ld", val); 64 64 if (ret < 0) 65 65 return ret; 66 66 67 - if (write_text(path, buf, sizeof(buf)) <= 0) 67 + if (write_text(path, buf, ret) <= 0) 68 68 return -1; 69 69 70 70 return 0;
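The second hunk matters because sprintf() already returns the exact formatted length; passing sizeof(buf) to write_text() would push the number plus dozens of bytes of indeterminate padding into the file. A quick standalone check:

#include <stdio.h>

int main(void)
{
        char buf[64];
        int len = sprintf(buf, "%ld", -42L);	/* "%ld" matches the signed long */

        /* len is 3 here ("-42"); sizeof(buf) would claim all 64 bytes. */
        printf("len=%d sizeof=%zu\n", len, sizeof(buf));
        return 0;
}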
+1 -1
virt/kvm/binary_stats.c
··· 50 50 * Return: the number of bytes that has been successfully read 51 51 */ 52 52 ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, 53 - const struct _kvm_stats_desc *desc, 53 + const struct kvm_stats_desc *desc, 54 54 void *stats, size_t size_stats, 55 55 char __user *user_buffer, size_t size, loff_t *offset) 56 56 {
+10 -10
virt/kvm/kvm_main.c
··· 973 973 kvm_free_memslot(kvm, memslot); 974 974 } 975 975 976 - static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc) 976 + static umode_t kvm_stats_debugfs_mode(const struct kvm_stats_desc *desc) 977 977 { 978 - switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) { 978 + switch (desc->flags & KVM_STATS_TYPE_MASK) { 979 979 case KVM_STATS_TYPE_INSTANT: 980 980 return 0444; 981 981 case KVM_STATS_TYPE_CUMULATIVE: ··· 1010 1010 struct dentry *dent; 1011 1011 char dir_name[ITOA_MAX_LEN * 2]; 1012 1012 struct kvm_stat_data *stat_data; 1013 - const struct _kvm_stats_desc *pdesc; 1013 + const struct kvm_stats_desc *pdesc; 1014 1014 int i, ret = -ENOMEM; 1015 1015 int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc + 1016 1016 kvm_vcpu_stats_header.num_desc; ··· 6171 6171 switch (stat_data->kind) { 6172 6172 case KVM_STAT_VM: 6173 6173 r = kvm_get_stat_per_vm(stat_data->kvm, 6174 - stat_data->desc->desc.offset, val); 6174 + stat_data->desc->offset, val); 6175 6175 break; 6176 6176 case KVM_STAT_VCPU: 6177 6177 r = kvm_get_stat_per_vcpu(stat_data->kvm, 6178 - stat_data->desc->desc.offset, val); 6178 + stat_data->desc->offset, val); 6179 6179 break; 6180 6180 } 6181 6181 ··· 6193 6193 switch (stat_data->kind) { 6194 6194 case KVM_STAT_VM: 6195 6195 r = kvm_clear_stat_per_vm(stat_data->kvm, 6196 - stat_data->desc->desc.offset); 6196 + stat_data->desc->offset); 6197 6197 break; 6198 6198 case KVM_STAT_VCPU: 6199 6199 r = kvm_clear_stat_per_vcpu(stat_data->kvm, 6200 - stat_data->desc->desc.offset); 6200 + stat_data->desc->offset); 6201 6201 break; 6202 6202 } 6203 6203 ··· 6345 6345 static void kvm_init_debug(void) 6346 6346 { 6347 6347 const struct file_operations *fops; 6348 - const struct _kvm_stats_desc *pdesc; 6348 + const struct kvm_stats_desc *pdesc; 6349 6349 int i; 6350 6350 6351 6351 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL); ··· 6358 6358 fops = &vm_stat_readonly_fops; 6359 6359 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 6360 6360 kvm_debugfs_dir, 6361 - (void *)(long)pdesc->desc.offset, fops); 6361 + (void *)(long)pdesc->offset, fops); 6362 6362 } 6363 6363 6364 6364 for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) { ··· 6369 6369 fops = &vcpu_stat_readonly_fops; 6370 6370 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc), 6371 6371 kvm_debugfs_dir, 6372 - (void *)(long)pdesc->desc.offset, fops); 6372 + (void *)(long)pdesc->offset, fops); 6373 6373 } 6374 6374 } 6375 6375