Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.17-rc2).

No conflicts.

Adjacent changes:

drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
d7a276a5768f ("net: stmmac: rk: convert to suspend()/resume() methods")
de1e963ad064 ("net: stmmac: rk: put the PHY clock on remove")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+5801 -3226
+7
Documentation/ABI/testing/sysfs-firmware-efi
···
  		Table version 2 on Dell EMC PowerEdge systems in binary format
  Users:	It is used by Dell EMC OpenManage Server Administrator tool to
  		populate BIOS setup page.
+
+ What:		/sys/firmware/efi/ovmf_debug_log
+ Date:		July 2025
+ Contact:	Gerd Hoffmann <kraxel@redhat.com>, linux-efi@vger.kernel.org
+ Description:	Displays the content of the OVMF debug log buffer. The file is
+ 		only present in case the firmware supports logging to a memory
+ 		buffer.
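As a hedged illustration of consuming the new attribute (not part of the patch): the log is a plain sysfs file, so a minimal userspace dump in C looks like this. Error handling is illustrative; the file is simply absent when the firmware lacks the memory log buffer.

    #include <stdio.h>
    #include <stdlib.h>

    /* Dump the OVMF debug log exposed by the new sysfs attribute. */
    int main(void)
    {
        FILE *f = fopen("/sys/firmware/efi/ovmf_debug_log", "r");
        char buf[4096];
        size_t n;

        if (!f) {
            /* Missing file means the firmware does not support the buffer. */
            perror("ovmf_debug_log not available");
            return EXIT_FAILURE;
        }
        while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
            fwrite(buf, 1, n, stdout);
        fclose(f);
        return EXIT_SUCCESS;
    }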
+7 -7
Documentation/devicetree/bindings/mailbox/allwinner,sun6i-a31-msgbox.yaml
···
      #include <dt-bindings/reset/sun8i-h3-ccu.h>

      msgbox: mailbox@1c17000 {
-         compatible = "allwinner,sun8i-h3-msgbox",
-                      "allwinner,sun6i-a31-msgbox";
-         reg = <0x01c17000 0x1000>;
-         clocks = <&ccu CLK_BUS_MSGBOX>;
-         resets = <&ccu RST_BUS_MSGBOX>;
-         interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
-         #mbox-cells = <1>;
+         compatible = "allwinner,sun8i-h3-msgbox",
+                      "allwinner,sun6i-a31-msgbox";
+         reg = <0x01c17000 0x1000>;
+         clocks = <&ccu CLK_BUS_MSGBOX>;
+         resets = <&ccu RST_BUS_MSGBOX>;
+         interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+         #mbox-cells = <1>;
      };

      ...
+5 -5
Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
···
      maxItems: 1

    interrupts:
-     minItems: 3
+     maxItems: 3
      description:
        Contains the interrupt information corresponding to each of the 3 links
        of MHU.
···
  examples:
    - |
      mailbox@c883c404 {
-         compatible = "amlogic,meson-gxbb-mhu";
-         reg = <0xc883c404 0x4c>;
-         interrupts = <208>, <209>, <210>;
-         #mbox-cells = <1>;
+         compatible = "amlogic,meson-gxbb-mhu";
+         reg = <0xc883c404 0x4c>;
+         interrupts = <208>, <209>, <210>;
+         #mbox-cells = <1>;
      };
+8 -8
Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
···

  examples:
    - |
-     mailbox@77408000 {
-         compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
-         reg = <0x77408000 0x4000>;
-         interrupts = <1 583 4>, <1 584 4>, <1 585 4>, <1 586 4>;
-         interrupt-names = "send-empty", "send-not-empty",
-                           "recv-empty", "recv-not-empty";
-         #mbox-cells = <0>;
-     };
+     mailbox@77408000 {
+         compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
+         reg = <0x77408000 0x4000>;
+         interrupts = <1 583 4>, <1 584 4>, <1 585 4>, <1 586 4>;
+         interrupt-names = "send-empty", "send-not-empty",
+                           "recv-empty", "recv-not-empty";
+         #mbox-cells = <0>;
+     };
+68
Documentation/devicetree/bindings/mailbox/aspeed,ast2700-mailbox.yaml
···
+ # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+ %YAML 1.2
+ ---
+ $id: http://devicetree.org/schemas/mailbox/aspeed,ast2700-mailbox.yaml#
+ $schema: http://devicetree.org/meta-schemas/core.yaml#
+
+ title: ASPEED AST2700 mailbox controller
+
+ maintainers:
+   - Jammy Huang <jammy_huang@aspeedtech.com>
+
+ description: >
+   ASPEED AST2700 has multiple processors that need to communicate with each
+   other. The mailbox controller provides a way for these processors to send
+   messages to each other. It is a hardware-based inter-processor communication
+   mechanism that allows processors to send and receive messages through
+   dedicated channels.
+
+   The mailbox's tx/rx are independent, meaning that one processor can send a
+   message while another processor is receiving a message simultaneously.
+   There are 4 channels available for both tx and rx operations. Each channel
+   has a FIFO buffer that can hold messages of a fixed size (32 bytes in this
+   case).
+
+   The mailbox controller also supports interrupt generation, allowing
+   processors to notify each other when a message is available or when an event
+   occurs.
+
+ properties:
+   compatible:
+     const: aspeed,ast2700-mailbox
+
+   reg:
+     items:
+       - description: TX control register
+       - description: RX control register
+
+   reg-names:
+     items:
+       - const: tx
+       - const: rx
+
+   interrupts:
+     maxItems: 1
+
+   "#mbox-cells":
+     const: 1
+
+ required:
+   - compatible
+   - reg
+   - reg-names
+   - interrupts
+   - "#mbox-cells"
+
+ additionalProperties: false
+
+ examples:
+   - |
+     #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+     mailbox@12c1c200 {
+         compatible = "aspeed,ast2700-mailbox";
+         reg = <0x12c1c200 0x100>, <0x12c1c300 0x100>;
+         reg-names = "tx", "rx";
+         interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+         #mbox-cells = <1>;
+     };
+64
Documentation/devicetree/bindings/mailbox/brcm,bcm74110-mbox.yaml
···
+ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ %YAML 1.2
+ ---
+ $id: http://devicetree.org/schemas/mailbox/brcm,bcm74110-mbox.yaml#
+ $schema: http://devicetree.org/meta-schemas/core.yaml#
+
+ title: Broadcom BCM74110 Mailbox
+
+ maintainers:
+   - Justin Chen <justin.chen@broadcom.com>
+   - Florian Fainelli <florian.fainelli@broadcom.com>
+
+ description: Broadcom mailbox hardware first introduced with 74110
+
+ properties:
+   compatible:
+     enum:
+       - brcm,bcm74110-mbox
+
+   reg:
+     maxItems: 1
+
+   interrupts:
+     items:
+       - description: RX doorbell and watermark interrupts
+       - description: TX doorbell and watermark interrupts
+
+   "#mbox-cells":
+     const: 2
+     description:
+       The first cell is channel type and second cell is shared memory slot
+
+   brcm,rx:
+     $ref: /schemas/types.yaml#/definitions/uint32
+     description: RX Mailbox number
+
+   brcm,tx:
+     $ref: /schemas/types.yaml#/definitions/uint32
+     description: TX Mailbox number
+
+ required:
+   - compatible
+   - reg
+   - interrupts
+   - "#mbox-cells"
+   - brcm,rx
+   - brcm,tx
+
+ additionalProperties: false
+
+ examples:
+   - |
+     #include <dt-bindings/interrupt-controller/irq.h>
+     #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+     mailbox@a552000 {
+         compatible = "brcm,bcm74110-mbox";
+         reg = <0xa552000 0x1104>;
+         interrupts = <GIC_SPI 0x67 IRQ_TYPE_LEVEL_HIGH>,
+                      <GIC_SPI 0x66 IRQ_TYPE_LEVEL_HIGH>;
+         #mbox-cells = <0x2>;
+         brcm,rx = <0x7>;
+         brcm,tx = <0x6>;
+     };
+1 -8
Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.yaml
···
    <dt-bindings/mailbox/tegra186-hsp.h>

  properties:
-   $nodename:
-     pattern: "^hsp@[0-9a-f]+$"
-
    compatible:
      oneOf:
        - enum:
···
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    #include <dt-bindings/mailbox/tegra186-hsp.h>

-   hsp_top0: hsp@3c00000 {
+   mailbox@3c00000 {
        compatible = "nvidia,tegra186-hsp";
        reg = <0x03c00000 0xa0000>;
        interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
        interrupt-names = "doorbell";
        #mbox-cells = <2>;
-   };
-
-   client {
-       mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_CCPLEX>;
    };
+1 -8
Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
···
  # Example apcs with msm8996
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>
-   apcs_glb: mailbox@9820000 {
+   mailbox@9820000 {
        compatible = "qcom,msm8996-apcs-hmss-global";
        reg = <0x9820000 0x1000>;

        #mbox-cells = <1>;
        #clock-cells = <0>;
-   };
-
-   rpm-glink {
-       compatible = "qcom,glink-rpm";
-       interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
-       qcom,rpm-msg-ram = <&rpm_msg_ram>;
-       mboxes = <&apcs_glb 0>;
    };

  # Example apcs with qcs404
+1
Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
···
  compatible:
    items:
      - enum:
+         - qcom,milos-ipcc
          - qcom,qcs8300-ipcc
          - qcom,qdu1000-ipcc
          - qcom,sa8255p-ipcc
+3 -7
Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml
···
  - |
    /* OMAP4 */
    #include <dt-bindings/interrupt-controller/arm-gic.h>
-   mailbox: mailbox@4a0f4000 {
+   mailbox@4a0f4000 {
        compatible = "ti,omap4-mailbox";
        reg = <0x4a0f4000 0x200>;
        interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
···
        };
    };

-   dsp {
-       mboxes = <&mailbox &mbox_dsp>;
-   };
-
  - |
    /* AM33xx */
-   mailbox1: mailbox@480c8000 {
+   mailbox@480c8000 {
        compatible = "ti,omap4-mailbox";
        reg = <0x480c8000 0x200>;
        interrupts = <77>;
···

  - |
    /* AM65x */
-   mailbox0_cluster0: mailbox@31f80000 {
+   mailbox@31f80000 {
        compatible = "ti,am654-mailbox";
        reg = <0x31f80000 0x200>;
        #mbox-cells = <1>;
+9 -9
Documentation/devicetree/bindings/mailbox/ti,secure-proxy.yaml
···
      - const: scfg

  reg:
-   minItems: 3
+   maxItems: 3

  interrupt-names:
    minItems: 1
···
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>
    secure_proxy: mailbox@32c00000 {
-       compatible = "ti,am654-secure-proxy";
-       #mbox-cells = <1>;
-       reg-names = "target_data", "rt", "scfg";
-       reg = <0x32c00000 0x100000>,
-             <0x32400000 0x100000>,
-             <0x32800000 0x100000>;
-       interrupt-names = "rx_011";
-       interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+       compatible = "ti,am654-secure-proxy";
+       #mbox-cells = <1>;
+       reg-names = "target_data", "rt", "scfg";
+       reg = <0x32c00000 0x100000>,
+             <0x32400000 0x100000>,
+             <0x32800000 0x100000>;
+       interrupt-names = "rx_011";
+       interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
    };
+4 -2
Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml
···
    items:
      - description: GMAC main clock
      - description: Peripheral registers interface clock
+     - description: APB glue registers interface clock

  clock-names:
    items:
      - const: stmmaceth
      - const: pclk
+     - const: apb

  interrupts:
    items:
···
        compatible = "thead,th1520-gmac", "snps,dwmac-3.70a";
        reg = <0xe7070000 0x2000>, <0xec003000 0x1000>;
        reg-names = "dwmac", "apb";
-       clocks = <&clk 1>, <&clk 2>;
-       clock-names = "stmmaceth", "pclk";
+       clocks = <&clk 1>, <&clk 2>, <&clk 3>;
+       clock-names = "stmmaceth", "pclk", "apb";
        interrupts = <66>;
        interrupt-names = "macirq";
        phy-mode = "rgmii-id";
+1 -1
Documentation/networking/ip-sysctl.rst
···
  	A negative value means the networking namespace does not own its
  	hash buckets and shares the initial networking namespace's one.

- udp_child_ehash_entries - INTEGER
+ udp_child_hash_entries - INTEGER
  	Control the number of hash buckets for UDP sockets in the child
  	networking namespace, which must be set before clone() or unshare().
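A hedged sketch of the documented ordering constraint (the bucket count 1024 is an assumed example value): the sysctl must be written in the parent namespace before the new network namespace is created, because the value is latched at namespace creation.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Size the child UDP hash table *before* creating the netns. */
        int fd = open("/proc/sys/net/ipv4/udp_child_hash_entries", O_WRONLY);

        if (fd < 0 || write(fd, "1024", 4) != 4) {
            perror("udp_child_hash_entries");
            return 1;
        }
        close(fd);

        if (unshare(CLONE_NEWNET)) {
            perror("unshare");
            return 1;
        }
        /* Sockets created from here on use the 1024-bucket table. */
        return 0;
    }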
+12 -6
MAINTAINERS
···
  HUNG TASK DETECTOR
  M:	Andrew Morton <akpm@linux-foundation.org>
  R:	Lance Yang <lance.yang@linux.dev>
+ R:	Masami Hiramatsu <mhiramat@kernel.org>
  L:	linux-kernel@vger.kernel.org
  S:	Maintained
  F:	include/linux/hung_task.h
···
  F:	drivers/cpufreq/intel_pstate.c

  INTEL PTP DFL ToD DRIVER
- M:	Tianfei Zhang <tianfei.zhang@intel.com>
  L:	linux-fpga@vger.kernel.org
  L:	netdev@vger.kernel.org
- S:	Maintained
+ S:	Orphan
  F:	drivers/ptp/ptp_dfl_tod.c

  INTEL QUADRATURE ENCODER PERIPHERAL DRIVER
···
  F:	drivers/platform/x86/intel/wmi/thunderbolt.c

  INTEL WWAN IOSM DRIVER
- M:	M Chetan Kumar <m.chetan.kumar@intel.com>
  L:	netdev@vger.kernel.org
- S:	Maintained
+ S:	Orphan
  F:	drivers/net/wwan/iosm/

  INTEL(R) FLEXIBLE RETURN AND EVENT DELIVERY
···
  KPROBES
  M:	Naveen N Rao <naveen@kernel.org>
- M:	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  M:	"David S. Miller" <davem@davemloft.net>
  M:	Masami Hiramatsu <mhiramat@kernel.org>
  L:	linux-kernel@vger.kernel.org
···
  M:	Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
  R:	Chiranjeevi Rapolu <chiranjeevi.rapolu@linux.intel.com>
  R:	Liu Haijun <haijun.liu@mediatek.com>
- R:	M Chetan Kumar <m.chetan.kumar@linux.intel.com>
  R:	Ricardo Martinez <ricardo.martinez@linux.intel.com>
  L:	netdev@vger.kernel.org
  S:	Supported
···
  NETFILTER
  M:	Pablo Neira Ayuso <pablo@netfilter.org>
  M:	Jozsef Kadlecsik <kadlec@netfilter.org>
+ M:	Florian Westphal <fw@strlen.de>
  L:	netfilter-devel@vger.kernel.org
  L:	coreteam@netfilter.org
  S:	Maintained
···
  S:	Maintained
  F:	Documentation/devicetree/bindings/net/renesas,rzv2h-gbeth.yaml
  F:	drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+
+ RENESAS RZ/V2H(P) RSPI DRIVER
+ M:	Fabrizio Castro <fabrizio.castro.jz@renesas.com>
+ L:	linux-spi@vger.kernel.org
+ L:	linux-renesas-soc@vger.kernel.org
+ S:	Maintained
+ F:	Documentation/devicetree/bindings/spi/renesas,rzv2h-rspi.yaml
+ F:	drivers/spi/spi-rzv2h-rspi.c

  RENESAS RZ/V2H(P) USB2PHY PORT RESET DRIVER
  M:	Fabrizio Castro <fabrizio.castro.jz@renesas.com>
+2 -2
Makefile
···
  # SPDX-License-Identifier: GPL-2.0
  VERSION = 6
- PATCHLEVEL = 16
+ PATCHLEVEL = 17
  SUBLEVEL = 0
- EXTRAVERSION =
+ EXTRAVERSION = -rc1
  NAME = Baby Opossum Posse

  # *DOCUMENTATION*
+6 -3
arch/arm/boot/dts/broadcom/bcm7445.dtsi
···
    ranges = <0x0 0x0 0x80000>;

    memc-ddr@2000 {
-       compatible = "brcm,brcmstb-memc-ddr";
+       compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x",
+                    "brcm,brcmstb-memc-ddr";
        reg = <0x2000 0x800>;
    };
···
    ranges = <0x0 0x80000 0x80000>;

    memc-ddr@2000 {
-       compatible = "brcm,brcmstb-memc-ddr";
+       compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x",
+                    "brcm,brcmstb-memc-ddr";
        reg = <0x2000 0x800>;
    };
···
    ranges = <0x0 0x100000 0x80000>;

    memc-ddr@2000 {
-       compatible = "brcm,brcmstb-memc-ddr";
+       compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x",
+                    "brcm,brcmstb-memc-ddr";
        reg = <0x2000 0x800>;
    };
+2 -2
arch/arm/common/sa1111.c
···
    sachip->gc.direction_input = sa1111_gpio_direction_input;
    sachip->gc.direction_output = sa1111_gpio_direction_output;
    sachip->gc.get = sa1111_gpio_get;
-   sachip->gc.set_rv = sa1111_gpio_set;
-   sachip->gc.set_multiple_rv = sa1111_gpio_set_multiple;
+   sachip->gc.set = sa1111_gpio_set;
+   sachip->gc.set_multiple = sa1111_gpio_set_multiple;
    sachip->gc.to_irq = sa1111_gpio_to_irq;
    sachip->gc.base = -1;
    sachip->gc.ngpio = 18;
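This hunk and the scoop, samsung, assabet, neponset, orion, mips, and powerpc hunks below are all the same mechanical conversion: the interim int-returning gpio_chip callbacks (set_rv, set_multiple_rv) take over the plain set/set_multiple names now that the old void-returning variants are gone. As a hedged sketch of what a converted driver callback looks like (hypothetical mydrv names and MYDRV_OUT_REG register; the signature matches what these assignments imply):

    #include <linux/bits.h>
    #include <linux/gpio/driver.h>
    #include <linux/io.h>

    struct mydrv {
        void __iomem *base;
        struct gpio_chip gc;
    };

    #define MYDRV_OUT_REG 0x04  /* hypothetical output data register */

    /* The setter now reports failure instead of returning void; the gpiolib
     * core can propagate the error to gpiod_set_value() callers. */
    static int mydrv_gpio_set(struct gpio_chip *gc, unsigned int offset,
                              int value)
    {
        struct mydrv *priv = gpiochip_get_data(gc);
        u32 reg;

        if (offset >= gc->ngpio)
            return -EINVAL;

        reg = readl(priv->base + MYDRV_OUT_REG);
        if (value)
            reg |= BIT(offset);
        else
            reg &= ~BIT(offset);
        writel(reg, priv->base + MYDRV_OUT_REG);
        return 0;
    }

    static void mydrv_setup(struct mydrv *priv)
    {
        priv->gc.set = mydrv_gpio_set;  /* was: priv->gc.set_rv = ... */
    }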
+1 -1
arch/arm/common/scoop.c
···
    devptr->gpio.label = dev_name(&pdev->dev);
    devptr->gpio.base = inf->gpio_base;
    devptr->gpio.ngpio = 12; /* PA11 = 0, PA12 = 1, etc. up to PA22 = 11 */
-   devptr->gpio.set_rv = scoop_gpio_set;
+   devptr->gpio.set = scoop_gpio_set;
    devptr->gpio.get = scoop_gpio_get;
    devptr->gpio.direction_input = scoop_gpio_direction_input;
    devptr->gpio.direction_output = scoop_gpio_direction_output;
+1 -1
arch/arm/mach-s3c/gpio-samsung.c
···
    if (!gc->direction_output)
        gc->direction_output = samsung_gpiolib_2bit_output;
    if (!gc->set)
-       gc->set_rv = samsung_gpiolib_set;
+       gc->set = samsung_gpiolib_set;
    if (!gc->get)
        gc->get = samsung_gpiolib_get;
+1 -1
arch/arm/mach-sa1100/assabet.c
···
  {
    unsigned long m = mask, v = val;

-   assabet_bcr_gc->set_multiple_rv(assabet_bcr_gc, &m, &v);
+   assabet_bcr_gc->set_multiple(assabet_bcr_gc, &m, &v);
  }
  EXPORT_SYMBOL(ASSABET_BCR_frob);
+1 -1
arch/arm/mach-sa1100/neponset.c
···
    unsigned long m = mask, v = val;

    if (nep)
-       n->gpio[0]->set_multiple_rv(n->gpio[0], &m, &v);
+       n->gpio[0]->set_multiple(n->gpio[0], &m, &v);
    else
        WARN(1, "nep unset\n");
  }
+1 -1
arch/arm/plat-orion/gpio.c
···
    ochip->chip.direction_input = orion_gpio_direction_input;
    ochip->chip.get = orion_gpio_get;
    ochip->chip.direction_output = orion_gpio_direction_output;
-   ochip->chip.set_rv = orion_gpio_set;
+   ochip->chip.set = orion_gpio_set;
    ochip->chip.to_irq = orion_gpio_to_irq;
    ochip->chip.base = gpio_base;
    ochip->chip.ngpio = ngpio;
-3
arch/arm64/boot/dts/nvidia/tegra264.dtsi
···
    interrupt-parent = <&gic>;
    #address-cells = <2>;
    #size-cells = <2>;
-   numa-node-id = <0>;

    reserved-memory {
        #address-cells = <2>;
···
        status = "okay";

        enable-method = "psci";
-       numa-node-id = <0>;

        i-cache-size = <65536>;
        i-cache-line-size = <64>;
···
        status = "okay";

        enable-method = "psci";
-       numa-node-id = <0>;

        i-cache-size = <65536>;
        i-cache-line-size = <64>;
+1 -1
arch/m68k/coldfire/gpio.c
···
    .direction_input = mcfgpio_direction_input,
    .direction_output = mcfgpio_direction_output,
    .get = mcfgpio_get_value,
-   .set_rv = mcfgpio_set_value,
+   .set = mcfgpio_set_value,
    .to_irq = mcfgpio_to_irq,
    .base = 0,
    .ngpio = MCFGPIO_PIN_MAX,
+3 -3
arch/mips/alchemy/common/gpiolib.c
···
    .direction_input = gpio1_direction_input,
    .direction_output = gpio1_direction_output,
    .get = gpio1_get,
-   .set_rv = gpio1_set,
+   .set = gpio1_set,
    .to_irq = gpio1_to_irq,
    .base = ALCHEMY_GPIO1_BASE,
    .ngpio = ALCHEMY_GPIO1_NUM,
···
    .direction_input = gpio2_direction_input,
    .direction_output = gpio2_direction_output,
    .get = gpio2_get,
-   .set_rv = gpio2_set,
+   .set = gpio2_set,
    .to_irq = gpio2_to_irq,
    .base = ALCHEMY_GPIO2_BASE,
    .ngpio = ALCHEMY_GPIO2_NUM,
···
    .direction_input = alchemy_gpic_dir_input,
    .direction_output = alchemy_gpic_dir_output,
    .get = alchemy_gpic_get,
-   .set_rv = alchemy_gpic_set,
+   .set = alchemy_gpic_set,
    .to_irq = alchemy_gpic_gpio_to_irq,
    .base = AU1300_GPIO_BASE,
    .ngpio = AU1300_GPIO_NUM,
+1 -1
arch/mips/bcm63xx/gpio.c
···
    .direction_input = bcm63xx_gpio_direction_input,
    .direction_output = bcm63xx_gpio_direction_output,
    .get = bcm63xx_gpio_get,
-   .set_rv = bcm63xx_gpio_set,
+   .set = bcm63xx_gpio_set,
    .base = 0,
  };
+1 -1
arch/mips/kernel/gpio_txx9.c
···

  static struct gpio_chip txx9_gpio_chip = {
    .get = txx9_gpio_get,
-   .set_rv = txx9_gpio_set,
+   .set = txx9_gpio_set,
    .direction_input = txx9_gpio_dir_in,
    .direction_output = txx9_gpio_dir_out,
    .label = "TXx9",
+1 -1
arch/mips/rb532/gpio.c
···
    .direction_input = rb532_gpio_direction_input,
    .direction_output = rb532_gpio_direction_output,
    .get = rb532_gpio_get,
-   .set_rv = rb532_gpio_set,
+   .set = rb532_gpio_set,
    .to_irq = rb532_gpio_to_irq,
    .base = 0,
    .ngpio = 32,
+1 -1
arch/mips/txx9/generic/setup.c
···
    if (!iocled->mmioaddr)
        goto out_free;
    iocled->chip.get = txx9_iocled_get;
-   iocled->chip.set_rv = txx9_iocled_set;
+   iocled->chip.set = txx9_iocled_set;
    iocled->chip.direction_input = txx9_iocled_dir_in;
    iocled->chip.direction_output = txx9_iocled_dir_out;
    iocled->chip.label = "iocled";
+1 -1
arch/powerpc/platforms/44x/gpio.c
···
    gc->direction_input = ppc4xx_gpio_dir_in;
    gc->direction_output = ppc4xx_gpio_dir_out;
    gc->get = ppc4xx_gpio_get;
-   gc->set_rv = ppc4xx_gpio_set;
+   gc->set = ppc4xx_gpio_set;

    ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc);
    if (ret)
+1 -1
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
···
    gpt->gc.direction_input = mpc52xx_gpt_gpio_dir_in;
    gpt->gc.direction_output = mpc52xx_gpt_gpio_dir_out;
    gpt->gc.get = mpc52xx_gpt_gpio_get;
-   gpt->gc.set_rv = mpc52xx_gpt_gpio_set;
+   gpt->gc.set = mpc52xx_gpt_gpio_set;
    gpt->gc.base = -1;
    gpt->gc.parent = gpt->dev;
+1 -1
arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
···
    gc->can_sleep = 1;
    gc->ngpio = MCU_NUM_GPIO;
    gc->base = -1;
-   gc->set_rv = mcu_gpio_set;
+   gc->set = mcu_gpio_set;
    gc->direction_output = mcu_gpio_dir_out;
    gc->parent = dev;
+2 -2
arch/powerpc/platforms/8xx/cpm1.c
···
    gc->direction_input = cpm1_gpio16_dir_in;
    gc->direction_output = cpm1_gpio16_dir_out;
    gc->get = cpm1_gpio16_get;
-   gc->set_rv = cpm1_gpio16_set;
+   gc->set = cpm1_gpio16_set;
    gc->to_irq = cpm1_gpio16_to_irq;
    gc->parent = dev;
    gc->owner = THIS_MODULE;
···
    gc->direction_input = cpm1_gpio32_dir_in;
    gc->direction_output = cpm1_gpio32_dir_out;
    gc->get = cpm1_gpio32_get;
-   gc->set_rv = cpm1_gpio32_set;
+   gc->set = cpm1_gpio32_set;
    gc->parent = dev;
    gc->owner = THIS_MODULE;
+1 -1
arch/powerpc/sysdev/cpm_common.c
···
    gc->direction_input = cpm2_gpio32_dir_in;
    gc->direction_output = cpm2_gpio32_dir_out;
    gc->get = cpm2_gpio32_get;
-   gc->set_rv = cpm2_gpio32_set;
+   gc->set = cpm2_gpio32_set;
    gc->parent = dev;
    gc->owner = THIS_MODULE;
+6 -4
arch/riscv/boot/dts/thead/th1520.dtsi
···
    reg-names = "dwmac", "apb";
    interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
    interrupt-names = "macirq";
-   clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>;
-   clock-names = "stmmaceth", "pclk";
+   clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC1>,
+            <&clk CLK_PERISYS_APB4_HCLK>;
+   clock-names = "stmmaceth", "pclk", "apb";
    snps,pbl = <32>;
    snps,fixed-burst;
    snps,multicast-filter-bins = <64>;
···
    reg-names = "dwmac", "apb";
    interrupts = <66 IRQ_TYPE_LEVEL_HIGH>;
    interrupt-names = "macirq";
-   clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>;
-   clock-names = "stmmaceth", "pclk";
+   clocks = <&clk CLK_GMAC_AXI>, <&clk CLK_GMAC0>,
+            <&clk CLK_PERISYS_APB4_HCLK>;
+   clock-names = "stmmaceth", "pclk", "apb";
    snps,pbl = <32>;
    snps,fixed-burst;
    snps,multicast-filter-bins = <64>;
+13
arch/x86/boot/cpuflags.c
···
        cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
              &cpu.flags[1]);
    }
+
+   if (max_amd_level >= 0x8000001f) {
+       u32 ebx;
+
+       /*
+        * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
+        * the virtualization flags entry (word 8) and set by
+        * scattered.c, so the bit needs to be explicitly set.
+        */
+       cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
+       if (ebx & BIT(31))
+           set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
+   }
  }
  }
+7
arch/x86/boot/startup/sev-shared.c
···
        if (ret)
            sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
    }
+
+   /*
+    * If validating memory (making it private) and affected by the
+    * cache-coherency vulnerability, perform the cache eviction mitigation.
+    */
+   if (validate && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
+       sev_evict_cache((void *)vaddr, 1);
  }

  /*
+21
arch/x86/coco/sev/core.c
···

  static void pvalidate_pages(struct snp_psc_desc *desc)
  {
+   struct psc_entry *e;
+   unsigned int i;
+
    if (snp_vmpl)
        svsm_pval_pages(desc);
    else
        pval_pages(desc);
+
+   /*
+    * If not affected by the cache-coherency vulnerability there is no need
+    * to perform the cache eviction mitigation.
+    */
+   if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+       return;
+
+   for (i = 0; i <= desc->hdr.end_entry; i++) {
+       e = &desc->entries[i];
+
+       /*
+        * If validating memory (making it private) perform the cache
+        * eviction mitigation.
+        */
+       if (e->operation == SNP_PAGE_STATE_PRIVATE)
+           sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
+   }
  }

  static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
+1
arch/x86/include/asm/cpufeatures.h
···
  #define X86_FEATURE_FLEXPRIORITY	( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
  #define X86_FEATURE_EPT		( 8*32+ 2) /* "ept" Intel Extended Page Table */
  #define X86_FEATURE_VPID		( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
+ #define X86_FEATURE_COHERENCY_SFW_NO	( 8*32+ 4) /* SNP cache coherency software work around not needed */

  #define X86_FEATURE_VMMCALL		( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
  #define X86_FEATURE_XENPV		( 8*32+16) /* Xen paravirtual guest */
+7 -5
arch/x86/include/asm/hw_irq.h
···

  extern struct irq_cfg *irq_cfg(unsigned int irq);
  extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
- extern void lock_vector_lock(void);
- extern void unlock_vector_lock(void);
  #ifdef CONFIG_SMP
  extern void vector_schedule_cleanup(struct irq_cfg *);
  extern void irq_complete_move(struct irq_cfg *cfg);
···
  static inline void vector_schedule_cleanup(struct irq_cfg *c) { }
  static inline void irq_complete_move(struct irq_cfg *c) { }
  #endif
-
  extern void apic_ack_edge(struct irq_data *data);
- #else	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
+ #endif	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+ extern void lock_vector_lock(void);
+ extern void unlock_vector_lock(void);
+ #else
  static inline void lock_vector_lock(void) {}
  static inline void unlock_vector_lock(void) {}
- #endif	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
+ #endif

  /* Statistics */
  extern atomic_t irq_err_count;
+5
arch/x86/include/asm/intel-family.h
···

  #define INTEL_PANTHERLAKE_L		IFM(6, 0xCC) /* Cougar Cove / Crestmont */

+ #define INTEL_WILDCATLAKE_L		IFM(6, 0xD5)
+
+ #define INTEL_NOVALAKE		IFM(18, 0x01)
+ #define INTEL_NOVALAKE_L		IFM(18, 0x03)
+
  /* "Small Core" Processors (Atom/E-Core) */

  #define INTEL_ATOM_BONNELL		IFM(6, 0x1C) /* Diamondville, Pineview */
+19
arch/x86/include/asm/sev.h
···
  void snp_leak_pages(u64 pfn, unsigned int npages);
  void kdump_sev_callback(void);
  void snp_fixup_e820_tables(void);
+
+ static inline void sev_evict_cache(void *va, int npages)
+ {
+   volatile u8 val __always_unused;
+   u8 *bytes = va;
+   int page_idx;
+
+   /*
+    * For SEV guests, a read from the first/last cache-lines of a 4K page
+    * using the guest key is sufficient to cause a flush of all cache-lines
+    * associated with that 4K page without incurring all the overhead of a
+    * full CLFLUSH sequence.
+    */
+   for (page_idx = 0; page_idx < npages; page_idx++) {
+       val = bytes[page_idx * PAGE_SIZE];
+       val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+   }
+ }
  #else
  static inline bool snp_probe_rmptable_info(void) { return false; }
  static inline int snp_rmptable_init(void) { return -ENOSYS; }
···
  static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
  static inline void kdump_sev_callback(void) { }
  static inline void snp_fixup_e820_tables(void) {}
+ static inline void sev_evict_cache(void *va, int npages) {}
  #endif

  #endif
+1
arch/x86/kernel/cpu/scattered.c
···
  { X86_FEATURE_PROC_FEEDBACK,		CPUID_EDX, 11, 0x80000007, 0 },
  { X86_FEATURE_AMD_FAST_CPPC,		CPUID_EDX, 15, 0x80000007, 0 },
  { X86_FEATURE_MBA,			CPUID_EBX,  6, 0x80000008, 0 },
+ { X86_FEATURE_COHERENCY_SFW_NO,	CPUID_EBX, 31, 0x8000001f, 0 },
  { X86_FEATURE_SMBA,			CPUID_EBX,  2, 0x80000020, 0 },
  { X86_FEATURE_BMEC,			CPUID_EBX,  3, 0x80000020, 0 },
  { X86_FEATURE_TSA_SQ_NO,		CPUID_ECX,  1, 0x80000021, 0 },
+48 -15
arch/x86/kernel/irq.c
···
    __handle_irq(desc, regs);
  }

- static __always_inline int call_irq_handler(int vector, struct pt_regs *regs)
+ static struct irq_desc *reevaluate_vector(int vector)
  {
-   struct irq_desc *desc;
-   int ret = 0;
+   struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);

-   desc = __this_cpu_read(vector_irq[vector]);
+   if (!IS_ERR_OR_NULL(desc))
+       return desc;
+
+   if (desc == VECTOR_UNUSED)
+       pr_emerg_ratelimited("No irq handler for %d.%u\n", smp_processor_id(), vector);
+   else
+       __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+   return NULL;
+ }
+
+ static __always_inline bool call_irq_handler(int vector, struct pt_regs *regs)
+ {
+   struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
+
    if (likely(!IS_ERR_OR_NULL(desc))) {
        handle_irq(desc, regs);
-   } else {
-       ret = -EINVAL;
-       if (desc == VECTOR_UNUSED) {
-           pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
-                                __func__, smp_processor_id(),
-                                vector);
-       } else {
-           __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
-       }
+       return true;
    }

-   return ret;
+   /*
+    * Reevaluate with vector_lock held to prevent a race against
+    * request_irq() setting up the vector:
+    *
+    * CPU0				CPU1
+    *				interrupt is raised in APIC IRR
+    *				but not handled
+    * free_irq()
+    *   per_cpu(vector_irq, CPU1)[vector] = VECTOR_SHUTDOWN;
+    *
+    * request_irq()		common_interrupt()
+    *				  d = this_cpu_read(vector_irq[vector]);
+    *
+    *   per_cpu(vector_irq, CPU1)[vector] = desc;
+    *
+    *				  if (d == VECTOR_SHUTDOWN)
+    *				    this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+    *
+    * This requires that the same vector on the same target CPU is
+    * handed out or that a spurious interrupt hits that CPU/vector.
+    */
+   lock_vector_lock();
+   desc = reevaluate_vector(vector);
+   unlock_vector_lock();
+
+   if (!desc)
+       return false;
+
+   handle_irq(desc, regs);
+   return true;
  }

  /*
···
    /* entry code tells RCU that we're not quiescent. Check it. */
    RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

-   if (unlikely(call_irq_handler(vector, regs)))
+   if (unlikely(!call_irq_handler(vector, regs)))
        apic_eoi();

    set_irq_regs(old_regs);
+1 -1
arch/xtensa/include/asm/bootparam.h
···
  #define BP_TAG_FIRST		0x7B0B	/* first tag with a version number */
  #define BP_TAG_LAST		0x7E0B	/* last tag */

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  /* All records are aligned to 4 bytes */
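The xtensa hunks here and below are one mechanical substitution: the kernel-defined __ASSEMBLY__ guard is replaced by __ASSEMBLER__, which GCC and Clang predefine themselves when preprocessing assembly, so nothing has to pass -D__ASSEMBLY__ by hand. A hedged sketch of the guard pattern in a hypothetical header shared between C and .S files:

    /* example.h -- hypothetical header shared between C and assembly */
    #ifndef _EXAMPLE_H
    #define _EXAMPLE_H

    #define EXAMPLE_FLAG	0x1	/* visible to both languages */

    #ifndef __ASSEMBLER__	/* predefined by the compiler for .S files */
    /* C-only declarations the assembler cannot parse */
    struct example_state {
        unsigned long flags;
    };
    void example_init(struct example_state *state);
    #endif /* __ASSEMBLER__ */

    #endif /* _EXAMPLE_H */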
+2 -2
arch/xtensa/include/asm/cmpxchg.h
···
  #ifndef _XTENSA_CMPXCHG_H
  #define _XTENSA_CMPXCHG_H

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #include <linux/bits.h>
  #include <linux/stringify.h>
···
  }

- #endif /* __ASSEMBLY__ */
+ #endif /* __ASSEMBLER__ */

  #endif /* _XTENSA_CMPXCHG_H */
+4 -4
arch/xtensa/include/asm/coprocessor.h
···
  #include <asm/core.h>
  #include <asm/types.h>

- #ifdef __ASSEMBLY__
+ #ifdef __ASSEMBLER__
  # include <variant/tie-asm.h>

  .macro	xchal_sa_start a b
···

- #endif	/* __ASSEMBLY__ */
+ #endif	/* __ASSEMBLER__ */

  /*
   * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
···
  #define XTENSA_HAVE_IO_PORTS \
	XCHAL_CP_PORT_MASK

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  /*
   * Additional registers.
···

  #endif /* XTENSA_HAVE_COPROCESSORS */

- #endif	/* !__ASSEMBLY__ */
+ #endif	/* !__ASSEMBLER__ */
  #endif	/* _XTENSA_COPROCESSOR_H */
+1 -1
arch/xtensa/include/asm/current.h
···

  #include <asm/thread_info.h>

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #include <linux/thread_info.h>
+4 -4
arch/xtensa/include/asm/ftrace.h
···

  #include <asm/processor.h>

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__
  extern unsigned long return_address(unsigned level);
  #define ftrace_return_address(n) return_address(n)
- #endif /* __ASSEMBLY__ */
+ #endif /* __ASSEMBLER__ */

  #ifdef CONFIG_FUNCTION_TRACER

  #define MCOUNT_ADDR ((unsigned long)(_mcount))
  #define MCOUNT_INSN_SIZE 3

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__
  extern void _mcount(void);
  #define mcount _mcount
- #endif /* __ASSEMBLY__ */
+ #endif /* __ASSEMBLER__ */
  #endif /* CONFIG_FUNCTION_TRACER */

  #endif /* _XTENSA_FTRACE_H */
+2 -2
arch/xtensa/include/asm/initialize_mmu.h
···
  #define CA_WRITEBACK	(0x4)
  #endif

- #ifdef __ASSEMBLY__
+ #ifdef __ASSEMBLER__

  #define XTENSA_HWVERSION_RC_2009_0 230000
···

	.endm

- #endif /*__ASSEMBLY__*/
+ #endif /*__ASSEMBLER__*/

  #endif /* _XTENSA_INITIALIZE_MMU_H */
+2 -2
arch/xtensa/include/asm/jump_label.h
···
  #ifndef _ASM_XTENSA_JUMP_LABEL_H
  #define _ASM_XTENSA_JUMP_LABEL_H

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #include <linux/types.h>
···
	jump_label_t key;
  };

- #endif  /* __ASSEMBLY__ */
+ #endif  /* __ASSEMBLER__ */
  #endif
+1 -1
arch/xtensa/include/asm/kasan.h
···
  #ifndef __ASM_KASAN_H
  #define __ASM_KASAN_H

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #ifdef CONFIG_KASAN
+1 -1
arch/xtensa/include/asm/kmem_layout.h
···

  #if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
  #define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__
  extern unsigned long xtensa_kio_paddr;

  static inline unsigned long xtensa_get_kio_paddr(void)
+2 -2
arch/xtensa/include/asm/page.h
···
  #endif


- #ifdef __ASSEMBLY__
+ #ifdef __ASSEMBLER__

  #define __pgprot(x)	(x)
···
  #define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
  #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

- #endif /* __ASSEMBLY__ */
+ #endif /* __ASSEMBLER__ */

  #include <asm-generic/memory_model.h>
  #endif /* _XTENSA_PAGE_H */
+4 -4
arch/xtensa/include/asm/pgtable.h
···
   * What follows is the closest we can get by reasonable means..
   * See linux/mm/mmap.c for protection_map[] array that uses these definitions.
   */
- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
···
	return pte;
  }

- #endif /* !defined (__ASSEMBLY__) */
+ #endif /* !defined (__ASSEMBLER__) */


- #ifdef __ASSEMBLY__
+ #ifdef __ASSEMBLER__

  /* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
   * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
···
			unsigned long address, pte_t *ptep, unsigned int nr);
  #define update_mmu_tlb_range update_mmu_tlb_range

- #endif /* !defined (__ASSEMBLY__) */
+ #endif /* !defined (__ASSEMBLER__) */

  #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
  #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+2 -2
arch/xtensa/include/asm/processor.h
···
  #error Unsupported xtensa ABI
  #endif

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #if defined(__XTENSA_WINDOWED_ABI__)
···

  #endif /* XCHAL_HAVE_EXTERN_REGS */

- #endif	/* __ASSEMBLY__ */
+ #endif	/* __ASSEMBLER__ */
  #endif	/* _XTENSA_PROCESSOR_H */
+3 -3
arch/xtensa/include/asm/ptrace.h
···

  #define NO_SYSCALL (-1)

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #include <asm/coprocessor.h>
  #include <asm/core.h>
···
  int do_syscall_trace_enter(struct pt_regs *regs);
  void do_syscall_trace_leave(struct pt_regs *regs);

- #else	/* __ASSEMBLY__ */
+ #else	/* __ASSEMBLER__ */

  # include <asm/asm-offsets.h>
  #define PT_REGS_OFFSET	(KERNEL_STACK_SIZE - PT_USER_SIZE)

- #endif	/* !__ASSEMBLY__ */
+ #endif	/* !__ASSEMBLER__ */

  #endif	/* _XTENSA_PTRACE_H */
+2 -2
arch/xtensa/include/asm/signal.h
···

  #include <uapi/asm/signal.h>

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__
  #define __ARCH_HAS_SA_RESTORER

  #include <asm/sigcontext.h>

- #endif	/* __ASSEMBLY__ */
+ #endif	/* __ASSEMBLER__ */
  #endif	/* _XTENSA_SIGNAL_H */
+4 -4
arch/xtensa/include/asm/thread_info.h
···

  #define CURRENT_SHIFT KERNEL_STACK_SHIFT

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__
  # include <asm/processor.h>
  #endif
···
   * must also be changed
   */

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #if XTENSA_HAVE_COPROCESSORS
···
   * macros/functions for gaining access to the thread information structure
   */

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #define INIT_THREAD_INFO(tsk)				\
  {							\
···
	return ti;
  }

- #else /* !__ASSEMBLY__ */
+ #else /* !__ASSEMBLER__ */

  /* how to get the thread information struct from ASM */
  #define GET_THREAD_INFO(reg,sp) \
+2 -2
arch/xtensa/include/asm/tlbflush.h
···
  #define ITLB_HIT_BIT	3
  #define DTLB_HIT_BIT	4

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  /* TLB flushing:
   *
···
	return tmp;
  }

- #endif	/* __ASSEMBLY__ */
+ #endif	/* __ASSEMBLER__ */
  #endif	/* _XTENSA_TLBFLUSH_H */
+1 -1
arch/xtensa/include/uapi/asm/ptrace.h
···
  #define PTRACE_GETFDPIC_EXEC	0
  #define PTRACE_GETFDPIC_INTERP	1

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  struct user_pt_regs {
	__u32 pc;
+3 -3
arch/xtensa/include/uapi/asm/signal.h
···
  #define _NSIG_BPW	32
  #define _NSIG_WORDS	(_NSIG / _NSIG_BPW)

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #include <linux/types.h>
···
  #define MINSIGSTKSZ	2048
  #define SIGSTKSZ	8192

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #include <asm-generic/signal-defs.h>
···
	__kernel_size_t ss_size;
  } stack_t;

- #endif /* __ASSEMBLY__ */
+ #endif /* __ASSEMBLER__ */
  #endif /* _UAPI_XTENSA_SIGNAL_H */
+2 -2
arch/xtensa/include/uapi/asm/types.h
···

  #include <asm-generic/int-ll64.h>

- #ifdef __ASSEMBLY__
+ #ifdef __ASSEMBLER__
  # define __XTENSA_UL(x)		(x)
  # define __XTENSA_UL_CONST(x)	x
  #else
···
  # define __XTENSA_UL_CONST(x)	___XTENSA_UL_CONST(x)
  #endif

- #ifndef __ASSEMBLY__
+ #ifndef __ASSEMBLER__

  #endif
+21 -45
block/bfq-iosched.c
···
   */
  static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
  {
-   struct bfq_io_cq *icq;
-   unsigned long flags;
-
    if (!current->io_context)
        return NULL;

-   spin_lock_irqsave(&q->queue_lock, flags);
-   icq = icq_to_bic(ioc_lookup_icq(q));
-   spin_unlock_irqrestore(&q->queue_lock, flags);
-
-   return icq;
+   return icq_to_bic(ioc_lookup_icq(q));
  }

  /*
···
  {
    struct bfq_data *bfqd = data->q->elevator->elevator_data;
    struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
-   int depth;
-   unsigned limit = data->q->nr_requests;
-   unsigned int act_idx;
+   unsigned int limit, act_idx;

    /* Sync reads have full depth available */
-   if (op_is_sync(opf) && !op_is_write(opf)) {
-       depth = 0;
-   } else {
-       depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
-       limit = (limit * depth) >> bfqd->full_depth_shift;
-   }
+   if (op_is_sync(opf) && !op_is_write(opf))
+       limit = data->q->nr_requests;
+   else
+       limit = bfqd->async_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];

    for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) {
        /* Fast path to check if bfqq is already allocated. */
···
         * available requests and thus starve other entities.
         */
        if (bfqq_request_over_limit(bfqd, bic, opf, act_idx, limit)) {
-           depth = 1;
+           limit = 1;
            break;
        }
    }
+
    bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
-           __func__, bfqd->wr_busy_queues, op_is_sync(opf), depth);
-   if (depth)
-       data->shallow_depth = depth;
+           __func__, bfqd->wr_busy_queues, op_is_sync(opf), limit);
+
+   if (limit < data->q->nr_requests)
+       data->shallow_depth = limit;
  }

  static struct bfq_queue *
···
            unsigned int nr_segs)
  {
    struct bfq_data *bfqd = q->elevator->elevator_data;
-   struct request *free = NULL;
-   /*
-    * bfq_bic_lookup grabs the queue_lock: invoke it now and
-    * store its return value for later use, to avoid nesting
-    * queue_lock inside the bfqd->lock. We assume that the bic
-    * returned by bfq_bic_lookup does not go away before
-    * bfqd->lock is taken.
-    */
    struct bfq_io_cq *bic = bfq_bic_lookup(q);
+   struct request *free = NULL;
    bool ret;

    spin_lock_irq(&bfqd->lock);
···
   */
  static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
  {
-   unsigned int depth = 1U << bt->sb.shift;
+   unsigned int nr_requests = bfqd->queue->nr_requests;

-   bfqd->full_depth_shift = bt->sb.shift;
    /*
     * In-word depths if no bfq_queue is being weight-raised:
     * leaving 25% of tags only for sync reads.
···
     * limit 'something'.
     */
    /* no more than 50% of tags for async I/O */
-   bfqd->word_depths[0][0] = max(depth >> 1, 1U);
+   bfqd->async_depths[0][0] = max(nr_requests >> 1, 1U);
    /*
     * no more than 75% of tags for sync writes (25% extra tags
     * w.r.t. async I/O, to prevent async I/O from starving sync
     * writes)
     */
-   bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U);
+   bfqd->async_depths[0][1] = max((nr_requests * 3) >> 2, 1U);

    /*
     * In-word depths in case some bfq_queue is being weight-
···
     * shortage.
     */
    /* no more than ~18% of tags for async I/O */
-   bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U);
+   bfqd->async_depths[1][0] = max((nr_requests * 3) >> 4, 1U);
    /* no more than ~37% of tags for sync writes (~20% extra tags) */
-   bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U);
+   bfqd->async_depths[1][1] = max((nr_requests * 6) >> 4, 1U);
  }

  static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
···
    root_group->sched_data.bfq_class_idle_last_service = jiffies;
  }

- static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+ static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
  {
    struct bfq_data *bfqd;
-   struct elevator_queue *eq;
    unsigned int i;
    struct blk_independent_access_ranges *ia_ranges = q->disk->ia_ranges;

-   eq = elevator_alloc(q, e);
-   if (!eq)
+   bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
+   if (!bfqd)
        return -ENOMEM;

-   bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
-   if (!bfqd) {
-       kobject_put(&eq->kobj);
-       return -ENOMEM;
-   }
    eq->elevator_data = bfqd;

    spin_lock_irq(&q->queue_lock);
···

  out_free:
    kfree(bfqd);
-   kobject_put(&eq->kobj);
    return -ENOMEM;
  }
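As a worked example of the renamed table (word_depths becomes async_depths, now expressed directly in requests rather than sbitmap word depth), here is stand-alone arithmetic for the four limits; nr_requests = 256 is an assumed sample depth, not a value from the patch:

    #include <stdio.h>

    /* Recompute the four async depth limits the patch derives from
     * nr_requests, matching the shifts used in bfq_update_depths(). */
    int main(void)
    {
        unsigned int nr_requests = 256;
        unsigned int d[2][2];

        d[0][0] = nr_requests >> 1;       /* no wr: async capped at 50%  -> 128 */
        d[0][1] = (nr_requests * 3) >> 2; /* no wr: sync writes at 75%   -> 192 */
        d[1][0] = (nr_requests * 3) >> 4; /* wr:    async at ~18%        ->  48 */
        d[1][1] = (nr_requests * 6) >> 4; /* wr:    sync writes at ~37%  ->  96 */

        printf("%u %u %u %u\n", d[0][0], d[0][1], d[1][0], d[1][1]);
        return 0;
    }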
+6 -7
block/bfq-iosched.h
··· 427 427 */ 428 428 bool saved_IO_bound; 429 429 430 - u64 saved_io_start_time; 431 - u64 saved_tot_idle_time; 432 - 433 430 /* 434 431 * Same purpose as the previous fields for the values of the 435 432 * field keeping the queue's belonging to a large burst ··· 447 450 */ 448 451 unsigned int saved_weight; 449 452 453 + u64 saved_io_start_time; 454 + u64 saved_tot_idle_time; 455 + 450 456 /* 451 457 * Similar to previous fields: save wr information. 452 458 */ ··· 457 457 unsigned long saved_last_wr_start_finish; 458 458 unsigned long saved_service_from_wr; 459 459 unsigned long saved_wr_start_at_switch_to_srt; 460 - unsigned int saved_wr_cur_max_time; 461 460 struct bfq_ttime saved_ttime; 461 + unsigned int saved_wr_cur_max_time; 462 462 463 463 /* Save also injection state */ 464 - u64 saved_last_serv_time_ns; 465 464 unsigned int saved_inject_limit; 466 465 unsigned long saved_decrease_time_jif; 466 + u64 saved_last_serv_time_ns; 467 467 468 468 /* candidate queue for a stable merge (due to close creation time) */ 469 469 struct bfq_queue *stable_merge_bfqq; ··· 813 813 * Depth limits used in bfq_limit_depth (see comments on the 814 814 * function) 815 815 */ 816 - unsigned int word_depths[2][2]; 817 - unsigned int full_depth_shift; 816 + unsigned int async_depths[2][2]; 818 817 819 818 /* 820 819 * Number of independent actuators. This is equal to 1 in
+6 -10
block/blk-ioc.c
···

  #ifdef CONFIG_BLK_ICQ
  /**
-  * ioc_lookup_icq - lookup io_cq from ioc
+  * ioc_lookup_icq - lookup io_cq from ioc in io issue path
   * @q: the associated request_queue
   *
   * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
-  * with @q->queue_lock held.
+  * from io issue path, either return NULL if current issue io to @q for the
+  * first time, or return a valid icq.
   */
  struct io_cq *ioc_lookup_icq(struct request_queue *q)
  {
    struct io_context *ioc = current->io_context;
    struct io_cq *icq;

-   lockdep_assert_held(&q->queue_lock);
-
    /*
     * icq's are indexed from @ioc using radix tree and hint pointer,
-    * both of which are protected with RCU. All removals are done
-    * holding both q and ioc locks, and we're holding q lock - if we
-    * find a icq which points to us, it's guaranteed to be valid.
+    * both of which are protected with RCU, io issue path ensures that
+    * both request_queue and current task are valid, the found icq
+    * is guaranteed to be valid until the io is done.
     */
    rcu_read_lock();
    icq = rcu_dereference(ioc->icq_hint);
···
        task_unlock(current);
    } else {
        get_io_context(ioc);
-
-       spin_lock_irq(&q->queue_lock);
        icq = ioc_lookup_icq(q);
-       spin_unlock_irq(&q->queue_lock);
    }

    if (!icq) {
+152 -71
block/blk-mq-sched.c
···
  }
  EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

- static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
-                                           struct blk_mq_hw_ctx *hctx,
-                                           unsigned int hctx_idx)
- {
-   if (blk_mq_is_shared_tags(q->tag_set->flags)) {
-       hctx->sched_tags = q->sched_shared_tags;
-       return 0;
-   }
-
-   hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
-                                               q->nr_requests);
-
-   if (!hctx->sched_tags)
-       return -ENOMEM;
-   return 0;
- }
-
- static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
- {
-   blk_mq_free_rq_map(queue->sched_shared_tags);
-   queue->sched_shared_tags = NULL;
- }
-
  /* called in queue's release handler, tagset has gone away */
  static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
  {
    struct blk_mq_hw_ctx *hctx;
    unsigned long i;

-   queue_for_each_hw_ctx(q, hctx, i) {
-       if (hctx->sched_tags) {
-           if (!blk_mq_is_shared_tags(flags))
-               blk_mq_free_rq_map(hctx->sched_tags);
-           hctx->sched_tags = NULL;
-       }
-   }
+   queue_for_each_hw_ctx(q, hctx, i)
+       hctx->sched_tags = NULL;

    if (blk_mq_is_shared_tags(flags))
-       blk_mq_exit_sched_shared_tags(q);
- }
-
- static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
- {
-   struct blk_mq_tag_set *set = queue->tag_set;
-
-   /*
-    * Set initial depth at max so that we don't need to reallocate for
-    * updating nr_requests.
-    */
-   queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
-                                           BLK_MQ_NO_HCTX_IDX,
-                                           MAX_SCHED_RQ);
-   if (!queue->sched_shared_tags)
-       return -ENOMEM;
-
-   blk_mq_tag_update_sched_shared_tags(queue);
-
-   return 0;
+       q->sched_shared_tags = NULL;
  }

  void blk_mq_sched_reg_debugfs(struct request_queue *q)
···
    mutex_unlock(&q->debugfs_mutex);
  }

+ void blk_mq_free_sched_tags(struct elevator_tags *et,
+       struct blk_mq_tag_set *set)
+ {
+   unsigned long i;
+
+   /* Shared tags are stored at index 0 in @tags. */
+   if (blk_mq_is_shared_tags(set->flags))
+       blk_mq_free_map_and_rqs(set, et->tags[0], BLK_MQ_NO_HCTX_IDX);
+   else {
+       for (i = 0; i < et->nr_hw_queues; i++)
+           blk_mq_free_map_and_rqs(set, et->tags[i], i);
+   }
+
+   kfree(et);
+ }
+
+ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
+       struct blk_mq_tag_set *set)
+ {
+   struct request_queue *q;
+   struct elevator_tags *et;
+
+   lockdep_assert_held_write(&set->update_nr_hwq_lock);
+
+   list_for_each_entry(q, &set->tag_list, tag_set_list) {
+       /*
+        * Accessing q->elevator without holding q->elevator_lock is
+        * safe because we're holding here set->update_nr_hwq_lock in
+        * the writer context. So, scheduler update/switch code (which
+        * acquires the same lock but in the reader context) can't run
+        * concurrently.
+        */
+       if (q->elevator) {
+           et = xa_load(et_table, q->id);
+           if (unlikely(!et))
+               WARN_ON_ONCE(1);
+           else
+               blk_mq_free_sched_tags(et, set);
+       }
+   }
+ }
+
+ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+       unsigned int nr_hw_queues)
+ {
+   unsigned int nr_tags;
+   int i;
+   struct elevator_tags *et;
+   gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
+
+   if (blk_mq_is_shared_tags(set->flags))
+       nr_tags = 1;
+   else
+       nr_tags = nr_hw_queues;
+
+   et = kmalloc(sizeof(struct elevator_tags) +
+           nr_tags * sizeof(struct blk_mq_tags *), gfp);
+   if (!et)
+       return NULL;
+   /*
+    * Default to double of smaller one between hw queue_depth and
+    * 128, since we don't split into sync/async like the old code
+    * did. Additionally, this is a per-hw queue depth.
+    */
+   et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
+           BLKDEV_DEFAULT_RQ);
+   et->nr_hw_queues = nr_hw_queues;
+
+   if (blk_mq_is_shared_tags(set->flags)) {
+       /* Shared tags are stored at index 0 in @tags. */
+       et->tags[0] = blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX,
+               MAX_SCHED_RQ);
+       if (!et->tags[0])
+           goto out;
+   } else {
+       for (i = 0; i < et->nr_hw_queues; i++) {
+           et->tags[i] = blk_mq_alloc_map_and_rqs(set, i,
+                   et->nr_requests);
+           if (!et->tags[i])
+               goto out_unwind;
+       }
+   }
+
+   return et;
+ out_unwind:
+   while (--i >= 0)
+       blk_mq_free_map_and_rqs(set, et->tags[i], i);
+ out:
+   kfree(et);
+   return NULL;
+ }
+
+ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
+       struct blk_mq_tag_set *set, unsigned int nr_hw_queues)
+ {
+   struct request_queue *q;
+   struct elevator_tags *et;
+   gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
+
+   lockdep_assert_held_write(&set->update_nr_hwq_lock);
+
+   list_for_each_entry(q, &set->tag_list, tag_set_list) {
+       /*
+        * Accessing q->elevator without holding q->elevator_lock is
+        * safe because we're holding here set->update_nr_hwq_lock in
+        * the writer context. So, scheduler update/switch code (which
+        * acquires the same lock but in the reader context) can't run
+        * concurrently.
+        */
+       if (q->elevator) {
+           et = blk_mq_alloc_sched_tags(set, nr_hw_queues);
+           if (!et)
+               goto out_unwind;
+           if (xa_insert(et_table, q->id, et, gfp))
+               goto out_free_tags;
+       }
+   }
+   return 0;
+ out_free_tags:
+   blk_mq_free_sched_tags(et, set);
+ out_unwind:
+   list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
+       if (q->elevator) {
+           et = xa_load(et_table, q->id);
+           if (et)
+               blk_mq_free_sched_tags(et, set);
+       }
+   }
+   return -ENOMEM;
+ }
+
  /* caller must have a reference to @e, will grab another one if successful */
- int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
+       struct elevator_tags *et)
  {
    unsigned int flags = q->tag_set->flags;
    struct blk_mq_hw_ctx *hctx;
···
    unsigned long i;
    int ret;

-   /*
-    * Default to double of smaller one between hw queue_depth and 128,
-    * since we don't split into sync/async like the old code did.
-    * Additionally, this is a per-hw queue depth.
-    */
-   q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
-                              BLKDEV_DEFAULT_RQ);
+   eq = elevator_alloc(q, e, et);
+   if (!eq)
+       return -ENOMEM;
+
+   q->nr_requests = et->nr_requests;

    if (blk_mq_is_shared_tags(flags)) {
-       ret = blk_mq_init_sched_shared_tags(q);
-       if (ret)
-           return ret;
+       /* Shared tags are stored at index 0 in @et->tags. */
+       q->sched_shared_tags = et->tags[0];
+       blk_mq_tag_update_sched_shared_tags(q);
    }

    queue_for_each_hw_ctx(q, hctx, i) {
-       ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
-       if (ret)
-           goto err_free_map_and_rqs;
+       if (blk_mq_is_shared_tags(flags))
+           hctx->sched_tags = q->sched_shared_tags;
+       else
+           hctx->sched_tags = et->tags[i];
    }

-   ret = e->ops.init_sched(q, e);
+   ret = e->ops.init_sched(q, eq);
    if (ret)
-       goto err_free_map_and_rqs;
+       goto out;

    queue_for_each_hw_ctx(q, hctx, i) {
        if (e->ops.init_hctx) {
            ret = e->ops.init_hctx(hctx, i);
            if (ret) {
-               eq = q->elevator;
-               blk_mq_sched_free_rqs(q);
                blk_mq_exit_sched(q, eq);
                kobject_put(&eq->kobj);
                return ret;
···
    }
    return 0;

- err_free_map_and_rqs:
-   blk_mq_sched_free_rqs(q);
+ out:
    blk_mq_sched_tags_teardown(q, flags);
-
+   kobject_put(&eq->kobj);
    q->elevator = NULL;
    return ret;
  }
+11 -1
block/blk-mq-sched.h
···

  void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

- int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
+ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
+       struct elevator_tags *et);
  void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
  void blk_mq_sched_free_rqs(struct request_queue *q);
+
+ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+       unsigned int nr_hw_queues);
+ int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
+       struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
+ void blk_mq_free_sched_tags(struct elevator_tags *et,
+       struct blk_mq_tag_set *set);
+ void blk_mq_free_sched_tags_batch(struct xarray *et_table,
+       struct blk_mq_tag_set *set);

  static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
  {
+11 -5
block/blk-mq.c
··· 4974 4974 * Switch back to the elevator type stored in the xarray. 4975 4975 */ 4976 4976 static void blk_mq_elv_switch_back(struct request_queue *q, 4977 - struct xarray *elv_tbl) 4977 + struct xarray *elv_tbl, struct xarray *et_tbl) 4978 4978 { 4979 4979 struct elevator_type *e = xa_load(elv_tbl, q->id); 4980 + struct elevator_tags *t = xa_load(et_tbl, q->id); 4980 4981 4981 4982 /* The elv_update_nr_hw_queues unfreezes the queue. */ 4982 - elv_update_nr_hw_queues(q, e); 4983 + elv_update_nr_hw_queues(q, e, t); 4983 4984 4984 4985 /* Drop the reference acquired in blk_mq_elv_switch_none. */ 4985 4986 if (e) ··· 5032 5031 int prev_nr_hw_queues = set->nr_hw_queues; 5033 5032 unsigned int memflags; 5034 5033 int i; 5035 - struct xarray elv_tbl; 5034 + struct xarray elv_tbl, et_tbl; 5036 5035 5037 5036 lockdep_assert_held(&set->tag_list_lock); 5038 5037 ··· 5044 5043 return; 5045 5044 5046 5045 memflags = memalloc_noio_save(); 5046 + 5047 + xa_init(&et_tbl); 5048 + if (blk_mq_alloc_sched_tags_batch(&et_tbl, set, nr_hw_queues) < 0) 5049 + goto out_memalloc_restore; 5047 5050 5048 5051 xa_init(&elv_tbl); 5049 5052 ··· 5092 5087 switch_back: 5093 5088 /* The blk_mq_elv_switch_back unfreezes queue for us. */ 5094 5089 list_for_each_entry(q, &set->tag_list, tag_set_list) 5095 - blk_mq_elv_switch_back(q, &elv_tbl); 5090 + blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl); 5096 5091 5097 5092 list_for_each_entry(q, &set->tag_list, tag_set_list) { 5098 5093 blk_mq_sysfs_register_hctxs(q); ··· 5103 5098 } 5104 5099 5105 5100 xa_destroy(&elv_tbl); 5106 - 5101 + xa_destroy(&et_tbl); 5102 + out_memalloc_restore: 5107 5103 memalloc_noio_restore(memflags); 5108 5104 5109 5105 /* Free the excess tags when nr_hw_queues shrink. */
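The reordering in blk_mq_update_nr_hw_queues() makes scheduler tag allocation a separate first phase: every allocation that can fail happens before any queue is frozen, stashed by queue id, and the switch-back path only does lookups. A small userspace sketch of that two-phase shape, with a plain pointer table standing in for the xarray; names here are illustrative, not kernel API.

#include <stdio.h>
#include <stdlib.h>

#define NR_QUEUES 4

struct et_sketch { unsigned int nr_requests; };

static struct et_sketch *et_table[NR_QUEUES];   /* xarray stand-in */

/* phase 1: every fallible allocation happens up front */
static int prealloc_sched_tags(unsigned int depth)
{
        unsigned int id;

        for (id = 0; id < NR_QUEUES; id++) {
                et_table[id] = malloc(sizeof(*et_table[id]));
                if (!et_table[id])
                        goto out_unwind;
                et_table[id]->nr_requests = depth;
        }
        return 0;

out_unwind:
        while (id-- > 0) {
                free(et_table[id]);
                et_table[id] = NULL;
        }
        return -1;
}

int main(void)
{
        if (prealloc_sched_tags(256))
                return 1;

        /*
         * phase 2: the "frozen" section only looks resources up by id,
         * so it cannot fail with -ENOMEM halfway through the update
         */
        for (unsigned int id = 0; id < NR_QUEUES; id++)
                printf("queue %u -> depth %u\n", id, et_table[id]->nr_requests);
        return 0;
}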
+26 -7
block/blk-settings.c
··· 62 62 void blk_apply_bdi_limits(struct backing_dev_info *bdi, 63 63 struct queue_limits *lim) 64 64 { 65 + u64 io_opt = lim->io_opt; 66 + 65 67 /* 66 68 * For read-ahead of large files to be effective, we need to read ahead 67 - * at least twice the optimal I/O size. 69 + * at least twice the optimal I/O size. For rotational devices that do 70 + * not report an optimal I/O size (e.g. ATA HDDs), use the maximum I/O 71 + * size to avoid falling back to the (rather inefficient) small default 72 + * read-ahead size. 68 73 * 69 74 * There is no hardware limitation for the read-ahead size and the user 70 75 * might have increased the read-ahead size through sysfs, so don't ever 71 76 * decrease it. 72 77 */ 78 + if (!io_opt && (lim->features & BLK_FEAT_ROTATIONAL)) 79 + io_opt = (u64)lim->max_sectors << SECTOR_SHIFT; 80 + 73 81 bdi->ra_pages = max3(bdi->ra_pages, 74 - lim->io_opt * 2 / PAGE_SIZE, 82 + io_opt * 2 >> PAGE_SHIFT, 75 83 VM_READAHEAD_PAGES); 76 84 bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT; 77 85 } ··· 320 312 pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size); 321 313 return -EINVAL; 322 314 } 323 - if (lim->physical_block_size < lim->logical_block_size) 315 + if (lim->physical_block_size < lim->logical_block_size) { 324 316 lim->physical_block_size = lim->logical_block_size; 317 + } else if (!is_power_of_2(lim->physical_block_size)) { 318 + pr_warn("Invalid physical block size (%d)\n", lim->physical_block_size); 319 + return -EINVAL; 320 + } 325 321 326 322 /* 327 323 * The minimum I/O size defaults to the physical block size unless ··· 400 388 lim->max_discard_sectors = 401 389 min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors); 402 390 391 + /* 392 + * When discard is not supported, discard_granularity should be reported 393 + * as 0 to userspace. 394 + */ 395 + if (lim->max_discard_sectors) 396 + lim->discard_granularity = 397 + max(lim->discard_granularity, lim->physical_block_size); 398 + else 399 + lim->discard_granularity = 0; 400 + 403 401 if (!lim->max_discard_segments) 404 402 lim->max_discard_segments = 1; 405 - 406 - if (lim->discard_granularity < lim->physical_block_size) 407 - lim->discard_granularity = lim->physical_block_size; 408 403 409 404 /* 410 405 * By default there is no limit on the segment boundary alignment, ··· 868 849 } 869 850 870 851 /* chunk_sectors a multiple of the physical block size? */ 871 - if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) { 852 + if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) { 872 853 t->chunk_sectors = 0; 873 854 t->flags |= BLK_FLAG_MISALIGNED; 874 855 ret = -1;
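The read-ahead change in blk_apply_bdi_limits() is easiest to see with numbers. A worked example under assumed constants (4 KiB pages, so VM_READAHEAD_PAGES is 32, and a device advertising max_sectors of 2560 with no reported io_opt); the helper mirrors the kernel's max3().

#include <stdio.h>

#define SECTOR_SHIFT       9
#define PAGE_SHIFT         12                  /* assuming 4 KiB pages */
#define VM_READAHEAD_PAGES 32                  /* 128 KiB / 4 KiB */

static unsigned long long max3(unsigned long long a, unsigned long long b,
                               unsigned long long c)
{
        unsigned long long m = a > b ? a : b;
        return m > c ? m : c;
}

int main(void)
{
        unsigned long long ra_pages = 0;
        unsigned long long max_sectors = 2560;  /* 1280 KiB I/O size limit */
        unsigned long long io_opt = 0;          /* ATA HDDs often report none */
        int rotational = 1;

        /* rotational fallback: treat the max I/O size as the optimal size */
        if (!io_opt && rotational)
                io_opt = max_sectors << SECTOR_SHIFT;

        ra_pages = max3(ra_pages, io_opt * 2 >> PAGE_SHIFT, VM_READAHEAD_PAGES);
        printf("ra_pages = %llu (%llu KiB)\n", ra_pages, ra_pages * 4);
        return 0;
}

With these inputs the fallback yields 640 pages (2560 KiB) of read-ahead instead of the 32-page default the old code would have left in place.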
+3 -1
block/blk.h
··· 12 12 #include "blk-crypto-internal.h" 13 13 14 14 struct elevator_type; 15 + struct elevator_tags; 15 16 16 17 /* 17 18 * Default upper limit for the software max_sectors limit used for regular I/Os. ··· 331 330 332 331 bool blk_insert_flush(struct request *rq); 333 332 334 - void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e); 333 + void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e, 334 + struct elevator_tags *t); 335 335 void elevator_set_default(struct request_queue *q); 336 336 void elevator_set_none(struct request_queue *q); 337 337
+31 -7
block/elevator.c
··· 54 54 struct elevator_queue *old; 55 55 /* for registering new elevator */ 56 56 struct elevator_queue *new; 57 + /* holds sched tags data */ 58 + struct elevator_tags *et; 57 59 }; 58 60 59 61 static DEFINE_SPINLOCK(elv_list_lock); ··· 134 132 static const struct kobj_type elv_ktype; 135 133 136 134 struct elevator_queue *elevator_alloc(struct request_queue *q, 137 - struct elevator_type *e) 135 + struct elevator_type *e, struct elevator_tags *et) 138 136 { 139 137 struct elevator_queue *eq; 140 138 ··· 147 145 kobject_init(&eq->kobj, &elv_ktype); 148 146 mutex_init(&eq->sysfs_lock); 149 147 hash_init(eq->hash); 148 + eq->et = et; 150 149 151 150 return eq; 152 151 } 153 - EXPORT_SYMBOL(elevator_alloc); 154 152 155 153 static void elevator_release(struct kobject *kobj) 156 154 { ··· 168 166 lockdep_assert_held(&q->elevator_lock); 169 167 170 168 ioc_clear_queue(q); 171 - blk_mq_sched_free_rqs(q); 172 169 173 170 mutex_lock(&e->sysfs_lock); 174 171 blk_mq_exit_sched(q, e); ··· 593 592 } 594 593 595 594 if (new_e) { 596 - ret = blk_mq_init_sched(q, new_e); 595 + ret = blk_mq_init_sched(q, new_e, ctx->et); 597 596 if (ret) 598 597 goto out_unfreeze; 599 598 ctx->new = q->elevator; ··· 628 627 elevator_exit(q); 629 628 mutex_unlock(&q->elevator_lock); 630 629 blk_mq_unfreeze_queue(q, memflags); 631 - if (e) 630 + if (e) { 631 + blk_mq_free_sched_tags(e->et, q->tag_set); 632 632 kobject_put(&e->kobj); 633 + } 633 634 } 634 635 635 636 static int elevator_change_done(struct request_queue *q, ··· 644 641 &ctx->old->flags); 645 642 646 643 elv_unregister_queue(q, ctx->old); 644 + blk_mq_free_sched_tags(ctx->old->et, q->tag_set); 647 645 kobject_put(&ctx->old->kobj); 648 646 if (enable_wbt) 649 647 wbt_enable_default(q->disk); ··· 663 659 static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx) 664 660 { 665 661 unsigned int memflags; 662 + struct blk_mq_tag_set *set = q->tag_set; 666 663 int ret = 0; 667 664 668 - lockdep_assert_held(&q->tag_set->update_nr_hwq_lock); 665 + lockdep_assert_held(&set->update_nr_hwq_lock); 666 + 667 + if (strncmp(ctx->name, "none", 4)) { 668 + ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues); 669 + if (!ctx->et) 670 + return -ENOMEM; 671 + } 669 672 670 673 memflags = blk_mq_freeze_queue(q); 671 674 /* ··· 692 681 blk_mq_unfreeze_queue(q, memflags); 693 682 if (!ret) 694 683 ret = elevator_change_done(q, ctx); 684 + /* 685 + * Free sched tags if it's allocated but we couldn't switch elevator. 686 + */ 687 + if (ctx->et && !ctx->new) 688 + blk_mq_free_sched_tags(ctx->et, set); 695 689 696 690 return ret; 697 691 } ··· 705 689 * The I/O scheduler depends on the number of hardware queues, this forces a 706 690 * reattachment when nr_hw_queues changes. 
707 691 */ 708 - void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e) 692 + void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e, 693 + struct elevator_tags *t) 709 694 { 695 + struct blk_mq_tag_set *set = q->tag_set; 710 696 struct elv_change_ctx ctx = {}; 711 697 int ret = -ENODEV; 712 698 ··· 716 698 717 699 if (e && !blk_queue_dying(q) && blk_queue_registered(q)) { 718 700 ctx.name = e->elevator_name; 701 + ctx.et = t; 719 702 720 703 mutex_lock(&q->elevator_lock); 721 704 /* force to reattach elevator after nr_hw_queue is updated */ ··· 726 707 blk_mq_unfreeze_queue_nomemrestore(q); 727 708 if (!ret) 728 709 WARN_ON_ONCE(elevator_change_done(q, &ctx)); 710 + /* 711 + * Free sched tags if it's allocated but we couldn't switch elevator. 712 + */ 713 + if (t && !ctx.new) 714 + blk_mq_free_sched_tags(t, set); 729 715 } 730 716 731 717 /*
+13 -3
block/elevator.h
··· 23 23 struct blk_mq_alloc_data; 24 24 struct blk_mq_hw_ctx; 25 25 26 + struct elevator_tags { 27 + /* num. of hardware queues for which tags are allocated */ 28 + unsigned int nr_hw_queues; 29 + /* depth used while allocating tags */ 30 + unsigned int nr_requests; 31 + /* shared tag is stored at index 0 */ 32 + struct blk_mq_tags *tags[]; 33 + }; 34 + 26 35 struct elevator_mq_ops { 27 - int (*init_sched)(struct request_queue *, struct elevator_type *); 36 + int (*init_sched)(struct request_queue *, struct elevator_queue *); 28 37 void (*exit_sched)(struct elevator_queue *); 29 38 int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); 30 39 void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); ··· 122 113 struct elevator_queue 123 114 { 124 115 struct elevator_type *type; 116 + struct elevator_tags *et; 125 117 void *elevator_data; 126 118 struct kobject kobj; 127 119 struct mutex sysfs_lock; ··· 162 152 ssize_t elv_iosched_store(struct gendisk *disk, const char *page, size_t count); 163 153 164 154 extern bool elv_bio_merge_ok(struct request *, struct bio *); 165 - extern struct elevator_queue *elevator_alloc(struct request_queue *, 166 - struct elevator_type *); 155 + struct elevator_queue *elevator_alloc(struct request_queue *, 156 + struct elevator_type *, struct elevator_tags *); 167 157 168 158 /* 169 159 * Helper functions.
+4 -16
block/kyber-iosched.c
··· 157 157 */ 158 158 struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS]; 159 159 160 - /* 161 - * Async request percentage, converted to per-word depth for 162 - * sbitmap_get_shallow(). 163 - */ 160 + /* Number of allowed async requests. */ 164 161 unsigned int async_depth; 165 162 166 163 struct kyber_cpu_latency __percpu *cpu_latency; ··· 399 402 return ERR_PTR(ret); 400 403 } 401 404 402 - static int kyber_init_sched(struct request_queue *q, struct elevator_type *e) 405 + static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq) 403 406 { 404 407 struct kyber_queue_data *kqd; 405 - struct elevator_queue *eq; 406 - 407 - eq = elevator_alloc(q, e); 408 - if (!eq) 409 - return -ENOMEM; 410 408 411 409 kqd = kyber_queue_data_alloc(q); 412 - if (IS_ERR(kqd)) { 413 - kobject_put(&eq->kobj); 410 + if (IS_ERR(kqd)) 414 411 return PTR_ERR(kqd); 415 - } 416 412 417 413 blk_stat_enable_accounting(q); 418 414 ··· 444 454 { 445 455 struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; 446 456 struct blk_mq_tags *tags = hctx->sched_tags; 447 - unsigned int shift = tags->bitmap_tags.sb.shift; 448 457 449 - kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U; 450 - 458 + kqd->async_depth = hctx->queue->nr_requests * KYBER_ASYNC_PERCENT / 100U; 451 459 sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth); 452 460 } 453 461
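The kyber hunk changes what async_depth means: previously a per-word depth derived from the sbitmap word size, now a plain request count taken from q->nr_requests, which suggests the shallow-depth interface now consumes request counts directly. A worked comparison under assumed values (256 requests, 64-bit sbitmap words, and kyber's 75% async share):

#include <stdio.h>

#define KYBER_ASYNC_PERCENT 75                  /* kyber's async share */

int main(void)
{
        unsigned int nr_requests = 256;         /* per-hctx sched depth */
        unsigned int sb_shift = 6;              /* 64-bit sbitmap words */

        /* old: a per-word depth derived from sbitmap geometry */
        unsigned int old_depth = (1U << sb_shift) * KYBER_ASYNC_PERCENT / 100U;

        /* new: a plain request count, independent of word size */
        unsigned int new_depth = nr_requests * KYBER_ASYNC_PERCENT / 100U;

        printf("per-word: %u, request count: %u\n", old_depth, new_depth);
        return 0;
}

This prints 48 versus 192: same 75% policy, but expressed in requests rather than bits per sbitmap word.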
+3 -27
block/mq-deadline.c
··· 488 488 } 489 489 490 490 /* 491 - * 'depth' is a number in the range 1..INT_MAX representing a number of 492 - * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since 493 - * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow(). 494 - * Values larger than q->nr_requests have the same effect as q->nr_requests. 495 - */ 496 - static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth) 497 - { 498 - struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags; 499 - const unsigned int nrr = hctx->queue->nr_requests; 500 - 501 - return ((qdepth << bt->sb.shift) + nrr - 1) / nrr; 502 - } 503 - 504 - /* 505 491 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this 506 492 * function is used by __blk_mq_get_tag(). 507 493 */ ··· 503 517 * Throttle asynchronous requests and writes such that these requests 504 518 * do not block the allocation of synchronous requests. 505 519 */ 506 - data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth); 520 + data->shallow_depth = dd->async_depth; 507 521 } 508 522 509 523 /* Called by blk_mq_update_nr_requests(). */ ··· 554 568 /* 555 569 * initialize elevator private data (deadline_data). 556 570 */ 557 - static int dd_init_sched(struct request_queue *q, struct elevator_type *e) 571 + static int dd_init_sched(struct request_queue *q, struct elevator_queue *eq) 558 572 { 559 573 struct deadline_data *dd; 560 - struct elevator_queue *eq; 561 574 enum dd_prio prio; 562 - int ret = -ENOMEM; 563 - 564 - eq = elevator_alloc(q, e); 565 - if (!eq) 566 - return ret; 567 575 568 576 dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node); 569 577 if (!dd) 570 - goto put_eq; 578 + return -ENOMEM; 571 579 572 580 eq->elevator_data = dd; 573 581 ··· 588 608 589 609 q->elevator = eq; 590 610 return 0; 591 - 592 - put_eq: 593 - kobject_put(&eq->kobj); 594 - return ret; 595 611 } 596 612 597 613 /*
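With the conversion helper gone, dd->async_depth is handed to data->shallow_depth unchanged. For reference, a runnable rendering of the deleted dd_to_word_depth() scaling, built directly from the removed formula:

#include <stdio.h>

/*
 * What the removed dd_to_word_depth() computed: scale a request count
 * in 1..nr_requests into the 1..(1 << sb_shift) range expected by
 * sbitmap_get_shallow(), rounding up so small nonzero depths never
 * collapse to zero.
 */
static unsigned int to_word_depth(unsigned int qdepth, unsigned int sb_shift,
                                  unsigned int nr_requests)
{
        return ((qdepth << sb_shift) + nr_requests - 1) / nr_requests;
}

int main(void)
{
        /* e.g. an async_depth of 192 out of 256 requests, 64-bit words */
        printf("word depth: %u\n", to_word_depth(192, 6, 256));
        return 0;
}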
+7 -16
drivers/accel/habanalabs/common/memory.c
··· 1829 1829 struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv; 1830 1830 struct hl_ctx *ctx; 1831 1831 1832 - if (!hl_dmabuf) 1833 - return; 1834 - 1835 1832 ctx = hl_dmabuf->ctx; 1836 1833 1837 1834 if (hl_dmabuf->memhash_hnode) ··· 1856 1859 { 1857 1860 DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 1858 1861 struct hl_device *hdev = ctx->hdev; 1859 - int rc, fd; 1862 + CLASS(get_unused_fd, fd)(flags); 1863 + 1864 + if (fd < 0) { 1865 + dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd); 1866 + return fd; 1867 + } 1860 1868 1861 1869 exp_info.ops = &habanalabs_dmabuf_ops; 1862 1870 exp_info.size = total_size; ··· 1872 1870 if (IS_ERR(hl_dmabuf->dmabuf)) { 1873 1871 dev_err(hdev->dev, "failed to export dma-buf\n"); 1874 1872 return PTR_ERR(hl_dmabuf->dmabuf); 1875 - } 1876 - 1877 - fd = dma_buf_fd(hl_dmabuf->dmabuf, flags); 1878 - if (fd < 0) { 1879 - dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd); 1880 - rc = fd; 1881 - goto err_dma_buf_put; 1882 1873 } 1883 1874 1884 1875 hl_dmabuf->ctx = ctx; ··· 1885 1890 get_file(ctx->hpriv->file_priv->filp); 1886 1891 1887 1892 *dmabuf_fd = fd; 1893 + fd_install(take_fd(fd), hl_dmabuf->dmabuf->file); 1888 1894 1889 1895 return 0; 1890 - 1891 - err_dma_buf_put: 1892 - hl_dmabuf->dmabuf->priv = NULL; 1893 - dma_buf_put(hl_dmabuf->dmabuf); 1894 - return rc; 1895 1896 } 1896 1897 1897 1898 static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset)
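The habanalabs rework reserves the file descriptor number up front (CLASS(get_unused_fd)) and only binds the dma-buf to it as the very last step (fd_install(take_fd(fd))), so no error path ever has to revoke a user-visible fd. A loose userspace analogue of that publish-last discipline, with dup2() standing in for fd_install(); paths and the analogy itself are illustrative, not the kernel mechanism.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* reserve an fd number first (analogue of CLASS(get_unused_fd)) */
        int reserved = open("/dev/null", O_RDONLY);
        if (reserved < 0)
                return 1;

        /* all fallible setup happens next, on a private object */
        int real = open("/proc/version", O_RDONLY);
        if (real < 0) {
                close(reserved);        /* nothing user-visible to revoke */
                return 1;
        }

        /* last step: publish the object at the reserved slot */
        if (dup2(real, reserved) < 0) {
                close(real);
                close(reserved);
                return 1;
        }
        close(real);

        char buf[64];
        ssize_t n = read(reserved, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                printf("%s", buf);
        }
        close(reserved);
        return 0;
}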
+21 -9
drivers/base/regmap/regmap-irq.c
··· 21 21 22 22 struct regmap_irq_chip_data { 23 23 struct mutex lock; 24 + struct lock_class_key lock_key; 24 25 struct irq_chip irq_chip; 25 26 26 27 struct regmap *map; ··· 802 801 goto err_alloc; 803 802 } 804 803 805 - mutex_init(&d->lock); 804 + /* 805 + * If one regmap-irq is the parent of another then we'll try 806 + * to lock the child with the parent locked, use an explicit 807 + * lock_key so lockdep can figure out what's going on. 808 + */ 809 + lockdep_register_key(&d->lock_key); 810 + mutex_init_with_key(&d->lock, &d->lock_key); 806 811 807 812 for (i = 0; i < chip->num_irqs; i++) 808 813 d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride] ··· 823 816 d->mask_buf[i], 824 817 chip->irq_drv_data); 825 818 if (ret) 826 - goto err_alloc; 819 + goto err_mutex; 827 820 } 828 821 829 822 if (chip->mask_base && !chip->handle_mask_sync) { ··· 834 827 if (ret) { 835 828 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", 836 829 reg, ret); 837 - goto err_alloc; 830 + goto err_mutex; 838 831 } 839 832 } 840 833 ··· 845 838 if (ret) { 846 839 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", 847 840 reg, ret); 848 - goto err_alloc; 841 + goto err_mutex; 849 842 } 850 843 } 851 844 ··· 862 855 if (ret != 0) { 863 856 dev_err(map->dev, "Failed to read IRQ status: %d\n", 864 857 ret); 865 - goto err_alloc; 858 + goto err_mutex; 866 859 } 867 860 } 868 861 ··· 886 879 if (ret != 0) { 887 880 dev_err(map->dev, "Failed to ack 0x%x: %d\n", 888 881 reg, ret); 889 - goto err_alloc; 882 + goto err_mutex; 890 883 } 891 884 } 892 885 } ··· 908 901 if (ret != 0) { 909 902 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", 910 903 reg, ret); 911 - goto err_alloc; 904 + goto err_mutex; 912 905 } 913 906 } 914 907 } ··· 917 910 if (chip->status_is_level) { 918 911 ret = read_irq_data(d); 919 912 if (ret < 0) 920 - goto err_alloc; 913 + goto err_mutex; 921 914 922 915 memcpy(d->prev_status_buf, d->status_buf, 923 916 array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0]))); ··· 925 918 926 919 ret = regmap_irq_create_domain(fwnode, irq_base, chip, d); 927 920 if (ret) 928 - goto err_alloc; 921 + goto err_mutex; 929 922 930 923 ret = request_threaded_irq(irq, NULL, regmap_irq_thread, 931 924 irq_flags | IRQF_ONESHOT, ··· 942 935 943 936 err_domain: 944 937 /* Should really dispose of the domain but... */ 938 + err_mutex: 939 + mutex_destroy(&d->lock); 940 + lockdep_unregister_key(&d->lock_key); 945 941 err_alloc: 946 942 kfree(d->type_buf); 947 943 kfree(d->type_buf_def); ··· 1037 1027 kfree(d->config_buf[i]); 1038 1028 kfree(d->config_buf); 1039 1029 } 1030 + mutex_destroy(&d->lock); 1031 + lockdep_unregister_key(&d->lock_key); 1040 1032 kfree(d); 1041 1033 } 1042 1034 EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
+1 -1
drivers/bcma/driver_gpio.c
··· 186 186 chip->request = bcma_gpio_request; 187 187 chip->free = bcma_gpio_free; 188 188 chip->get = bcma_gpio_get_value; 189 - chip->set_rv = bcma_gpio_set_value; 189 + chip->set = bcma_gpio_set_value; 190 190 chip->direction_input = bcma_gpio_direction_input; 191 191 chip->direction_output = bcma_gpio_direction_output; 192 192 chip->parent = bus->dev;
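This one-line rename — the transitional .set_rv/.set_multiple_rv hooks becoming the canonical .set/.set_multiple — repeats across the drivers/gpio hunks below. A userspace model of the resulting shape, where the setter reports errors through its return value; the struct and names are illustrative stand-ins, not the gpiolib API.

#include <stdio.h>

struct fake_chip {
        unsigned int ngpio;
        /* setters return int so drivers can report failure */
        int (*set)(struct fake_chip *chip, unsigned int offset, int value);
};

static int demo_set(struct fake_chip *chip, unsigned int offset, int value)
{
        if (offset >= chip->ngpio)
                return -22;             /* -EINVAL-style error */
        printf("gpio %u <- %d\n", offset, value);
        return 0;
}

int main(void)
{
        struct fake_chip chip = { .ngpio = 8, .set = demo_set };

        if (chip.set(&chip, 3, 1))
                fprintf(stderr, "set failed\n");
        if (chip.set(&chip, 42, 1))
                fprintf(stderr, "set failed as expected\n");
        return 0;
}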
+2 -1
drivers/block/zloop.c
··· 700 700 struct zloop_device *zlo = disk->private_data; 701 701 unsigned int i; 702 702 703 + blk_mq_free_tag_set(&zlo->tag_set); 704 + 703 705 for (i = 0; i < zlo->nr_zones; i++) { 704 706 struct zloop_zone *zone = &zlo->zones[i]; 705 707 ··· 1082 1080 1083 1081 del_gendisk(zlo->disk); 1084 1082 put_disk(zlo->disk); 1085 - blk_mq_free_tag_set(&zlo->tag_set); 1086 1083 1087 1084 pr_info("Removed device %d\n", opts->id); 1088 1085
+8
drivers/firmware/efi/Kconfig
··· 263 263 virt/coco/efi_secret module to access the secrets, which in turn 264 264 allows userspace programs to access the injected secrets. 265 265 266 + config OVMF_DEBUG_LOG 267 + bool "Expose OVMF firmware debug log via sysfs" 268 + depends on EFI 269 + help 270 + Recent OVMF versions (edk2-stable202508 + newer) can write 271 + their debug log to a memory buffer. This driver exposes the 272 + log content via sysfs (/sys/firmware/efi/ovmf_debug_log). 273 + 266 274 config UNACCEPTED_MEMORY 267 275 bool 268 276 depends on EFI_STUB
+1
drivers/firmware/efi/Makefile
··· 29 29 obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o 30 30 obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o 31 31 obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o 32 + obj-$(CONFIG_OVMF_DEBUG_LOG) += ovmf-debug-log.o 32 33 33 34 obj-$(CONFIG_SYSFB) += sysfb_efi.o 34 35
+8
drivers/firmware/efi/efi.c
··· 45 45 .esrt = EFI_INVALID_TABLE_ADDR, 46 46 .tpm_log = EFI_INVALID_TABLE_ADDR, 47 47 .tpm_final_log = EFI_INVALID_TABLE_ADDR, 48 + .ovmf_debug_log = EFI_INVALID_TABLE_ADDR, 48 49 #ifdef CONFIG_LOAD_UEFI_KEYS 49 50 .mokvar_table = EFI_INVALID_TABLE_ADDR, 50 51 #endif ··· 474 473 platform_device_register_simple("efi_secret", 0, NULL, 0); 475 474 #endif 476 475 476 + if (IS_ENABLED(CONFIG_OVMF_DEBUG_LOG) && 477 + efi.ovmf_debug_log != EFI_INVALID_TABLE_ADDR) 478 + ovmf_log_probe(efi.ovmf_debug_log); 479 + 477 480 return 0; 478 481 479 482 err_remove_group: ··· 622 617 {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" }, 623 618 {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" }, 624 619 {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" }, 620 + #ifdef CONFIG_OVMF_DEBUG_LOG 621 + {OVMF_MEMORY_LOG_TABLE_GUID, &efi.ovmf_debug_log, "OvmfDebugLog" }, 622 + #endif 625 623 #ifdef CONFIG_EFI_RCI2_TABLE 626 624 {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys }, 627 625 #endif
+2 -2
drivers/firmware/efi/libstub/printk.c
··· 5 5 #include <linux/ctype.h> 6 6 #include <linux/efi.h> 7 7 #include <linux/kernel.h> 8 - #include <linux/printk.h> /* For CONSOLE_LOGLEVEL_* */ 8 + #include <linux/kern_levels.h> 9 9 #include <asm/efi.h> 10 10 #include <asm/setup.h> 11 11 12 12 #include "efistub.h" 13 13 14 - int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT; 14 + int efi_loglevel = LOGLEVEL_NOTICE; 15 15 16 16 /** 17 17 * efi_char16_puts() - Write a UCS-2 encoded string to the console
+111
drivers/firmware/efi/ovmf-debug-log.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/efi.h> 4 + #include <linux/init.h> 5 + #include <linux/io.h> 6 + #include <linux/kernel.h> 7 + #include <linux/kobject.h> 8 + #include <linux/module.h> 9 + #include <linux/platform_device.h> 10 + #include <linux/sysfs.h> 11 + 12 + #define OVMF_DEBUG_LOG_MAGIC1 0x3167646d666d766f // "ovmfmdg1" 13 + #define OVMF_DEBUG_LOG_MAGIC2 0x3267646d666d766f // "ovmfmdg2" 14 + 15 + struct ovmf_debug_log_header { 16 + u64 magic1; 17 + u64 magic2; 18 + u64 hdr_size; 19 + u64 log_size; 20 + u64 lock; // edk2 spinlock 21 + u64 head_off; 22 + u64 tail_off; 23 + u64 truncated; 24 + u8 fw_version[128]; 25 + }; 26 + 27 + static struct ovmf_debug_log_header *hdr; 28 + static u8 *logbuf; 29 + static u64 logbufsize; 30 + 31 + static ssize_t ovmf_log_read(struct file *filp, struct kobject *kobj, 32 + const struct bin_attribute *attr, char *buf, 33 + loff_t offset, size_t count) 34 + { 35 + u64 start, end; 36 + 37 + start = hdr->head_off + offset; 38 + if (hdr->head_off > hdr->tail_off && start >= hdr->log_size) 39 + start -= hdr->log_size; 40 + 41 + end = start + count; 42 + if (start > hdr->tail_off) { 43 + if (end > hdr->log_size) 44 + end = hdr->log_size; 45 + } else { 46 + if (end > hdr->tail_off) 47 + end = hdr->tail_off; 48 + } 49 + 50 + if (start > logbufsize || end > logbufsize) 51 + return 0; 52 + if (start >= end) 53 + return 0; 54 + 55 + memcpy(buf, logbuf + start, end - start); 56 + return end - start; 57 + } 58 + 59 + static struct bin_attribute ovmf_log_bin_attr = { 60 + .attr = { 61 + .name = "ovmf_debug_log", 62 + .mode = 0444, 63 + }, 64 + .read = ovmf_log_read, 65 + }; 66 + 67 + int __init ovmf_log_probe(unsigned long ovmf_debug_log_table) 68 + { 69 + int ret = -EINVAL; 70 + u64 size; 71 + 72 + /* map + verify header */ 73 + hdr = memremap(ovmf_debug_log_table, sizeof(*hdr), MEMREMAP_WB); 74 + if (!hdr) { 75 + pr_err("OVMF debug log: header map failed\n"); 76 + return -EINVAL; 77 + } 78 + 79 + if (hdr->magic1 != OVMF_DEBUG_LOG_MAGIC1 || 80 + hdr->magic2 != OVMF_DEBUG_LOG_MAGIC2) { 81 + printk(KERN_ERR "OVMF debug log: magic mismatch\n"); 82 + goto err_unmap; 83 + } 84 + 85 + size = hdr->hdr_size + hdr->log_size; 86 + pr_info("OVMF debug log: firmware version: \"%s\"\n", hdr->fw_version); 87 + pr_info("OVMF debug log: buffer size: %lluk\n", size / 1024); 88 + 89 + /* map complete log buffer */ 90 + memunmap(hdr); 91 + hdr = memremap(ovmf_debug_log_table, size, MEMREMAP_WB); 92 + if (!hdr) { 93 + pr_err("OVMF debug log: buffer map failed\n"); 94 + return -EINVAL; 95 + } 96 + logbuf = (void *)hdr + hdr->hdr_size; 97 + logbufsize = hdr->log_size; 98 + 99 + ovmf_log_bin_attr.size = size; 100 + ret = sysfs_create_bin_file(efi_kobj, &ovmf_log_bin_attr); 101 + if (ret != 0) { 102 + pr_err("OVMF debug log: sysfs register failed\n"); 103 + goto err_unmap; 104 + } 105 + 106 + return 0; 107 + 108 + err_unmap: 109 + memunmap(hdr); 110 + return ret; 111 + }
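ovmf_log_read() above serves a circular buffer through a flat sysfs offset: data between head_off and the end of the buffer comes first, then the wrapped portion up to tail_off, at most one contiguous span per call, with the caller re-reading at a new offset. A self-contained userspace rendering of that logic, with an eight-byte toy buffer in place of the firmware mapping:

#include <stdio.h>
#include <string.h>

static long ring_read(const char *log, size_t log_size, size_t head,
                      size_t tail, size_t offset, char *out, size_t count)
{
        size_t start = head + offset;
        size_t end;

        if (head > tail && start >= log_size)
                start -= log_size;      /* offset landed in the wrapped region */

        end = start + count;
        if (start > tail) {
                if (end > log_size)
                        end = log_size; /* stop at buffer end, wrap next call */
        } else {
                if (end > tail)
                        end = tail;     /* stop at the newest byte */
        }

        if (start >= end || start > log_size || end > log_size)
                return 0;

        memcpy(out, log + start, end - start);
        return (long)(end - start);
}

int main(void)
{
        /* head=6, tail=2: valid data is "OVEF", wrapping past the end */
        char log[8] = { 'E', 'F', 'w', 'r', 'a', 'p', 'O', 'V' };
        char out[9] = { 0 };
        long n, total = 0;

        while ((n = ring_read(log, sizeof(log), 6, 2, (size_t)total,
                              out + total, sizeof(out) - 1 - total)) > 0)
                total += n;
        printf("recovered: %s\n", out);  /* two spans: "OV" then "EF" */
        return 0;
}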
+2 -2
drivers/gpio/gpio-74x164.c
··· 141 141 chip->gpio_chip.label = spi->modalias; 142 142 chip->gpio_chip.direction_output = gen_74x164_direction_output; 143 143 chip->gpio_chip.get = gen_74x164_get_value; 144 - chip->gpio_chip.set_rv = gen_74x164_set_value; 145 - chip->gpio_chip.set_multiple_rv = gen_74x164_set_multiple; 144 + chip->gpio_chip.set = gen_74x164_set_value; 145 + chip->gpio_chip.set_multiple = gen_74x164_set_multiple; 146 146 chip->gpio_chip.base = -1; 147 147 chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers; 148 148 chip->gpio_chip.can_sleep = true;
+1 -1
drivers/gpio/gpio-adnp.c
··· 430 430 chip->direction_input = adnp_gpio_direction_input; 431 431 chip->direction_output = adnp_gpio_direction_output; 432 432 chip->get = adnp_gpio_get; 433 - chip->set_rv = adnp_gpio_set; 433 + chip->set = adnp_gpio_set; 434 434 chip->can_sleep = true; 435 435 436 436 if (IS_ENABLED(CONFIG_DEBUG_FS))
+1 -1
drivers/gpio/gpio-adp5520.c
··· 122 122 gc->direction_input = adp5520_gpio_direction_input; 123 123 gc->direction_output = adp5520_gpio_direction_output; 124 124 gc->get = adp5520_gpio_get_value; 125 - gc->set_rv = adp5520_gpio_set_value; 125 + gc->set = adp5520_gpio_set_value; 126 126 gc->can_sleep = true; 127 127 128 128 gc->base = pdata->gpio_start;
+1 -1
drivers/gpio/gpio-adp5585.c
··· 428 428 gc->direction_input = adp5585_gpio_direction_input; 429 429 gc->direction_output = adp5585_gpio_direction_output; 430 430 gc->get = adp5585_gpio_get_value; 431 - gc->set_rv = adp5585_gpio_set_value; 431 + gc->set = adp5585_gpio_set_value; 432 432 gc->set_config = adp5585_gpio_set_config; 433 433 gc->request = adp5585_gpio_request; 434 434 gc->free = adp5585_gpio_free;
+2 -2
drivers/gpio/gpio-aggregator.c
··· 534 534 chip->direction_output = gpio_fwd_direction_output; 535 535 chip->get = gpio_fwd_get; 536 536 chip->get_multiple = gpio_fwd_get_multiple_locked; 537 - chip->set_rv = gpio_fwd_set; 538 - chip->set_multiple_rv = gpio_fwd_set_multiple_locked; 537 + chip->set = gpio_fwd_set; 538 + chip->set_multiple = gpio_fwd_set_multiple_locked; 539 539 chip->to_irq = gpio_fwd_to_irq; 540 540 chip->base = -1; 541 541 chip->ngpio = ngpios;
+1 -1
drivers/gpio/gpio-altera-a10sr.c
··· 69 69 .label = "altr_a10sr_gpio", 70 70 .owner = THIS_MODULE, 71 71 .get = altr_a10sr_gpio_get, 72 - .set_rv = altr_a10sr_gpio_set, 72 + .set = altr_a10sr_gpio_set, 73 73 .direction_input = altr_a10sr_gpio_direction_input, 74 74 .direction_output = altr_a10sr_gpio_direction_output, 75 75 .can_sleep = true,
+1 -1
drivers/gpio/gpio-altera.c
··· 259 259 altera_gc->gc.direction_input = altera_gpio_direction_input; 260 260 altera_gc->gc.direction_output = altera_gpio_direction_output; 261 261 altera_gc->gc.get = altera_gpio_get; 262 - altera_gc->gc.set_rv = altera_gpio_set; 262 + altera_gc->gc.set = altera_gpio_set; 263 263 altera_gc->gc.owner = THIS_MODULE; 264 264 altera_gc->gc.parent = &pdev->dev; 265 265 altera_gc->gc.base = -1;
+1 -1
drivers/gpio/gpio-amd-fch.c
··· 165 165 priv->gc.direction_output = amd_fch_gpio_direction_output; 166 166 priv->gc.get_direction = amd_fch_gpio_get_direction; 167 167 priv->gc.get = amd_fch_gpio_get; 168 - priv->gc.set_rv = amd_fch_gpio_set; 168 + priv->gc.set = amd_fch_gpio_set; 169 169 170 170 spin_lock_init(&priv->lock); 171 171
+1 -1
drivers/gpio/gpio-amd8111.c
··· 165 165 .ngpio = 32, 166 166 .request = amd_gpio_request, 167 167 .free = amd_gpio_free, 168 - .set_rv = amd_gpio_set, 168 + .set = amd_gpio_set, 169 169 .get = amd_gpio_get, 170 170 .direction_output = amd_gpio_dirout, 171 171 .direction_input = amd_gpio_dirin,
+1 -1
drivers/gpio/gpio-arizona.c
··· 138 138 .direction_input = arizona_gpio_direction_in, 139 139 .get = arizona_gpio_get, 140 140 .direction_output = arizona_gpio_direction_out, 141 - .set_rv = arizona_gpio_set, 141 + .set = arizona_gpio_set, 142 142 .can_sleep = true, 143 143 }; 144 144
+1 -1
drivers/gpio/gpio-aspeed-sgpio.c
··· 596 596 gpio->chip.request = NULL; 597 597 gpio->chip.free = NULL; 598 598 gpio->chip.get = aspeed_sgpio_get; 599 - gpio->chip.set_rv = aspeed_sgpio_set; 599 + gpio->chip.set = aspeed_sgpio_set; 600 600 gpio->chip.set_config = aspeed_sgpio_set_config; 601 601 gpio->chip.label = dev_name(&pdev->dev); 602 602 gpio->chip.base = -1;
+1 -1
drivers/gpio/gpio-aspeed.c
··· 1352 1352 gpio->chip.request = aspeed_gpio_request; 1353 1353 gpio->chip.free = aspeed_gpio_free; 1354 1354 gpio->chip.get = aspeed_gpio_get; 1355 - gpio->chip.set_rv = aspeed_gpio_set; 1355 + gpio->chip.set = aspeed_gpio_set; 1356 1356 gpio->chip.set_config = aspeed_gpio_set_config; 1357 1357 gpio->chip.label = dev_name(&pdev->dev); 1358 1358 gpio->chip.base = -1;
+1 -1
drivers/gpio/gpio-bcm-kona.c
··· 339 339 .direction_input = bcm_kona_gpio_direction_input, 340 340 .get = bcm_kona_gpio_get, 341 341 .direction_output = bcm_kona_gpio_direction_output, 342 - .set_rv = bcm_kona_gpio_set, 342 + .set = bcm_kona_gpio_set, 343 343 .set_config = bcm_kona_gpio_set_config, 344 344 .to_irq = bcm_kona_gpio_to_irq, 345 345 .base = 0,
+1 -1
drivers/gpio/gpio-bd71815.c
··· 85 85 .owner = THIS_MODULE, 86 86 .get = bd71815gpo_get, 87 87 .get_direction = bd71815gpo_direction_get, 88 - .set_rv = bd71815gpo_set, 88 + .set = bd71815gpo_set, 89 89 .set_config = bd71815_gpio_set_config, 90 90 .can_sleep = true, 91 91 };
+1 -1
drivers/gpio/gpio-bd71828.c
··· 109 109 bdgpio->gpio.set_config = bd71828_gpio_set_config; 110 110 bdgpio->gpio.can_sleep = true; 111 111 bdgpio->gpio.get = bd71828_gpio_get; 112 - bdgpio->gpio.set_rv = bd71828_gpio_set; 112 + bdgpio->gpio.set = bd71828_gpio_set; 113 113 bdgpio->gpio.base = -1; 114 114 115 115 /*
+1 -1
drivers/gpio/gpio-bd9571mwv.c
··· 88 88 .direction_input = bd9571mwv_gpio_direction_input, 89 89 .direction_output = bd9571mwv_gpio_direction_output, 90 90 .get = bd9571mwv_gpio_get, 91 - .set_rv = bd9571mwv_gpio_set, 91 + .set = bd9571mwv_gpio_set, 92 92 .base = -1, 93 93 .ngpio = 2, 94 94 .can_sleep = true,
+1 -1
drivers/gpio/gpio-bt8xx.c
··· 145 145 c->direction_input = bt8xxgpio_gpio_direction_input; 146 146 c->get = bt8xxgpio_gpio_get; 147 147 c->direction_output = bt8xxgpio_gpio_direction_output; 148 - c->set_rv = bt8xxgpio_gpio_set; 148 + c->set = bt8xxgpio_gpio_set; 149 149 c->dbg_show = NULL; 150 150 c->base = modparam_gpiobase; 151 151 c->ngpio = BT8XXGPIO_NR_GPIOS;
+1 -1
drivers/gpio/gpio-cgbc.c
··· 171 171 chip->direction_output = cgbc_gpio_direction_output; 172 172 chip->get_direction = cgbc_gpio_get_direction; 173 173 chip->get = cgbc_gpio_get; 174 - chip->set_rv = cgbc_gpio_set; 174 + chip->set = cgbc_gpio_set; 175 175 chip->ngpio = CGBC_GPIO_NGPIO; 176 176 177 177 ret = devm_mutex_init(dev, &gpio->lock);
+1 -1
drivers/gpio/gpio-creg-snps.c
··· 167 167 hcg->gc.label = dev_name(dev); 168 168 hcg->gc.base = -1; 169 169 hcg->gc.ngpio = ngpios; 170 - hcg->gc.set_rv = creg_gpio_set; 170 + hcg->gc.set = creg_gpio_set; 171 171 hcg->gc.direction_output = creg_gpio_dir_out; 172 172 173 173 ret = devm_gpiochip_add_data(dev, &hcg->gc, hcg);
+1 -1
drivers/gpio/gpio-cros-ec.c
··· 188 188 gc->can_sleep = true; 189 189 gc->label = dev_name(dev); 190 190 gc->base = -1; 191 - gc->set_rv = cros_ec_gpio_set; 191 + gc->set = cros_ec_gpio_set; 192 192 gc->get = cros_ec_gpio_get; 193 193 gc->get_direction = cros_ec_gpio_get_direction; 194 194
+1 -1
drivers/gpio/gpio-crystalcove.c
··· 349 349 cg->chip.direction_input = crystalcove_gpio_dir_in; 350 350 cg->chip.direction_output = crystalcove_gpio_dir_out; 351 351 cg->chip.get = crystalcove_gpio_get; 352 - cg->chip.set_rv = crystalcove_gpio_set; 352 + cg->chip.set = crystalcove_gpio_set; 353 353 cg->chip.base = -1; 354 354 cg->chip.ngpio = CRYSTALCOVE_VGPIO_NUM; 355 355 cg->chip.can_sleep = true;
+1 -1
drivers/gpio/gpio-cs5535.c
··· 296 296 .request = chip_gpio_request, 297 297 298 298 .get = chip_gpio_get, 299 - .set_rv = chip_gpio_set, 299 + .set = chip_gpio_set, 300 300 301 301 .direction_input = chip_direction_input, 302 302 .direction_output = chip_direction_output,
+1 -1
drivers/gpio/gpio-da9052.c
··· 172 172 .label = "da9052-gpio", 173 173 .owner = THIS_MODULE, 174 174 .get = da9052_gpio_get, 175 - .set_rv = da9052_gpio_set, 175 + .set = da9052_gpio_set, 176 176 .direction_input = da9052_gpio_direction_input, 177 177 .direction_output = da9052_gpio_direction_output, 178 178 .to_irq = da9052_gpio_to_irq,
+1 -1
drivers/gpio/gpio-da9055.c
··· 116 116 .label = "da9055-gpio", 117 117 .owner = THIS_MODULE, 118 118 .get = da9055_gpio_get, 119 - .set_rv = da9055_gpio_set, 119 + .set = da9055_gpio_set, 120 120 .direction_input = da9055_gpio_direction_input, 121 121 .direction_output = da9055_gpio_direction_output, 122 122 .to_irq = da9055_gpio_to_irq,
+1 -1
drivers/gpio/gpio-davinci.c
··· 202 202 chips->chip.direction_input = davinci_direction_in; 203 203 chips->chip.get = davinci_gpio_get; 204 204 chips->chip.direction_output = davinci_direction_out; 205 - chips->chip.set_rv = davinci_gpio_set; 205 + chips->chip.set = davinci_gpio_set; 206 206 207 207 chips->chip.ngpio = ngpio; 208 208 chips->chip.base = -1;
+1 -1
drivers/gpio/gpio-dln2.c
··· 469 469 dln2->gpio.base = -1; 470 470 dln2->gpio.ngpio = pins; 471 471 dln2->gpio.can_sleep = true; 472 - dln2->gpio.set_rv = dln2_gpio_set; 472 + dln2->gpio.set = dln2_gpio_set; 473 473 dln2->gpio.get = dln2_gpio_get; 474 474 dln2->gpio.request = dln2_gpio_request; 475 475 dln2->gpio.free = dln2_gpio_free;
+1 -1
drivers/gpio/gpio-eic-sprd.c
··· 663 663 sprd_eic->chip.request = sprd_eic_request; 664 664 sprd_eic->chip.free = sprd_eic_free; 665 665 sprd_eic->chip.set_config = sprd_eic_set_config; 666 - sprd_eic->chip.set_rv = sprd_eic_set; 666 + sprd_eic->chip.set = sprd_eic_set; 667 667 fallthrough; 668 668 case SPRD_EIC_ASYNC: 669 669 case SPRD_EIC_SYNC:
+1 -1
drivers/gpio/gpio-em.c
··· 306 306 gpio_chip->direction_input = em_gio_direction_input; 307 307 gpio_chip->get = em_gio_get; 308 308 gpio_chip->direction_output = em_gio_direction_output; 309 - gpio_chip->set_rv = em_gio_set; 309 + gpio_chip->set = em_gio_set; 310 310 gpio_chip->to_irq = em_gio_to_irq; 311 311 gpio_chip->request = pinctrl_gpio_request; 312 312 gpio_chip->free = em_gio_free;
+1 -1
drivers/gpio/gpio-exar.c
··· 211 211 exar_gpio->gpio_chip.direction_input = exar_direction_input; 212 212 exar_gpio->gpio_chip.get_direction = exar_get_direction; 213 213 exar_gpio->gpio_chip.get = exar_get_value; 214 - exar_gpio->gpio_chip.set_rv = exar_set_value; 214 + exar_gpio->gpio_chip.set = exar_set_value; 215 215 exar_gpio->gpio_chip.base = -1; 216 216 exar_gpio->gpio_chip.ngpio = ngpios; 217 217 exar_gpio->index = index;
+1 -1
drivers/gpio/gpio-f7188x.c
··· 173 173 .direction_input = f7188x_gpio_direction_in, \ 174 174 .get = f7188x_gpio_get, \ 175 175 .direction_output = f7188x_gpio_direction_out, \ 176 - .set_rv = f7188x_gpio_set, \ 176 + .set = f7188x_gpio_set, \ 177 177 .set_config = f7188x_gpio_set_config, \ 178 178 .base = -1, \ 179 179 .ngpio = _ngpio, \
+1 -1
drivers/gpio/gpio-graniterapids.c
··· 159 159 .owner = THIS_MODULE, 160 160 .request = gnr_gpio_request, 161 161 .get = gnr_gpio_get, 162 - .set_rv = gnr_gpio_set, 162 + .set = gnr_gpio_set, 163 163 .get_direction = gnr_gpio_get_direction, 164 164 .direction_input = gnr_gpio_direction_input, 165 165 .direction_output = gnr_gpio_direction_output,
+1 -1
drivers/gpio/gpio-gw-pld.c
··· 86 86 gw->chip.direction_input = gw_pld_input8; 87 87 gw->chip.get = gw_pld_get8; 88 88 gw->chip.direction_output = gw_pld_output8; 89 - gw->chip.set_rv = gw_pld_set8; 89 + gw->chip.set = gw_pld_set8; 90 90 gw->client = client; 91 91 92 92 /*
+1 -1
drivers/gpio/gpio-htc-egpio.c
··· 324 324 chip->parent = &pdev->dev; 325 325 chip->owner = THIS_MODULE; 326 326 chip->get = egpio_get; 327 - chip->set_rv = egpio_set; 327 + chip->set = egpio_set; 328 328 chip->direction_input = egpio_direction_input; 329 329 chip->direction_output = egpio_direction_output; 330 330 chip->get_direction = egpio_get_direction;
+1 -1
drivers/gpio/gpio-ich.c
··· 273 273 chip->get = ichx_priv.desc->get ? 274 274 ichx_priv.desc->get : ichx_gpio_get; 275 275 276 - chip->set_rv = ichx_gpio_set; 276 + chip->set = ichx_gpio_set; 277 277 chip->get_direction = ichx_gpio_get_direction; 278 278 chip->direction_input = ichx_gpio_direction_input; 279 279 chip->direction_output = ichx_gpio_direction_output;
+1 -1
drivers/gpio/gpio-imx-scu.c
··· 102 102 gc->ngpio = ARRAY_SIZE(scu_rsrc_arr); 103 103 gc->label = dev_name(dev); 104 104 gc->get = imx_scu_gpio_get; 105 - gc->set_rv = imx_scu_gpio_set; 105 + gc->set = imx_scu_gpio_set; 106 106 gc->get_direction = imx_scu_gpio_get_direction; 107 107 108 108 platform_set_drvdata(pdev, priv);
+1 -1
drivers/gpio/gpio-it87.c
··· 267 267 .request = it87_gpio_request, 268 268 .get = it87_gpio_get, 269 269 .direction_input = it87_gpio_direction_in, 270 - .set_rv = it87_gpio_set, 270 + .set = it87_gpio_set, 271 271 .direction_output = it87_gpio_direction_out, 272 272 .base = -1 273 273 };
+1 -1
drivers/gpio/gpio-janz-ttl.c
··· 171 171 gpio->parent = &pdev->dev; 172 172 gpio->label = pdev->name; 173 173 gpio->get = ttl_get_value; 174 - gpio->set_rv = ttl_set_value; 174 + gpio->set = ttl_set_value; 175 175 gpio->owner = THIS_MODULE; 176 176 177 177 /* request dynamic allocation */
+1 -1
drivers/gpio/gpio-kempld.c
··· 169 169 chip->direction_output = kempld_gpio_direction_output; 170 170 chip->get_direction = kempld_gpio_get_direction; 171 171 chip->get = kempld_gpio_get; 172 - chip->set_rv = kempld_gpio_set; 172 + chip->set = kempld_gpio_set; 173 173 chip->ngpio = kempld_gpio_pincount(pld); 174 174 if (chip->ngpio == 0) { 175 175 dev_err(dev, "No GPIO pins detected\n");
+2 -2
drivers/gpio/gpio-latch.c
··· 166 166 167 167 if (gpio_latch_can_sleep(priv, n_latches)) { 168 168 priv->gc.can_sleep = true; 169 - priv->gc.set_rv = gpio_latch_set_can_sleep; 169 + priv->gc.set = gpio_latch_set_can_sleep; 170 170 mutex_init(&priv->mutex); 171 171 } else { 172 172 priv->gc.can_sleep = false; 173 - priv->gc.set_rv = gpio_latch_set; 173 + priv->gc.set = gpio_latch_set; 174 174 spin_lock_init(&priv->spinlock); 175 175 } 176 176
+1 -1
drivers/gpio/gpio-ljca.c
··· 437 437 ljca_gpio->gc.direction_output = ljca_gpio_direction_output; 438 438 ljca_gpio->gc.get_direction = ljca_gpio_get_direction; 439 439 ljca_gpio->gc.get = ljca_gpio_get_value; 440 - ljca_gpio->gc.set_rv = ljca_gpio_set_value; 440 + ljca_gpio->gc.set = ljca_gpio_set_value; 441 441 ljca_gpio->gc.set_config = ljca_gpio_set_config; 442 442 ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask; 443 443 ljca_gpio->gc.can_sleep = true;
+1 -1
drivers/gpio/gpio-logicvc.c
··· 134 134 logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS + 135 135 LOGICVC_POWER_CTRL_GPIO_BITS; 136 136 logicvc->chip.get = logicvc_gpio_get; 137 - logicvc->chip.set_rv = logicvc_gpio_set; 137 + logicvc->chip.set = logicvc_gpio_set; 138 138 logicvc->chip.direction_output = logicvc_gpio_direction_output; 139 139 140 140 return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
+1 -1
drivers/gpio/gpio-loongson-64bit.c
··· 157 157 lgpio->chip.get = loongson_gpio_get; 158 158 lgpio->chip.get_direction = loongson_gpio_get_direction; 159 159 lgpio->chip.direction_output = loongson_gpio_direction_output; 160 - lgpio->chip.set_rv = loongson_gpio_set; 160 + lgpio->chip.set = loongson_gpio_set; 161 161 lgpio->chip.parent = dev; 162 162 spin_lock_init(&lgpio->lock); 163 163 }
+1 -1
drivers/gpio/gpio-loongson.c
··· 106 106 gc->base = 0; 107 107 gc->ngpio = LOONGSON_N_GPIO; 108 108 gc->get = loongson_gpio_get_value; 109 - gc->set_rv = loongson_gpio_set_value; 109 + gc->set = loongson_gpio_set_value; 110 110 gc->direction_input = loongson_gpio_direction_input; 111 111 gc->direction_output = loongson_gpio_direction_output; 112 112
+1 -1
drivers/gpio/gpio-lp3943.c
··· 184 184 .direction_input = lp3943_gpio_direction_input, 185 185 .get = lp3943_gpio_get, 186 186 .direction_output = lp3943_gpio_direction_output, 187 - .set_rv = lp3943_gpio_set, 187 + .set = lp3943_gpio_set, 188 188 .base = -1, 189 189 .ngpio = LP3943_MAX_GPIO, 190 190 .can_sleep = 1,
+1 -1
drivers/gpio/gpio-lp873x.c
··· 124 124 .direction_input = lp873x_gpio_direction_input, 125 125 .direction_output = lp873x_gpio_direction_output, 126 126 .get = lp873x_gpio_get, 127 - .set_rv = lp873x_gpio_set, 127 + .set = lp873x_gpio_set, 128 128 .set_config = lp873x_gpio_set_config, 129 129 .base = -1, 130 130 .ngpio = 2,
+1 -1
drivers/gpio/gpio-lp87565.c
··· 139 139 .direction_input = lp87565_gpio_direction_input, 140 140 .direction_output = lp87565_gpio_direction_output, 141 141 .get = lp87565_gpio_get, 142 - .set_rv = lp87565_gpio_set, 142 + .set = lp87565_gpio_set, 143 143 .set_config = lp87565_gpio_set_config, 144 144 .base = -1, 145 145 .ngpio = 3,
+1 -1
drivers/gpio/gpio-lpc18xx.c
··· 327 327 .free = gpiochip_generic_free, 328 328 .direction_input = lpc18xx_gpio_direction_input, 329 329 .direction_output = lpc18xx_gpio_direction_output, 330 - .set_rv = lpc18xx_gpio_set, 330 + .set = lpc18xx_gpio_set, 331 331 .get = lpc18xx_gpio_get, 332 332 .ngpio = LPC18XX_MAX_PORTS * LPC18XX_PINS_PER_PORT, 333 333 .owner = THIS_MODULE,
+5 -5
drivers/gpio/gpio-lpc32xx.c
··· 407 407 .direction_input = lpc32xx_gpio_dir_input_p012, 408 408 .get = lpc32xx_gpio_get_value_p012, 409 409 .direction_output = lpc32xx_gpio_dir_output_p012, 410 - .set_rv = lpc32xx_gpio_set_value_p012, 410 + .set = lpc32xx_gpio_set_value_p012, 411 411 .request = lpc32xx_gpio_request, 412 412 .to_irq = lpc32xx_gpio_to_irq_p01, 413 413 .base = LPC32XX_GPIO_P0_GRP, ··· 423 423 .direction_input = lpc32xx_gpio_dir_input_p012, 424 424 .get = lpc32xx_gpio_get_value_p012, 425 425 .direction_output = lpc32xx_gpio_dir_output_p012, 426 - .set_rv = lpc32xx_gpio_set_value_p012, 426 + .set = lpc32xx_gpio_set_value_p012, 427 427 .request = lpc32xx_gpio_request, 428 428 .to_irq = lpc32xx_gpio_to_irq_p01, 429 429 .base = LPC32XX_GPIO_P1_GRP, ··· 439 439 .direction_input = lpc32xx_gpio_dir_input_p012, 440 440 .get = lpc32xx_gpio_get_value_p012, 441 441 .direction_output = lpc32xx_gpio_dir_output_p012, 442 - .set_rv = lpc32xx_gpio_set_value_p012, 442 + .set = lpc32xx_gpio_set_value_p012, 443 443 .request = lpc32xx_gpio_request, 444 444 .base = LPC32XX_GPIO_P2_GRP, 445 445 .ngpio = LPC32XX_GPIO_P2_MAX, ··· 454 454 .direction_input = lpc32xx_gpio_dir_input_p3, 455 455 .get = lpc32xx_gpio_get_value_p3, 456 456 .direction_output = lpc32xx_gpio_dir_output_p3, 457 - .set_rv = lpc32xx_gpio_set_value_p3, 457 + .set = lpc32xx_gpio_set_value_p3, 458 458 .request = lpc32xx_gpio_request, 459 459 .to_irq = lpc32xx_gpio_to_irq_gpio_p3, 460 460 .base = LPC32XX_GPIO_P3_GRP, ··· 482 482 .chip = { 483 483 .label = "gpo_p3", 484 484 .direction_output = lpc32xx_gpio_dir_out_always, 485 - .set_rv = lpc32xx_gpo_set_value, 485 + .set = lpc32xx_gpo_set_value, 486 486 .get = lpc32xx_gpo_get_value, 487 487 .request = lpc32xx_gpio_request, 488 488 .base = LPC32XX_GPO_P3_GRP,
+1 -1
drivers/gpio/gpio-macsmc.c
··· 261 261 smcgp->gc.label = "macsmc-pmu-gpio"; 262 262 smcgp->gc.owner = THIS_MODULE; 263 263 smcgp->gc.get = macsmc_gpio_get; 264 - smcgp->gc.set_rv = macsmc_gpio_set; 264 + smcgp->gc.set = macsmc_gpio_set; 265 265 smcgp->gc.get_direction = macsmc_gpio_get_direction; 266 266 smcgp->gc.init_valid_mask = macsmc_gpio_init_valid_mask; 267 267 smcgp->gc.can_sleep = true;
+1 -1
drivers/gpio/gpio-madera.c
··· 109 109 .direction_input = madera_gpio_direction_in, 110 110 .get = madera_gpio_get, 111 111 .direction_output = madera_gpio_direction_out, 112 - .set_rv = madera_gpio_set, 112 + .set = madera_gpio_set, 113 113 .set_config = gpiochip_generic_config, 114 114 .can_sleep = true, 115 115 };
+1 -1
drivers/gpio/gpio-max730x.c
··· 188 188 ts->chip.direction_input = max7301_direction_input; 189 189 ts->chip.get = max7301_get; 190 190 ts->chip.direction_output = max7301_direction_output; 191 - ts->chip.set_rv = max7301_set; 191 + ts->chip.set = max7301_set; 192 192 193 193 ts->chip.ngpio = PIN_NUMBER; 194 194 ts->chip.can_sleep = true;
+2 -2
drivers/gpio/gpio-max732x.c
··· 585 585 gc->direction_input = max732x_gpio_direction_input; 586 586 if (chip->dir_output) { 587 587 gc->direction_output = max732x_gpio_direction_output; 588 - gc->set_rv = max732x_gpio_set_value; 589 - gc->set_multiple_rv = max732x_gpio_set_multiple; 588 + gc->set = max732x_gpio_set_value; 589 + gc->set_multiple = max732x_gpio_set_multiple; 590 590 } 591 591 gc->get = max732x_gpio_get_value; 592 592 gc->can_sleep = true;
+1 -1
drivers/gpio/gpio-max77620.c
··· 311 311 mgpio->gpio_chip.direction_input = max77620_gpio_dir_input; 312 312 mgpio->gpio_chip.get = max77620_gpio_get; 313 313 mgpio->gpio_chip.direction_output = max77620_gpio_dir_output; 314 - mgpio->gpio_chip.set_rv = max77620_gpio_set; 314 + mgpio->gpio_chip.set = max77620_gpio_set; 315 315 mgpio->gpio_chip.set_config = max77620_gpio_set_config; 316 316 mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR; 317 317 mgpio->gpio_chip.can_sleep = 1;
+1 -1
drivers/gpio/gpio-max77650.c
··· 166 166 167 167 chip->gc.direction_input = max77650_gpio_direction_input; 168 168 chip->gc.direction_output = max77650_gpio_direction_output; 169 - chip->gc.set_rv = max77650_gpio_set_value; 169 + chip->gc.set = max77650_gpio_set_value; 170 170 chip->gc.get = max77650_gpio_get_value; 171 171 chip->gc.get_direction = max77650_gpio_get_direction; 172 172 chip->gc.set_config = max77650_gpio_set_config;
+1 -1
drivers/gpio/gpio-max77759.c
··· 469 469 chip->gc.direction_input = max77759_gpio_direction_input; 470 470 chip->gc.direction_output = max77759_gpio_direction_output; 471 471 chip->gc.get = max77759_gpio_get_value; 472 - chip->gc.set_rv = max77759_gpio_set_value; 472 + chip->gc.set = max77759_gpio_set_value; 473 473 474 474 girq = &chip->gc.irq; 475 475 gpio_irq_chip_set_chip(girq, &max77759_gpio_irq_chip);
+1 -1
drivers/gpio/gpio-mb86s7x.c
··· 180 180 gchip->gc.request = mb86s70_gpio_request; 181 181 gchip->gc.free = mb86s70_gpio_free; 182 182 gchip->gc.get = mb86s70_gpio_get; 183 - gchip->gc.set_rv = mb86s70_gpio_set; 183 + gchip->gc.set = mb86s70_gpio_set; 184 184 gchip->gc.to_irq = mb86s70_gpio_to_irq; 185 185 gchip->gc.label = dev_name(&pdev->dev); 186 186 gchip->gc.ngpio = 32;
+1 -1
drivers/gpio/gpio-mc33880.c
··· 103 103 mc->spi = spi; 104 104 105 105 mc->chip.label = DRIVER_NAME; 106 - mc->chip.set_rv = mc33880_set; 106 + mc->chip.set = mc33880_set; 107 107 mc->chip.base = pdata->base; 108 108 mc->chip.ngpio = PIN_NUMBER; 109 109 mc->chip.can_sleep = true;
+1 -1
drivers/gpio/gpio-ml-ioh.c
··· 224 224 gpio->direction_input = ioh_gpio_direction_input; 225 225 gpio->get = ioh_gpio_get; 226 226 gpio->direction_output = ioh_gpio_direction_output; 227 - gpio->set_rv = ioh_gpio_set; 227 + gpio->set = ioh_gpio_set; 228 228 gpio->dbg_show = NULL; 229 229 gpio->base = -1; 230 230 gpio->ngpio = num_port;
+1 -1
drivers/gpio/gpio-mlxbf2.c
··· 397 397 gc->ngpio = npins; 398 398 gc->owner = THIS_MODULE; 399 399 400 - irq = platform_get_irq(pdev, 0); 400 + irq = platform_get_irq_optional(pdev, 0); 401 401 if (irq >= 0) { 402 402 girq = &gs->gc.irq; 403 403 gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip);
+1 -1
drivers/gpio/gpio-mm-lantiq.c
··· 111 111 112 112 chip->mmchip.gc.ngpio = 16; 113 113 chip->mmchip.gc.direction_output = ltq_mm_dir_out; 114 - chip->mmchip.gc.set_rv = ltq_mm_set; 114 + chip->mmchip.gc.set = ltq_mm_set; 115 115 chip->mmchip.save_regs = ltq_mm_save_regs; 116 116 117 117 /* store the shadow value if one was passed by the devicetree */
+12 -12
drivers/gpio/gpio-mmio.c
··· 367 367 static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio, 368 368 int val) 369 369 { 370 - gc->set_rv(gc, gpio, val); 370 + gc->set(gc, gpio, val); 371 371 372 372 return bgpio_dir_return(gc, gpio, true); 373 373 } ··· 432 432 int val) 433 433 { 434 434 bgpio_dir_out(gc, gpio, val); 435 - gc->set_rv(gc, gpio, val); 435 + gc->set(gc, gpio, val); 436 436 return bgpio_dir_return(gc, gpio, true); 437 437 } 438 438 439 439 static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio, 440 440 int val) 441 441 { 442 - gc->set_rv(gc, gpio, val); 442 + gc->set(gc, gpio, val); 443 443 bgpio_dir_out(gc, gpio, val); 444 444 return bgpio_dir_return(gc, gpio, true); 445 445 } ··· 528 528 if (set && clr) { 529 529 gc->reg_set = set; 530 530 gc->reg_clr = clr; 531 - gc->set_rv = bgpio_set_with_clear; 532 - gc->set_multiple_rv = bgpio_set_multiple_with_clear; 531 + gc->set = bgpio_set_with_clear; 532 + gc->set_multiple = bgpio_set_multiple_with_clear; 533 533 } else if (set && !clr) { 534 534 gc->reg_set = set; 535 - gc->set_rv = bgpio_set_set; 536 - gc->set_multiple_rv = bgpio_set_multiple_set; 535 + gc->set = bgpio_set_set; 536 + gc->set_multiple = bgpio_set_multiple_set; 537 537 } else if (flags & BGPIOF_NO_OUTPUT) { 538 - gc->set_rv = bgpio_set_none; 539 - gc->set_multiple_rv = NULL; 538 + gc->set = bgpio_set_none; 539 + gc->set_multiple = NULL; 540 540 } else { 541 - gc->set_rv = bgpio_set; 542 - gc->set_multiple_rv = bgpio_set_multiple; 541 + gc->set = bgpio_set; 542 + gc->set_multiple = bgpio_set_multiple; 543 543 } 544 544 545 545 if (!(flags & BGPIOF_UNREADABLE_REG_SET) && ··· 676 676 } 677 677 678 678 gc->bgpio_data = gc->read_reg(gc->reg_dat); 679 - if (gc->set_rv == bgpio_set_set && 679 + if (gc->set == bgpio_set_set && 680 680 !(flags & BGPIOF_UNREADABLE_REG_SET)) 681 681 gc->bgpio_data = gc->read_reg(gc->reg_set); 682 682
+2 -2
drivers/gpio/gpio-mockup.c
··· 449 449 gc->owner = THIS_MODULE; 450 450 gc->parent = dev; 451 451 gc->get = gpio_mockup_get; 452 - gc->set_rv = gpio_mockup_set; 452 + gc->set = gpio_mockup_set; 453 453 gc->get_multiple = gpio_mockup_get_multiple; 454 - gc->set_multiple_rv = gpio_mockup_set_multiple; 454 + gc->set_multiple = gpio_mockup_set_multiple; 455 455 gc->direction_output = gpio_mockup_dirout; 456 456 gc->direction_input = gpio_mockup_dirin; 457 457 gc->get_direction = gpio_mockup_get_direction;
+1 -1
drivers/gpio/gpio-moxtet.c
··· 140 140 chip->gpio_chip.direction_input = moxtet_gpio_direction_input; 141 141 chip->gpio_chip.direction_output = moxtet_gpio_direction_output; 142 142 chip->gpio_chip.get = moxtet_gpio_get_value; 143 - chip->gpio_chip.set_rv = moxtet_gpio_set_value; 143 + chip->gpio_chip.set = moxtet_gpio_set_value; 144 144 chip->gpio_chip.base = -1; 145 145 146 146 chip->gpio_chip.ngpio = MOXTET_GPIO_NGPIOS;
+2 -2
drivers/gpio/gpio-mpc5200.c
··· 153 153 gc->direction_input = mpc52xx_wkup_gpio_dir_in; 154 154 gc->direction_output = mpc52xx_wkup_gpio_dir_out; 155 155 gc->get = mpc52xx_wkup_gpio_get; 156 - gc->set_rv = mpc52xx_wkup_gpio_set; 156 + gc->set = mpc52xx_wkup_gpio_set; 157 157 158 158 ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip); 159 159 if (ret) ··· 315 315 gc->direction_input = mpc52xx_simple_gpio_dir_in; 316 316 gc->direction_output = mpc52xx_simple_gpio_dir_out; 317 317 gc->get = mpc52xx_simple_gpio_get; 318 - gc->set_rv = mpc52xx_simple_gpio_set; 318 + gc->set = mpc52xx_simple_gpio_set; 319 319 320 320 ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip); 321 321 if (ret)
+1 -1
drivers/gpio/gpio-mpfs.c
··· 150 150 mpfs_gpio->gc.direction_output = mpfs_gpio_direction_output; 151 151 mpfs_gpio->gc.get_direction = mpfs_gpio_get_direction; 152 152 mpfs_gpio->gc.get = mpfs_gpio_get; 153 - mpfs_gpio->gc.set_rv = mpfs_gpio_set; 153 + mpfs_gpio->gc.set = mpfs_gpio_set; 154 154 mpfs_gpio->gc.base = -1; 155 155 mpfs_gpio->gc.ngpio = ngpios; 156 156 mpfs_gpio->gc.label = dev_name(dev);
+2 -2
drivers/gpio/gpio-mpsse.c
··· 448 448 priv->gpio.direction_input = gpio_mpsse_direction_input; 449 449 priv->gpio.direction_output = gpio_mpsse_direction_output; 450 450 priv->gpio.get = gpio_mpsse_gpio_get; 451 - priv->gpio.set_rv = gpio_mpsse_gpio_set; 451 + priv->gpio.set = gpio_mpsse_gpio_set; 452 452 priv->gpio.get_multiple = gpio_mpsse_get_multiple; 453 - priv->gpio.set_multiple_rv = gpio_mpsse_set_multiple; 453 + priv->gpio.set_multiple = gpio_mpsse_set_multiple; 454 454 priv->gpio.base = -1; 455 455 priv->gpio.ngpio = 16; 456 456 priv->gpio.offset = priv->intf_id * priv->gpio.ngpio;
+1 -1
drivers/gpio/gpio-msc313.c
··· 658 658 gpiochip->direction_input = msc313_gpio_direction_input; 659 659 gpiochip->direction_output = msc313_gpio_direction_output; 660 660 gpiochip->get = msc313_gpio_get; 661 - gpiochip->set_rv = msc313_gpio_set; 661 + gpiochip->set = msc313_gpio_set; 662 662 gpiochip->base = -1; 663 663 gpiochip->ngpio = gpio->gpio_data->num; 664 664 gpiochip->names = gpio->gpio_data->names;
+1 -1
drivers/gpio/gpio-mvebu.c
··· 1168 1168 mvchip->chip.direction_input = mvebu_gpio_direction_input; 1169 1169 mvchip->chip.get = mvebu_gpio_get; 1170 1170 mvchip->chip.direction_output = mvebu_gpio_direction_output; 1171 - mvchip->chip.set_rv = mvebu_gpio_set; 1171 + mvchip->chip.set = mvebu_gpio_set; 1172 1172 if (have_irqs) 1173 1173 mvchip->chip.to_irq = mvebu_gpio_to_irq; 1174 1174 mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
+1 -1
drivers/gpio/gpio-nomadik.c
··· 674 674 chip->direction_input = nmk_gpio_make_input; 675 675 chip->get = nmk_gpio_get_input; 676 676 chip->direction_output = nmk_gpio_make_output; 677 - chip->set_rv = nmk_gpio_set_output; 677 + chip->set = nmk_gpio_set_output; 678 678 chip->dbg_show = nmk_gpio_dbg_show; 679 679 chip->can_sleep = false; 680 680 chip->owner = THIS_MODULE;
+2 -2
drivers/gpio/gpio-npcm-sgpio.c
··· 211 211 212 212 static int npcm_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val) 213 213 { 214 - return gc->set_rv(gc, offset, val); 214 + return gc->set(gc, offset, val); 215 215 } 216 216 217 217 static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset) ··· 546 546 gpio->chip.direction_output = npcm_sgpio_dir_out; 547 547 gpio->chip.get_direction = npcm_sgpio_get_direction; 548 548 gpio->chip.get = npcm_sgpio_get; 549 - gpio->chip.set_rv = npcm_sgpio_set; 549 + gpio->chip.set = npcm_sgpio_set; 550 550 gpio->chip.label = dev_name(&pdev->dev); 551 551 gpio->chip.base = -1; 552 552
+1 -1
drivers/gpio/gpio-octeon.c
··· 108 108 chip->direction_input = octeon_gpio_dir_in; 109 109 chip->get = octeon_gpio_get; 110 110 chip->direction_output = octeon_gpio_dir_out; 111 - chip->set_rv = octeon_gpio_set; 111 + chip->set = octeon_gpio_set; 112 112 err = devm_gpiochip_add_data(&pdev->dev, chip, gpio); 113 113 if (err) 114 114 return err;
+2 -2
drivers/gpio/gpio-omap.c
··· 1046 1046 bank->chip.get_multiple = omap_gpio_get_multiple; 1047 1047 bank->chip.direction_output = omap_gpio_output; 1048 1048 bank->chip.set_config = omap_gpio_set_config; 1049 - bank->chip.set_rv = omap_gpio_set; 1050 - bank->chip.set_multiple_rv = omap_gpio_set_multiple; 1049 + bank->chip.set = omap_gpio_set; 1050 + bank->chip.set_multiple = omap_gpio_set_multiple; 1051 1051 if (bank->is_mpuio) { 1052 1052 bank->chip.label = "mpuio"; 1053 1053 if (bank->regs->wkup_en)
+1 -1
drivers/gpio/gpio-palmas.c
··· 166 166 palmas_gpio->gpio_chip.direction_input = palmas_gpio_input; 167 167 palmas_gpio->gpio_chip.direction_output = palmas_gpio_output; 168 168 palmas_gpio->gpio_chip.to_irq = palmas_gpio_to_irq; 169 - palmas_gpio->gpio_chip.set_rv = palmas_gpio_set; 169 + palmas_gpio->gpio_chip.set = palmas_gpio_set; 170 170 palmas_gpio->gpio_chip.get = palmas_gpio_get; 171 171 palmas_gpio->gpio_chip.parent = &pdev->dev; 172 172
+2 -2
drivers/gpio/gpio-pca953x.c
··· 789 789 gc->direction_input = pca953x_gpio_direction_input; 790 790 gc->direction_output = pca953x_gpio_direction_output; 791 791 gc->get = pca953x_gpio_get_value; 792 - gc->set_rv = pca953x_gpio_set_value; 792 + gc->set = pca953x_gpio_set_value; 793 793 gc->get_direction = pca953x_gpio_get_direction; 794 794 gc->get_multiple = pca953x_gpio_get_multiple; 795 - gc->set_multiple_rv = pca953x_gpio_set_multiple; 795 + gc->set_multiple = pca953x_gpio_set_multiple; 796 796 gc->set_config = pca953x_gpio_set_config; 797 797 gc->can_sleep = true; 798 798
+1 -1
drivers/gpio/gpio-pca9570.c
··· 126 126 gpio->chip.owner = THIS_MODULE; 127 127 gpio->chip.get_direction = pca9570_get_direction; 128 128 gpio->chip.get = pca9570_get; 129 - gpio->chip.set_rv = pca9570_set; 129 + gpio->chip.set = pca9570_set; 130 130 gpio->chip.base = -1; 131 131 gpio->chip_data = device_get_match_data(&client->dev); 132 132 gpio->chip.ngpio = gpio->chip_data->ngpio;
+2 -2
drivers/gpio/gpio-pcf857x.c
··· 295 295 gpio->chip.owner = THIS_MODULE; 296 296 gpio->chip.get = pcf857x_get; 297 297 gpio->chip.get_multiple = pcf857x_get_multiple; 298 - gpio->chip.set_rv = pcf857x_set; 299 - gpio->chip.set_multiple_rv = pcf857x_set_multiple; 298 + gpio->chip.set = pcf857x_set; 299 + gpio->chip.set_multiple = pcf857x_set_multiple; 300 300 gpio->chip.direction_input = pcf857x_input; 301 301 gpio->chip.direction_output = pcf857x_output; 302 302 gpio->chip.ngpio = (uintptr_t)i2c_get_match_data(client);
+1 -1
drivers/gpio/gpio-pch.c
··· 219 219 gpio->direction_input = pch_gpio_direction_input; 220 220 gpio->get = pch_gpio_get; 221 221 gpio->direction_output = pch_gpio_direction_output; 222 - gpio->set_rv = pch_gpio_set; 222 + gpio->set = pch_gpio_set; 223 223 gpio->base = -1; 224 224 gpio->ngpio = gpio_pins[chip->ioh]; 225 225 gpio->can_sleep = false;
+1 -1
drivers/gpio/gpio-pl061.c
··· 330 330 pl061->gc.direction_input = pl061_direction_input; 331 331 pl061->gc.direction_output = pl061_direction_output; 332 332 pl061->gc.get = pl061_get_value; 333 - pl061->gc.set_rv = pl061_set_value; 333 + pl061->gc.set = pl061_set_value; 334 334 pl061->gc.ngpio = PL061_GPIO_NR; 335 335 pl061->gc.label = dev_name(dev); 336 336 pl061->gc.parent = dev;
+2 -8
drivers/gpio/gpio-pxa.c
··· 355 355 pchip->chip.direction_input = pxa_gpio_direction_input; 356 356 pchip->chip.direction_output = pxa_gpio_direction_output; 357 357 pchip->chip.get = pxa_gpio_get; 358 - pchip->chip.set_rv = pxa_gpio_set; 358 + pchip->chip.set = pxa_gpio_set; 359 359 pchip->chip.to_irq = pxa_gpio_to_irq; 360 360 pchip->chip.ngpio = ngpio; 361 361 pchip->chip.request = gpiochip_generic_request; ··· 499 499 gfer = readl_relaxed(base + GFER_OFFSET) & ~GPIO_bit(gpio); 500 500 writel_relaxed(grer, base + GRER_OFFSET); 501 501 writel_relaxed(gfer, base + GFER_OFFSET); 502 - 503 - gpiochip_disable_irq(&pchip->chip, gpio); 504 502 } 505 503 506 504 static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on) ··· 518 520 unsigned int gpio = irqd_to_hwirq(d); 519 521 struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio); 520 522 521 - gpiochip_enable_irq(&pchip->chip, gpio); 522 - 523 523 c->irq_mask |= GPIO_bit(gpio); 524 524 update_edge_detect(c); 525 525 } 526 526 527 - static const struct irq_chip pxa_muxed_gpio_chip = { 527 + static struct irq_chip pxa_muxed_gpio_chip = { 528 528 .name = "GPIO", 529 529 .irq_ack = pxa_ack_muxed_gpio, 530 530 .irq_mask = pxa_mask_muxed_gpio, 531 531 .irq_unmask = pxa_unmask_muxed_gpio, 532 532 .irq_set_type = pxa_gpio_irq_type, 533 533 .irq_set_wake = pxa_gpio_set_wake, 534 - .flags = IRQCHIP_IMMUTABLE, 535 - GPIOCHIP_IRQ_RESOURCE_HELPERS, 536 534 }; 537 535 538 536 static int pxa_gpio_nums(struct platform_device *pdev)
+1 -1
drivers/gpio/gpio-raspberrypi-exp.c
··· 232 232 rpi_gpio->gc.direction_output = rpi_exp_gpio_dir_out; 233 233 rpi_gpio->gc.get_direction = rpi_exp_gpio_get_direction; 234 234 rpi_gpio->gc.get = rpi_exp_gpio_get; 235 - rpi_gpio->gc.set_rv = rpi_exp_gpio_set; 235 + rpi_gpio->gc.set = rpi_exp_gpio_set; 236 236 rpi_gpio->gc.can_sleep = true; 237 237 238 238 return devm_gpiochip_add_data(dev, &rpi_gpio->gc, rpi_gpio);
+1 -1
drivers/gpio/gpio-rc5t583.c
··· 118 118 rc5t583_gpio->gpio_chip.free = rc5t583_gpio_free, 119 119 rc5t583_gpio->gpio_chip.direction_input = rc5t583_gpio_dir_input, 120 120 rc5t583_gpio->gpio_chip.direction_output = rc5t583_gpio_dir_output, 121 - rc5t583_gpio->gpio_chip.set_rv = rc5t583_gpio_set, 121 + rc5t583_gpio->gpio_chip.set = rc5t583_gpio_set, 122 122 rc5t583_gpio->gpio_chip.get = rc5t583_gpio_get, 123 123 rc5t583_gpio->gpio_chip.to_irq = rc5t583_gpio_to_irq, 124 124 rc5t583_gpio->gpio_chip.ngpio = RC5T583_MAX_GPIO,
+2 -2
drivers/gpio/gpio-rcar.c
··· 535 535 gpio_chip->get = gpio_rcar_get; 536 536 gpio_chip->get_multiple = gpio_rcar_get_multiple; 537 537 gpio_chip->direction_output = gpio_rcar_direction_output; 538 - gpio_chip->set_rv = gpio_rcar_set; 539 - gpio_chip->set_multiple_rv = gpio_rcar_set_multiple; 538 + gpio_chip->set = gpio_rcar_set; 539 + gpio_chip->set_multiple = gpio_rcar_set_multiple; 540 540 gpio_chip->label = name; 541 541 gpio_chip->parent = dev; 542 542 gpio_chip->owner = THIS_MODULE;
+1 -1
drivers/gpio/gpio-rdc321x.c
··· 159 159 rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; 160 160 rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; 161 161 rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; 162 - rdc321x_gpio_dev->chip.set_rv = rdc_gpio_set_value; 162 + rdc321x_gpio_dev->chip.set = rdc_gpio_set_value; 163 163 rdc321x_gpio_dev->chip.base = 0; 164 164 rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios; 165 165
+3 -3
drivers/gpio/gpio-reg.c
··· 46 46 if (r->direction & BIT(offset)) 47 47 return -ENOTSUPP; 48 48 49 - gc->set_rv(gc, offset, value); 49 + gc->set(gc, offset, value); 50 50 return 0; 51 51 } 52 52 ··· 161 161 r->gc.get_direction = gpio_reg_get_direction; 162 162 r->gc.direction_input = gpio_reg_direction_input; 163 163 r->gc.direction_output = gpio_reg_direction_output; 164 - r->gc.set_rv = gpio_reg_set; 164 + r->gc.set = gpio_reg_set; 165 165 r->gc.get = gpio_reg_get; 166 - r->gc.set_multiple_rv = gpio_reg_set_multiple; 166 + r->gc.set_multiple = gpio_reg_set_multiple; 167 167 if (irqs) 168 168 r->gc.to_irq = gpio_reg_to_irq; 169 169 r->gc.base = base;
+2 -2
drivers/gpio/gpio-regmap.c
··· 260 260 chip->free = gpiochip_generic_free; 261 261 chip->get = gpio_regmap_get; 262 262 if (gpio->reg_set_base && gpio->reg_clr_base) 263 - chip->set_rv = gpio_regmap_set_with_clear; 263 + chip->set = gpio_regmap_set_with_clear; 264 264 else if (gpio->reg_set_base) 265 - chip->set_rv = gpio_regmap_set; 265 + chip->set = gpio_regmap_set; 266 266 267 267 chip->get_direction = gpio_regmap_get_direction; 268 268 if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
+1 -1
drivers/gpio/gpio-rockchip.c
··· 327 327 static const struct gpio_chip rockchip_gpiolib_chip = { 328 328 .request = gpiochip_generic_request, 329 329 .free = gpiochip_generic_free, 330 - .set_rv = rockchip_gpio_set, 330 + .set = rockchip_gpio_set, 331 331 .get = rockchip_gpio_get, 332 332 .get_direction = rockchip_gpio_get_direction, 333 333 .direction_input = rockchip_gpio_direction_input,
+1 -1
drivers/gpio/gpio-rtd.c
··· 565 565 data->gpio_chip.get_direction = rtd_gpio_get_direction; 566 566 data->gpio_chip.direction_input = rtd_gpio_direction_input; 567 567 data->gpio_chip.direction_output = rtd_gpio_direction_output; 568 - data->gpio_chip.set_rv = rtd_gpio_set; 568 + data->gpio_chip.set = rtd_gpio_set; 569 569 data->gpio_chip.get = rtd_gpio_get; 570 570 data->gpio_chip.set_config = rtd_gpio_set_config; 571 571 data->gpio_chip.parent = dev;
+1 -1
drivers/gpio/gpio-sa1100.c
··· 99 99 .get_direction = sa1100_get_direction, 100 100 .direction_input = sa1100_direction_input, 101 101 .direction_output = sa1100_direction_output, 102 - .set_rv = sa1100_gpio_set, 102 + .set = sa1100_gpio_set, 103 103 .get = sa1100_gpio_get, 104 104 .to_irq = sa1100_to_irq, 105 105 .base = 0,
+1 -1
drivers/gpio/gpio-sama5d2-piobu.c
··· 196 196 piobu->chip.direction_input = sama5d2_piobu_direction_input; 197 197 piobu->chip.direction_output = sama5d2_piobu_direction_output; 198 198 piobu->chip.get = sama5d2_piobu_get; 199 - piobu->chip.set_rv = sama5d2_piobu_set; 199 + piobu->chip.set = sama5d2_piobu_set; 200 200 piobu->chip.base = -1; 201 201 piobu->chip.ngpio = PIOBU_NUM; 202 202 piobu->chip.can_sleep = 0;
+1 -1
drivers/gpio/gpio-sch.c
··· 167 167 .direction_input = sch_gpio_direction_in, 168 168 .get = sch_gpio_get, 169 169 .direction_output = sch_gpio_direction_out, 170 - .set_rv = sch_gpio_set, 170 + .set = sch_gpio_set, 171 171 .get_direction = sch_gpio_get_direction, 172 172 }; 173 173
+1 -1
drivers/gpio/gpio-sch311x.c
··· 297 297 block->chip.get_direction = sch311x_gpio_get_direction; 298 298 block->chip.set_config = sch311x_gpio_set_config; 299 299 block->chip.get = sch311x_gpio_get; 300 - block->chip.set_rv = sch311x_gpio_set; 300 + block->chip.set = sch311x_gpio_set; 301 301 block->chip.ngpio = 8; 302 302 block->chip.parent = &pdev->dev; 303 303 block->chip.base = sch311x_gpio_blocks[i].base;
+2 -2
drivers/gpio/gpio-sim.c
··· 486 486 gc->parent = dev; 487 487 gc->fwnode = swnode; 488 488 gc->get = gpio_sim_get; 489 - gc->set_rv = gpio_sim_set; 489 + gc->set = gpio_sim_set; 490 490 gc->get_multiple = gpio_sim_get_multiple; 491 - gc->set_multiple_rv = gpio_sim_set_multiple; 491 + gc->set_multiple = gpio_sim_set_multiple; 492 492 gc->direction_output = gpio_sim_direction_output; 493 493 gc->direction_input = gpio_sim_direction_input; 494 494 gc->get_direction = gpio_sim_get_direction;
+1 -1
drivers/gpio/gpio-siox.c
··· 237 237 gc->parent = dev; 238 238 gc->owner = THIS_MODULE; 239 239 gc->get = gpio_siox_get; 240 - gc->set_rv = gpio_siox_set; 240 + gc->set = gpio_siox_set; 241 241 gc->direction_input = gpio_siox_direction_input; 242 242 gc->direction_output = gpio_siox_direction_output; 243 243 gc->get_direction = gpio_siox_get_direction;
+1 -1
drivers/gpio/gpio-spear-spics.c
··· 140 140 spics->chip.request = spics_request; 141 141 spics->chip.free = spics_free; 142 142 spics->chip.direction_output = spics_direction_output; 143 - spics->chip.set_rv = spics_set_value; 143 + spics->chip.set = spics_set_value; 144 144 spics->chip.label = dev_name(&pdev->dev); 145 145 spics->chip.parent = &pdev->dev; 146 146 spics->chip.owner = THIS_MODULE;
+1 -1
drivers/gpio/gpio-sprd.c
··· 245 245 sprd_gpio->chip.request = sprd_gpio_request; 246 246 sprd_gpio->chip.free = sprd_gpio_free; 247 247 sprd_gpio->chip.get = sprd_gpio_get; 248 - sprd_gpio->chip.set_rv = sprd_gpio_set; 248 + sprd_gpio->chip.set = sprd_gpio_set; 249 249 sprd_gpio->chip.direction_input = sprd_gpio_direction_input; 250 250 sprd_gpio->chip.direction_output = sprd_gpio_direction_output; 251 251
+1 -1
drivers/gpio/gpio-stmpe.c
··· 136 136 .direction_input = stmpe_gpio_direction_input, 137 137 .get = stmpe_gpio_get, 138 138 .direction_output = stmpe_gpio_direction_output, 139 - .set_rv = stmpe_gpio_set, 139 + .set = stmpe_gpio_set, 140 140 .request = stmpe_gpio_request, 141 141 .can_sleep = true, 142 142 };
+1 -1
drivers/gpio/gpio-stp-xway.c
··· 249 249 chip->gc.label = "stp-xway"; 250 250 chip->gc.direction_output = xway_stp_dir_out; 251 251 chip->gc.get = xway_stp_get; 252 - chip->gc.set_rv = xway_stp_set; 252 + chip->gc.set = xway_stp_set; 253 253 chip->gc.request = xway_stp_request; 254 254 chip->gc.base = -1; 255 255 chip->gc.owner = THIS_MODULE;
+2 -2
drivers/gpio/gpio-syscon.c
··· 115 115 BIT(offs % SYSCON_REG_BITS)); 116 116 } 117 117 118 - return chip->set_rv(chip, offset, val); 118 + return chip->set(chip, offset, val); 119 119 } 120 120 121 121 static const struct syscon_gpio_data clps711x_mctrl_gpio = { ··· 251 251 if (priv->data->flags & GPIO_SYSCON_FEAT_IN) 252 252 priv->chip.direction_input = syscon_gpio_dir_in; 253 253 if (priv->data->flags & GPIO_SYSCON_FEAT_OUT) { 254 - priv->chip.set_rv = priv->data->set ? : syscon_gpio_set; 254 + priv->chip.set = priv->data->set ? : syscon_gpio_set; 255 255 priv->chip.direction_output = syscon_gpio_dir_out; 256 256 } 257 257
+1 -1
drivers/gpio/gpio-tangier.c
··· 430 430 gpio->chip.direction_input = tng_gpio_direction_input; 431 431 gpio->chip.direction_output = tng_gpio_direction_output; 432 432 gpio->chip.get = tng_gpio_get; 433 - gpio->chip.set_rv = tng_gpio_set; 433 + gpio->chip.set = tng_gpio_set; 434 434 gpio->chip.get_direction = tng_gpio_get_direction; 435 435 gpio->chip.set_config = tng_gpio_set_config; 436 436 gpio->chip.base = info->base;
+1 -1
drivers/gpio/gpio-tc3589x.c
··· 149 149 .label = "tc3589x", 150 150 .owner = THIS_MODULE, 151 151 .get = tc3589x_gpio_get, 152 - .set_rv = tc3589x_gpio_set, 152 + .set = tc3589x_gpio_set, 153 153 .direction_output = tc3589x_gpio_direction_output, 154 154 .direction_input = tc3589x_gpio_direction_input, 155 155 .get_direction = tc3589x_gpio_get_direction,
+1 -1
drivers/gpio/gpio-tegra.c
··· 720 720 tgi->gc.direction_input = tegra_gpio_direction_input; 721 721 tgi->gc.get = tegra_gpio_get; 722 722 tgi->gc.direction_output = tegra_gpio_direction_output; 723 - tgi->gc.set_rv = tegra_gpio_set; 723 + tgi->gc.set = tegra_gpio_set; 724 724 tgi->gc.get_direction = tegra_gpio_get_direction; 725 725 tgi->gc.base = 0; 726 726 tgi->gc.ngpio = tgi->bank_count * 32;
+1 -1
drivers/gpio/gpio-tegra186.c
··· 891 891 gpio->gpio.direction_input = tegra186_gpio_direction_input; 892 892 gpio->gpio.direction_output = tegra186_gpio_direction_output; 893 893 gpio->gpio.get = tegra186_gpio_get; 894 - gpio->gpio.set_rv = tegra186_gpio_set; 894 + gpio->gpio.set = tegra186_gpio_set; 895 895 gpio->gpio.set_config = tegra186_gpio_set_config; 896 896 gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges; 897 897 gpio->gpio.init_valid_mask = tegra186_init_valid_mask;
+2 -2
drivers/gpio/gpio-thunderx.c
··· 533 533 chip->direction_input = thunderx_gpio_dir_in; 534 534 chip->get = thunderx_gpio_get; 535 535 chip->direction_output = thunderx_gpio_dir_out; 536 - chip->set_rv = thunderx_gpio_set; 537 - chip->set_multiple_rv = thunderx_gpio_set_multiple; 536 + chip->set = thunderx_gpio_set; 537 + chip->set_multiple = thunderx_gpio_set_multiple; 538 538 chip->set_config = thunderx_gpio_set_config; 539 539 girq = &chip->irq; 540 540 gpio_irq_chip_set_chip(girq, &thunderx_gpio_irq_chip);
+1 -1
drivers/gpio/gpio-timberdale.c
··· 253 253 gc->direction_input = timbgpio_gpio_direction_input; 254 254 gc->get = timbgpio_gpio_get; 255 255 gc->direction_output = timbgpio_gpio_direction_output; 256 - gc->set_rv = timbgpio_gpio_set; 256 + gc->set = timbgpio_gpio_set; 257 257 gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL; 258 258 gc->dbg_show = NULL; 259 259 gc->base = pdata->gpio_base;
+2 -2
drivers/gpio/gpio-tpic2810.c
··· 80 80 .owner = THIS_MODULE, 81 81 .get_direction = tpic2810_get_direction, 82 82 .direction_output = tpic2810_direction_output, 83 - .set_rv = tpic2810_set, 84 - .set_multiple_rv = tpic2810_set_multiple, 83 + .set = tpic2810_set, 84 + .set_multiple = tpic2810_set_multiple, 85 85 .base = -1, 86 86 .ngpio = 8, 87 87 .can_sleep = true,
+1 -1
drivers/gpio/gpio-tps65086.c
··· 69 69 .direction_input = tps65086_gpio_direction_input, 70 70 .direction_output = tps65086_gpio_direction_output, 71 71 .get = tps65086_gpio_get, 72 - .set_rv = tps65086_gpio_set, 72 + .set = tps65086_gpio_set, 73 73 .base = -1, 74 74 .ngpio = 4, 75 75 .can_sleep = true,
+1 -1
drivers/gpio/gpio-tps65218.c
··· 169 169 .request = tps65218_gpio_request, 170 170 .direction_output = tps65218_gpio_output, 171 171 .get = tps65218_gpio_get, 172 - .set_rv = tps65218_gpio_set, 172 + .set = tps65218_gpio_set, 173 173 .set_config = tps65218_gpio_set_config, 174 174 .can_sleep = true, 175 175 .ngpio = 3,
+2 -2
drivers/gpio/gpio-tps65219.c
··· 203 203 .direction_input = tps65219_gpio_direction_input, 204 204 .direction_output = tps65219_gpio_direction_output, 205 205 .get = tps65219_gpio_get, 206 - .set_rv = tps65219_gpio_set, 206 + .set = tps65219_gpio_set, 207 207 .base = -1, 208 208 .ngpio = 2, 209 209 .can_sleep = true, ··· 216 216 .direction_input = tps65219_gpio_direction_input, 217 217 .direction_output = tps65219_gpio_direction_output, 218 218 .get = tps65219_gpio_get, 219 - .set_rv = tps65219_gpio_set, 219 + .set = tps65219_gpio_set, 220 220 .base = -1, 221 221 .ngpio = 3, 222 222 .can_sleep = true,
+1 -1
drivers/gpio/gpio-tps6586x.c
··· 98 98 99 99 /* FIXME: add handling of GPIOs as dedicated inputs */ 100 100 tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output; 101 - tps6586x_gpio->gpio_chip.set_rv = tps6586x_gpio_set; 101 + tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set; 102 102 tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get; 103 103 tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq; 104 104
+1 -1
drivers/gpio/gpio-tps65910.c
··· 139 139 tps65910_gpio->gpio_chip.can_sleep = true; 140 140 tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input; 141 141 tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output; 142 - tps65910_gpio->gpio_chip.set_rv = tps65910_gpio_set; 142 + tps65910_gpio->gpio_chip.set = tps65910_gpio_set; 143 143 tps65910_gpio->gpio_chip.get = tps65910_gpio_get; 144 144 tps65910_gpio->gpio_chip.parent = &pdev->dev; 145 145
+1 -1
drivers/gpio/gpio-tps65912.c
··· 92 92 .direction_input = tps65912_gpio_direction_input, 93 93 .direction_output = tps65912_gpio_direction_output, 94 94 .get = tps65912_gpio_get, 95 - .set_rv = tps65912_gpio_set, 95 + .set = tps65912_gpio_set, 96 96 .base = -1, 97 97 .ngpio = 5, 98 98 .can_sleep = true,
+1 -1
drivers/gpio/gpio-tps68470.c
··· 142 142 tps68470_gpio->gc.direction_output = tps68470_gpio_output; 143 143 tps68470_gpio->gc.get = tps68470_gpio_get; 144 144 tps68470_gpio->gc.get_direction = tps68470_gpio_get_direction; 145 - tps68470_gpio->gc.set_rv = tps68470_gpio_set; 145 + tps68470_gpio->gc.set = tps68470_gpio_set; 146 146 tps68470_gpio->gc.can_sleep = true; 147 147 tps68470_gpio->gc.names = tps68470_names; 148 148 tps68470_gpio->gc.ngpio = TPS68470_N_GPIO;
+1 -1
drivers/gpio/gpio-tqmx86.c
··· 370 370 chip->direction_output = tqmx86_gpio_direction_output; 371 371 chip->get_direction = tqmx86_gpio_get_direction; 372 372 chip->get = tqmx86_gpio_get; 373 - chip->set_rv = tqmx86_gpio_set; 373 + chip->set = tqmx86_gpio_set; 374 374 chip->ngpio = TQMX86_NGPIO; 375 375 chip->parent = pdev->dev.parent; 376 376
+1 -1
drivers/gpio/gpio-ts4900.c
··· 119 119 .direction_input = ts4900_gpio_direction_input, 120 120 .direction_output = ts4900_gpio_direction_output, 121 121 .get = ts4900_gpio_get, 122 - .set_rv = ts4900_gpio_set, 122 + .set = ts4900_gpio_set, 123 123 .base = -1, 124 124 .can_sleep = true, 125 125 };
+1 -1
drivers/gpio/gpio-ts5500.c
··· 340 340 priv->gpio_chip.direction_input = ts5500_gpio_input; 341 341 priv->gpio_chip.direction_output = ts5500_gpio_output; 342 342 priv->gpio_chip.get = ts5500_gpio_get; 343 - priv->gpio_chip.set_rv = ts5500_gpio_set; 343 + priv->gpio_chip.set = ts5500_gpio_set; 344 344 priv->gpio_chip.to_irq = ts5500_gpio_to_irq; 345 345 priv->gpio_chip.base = -1; 346 346
+1 -1
drivers/gpio/gpio-twl4030.c
··· 419 419 .direction_output = twl_direction_out, 420 420 .get_direction = twl_get_direction, 421 421 .get = twl_get, 422 - .set_rv = twl_set, 422 + .set = twl_set, 423 423 .to_irq = twl_to_irq, 424 424 .can_sleep = true, 425 425 };
+1 -1
drivers/gpio/gpio-twl6040.c
··· 69 69 .get = twl6040gpo_get, 70 70 .direction_output = twl6040gpo_direction_out, 71 71 .get_direction = twl6040gpo_get_direction, 72 - .set_rv = twl6040gpo_set, 72 + .set = twl6040gpo_set, 73 73 .can_sleep = true, 74 74 }; 75 75
+2 -2
drivers/gpio/gpio-uniphier.c
··· 386 386 chip->direction_input = uniphier_gpio_direction_input; 387 387 chip->direction_output = uniphier_gpio_direction_output; 388 388 chip->get = uniphier_gpio_get; 389 - chip->set_rv = uniphier_gpio_set; 390 - chip->set_multiple_rv = uniphier_gpio_set_multiple; 389 + chip->set = uniphier_gpio_set; 390 + chip->set_multiple = uniphier_gpio_set_multiple; 391 391 chip->to_irq = uniphier_gpio_to_irq; 392 392 chip->base = -1; 393 393 chip->ngpio = ngpios;
+2 -2
drivers/gpio/gpio-viperboard.c
··· 408 408 vb_gpio->gpioa.base = -1; 409 409 vb_gpio->gpioa.ngpio = 16; 410 410 vb_gpio->gpioa.can_sleep = true; 411 - vb_gpio->gpioa.set_rv = vprbrd_gpioa_set; 411 + vb_gpio->gpioa.set = vprbrd_gpioa_set; 412 412 vb_gpio->gpioa.get = vprbrd_gpioa_get; 413 413 vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input; 414 414 vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output; ··· 424 424 vb_gpio->gpiob.base = -1; 425 425 vb_gpio->gpiob.ngpio = 16; 426 426 vb_gpio->gpiob.can_sleep = true; 427 - vb_gpio->gpiob.set_rv = vprbrd_gpiob_set; 427 + vb_gpio->gpiob.set = vprbrd_gpiob_set; 428 428 vb_gpio->gpiob.get = vprbrd_gpiob_get; 429 429 vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input; 430 430 vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
+1 -1
drivers/gpio/gpio-virtio.c
··· 567 567 vgpio->gc.direction_input = virtio_gpio_direction_input; 568 568 vgpio->gc.direction_output = virtio_gpio_direction_output; 569 569 vgpio->gc.get = virtio_gpio_get; 570 - vgpio->gc.set_rv = virtio_gpio_set; 570 + vgpio->gc.set = virtio_gpio_set; 571 571 vgpio->gc.ngpio = ngpio; 572 572 vgpio->gc.base = -1; /* Allocate base dynamically */ 573 573 vgpio->gc.label = dev_name(dev);
+1 -1
drivers/gpio/gpio-vx855.c
··· 216 216 c->direction_input = vx855gpio_direction_input; 217 217 c->direction_output = vx855gpio_direction_output; 218 218 c->get = vx855gpio_get; 219 - c->set_rv = vx855gpio_set; 219 + c->set = vx855gpio_set; 220 220 c->set_config = vx855gpio_set_config; 221 221 c->dbg_show = NULL; 222 222 c->base = 0;
+1 -1
drivers/gpio/gpio-wcd934x.c
··· 98 98 chip->direction_output = wcd_gpio_direction_output; 99 99 chip->get_direction = wcd_gpio_get_direction; 100 100 chip->get = wcd_gpio_get; 101 - chip->set_rv = wcd_gpio_set; 101 + chip->set = wcd_gpio_set; 102 102 chip->parent = dev; 103 103 chip->base = -1; 104 104 chip->ngpio = WCD934X_NPINS;
+1 -1
drivers/gpio/gpio-wcove.c
··· 439 439 wg->chip.direction_output = wcove_gpio_dir_out; 440 440 wg->chip.get_direction = wcove_gpio_get_direction; 441 441 wg->chip.get = wcove_gpio_get; 442 - wg->chip.set_rv = wcove_gpio_set; 442 + wg->chip.set = wcove_gpio_set; 443 443 wg->chip.set_config = wcove_gpio_set_config; 444 444 wg->chip.base = -1; 445 445 wg->chip.ngpio = WCOVE_VGPIO_NUM;
+1 -1
drivers/gpio/gpio-winbond.c
··· 494 494 .can_sleep = true, 495 495 .get = winbond_gpio_get, 496 496 .direction_input = winbond_gpio_direction_in, 497 - .set_rv = winbond_gpio_set, 497 + .set = winbond_gpio_set, 498 498 .direction_output = winbond_gpio_direction_out, 499 499 }; 500 500
+1 -1
drivers/gpio/gpio-wm831x.c
··· 253 253 .direction_input = wm831x_gpio_direction_in, 254 254 .get = wm831x_gpio_get, 255 255 .direction_output = wm831x_gpio_direction_out, 256 - .set_rv = wm831x_gpio_set, 256 + .set = wm831x_gpio_set, 257 257 .to_irq = wm831x_gpio_to_irq, 258 258 .set_config = wm831x_set_config, 259 259 .dbg_show = wm831x_gpio_dbg_show,
+1 -1
drivers/gpio/gpio-wm8350.c
··· 93 93 .direction_input = wm8350_gpio_direction_in, 94 94 .get = wm8350_gpio_get, 95 95 .direction_output = wm8350_gpio_direction_out, 96 - .set_rv = wm8350_gpio_set, 96 + .set = wm8350_gpio_set, 97 97 .to_irq = wm8350_gpio_to_irq, 98 98 .can_sleep = true, 99 99 };
+1 -1
drivers/gpio/gpio-wm8994.c
··· 256 256 .direction_input = wm8994_gpio_direction_in, 257 257 .get = wm8994_gpio_get, 258 258 .direction_output = wm8994_gpio_direction_out, 259 - .set_rv = wm8994_gpio_set, 259 + .set = wm8994_gpio_set, 260 260 .set_config = wm8994_gpio_set_config, 261 261 .to_irq = wm8994_gpio_to_irq, 262 262 .dbg_show = wm8994_gpio_dbg_show,
+1 -1
drivers/gpio/gpio-xgene.c
··· 178 178 gpio->chip.direction_input = xgene_gpio_dir_in; 179 179 gpio->chip.direction_output = xgene_gpio_dir_out; 180 180 gpio->chip.get = xgene_gpio_get; 181 - gpio->chip.set_rv = xgene_gpio_set; 181 + gpio->chip.set = xgene_gpio_set; 182 182 gpio->chip.label = dev_name(&pdev->dev); 183 183 gpio->chip.base = -1; 184 184
+2 -2
drivers/gpio/gpio-xilinx.c
··· 604 604 chip->gc.direction_input = xgpio_dir_in; 605 605 chip->gc.direction_output = xgpio_dir_out; 606 606 chip->gc.get = xgpio_get; 607 - chip->gc.set_rv = xgpio_set; 607 + chip->gc.set = xgpio_set; 608 608 chip->gc.request = xgpio_request; 609 609 chip->gc.free = xgpio_free; 610 - chip->gc.set_multiple_rv = xgpio_set_multiple; 610 + chip->gc.set_multiple = xgpio_set_multiple; 611 611 612 612 chip->gc.label = dev_name(dev); 613 613
+1 -1
drivers/gpio/gpio-xlp.c
··· 274 274 gc->ngpio = 70; 275 275 gc->direction_output = xlp_gpio_dir_output; 276 276 gc->direction_input = xlp_gpio_dir_input; 277 - gc->set_rv = xlp_gpio_set; 277 + gc->set = xlp_gpio_set; 278 278 gc->get = xlp_gpio_get; 279 279 280 280 spin_lock_init(&priv->lock);
+1 -1
drivers/gpio/gpio-xra1403.c
··· 164 164 xra->chip.direction_output = xra1403_direction_output; 165 165 xra->chip.get_direction = xra1403_get_direction; 166 166 xra->chip.get = xra1403_get; 167 - xra->chip.set_rv = xra1403_set; 167 + xra->chip.set = xra1403_set; 168 168 169 169 xra->chip.dbg_show = xra1403_dbg_show; 170 170
+1 -1
drivers/gpio/gpio-xtensa.c
··· 132 132 .ngpio = 32, 133 133 .get_direction = xtensa_expstate_get_direction, 134 134 .get = xtensa_expstate_get_value, 135 - .set_rv = xtensa_expstate_set_value, 135 + .set = xtensa_expstate_set_value, 136 136 }; 137 137 138 138 static int xtensa_gpio_probe(struct platform_device *pdev)
+1 -1
drivers/gpio/gpio-zevio.c
··· 161 161 static const struct gpio_chip zevio_gpio_chip = { 162 162 .direction_input = zevio_gpio_direction_input, 163 163 .direction_output = zevio_gpio_direction_output, 164 - .set_rv = zevio_gpio_set, 164 + .set = zevio_gpio_set, 165 165 .get = zevio_gpio_get, 166 166 .to_irq = zevio_gpio_to_irq, 167 167 .base = 0,
+1 -1
drivers/gpio/gpio-zynq.c
··· 932 932 chip->owner = THIS_MODULE; 933 933 chip->parent = &pdev->dev; 934 934 chip->get = zynq_gpio_get_value; 935 - chip->set_rv = zynq_gpio_set_value; 935 + chip->set = zynq_gpio_set_value; 936 936 chip->request = zynq_gpio_request; 937 937 chip->free = zynq_gpio_free; 938 938 chip->direction_input = zynq_gpio_dir_in;
+1 -1
drivers/gpio/gpio-zynqmp-modepin.c
··· 130 130 chip->owner = THIS_MODULE; 131 131 chip->parent = &pdev->dev; 132 132 chip->get = modepin_gpio_get_value; 133 - chip->set_rv = modepin_gpio_set_value; 133 + chip->set = modepin_gpio_set_value; 134 134 chip->direction_input = modepin_gpio_dir_in; 135 135 chip->direction_output = modepin_gpio_dir_out; 136 136 chip->label = dev_name(&pdev->dev);
+8 -23
drivers/gpio/gpiolib.c
··· 1037 1037 int base = 0; 1038 1038 int ret; 1039 1039 1040 - /* Only allow one set() and one set_multiple(). */ 1041 - if ((gc->set && gc->set_rv) || 1042 - (gc->set_multiple && gc->set_multiple_rv)) 1043 - return -EINVAL; 1044 - 1045 1040 /* 1046 1041 * First: allocate and populate the internal stat container, and 1047 1042 * set up the struct device. ··· 2886 2891 2887 2892 lockdep_assert_held(&gc->gpiodev->srcu); 2888 2893 2889 - if (WARN_ON(unlikely(!gc->set && !gc->set_rv))) 2894 + if (WARN_ON(unlikely(!gc->set))) 2890 2895 return -EOPNOTSUPP; 2891 2896 2892 - if (gc->set_rv) { 2893 - ret = gc->set_rv(gc, offset, value); 2894 - if (ret > 0) 2895 - ret = -EBADE; 2897 + ret = gc->set(gc, offset, value); 2898 + if (ret > 0) 2899 + ret = -EBADE; 2896 2900 2897 - return ret; 2898 - } 2899 - 2900 - gc->set(gc, offset, value); 2901 - return 0; 2901 + return ret; 2902 2902 } 2903 2903 2904 2904 static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value) ··· 2909 2919 * output-only, but if there is then not even a .set() operation it 2910 2920 * is pretty tricky to drive the output line. 2911 2921 */ 2912 - if (!guard.gc->set && !guard.gc->set_rv && !guard.gc->direction_output) { 2922 + if (!guard.gc->set && !guard.gc->direction_output) { 2913 2923 gpiod_warn(desc, 2914 2924 "%s: missing set() and direction_output() operations\n", 2915 2925 __func__); ··· 3655 3665 3656 3666 lockdep_assert_held(&gc->gpiodev->srcu); 3657 3667 3658 - if (gc->set_multiple_rv) { 3659 - ret = gc->set_multiple_rv(gc, mask, bits); 3668 + if (gc->set_multiple) { 3669 + ret = gc->set_multiple(gc, mask, bits); 3660 3670 if (ret > 0) 3661 3671 ret = -EBADE; 3662 3672 3663 3673 return ret; 3664 - } 3665 - 3666 - if (gc->set_multiple) { 3667 - gc->set_multiple(gc, mask, bits); 3668 - return 0; 3669 3674 } 3670 3675 3671 3676 /* set outputs if the corresponding mask bit is set */
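With the gpiolib change above, the int-returning setters staged as set_rv()/set_multiple_rv() become the only set()/set_multiple() callbacks, which is what the tree-wide renames in the driver hunks above converge on: drivers keep their int-returning implementations and simply assign them to .set/.set_multiple. A minimal sketch of the resulting callback shape for a regmap-backed driver (the foo_* names and FOO_OUT_REG register are hypothetical, not taken from any driver in this series):

	/* Return 0 or a negative errno; gpiolib maps positive returns to -EBADE. */
	static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
	{
		struct foo_gpio *priv = gpiochip_get_data(gc);

		return regmap_update_bits(priv->map, FOO_OUT_REG, BIT(offset),
					  value ? BIT(offset) : 0);
	}

	chip->set = foo_gpio_set;	/* formerly: chip->set_rv = foo_gpio_set; */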
+1 -1
drivers/gpu/drm/bridge/ti-sn65dsi86.c
··· 1836 1836 pdata->gchip.direction_input = ti_sn_bridge_gpio_direction_input; 1837 1837 pdata->gchip.direction_output = ti_sn_bridge_gpio_direction_output; 1838 1838 pdata->gchip.get = ti_sn_bridge_gpio_get; 1839 - pdata->gchip.set_rv = ti_sn_bridge_gpio_set; 1839 + pdata->gchip.set = ti_sn_bridge_gpio_set; 1840 1840 pdata->gchip.can_sleep = true; 1841 1841 pdata->gchip.names = ti_sn_bridge_gpio_names; 1842 1842 pdata->gchip.ngpio = SN_NUM_GPIOS;
+1 -1
drivers/hid/hid-cp2112.c
··· 1288 1288 dev->gc.label = "cp2112_gpio"; 1289 1289 dev->gc.direction_input = cp2112_gpio_direction_input; 1290 1290 dev->gc.direction_output = cp2112_gpio_direction_output; 1291 - dev->gc.set_rv = cp2112_gpio_set; 1291 + dev->gc.set = cp2112_gpio_set; 1292 1292 dev->gc.get = cp2112_gpio_get; 1293 1293 dev->gc.base = -1; 1294 1294 dev->gc.ngpio = CP2112_GPIO_MAX_GPIO;
+2 -2
drivers/hid/hid-mcp2200.c
··· 279 279 .get_direction = mcp_get_direction, 280 280 .direction_input = mcp_direction_input, 281 281 .direction_output = mcp_direction_output, 282 - .set_rv = mcp_set, 283 - .set_multiple_rv = mcp_set_multiple, 282 + .set = mcp_set, 283 + .set_multiple = mcp_set_multiple, 284 284 .get = mcp_get, 285 285 .get_multiple = mcp_get_multiple, 286 286 .base = -1,
+1 -1
drivers/hid/hid-mcp2221.c
··· 1298 1298 mcp->gc->direction_input = mcp_gpio_direction_input; 1299 1299 mcp->gc->direction_output = mcp_gpio_direction_output; 1300 1300 mcp->gc->get_direction = mcp_gpio_get_direction; 1301 - mcp->gc->set_rv = mcp_gpio_set; 1301 + mcp->gc->set = mcp_gpio_set; 1302 1302 mcp->gc->get = mcp_gpio_get; 1303 1303 mcp->gc->ngpio = MCP_NGPIO; 1304 1304 mcp->gc->base = -1;
+2 -2
drivers/hwmon/ltc2992.c
··· 339 339 st->gc.ngpio = ARRAY_SIZE(st->gpio_names); 340 340 st->gc.get = ltc2992_gpio_get; 341 341 st->gc.get_multiple = ltc2992_gpio_get_multiple; 342 - st->gc.set_rv = ltc2992_gpio_set; 343 - st->gc.set_multiple_rv = ltc2992_gpio_set_multiple; 342 + st->gc.set = ltc2992_gpio_set; 343 + st->gc.set_multiple = ltc2992_gpio_set_multiple; 344 344 345 345 ret = devm_gpiochip_add_data(&st->client->dev, &st->gc, st); 346 346 if (ret)
+1 -1
drivers/hwmon/pmbus/ucd9000.c
··· 364 364 data->gpio.direction_input = ucd9000_gpio_direction_input; 365 365 data->gpio.direction_output = ucd9000_gpio_direction_output; 366 366 data->gpio.get = ucd9000_gpio_get; 367 - data->gpio.set_rv = ucd9000_gpio_set; 367 + data->gpio.set = ucd9000_gpio_set; 368 368 data->gpio.can_sleep = true; 369 369 data->gpio.base = -1; 370 370 data->gpio.parent = &client->dev;
+1 -1
drivers/i2c/muxes/i2c-mux-ltc4306.c
··· 164 164 data->gpiochip.direction_input = ltc4306_gpio_direction_input; 165 165 data->gpiochip.direction_output = ltc4306_gpio_direction_output; 166 166 data->gpiochip.get = ltc4306_gpio_get; 167 - data->gpiochip.set_rv = ltc4306_gpio_set; 167 + data->gpiochip.set = ltc4306_gpio_set; 168 168 data->gpiochip.set_config = ltc4306_gpio_set_config; 169 169 data->gpiochip.owner = THIS_MODULE; 170 170
+1 -1
drivers/iio/adc/ad4130.c
··· 2064 2064 st->gc.can_sleep = true; 2065 2065 st->gc.init_valid_mask = ad4130_gpio_init_valid_mask; 2066 2066 st->gc.get_direction = ad4130_gpio_get_direction; 2067 - st->gc.set_rv = ad4130_gpio_set; 2067 + st->gc.set = ad4130_gpio_set; 2068 2068 2069 2069 ret = devm_gpiochip_add_data(dev, &st->gc, st); 2070 2070 if (ret)
+1 -1
drivers/iio/adc/ad4170-4.c
··· 1807 1807 st->gpiochip.direction_input = ad4170_gpio_direction_input; 1808 1808 st->gpiochip.direction_output = ad4170_gpio_direction_output; 1809 1809 st->gpiochip.get = ad4170_gpio_get; 1810 - st->gpiochip.set_rv = ad4170_gpio_set; 1810 + st->gpiochip.set = ad4170_gpio_set; 1811 1811 st->gpiochip.owner = THIS_MODULE; 1812 1812 1813 1813 return devm_gpiochip_add_data(&st->spi->dev, &st->gpiochip, indio_dev);
+1 -1
drivers/iio/adc/ad7768-1.c
··· 673 673 .direction_input = ad7768_gpio_direction_input, 674 674 .direction_output = ad7768_gpio_direction_output, 675 675 .get = ad7768_gpio_get, 676 - .set_rv = ad7768_gpio_set, 676 + .set = ad7768_gpio_set, 677 677 .owner = THIS_MODULE, 678 678 }; 679 679
+2 -2
drivers/iio/adc/rohm-bd79124.c
··· 246 246 static const struct gpio_chip bd79124gpo_chip = { 247 247 .label = "bd79124-gpo", 248 248 .get_direction = bd79124gpo_direction_get, 249 - .set_rv = bd79124gpo_set, 250 - .set_multiple_rv = bd79124gpo_set_multiple, 249 + .set = bd79124gpo_set, 250 + .set_multiple = bd79124gpo_set_multiple, 251 251 .init_valid_mask = bd79124_init_valid_mask, 252 252 .can_sleep = true, 253 253 .ngpio = 8,
+1 -1
drivers/iio/adc/ti-ads7950.c
··· 648 648 st->chip.direction_input = ti_ads7950_direction_input; 649 649 st->chip.direction_output = ti_ads7950_direction_output; 650 650 st->chip.get = ti_ads7950_get; 651 - st->chip.set_rv = ti_ads7950_set; 651 + st->chip.set = ti_ads7950_set; 652 652 653 653 ret = gpiochip_add_data(&st->chip, st); 654 654 if (ret) {
+1 -1
drivers/iio/addac/ad74115.c
··· 1577 1577 .direction_input = ad74115_gpio_direction_input, 1578 1578 .direction_output = ad74115_gpio_direction_output, 1579 1579 .get = ad74115_gpio_get, 1580 - .set_rv = ad74115_gpio_set, 1580 + .set = ad74115_gpio_set, 1581 1581 }; 1582 1582 1583 1583 return devm_gpiochip_add_data(dev, &st->gc, st);
+2 -2
drivers/iio/addac/ad74413r.c
··· 1425 1425 st->gpo_gpiochip.ngpio = st->num_gpo_gpios; 1426 1426 st->gpo_gpiochip.parent = st->dev; 1427 1427 st->gpo_gpiochip.can_sleep = true; 1428 - st->gpo_gpiochip.set_rv = ad74413r_gpio_set; 1429 - st->gpo_gpiochip.set_multiple_rv = ad74413r_gpio_set_multiple; 1428 + st->gpo_gpiochip.set = ad74413r_gpio_set; 1429 + st->gpo_gpiochip.set_multiple = ad74413r_gpio_set_multiple; 1430 1430 st->gpo_gpiochip.set_config = ad74413r_gpio_set_gpo_config; 1431 1431 st->gpo_gpiochip.get_direction = 1432 1432 ad74413r_gpio_get_gpo_direction;
+1 -1
drivers/iio/dac/ad5592r-base.c
··· 129 129 st->gpiochip.direction_input = ad5592r_gpio_direction_input; 130 130 st->gpiochip.direction_output = ad5592r_gpio_direction_output; 131 131 st->gpiochip.get = ad5592r_gpio_get; 132 - st->gpiochip.set_rv = ad5592r_gpio_set; 132 + st->gpiochip.set = ad5592r_gpio_set; 133 133 st->gpiochip.request = ad5592r_gpio_request; 134 134 st->gpiochip.owner = THIS_MODULE; 135 135 st->gpiochip.names = ad5592r_gpio_names;
+1 -1
drivers/input/keyboard/adp5588-keys.c
··· 425 425 kpad->gc.direction_input = adp5588_gpio_direction_input; 426 426 kpad->gc.direction_output = adp5588_gpio_direction_output; 427 427 kpad->gc.get = adp5588_gpio_get_value; 428 - kpad->gc.set_rv = adp5588_gpio_set_value; 428 + kpad->gc.set = adp5588_gpio_set_value; 429 429 kpad->gc.set_config = adp5588_gpio_set_config; 430 430 kpad->gc.can_sleep = 1; 431 431
+1 -1
drivers/input/touchscreen/ad7879.c
··· 475 475 ts->gc.direction_input = ad7879_gpio_direction_input; 476 476 ts->gc.direction_output = ad7879_gpio_direction_output; 477 477 ts->gc.get = ad7879_gpio_get_value; 478 - ts->gc.set_rv = ad7879_gpio_set_value; 478 + ts->gc.set = ad7879_gpio_set_value; 479 479 ts->gc.can_sleep = 1; 480 480 ts->gc.base = -1; 481 481 ts->gc.ngpio = 1;
+1
drivers/irqchip/Kconfig
··· 554 554 tristate "i.MX MU used as MSI controller" 555 555 depends on OF && HAS_IOMEM 556 556 depends on ARCH_MXC || COMPILE_TEST 557 + depends on ARM || ARM64 557 558 default m if ARCH_MXC 558 559 select IRQ_DOMAIN 559 560 select IRQ_DOMAIN_HIERARCHY
-1
drivers/irqchip/irq-gic-v5-its.c
··· 973 973 irqd = irq_get_irq_data(virq + i); 974 974 irqd_set_single_target(irqd); 975 975 irqd_set_affinity_on_activate(irqd); 976 - irqd_set_resend_when_in_progress(irqd); 977 976 } 978 977 979 978 return 0;
+2 -9
drivers/irqchip/irq-gic-v5-iwb.c
··· 241 241 struct gicv5_iwb_chip_data *iwb_node; 242 242 void __iomem *iwb_base; 243 243 struct resource *res; 244 - int ret; 245 244 246 245 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 247 246 if (!res) ··· 253 254 } 254 255 255 256 iwb_node = gicv5_iwb_init_bases(iwb_base, pdev); 256 - if (IS_ERR(iwb_node)) { 257 - ret = PTR_ERR(iwb_node); 258 - goto out_unmap; 259 - } 257 + if (IS_ERR(iwb_node)) 258 + return PTR_ERR(iwb_node); 260 259 261 260 return 0; 262 - 263 - out_unmap: 264 - iounmap(iwb_base); 265 - return ret; 266 261 } 267 262 268 263 static const struct of_device_id gicv5_iwb_of_match[] = {
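The error path above collapses to a plain return on the assumption that the register window no longer needs a manual iounmap() on failure, which is the usual outcome of letting devres own the mapping. A generic sketch of that pattern (devm_platform_ioremap_resource() is the standard helper; this is not a quote of the driver):

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* no iounmap(): devres unwinds the mapping */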
+3 -3
drivers/irqchip/irq-msi-lib.c
··· 133 133 { 134 134 const struct msi_parent_ops *ops = d->msi_parent_ops; 135 135 u32 busmask = BIT(bus_token); 136 - struct fwnode_handle *fwh; 137 136 138 137 if (!ops) 139 138 return 0; 140 139 141 - fwh = d->flags & IRQ_DOMAIN_FLAG_FWNODE_PARENT ? fwnode_get_parent(fwspec->fwnode) 142 - : fwspec->fwnode; 140 + struct fwnode_handle *fwh __free(fwnode_handle) = 141 + d->flags & IRQ_DOMAIN_FLAG_FWNODE_PARENT ? fwnode_get_parent(fwspec->fwnode) 142 + : fwnode_handle_get(fwspec->fwnode); 143 143 if (fwh != d->fwnode || fwspec->param_count != 0) 144 144 return 0; 145 145
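The replacement above relies on scope-based cleanup from <linux/cleanup.h>: both branches now hand back an owned reference (fwnode_get_parent() takes one, and the plain case pins its fwnode with fwnode_handle_get()), and the __free(fwnode_handle) annotation drops that reference automatically on every exit from the scope. The general shape of the idiom, as a sketch:

	struct fwnode_handle *fwh __free(fwnode_handle) = fwnode_handle_get(node);

	if (!fwh)
		return 0;
	/* use fwh here; fwnode_handle_put() runs automatically at scope exit */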
+10
drivers/irqchip/irq-mvebu-gicp.c
··· 177 177 .ops = &gicp_domain_ops, 178 178 }; 179 179 struct mvebu_gicp *gicp; 180 + void __iomem *base; 180 181 int ret, i; 181 182 182 183 gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL); ··· 235 234 if (!info.parent) { 236 235 dev_err(&pdev->dev, "failed to find parent IRQ domain\n"); 237 236 return -ENODEV; 237 + } 238 + 239 + base = ioremap(gicp->res->start, resource_size(gicp->res)); 240 + if (IS_ERR(base)) { 241 + dev_err(&pdev->dev, "ioremap() failed. Unable to clear pending interrupts.\n"); 242 + } else { 243 + for (i = 0; i < 64; i++) 244 + writel(i, base + GICP_CLRSPI_NSR_OFFSET); 245 + iounmap(base); 238 246 } 239 247 240 248 return msi_create_parent_irq_domain(&info, &gicp_msi_parent_ops) ? 0 : -ENOMEM;
+1 -1
drivers/irqchip/irq-riscv-imsic-platform.c
··· 308 308 int imsic_irqdomain_init(void) 309 309 { 310 310 struct irq_domain_info info = { 311 - .fwnode = imsic->fwnode, 312 311 .ops = &imsic_base_domain_ops, 313 312 .host_data = imsic, 314 313 }; ··· 324 325 } 325 326 326 327 /* Create Base IRQ domain */ 328 + info.fwnode = imsic->fwnode; 327 329 imsic->base_domain = msi_create_parent_irq_domain(&info, &imsic_msi_parent_ops); 328 330 if (!imsic->base_domain) { 329 331 pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode);
+1 -1
drivers/leds/blink/leds-lgm-sso.c
··· 471 471 gc->get_direction = sso_gpio_get_dir; 472 472 gc->direction_output = sso_gpio_dir_out; 473 473 gc->get = sso_gpio_get; 474 - gc->set_rv = sso_gpio_set; 474 + gc->set = sso_gpio_set; 475 475 476 476 gc->label = "lgm-sso"; 477 477 gc->base = -1;
+1 -1
drivers/leds/leds-pca9532.c
··· 473 473 data->gpio.label = "gpio-pca9532"; 474 474 data->gpio.direction_input = pca9532_gpio_direction_input; 475 475 data->gpio.direction_output = pca9532_gpio_direction_output; 476 - data->gpio.set_rv = pca9532_gpio_set_value; 476 + data->gpio.set = pca9532_gpio_set_value; 477 477 data->gpio.get = pca9532_gpio_get_value; 478 478 data->gpio.request = pca9532_gpio_request_pin; 479 479 data->gpio.can_sleep = 1;
+1 -1
drivers/leds/leds-pca955x.c
··· 737 737 pca955x->gpio.label = "gpio-pca955x"; 738 738 pca955x->gpio.direction_input = pca955x_gpio_direction_input; 739 739 pca955x->gpio.direction_output = pca955x_gpio_direction_output; 740 - pca955x->gpio.set_rv = pca955x_gpio_set_value; 740 + pca955x->gpio.set = pca955x_gpio_set_value; 741 741 pca955x->gpio.get = pca955x_gpio_get_value; 742 742 pca955x->gpio.request = pca955x_gpio_request_pin; 743 743 pca955x->gpio.free = pca955x_gpio_free_pin;
+1 -1
drivers/leds/leds-tca6507.c
··· 637 637 tca->gpio.base = -1; 638 638 tca->gpio.owner = THIS_MODULE; 639 639 tca->gpio.direction_output = tca6507_gpio_direction_output; 640 - tca->gpio.set_rv = tca6507_gpio_set_value; 640 + tca->gpio.set = tca6507_gpio_set_value; 641 641 tca->gpio.parent = dev; 642 642 err = devm_gpiochip_add_data(dev, &tca->gpio, tca); 643 643 if (err) {
+19
drivers/mailbox/Kconfig
··· 36 36 that provides different means of transports: supported extensions 37 37 will be discovered and possibly managed at probe-time. 38 38 39 + config AST2700_MBOX 40 + tristate "ASPEED AST2700 IPC driver" 41 + depends on ARCH_ASPEED || COMPILE_TEST 42 + help 43 + Mailbox driver implementation for ASPEED AST27XX SoCs. This driver 44 + can be used to send messages between the different processors in the SoC. 45 + The driver provides mailbox support for sending interrupts to the 46 + clients. Say Y here if you want to build this driver. 47 + 48 + config CV1800_MBOX 40 49 tristate "cv1800 mailbox" 41 50 depends on ARCH_SOPHGO || COMPILE_TEST ··· 358 349 11 mailbox channels with different operating modes and every channel 359 350 is unidirectional. Say Y here if you want to use the CIX Mailbox 360 351 support. 352 + 353 + config BCM74110_MAILBOX 354 + tristate "Brcmstb BCM74110 Mailbox" 355 + depends on ARCH_BRCMSTB || COMPILE_TEST 356 + default ARCH_BRCMSTB 357 + help 358 + Broadcom STB mailbox driver, present starting with brcmstb bcm74110 359 + SoCs. The mailbox is a communication channel between the host 360 + processor and coprocessor that handles various power management tasks 361 + and more. 361 362 362 363 endif
+4
drivers/mailbox/Makefile
··· 11 11 12 12 obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o 13 13 14 + obj-$(CONFIG_AST2700_MBOX) += ast2700-mailbox.o 15 + 14 16 obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o 15 17 16 18 obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o ··· 76 74 obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o 77 75 78 76 obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o 77 + 78 + obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o
+235
drivers/mailbox/ast2700-mailbox.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright Aspeed Technology Inc. (C) 2025. All rights reserved 4 + */ 5 + 6 + #include <linux/interrupt.h> 7 + #include <linux/io.h> 8 + #include <linux/iopoll.h> 9 + #include <linux/kernel.h> 10 + #include <linux/mailbox_controller.h> 11 + #include <linux/module.h> 12 + #include <linux/of.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/slab.h> 15 + 16 + /* Each bit in the register represents an IPC ID */ 17 + #define IPCR_TX_TRIG 0x00 18 + #define IPCR_ENABLE 0x04 19 + #define IPCR_STATUS 0x08 20 + #define RX_IRQ(n) BIT(n) 21 + #define RX_IRQ_MASK 0xf 22 + #define IPCR_DATA 0x10 23 + 24 + struct ast2700_mbox_data { 25 + u8 num_chans; 26 + u8 msg_size; 27 + }; 28 + 29 + struct ast2700_mbox { 30 + struct mbox_controller mbox; 31 + u8 msg_size; 32 + void __iomem *tx_regs; 33 + void __iomem *rx_regs; 34 + spinlock_t lock; 35 + }; 36 + 37 + static inline int ch_num(struct mbox_chan *chan) 38 + { 39 + return chan - chan->mbox->chans; 40 + } 41 + 42 + static inline bool ast2700_mbox_tx_done(struct ast2700_mbox *mb, int idx) 43 + { 44 + return !(readl(mb->tx_regs + IPCR_STATUS) & BIT(idx)); 45 + } 46 + 47 + static irqreturn_t ast2700_mbox_irq(int irq, void *p) 48 + { 49 + struct ast2700_mbox *mb = p; 50 + void __iomem *data_reg; 51 + int num_words = mb->msg_size / sizeof(u32); 52 + u32 *word_data; 53 + u32 status; 54 + int n, i; 55 + 56 + /* Only examine channels that are currently enabled. */ 57 + status = readl(mb->rx_regs + IPCR_ENABLE) & 58 + readl(mb->rx_regs + IPCR_STATUS); 59 + 60 + if (!(status & RX_IRQ_MASK)) 61 + return IRQ_NONE; 62 + 63 + for (n = 0; n < mb->mbox.num_chans; ++n) { 64 + struct mbox_chan *chan = &mb->mbox.chans[n]; 65 + 66 + if (!(status & RX_IRQ(n))) 67 + continue; 68 + 69 + data_reg = mb->rx_regs + IPCR_DATA + mb->msg_size * n; 70 + word_data = chan->con_priv; 71 + /* Read the message data */ 72 + for (i = 0; i < num_words; i++) 73 + word_data[i] = readl(data_reg + i * sizeof(u32)); 74 + 75 + mbox_chan_received_data(chan, chan->con_priv); 76 + 77 + /* The IRQ can be cleared only once the FIFO is empty. */ 78 + writel(RX_IRQ(n), mb->rx_regs + IPCR_STATUS); 79 + } 80 + 81 + return IRQ_HANDLED; 82 + } 83 + 84 + static int ast2700_mbox_send_data(struct mbox_chan *chan, void *data) 85 + { 86 + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); 87 + int idx = ch_num(chan); 88 + void __iomem *data_reg = mb->tx_regs + IPCR_DATA + mb->msg_size * idx; 89 + u32 *word_data = data; 90 + int num_words = mb->msg_size / sizeof(u32); 91 + int i; 92 + 93 + if (!(readl(mb->tx_regs + IPCR_ENABLE) & BIT(idx))) { 94 + dev_warn(mb->mbox.dev, "%s: Ch-%d not enabled yet\n", __func__, idx); 95 + return -ENODEV; 96 + } 97 + 98 + if (!(ast2700_mbox_tx_done(mb, idx))) { 99 + dev_warn(mb->mbox.dev, "%s: Ch-%d last data has not finished\n", __func__, idx); 100 + return -EBUSY; 101 + } 102 + 103 + /* Write the message data */ 104 + for (i = 0 ; i < num_words; i++) 105 + writel(word_data[i], data_reg + i * sizeof(u32)); 106 + 107 + writel(BIT(idx), mb->tx_regs + IPCR_TX_TRIG); 108 + dev_dbg(mb->mbox.dev, "%s: Ch-%d sent\n", __func__, idx); 109 + 110 + return 0; 111 + } 112 + 113 + static int ast2700_mbox_startup(struct mbox_chan *chan) 114 + { 115 + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); 116 + int idx = ch_num(chan); 117 + void __iomem *reg = mb->rx_regs + IPCR_ENABLE; 118 + unsigned long flags; 119 + 120 + spin_lock_irqsave(&mb->lock, flags); 121 + writel(readl(reg) | BIT(idx), reg); 122 + spin_unlock_irqrestore(&mb->lock, flags); 123 + 124 + return 0; 125 + } 126 + 127 + static void ast2700_mbox_shutdown(struct mbox_chan *chan) 128 + { 129 + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); 130 + int idx = ch_num(chan); 131 + void __iomem *reg = mb->rx_regs + IPCR_ENABLE; 132 + unsigned long flags; 133 + 134 + spin_lock_irqsave(&mb->lock, flags); 135 + writel(readl(reg) & ~BIT(idx), reg); 136 + spin_unlock_irqrestore(&mb->lock, flags); 137 + } 138 + 139 + static bool ast2700_mbox_last_tx_done(struct mbox_chan *chan) 140 + { 141 + struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); 142 + int idx = ch_num(chan); 143 + 144 + return ast2700_mbox_tx_done(mb, idx); 145 + } 146 + 147 + static const struct mbox_chan_ops ast2700_mbox_chan_ops = { 148 + .send_data = ast2700_mbox_send_data, 149 + .startup = ast2700_mbox_startup, 150 + .shutdown = ast2700_mbox_shutdown, 151 + .last_tx_done = ast2700_mbox_last_tx_done, 152 + }; 153 + 154 + static int ast2700_mbox_probe(struct platform_device *pdev) 155 + { 156 + struct ast2700_mbox *mb; 157 + const struct ast2700_mbox_data *dev_data; 158 + struct device *dev = &pdev->dev; 159 + int irq, ret; 160 + 161 + if (!pdev->dev.of_node) 162 + return -ENODEV; 163 + 164 + dev_data = device_get_match_data(&pdev->dev); 165 + 166 + mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL); 167 + if (!mb) 168 + return -ENOMEM; 169 + 170 + mb->mbox.chans = devm_kcalloc(&pdev->dev, dev_data->num_chans, 171 + sizeof(*mb->mbox.chans), GFP_KERNEL); 172 + if (!mb->mbox.chans) 173 + return -ENOMEM; 174 + 175 + /* con_priv of each channel is used to store the message received */ 176 + for (int i = 0; i < dev_data->num_chans; i++) { 177 + mb->mbox.chans[i].con_priv = devm_kcalloc(dev, dev_data->msg_size, 178 + sizeof(u8), GFP_KERNEL); 179 + if (!mb->mbox.chans[i].con_priv) 180 + return -ENOMEM; 181 + } 182 + 183 + platform_set_drvdata(pdev, mb); 184 + 185 + mb->tx_regs = devm_platform_ioremap_resource_byname(pdev, "tx"); 186 + if (IS_ERR(mb->tx_regs)) 187 + return PTR_ERR(mb->tx_regs); 188 + 189 + mb->rx_regs = devm_platform_ioremap_resource_byname(pdev, "rx"); 190 + if (IS_ERR(mb->rx_regs)) 191 + return PTR_ERR(mb->rx_regs); 192 + 193 + mb->msg_size = dev_data->msg_size; 194 + mb->mbox.dev = dev; 195 + mb->mbox.num_chans = dev_data->num_chans; 196 + mb->mbox.ops = &ast2700_mbox_chan_ops; 197 + mb->mbox.txdone_irq = false; 198 + mb->mbox.txdone_poll = true; 199 + mb->mbox.txpoll_period = 5; 200 + spin_lock_init(&mb->lock); 201 + 202 + irq = platform_get_irq(pdev, 0); 203 + if (irq < 0) 204 + return irq; 205 + 206 + ret = devm_request_irq(dev, irq, ast2700_mbox_irq, 0, dev_name(dev), mb); 207 + if (ret) 208 + return ret; 209 + 210 + return devm_mbox_controller_register(dev, &mb->mbox); 211 + } 212 + 213 + static const struct ast2700_mbox_data ast2700_dev_data = { 214 + .num_chans = 4, 215 + .msg_size = 0x20, 216 + }; 217 + 218 + static const struct of_device_id ast2700_mbox_of_match[] = { 219 + { .compatible = "aspeed,ast2700-mailbox", .data = &ast2700_dev_data }, 220 + {} 221 + }; 222 + MODULE_DEVICE_TABLE(of, ast2700_mbox_of_match); 223 + 224 + static struct platform_driver ast2700_mbox_driver = { 225 + .driver = { 226 + .name = "ast2700-mailbox", 227 + .of_match_table = ast2700_mbox_of_match, 228 + }, 229 + .probe = ast2700_mbox_probe, 230 + }; 231 + module_platform_driver(ast2700_mbox_driver); 232 + 233 + MODULE_AUTHOR("Jammy Huang <jammy_huang@aspeedtech.com>"); 234 + MODULE_DESCRIPTION("ASPEED AST2700 IPC driver"); 235 + MODULE_LICENSE("GPL");
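Since the controller registers with txdone_poll (ast2700_mbox_last_tx_done() is polled every 5 ms), a consumer drives it through the generic mailbox client API. A hypothetical client sketch, not part of the driver above (dev, my_rx_callback and the payload are made up for illustration):

	static void my_rx_callback(struct mbox_client *cl, void *msg)
	{
		/* msg points at the 32-byte buffer kept in chan->con_priv */
	}

	struct mbox_client cl = {
		.dev = dev,
		.rx_callback = my_rx_callback,
		.tx_block = true,
		.tx_tout = 500,			/* ms; tx completion is polled */
	};
	struct mbox_chan *chan = mbox_request_channel(&cl, 0);
	u32 msg[8] = { 0x12345678 };		/* msg_size is 0x20 bytes */

	if (!IS_ERR(chan)) {
		mbox_send_message(chan, msg);
		mbox_free_channel(chan);
	}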
+656
drivers/mailbox/bcm74110-mailbox.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Broadcom BCM74110 Mailbox Driver 4 + * 5 + * Copyright (c) 2025 Broadcom 6 + */ 7 + #include <linux/list.h> 8 + #include <linux/types.h> 9 + #include <linux/workqueue.h> 10 + #include <linux/io-64-nonatomic-hi-lo.h> 11 + #include <linux/interrupt.h> 12 + #include <linux/module.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/of.h> 15 + #include <linux/delay.h> 16 + #include <linux/mailbox_controller.h> 17 + #include <linux/bitfield.h> 18 + #include <linux/slab.h> 19 + 20 + #define BCM_MBOX_BASE(sel) ((sel) * 0x40) 21 + #define BCM_MBOX_IRQ_BASE(sel) (((sel) * 0x20) + 0x800) 22 + 23 + #define BCM_MBOX_CFGA 0x0 24 + #define BCM_MBOX_CFGB 0x4 25 + #define BCM_MBOX_CFGC 0x8 26 + #define BCM_MBOX_CFGD 0xc 27 + #define BCM_MBOX_CTRL 0x10 28 + #define BCM_MBOX_CTRL_EN BIT(0) 29 + #define BCM_MBOX_CTRL_CLR BIT(1) 30 + #define BCM_MBOX_STATUS0 0x14 31 + #define BCM_MBOX_STATUS0_NOT_EMPTY BIT(28) 32 + #define BCM_MBOX_STATUS0_FULL BIT(29) 33 + #define BCM_MBOX_STATUS1 0x18 34 + #define BCM_MBOX_STATUS2 0x1c 35 + #define BCM_MBOX_WDATA 0x20 36 + #define BCM_MBOX_RDATA 0x28 37 + 38 + #define BCM_MBOX_IRQ_STATUS 0x0 39 + #define BCM_MBOX_IRQ_SET 0x4 40 + #define BCM_MBOX_IRQ_CLEAR 0x8 41 + #define BCM_MBOX_IRQ_MASK_STATUS 0xc 42 + #define BCM_MBOX_IRQ_MASK_SET 0x10 43 + #define BCM_MBOX_IRQ_MASK_CLEAR 0x14 44 + #define BCM_MBOX_IRQ_TIMEOUT BIT(0) 45 + #define BCM_MBOX_IRQ_NOT_EMPTY BIT(1) 46 + #define BCM_MBOX_IRQ_FULL BIT(2) 47 + #define BCM_MBOX_IRQ_LOW_WM BIT(3) 48 + #define BCM_MBOX_IRQ_HIGH_WM BIT(4) 49 + 50 + #define BCM_LINK_CODE0 0xbe0 51 + #define BCM_LINK_CODE1 0xbe1 52 + #define BCM_LINK_CODE2 0xbe2 53 + 54 + enum { 55 + BCM_MSG_FUNC_LINK_START = 0, 56 + BCM_MSG_FUNC_LINK_STOP, 57 + BCM_MSG_FUNC_SHMEM_TX, 58 + BCM_MSG_FUNC_SHMEM_RX, 59 + BCM_MSG_FUNC_SHMEM_STOP, 60 + BCM_MSG_FUNC_MAX, 61 + }; 62 + 63 + enum { 64 + BCM_MSG_SVC_INIT = 0, 65 + BCM_MSG_SVC_PMC, 66 + BCM_MSG_SVC_SCMI, 67 + BCM_MSG_SVC_DPFE, 68 + BCM_MSG_SVC_MAX, 69 + }; 70 + 71 + struct bcm74110_mbox_msg { 72 + struct list_head list_entry; 73 + #define BCM_MSG_VERSION_MASK GENMASK(31, 29) 74 + #define BCM_MSG_VERSION 0x1 75 + #define BCM_MSG_REQ_MASK BIT(28) 76 + #define BCM_MSG_RPLY_MASK BIT(27) 77 + #define BCM_MSG_SVC_MASK GENMASK(26, 24) 78 + #define BCM_MSG_FUNC_MASK GENMASK(23, 16) 79 + #define BCM_MSG_LENGTH_MASK GENMASK(15, 4) 80 + #define BCM_MSG_SLOT_MASK GENMASK(3, 0) 81 + 82 + #define BCM_MSG_SET_FIELD(hdr, field, val) \ 83 + do { \ 84 + hdr &= ~BCM_MSG_##field##_MASK; \ 85 + hdr |= FIELD_PREP(BCM_MSG_##field##_MASK, val); \ 86 + } while (0) 87 + 88 + #define BCM_MSG_GET_FIELD(hdr, field) \ 89 + FIELD_GET(BCM_MSG_##field##_MASK, hdr) 90 + u32 msg; 91 + }; 92 + 93 + struct bcm74110_mbox_chan { 94 + struct bcm74110_mbox *mbox; 95 + bool en; 96 + int slot; 97 + int type; 98 + }; 99 + 100 + struct bcm74110_mbox { 101 + struct platform_device *pdev; 102 + void __iomem *base; 103 + 104 + int tx_chan; 105 + int rx_chan; 106 + int rx_irq; 107 + struct list_head rx_svc_init_list; 108 + spinlock_t rx_svc_list_lock; 109 + 110 + struct mbox_controller controller; 111 + struct bcm74110_mbox_chan *mbox_chan; 112 + }; 113 + 114 + #define BCM74110_OFFSET_IO_WRITEL_MACRO(name, offset_base) \ 115 + static void bcm74110_##name##_writel(struct bcm74110_mbox *mbox,\ 116 + u32 val, u32 off) \ 117 + { \ 118 + writel_relaxed(val, mbox->base + offset_base + off); \ 119 + } 120 + BCM74110_OFFSET_IO_WRITEL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan)); 121 + BCM74110_OFFSET_IO_WRITEL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan)); 122 + 123 + #define BCM74110_OFFSET_IO_READL_MACRO(name, offset_base) \ 124 + static u32 bcm74110_##name##_readl(struct bcm74110_mbox *mbox, \ 125 + u32 off) \ 126 + { \ 127 + return readl_relaxed(mbox->base + offset_base + off); \ 128 + } 129 + BCM74110_OFFSET_IO_READL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan)); 130 + BCM74110_OFFSET_IO_READL_MACRO(rx, BCM_MBOX_BASE(mbox->rx_chan)); 131 + BCM74110_OFFSET_IO_READL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan)); 132 + 133 + static inline struct bcm74110_mbox *bcm74110_mbox_from_cntrl( 134 + struct mbox_controller *cntrl) 135 + { 136 + return container_of(cntrl, struct bcm74110_mbox, controller); 137 + } 138 + 139 + static void bcm74110_rx_push_init_msg(struct bcm74110_mbox *mbox, u32 val) 140 + { 141 + struct bcm74110_mbox_msg *msg; 142 + 143 + msg = kzalloc(sizeof(*msg), GFP_ATOMIC); 144 + if (!msg) 145 + return; 146 + 147 + INIT_LIST_HEAD(&msg->list_entry); 148 + msg->msg = val; 149 + 150 + spin_lock(&mbox->rx_svc_list_lock); 151 + list_add_tail(&msg->list_entry, &mbox->rx_svc_init_list); 152 + spin_unlock(&mbox->rx_svc_list_lock); 153 + } 154 + 155 + static void bcm74110_rx_process_msg(struct bcm74110_mbox *mbox) 156 + { 157 + struct device *dev = &mbox->pdev->dev; 158 + struct bcm74110_mbox_chan *chan_priv; 159 + struct mbox_chan *chan; 160 + u32 msg, status; 161 + int type; 162 + 163 + do { 164 + msg = bcm74110_rx_readl(mbox, BCM_MBOX_RDATA); 165 + status = bcm74110_rx_readl(mbox, BCM_MBOX_STATUS0); 166 + 167 + dev_dbg(dev, "rx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n", 168 + BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY), 169 + BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC), 170 + BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT)); 171 + 172 + type = BCM_MSG_GET_FIELD(msg, SVC); 173 + switch (type) { 174 + case BCM_MSG_SVC_INIT: 175 + bcm74110_rx_push_init_msg(mbox, msg); 176 + break; 177 + case BCM_MSG_SVC_PMC: 178 + case BCM_MSG_SVC_SCMI: 179 + case BCM_MSG_SVC_DPFE: 180 + chan = &mbox->controller.chans[type]; 181 + chan_priv = chan->con_priv; 182 + if (chan_priv->en) 183 + mbox_chan_received_data(chan, NULL); 184 + else 185 + dev_warn(dev, "Channel not enabled\n"); 186 + break; 187 + default: 188 + dev_warn(dev, "Unsupported msg received\n"); 189 + } 190 + } while (status & BCM_MBOX_STATUS0_NOT_EMPTY); 191 + } 192 + 193 + static irqreturn_t bcm74110_mbox_isr(int irq, void *data) 194 + { 195 + struct bcm74110_mbox *mbox = data; 196 + u32 status; 197 + 198 + status = bcm74110_irq_readl(mbox, BCM_MBOX_IRQ_STATUS); 199 + 200 + bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR); 201 + 202 + if (status & BCM_MBOX_IRQ_NOT_EMPTY) 203 + bcm74110_rx_process_msg(mbox); 204 + else 205 + dev_warn(&mbox->pdev->dev, "Spurious interrupt\n"); 206 + 207 + return IRQ_HANDLED; 208 + } 209 + 210 + static void bcm74110_mbox_mask_and_clear(struct bcm74110_mbox *mbox) 211 + { 212 + bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_MASK_SET); 213 + bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR); 214 + } 215 + 216 + static int bcm74110_rx_pop_init_msg(struct bcm74110_mbox *mbox, u32 func_type, 217 + u32 *val) 218 + { 219 + struct bcm74110_mbox_msg *msg, *msg_tmp; 220 + unsigned long flags; 221 + bool found = false; 222 + 223 + spin_lock_irqsave(&mbox->rx_svc_list_lock, flags); 224 + list_for_each_entry_safe(msg, msg_tmp, &mbox->rx_svc_init_list, 225 + list_entry) { 226 + if (BCM_MSG_GET_FIELD(msg->msg, FUNC) == func_type) { 227 + list_del(&msg->list_entry); 228 + found = true; 229 + break; 230 + } 231 + } 232 + spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags); 233 + 234 + if (!found) 235 + return -EINVAL; 236 + 237 + *val = msg->msg; 238 + kfree(msg); 239 + 240 + return 0; 241 + } 242 + 243 + static void bcm74110_rx_flush_msg(struct bcm74110_mbox *mbox) 244 + { 245 + struct bcm74110_mbox_msg *msg, *msg_tmp; 246 + LIST_HEAD(list_temp); 247 + unsigned long flags; 248 + 249 + spin_lock_irqsave(&mbox->rx_svc_list_lock, flags); 250 + list_splice_init(&mbox->rx_svc_init_list, &list_temp); 251 + spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags); 252 + 253 + list_for_each_entry_safe(msg, msg_tmp, &list_temp, list_entry) { 254 + list_del(&msg->list_entry); 255 + kfree(msg); 256 + } 257 + } 258 + 259 + #define BCM_DEQUEUE_TIMEOUT_MS 30 260 + static int bcm74110_rx_pop_init_msg_block(struct bcm74110_mbox *mbox, u32 func_type, 261 + u32 *val) 262 + { 263 + int ret, timeout = 0; 264 + 265 + do { 266 + ret = bcm74110_rx_pop_init_msg(mbox, func_type, val); 267 + 268 + if (!ret) 269 + return 0; 270 + 271 + /* TODO: Figure out what is a good sleep here. */ 272 + usleep_range(1000, 2000); 273 + timeout++; 274 + } while (timeout < BCM_DEQUEUE_TIMEOUT_MS); 275 + 276 + dev_warn(&mbox->pdev->dev, "Timeout waiting for service init response\n"); 277 + return -ETIMEDOUT; 278 + } 279 + 280 + static int bcm74110_mbox_create_msg(int req, int rply, int svc, int func, 281 + int length, int slot) 282 + { 283 + u32 msg = 0; 284 + 285 + BCM_MSG_SET_FIELD(msg, REQ, req); 286 + BCM_MSG_SET_FIELD(msg, RPLY, rply); 287 + BCM_MSG_SET_FIELD(msg, SVC, svc); 288 + BCM_MSG_SET_FIELD(msg, FUNC, func); 289 + BCM_MSG_SET_FIELD(msg, LENGTH, length); 290 + BCM_MSG_SET_FIELD(msg, SLOT, slot); 291 + 292 + return msg; 293 + } 294 + 295 + static int bcm74110_mbox_tx_msg(struct bcm74110_mbox *mbox, u32 msg) 296 + { 297 + int val; 298 + 299 + /* We can potentially poll with timeout here instead */ 300 + val = bcm74110_tx_readl(mbox, BCM_MBOX_STATUS0); 301 + if (val & BCM_MBOX_STATUS0_FULL) { 302 + dev_err(&mbox->pdev->dev, "Mailbox full\n"); 303 + return -EINVAL; 304 + } 305 + 306 + dev_dbg(&mbox->pdev->dev, "tx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n", 307 + BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY), 308 + BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC), 309 + BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT)); 310 + 311 + bcm74110_tx_writel(mbox, msg, BCM_MBOX_WDATA); 312 + 313 + return 0; 314 + } 315 + 316 + #define BCM_MBOX_LINK_TRAINING_RETRIES 5 317 + static int bcm74110_mbox_link_training(struct bcm74110_mbox *mbox) 318 + { 319 + int ret, retries = 0; 320 + u32 msg = 0, orig_len = 0, len = BCM_LINK_CODE0; 321 + 322 + do { 323 + switch (len) { 324 + case 0: 325 + retries++; 326 + dev_warn(&mbox->pdev->dev, 327 + "Link train failed, trying again... %d\n", 328 + retries); 329 + if (retries > BCM_MBOX_LINK_TRAINING_RETRIES) 330 + return -EINVAL; 331 + len = BCM_LINK_CODE0; 332 + fallthrough; 333 + case BCM_LINK_CODE0: 334 + case BCM_LINK_CODE1: 335 + case BCM_LINK_CODE2: 336 + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, 337 + BCM_MSG_FUNC_LINK_START, 338 + len, BCM_MSG_SVC_INIT); 339 + break; 340 + default: 341 + break; 342 + } 343 + 344 + bcm74110_mbox_tx_msg(mbox, msg); 345 + 346 + /* No response expected for LINK_CODE2 */ 347 + if (len == BCM_LINK_CODE2) 348 + return 0; 349 + 350 + orig_len = len; 351 + 352 + ret = bcm74110_rx_pop_init_msg_block(mbox, 353 + BCM_MSG_GET_FIELD(msg, FUNC), 354 + &msg); 355 + if (ret) { 356 + len = 0; 357 + continue; 358 + } 359 + 360 + if ((BCM_MSG_GET_FIELD(msg, SVC) != BCM_MSG_SVC_INIT) || 361 + (BCM_MSG_GET_FIELD(msg, FUNC) != BCM_MSG_FUNC_LINK_START) || 362 + (BCM_MSG_GET_FIELD(msg, SLOT) != 0) || 363 + (BCM_MSG_GET_FIELD(msg, RPLY) != 1) || 364 + (BCM_MSG_GET_FIELD(msg, REQ) != 0)) { 365 + len = 0; 366 + continue; 367 + } 368 + 369 + len = BCM_MSG_GET_FIELD(msg, LENGTH); 370 + 371 + /* Make sure sequence is good */ 372 + if (len != (orig_len + 1)) { 373 + len = 0; 374 + continue; 375 + } 376 + } while (1); 377 + 378 + return -EINVAL; 379 + } 380 + 381 + static int bcm74110_mbox_tx_msg_and_wait_ack(struct bcm74110_mbox *mbox, u32 msg) 382 + { 383 + int ret; 384 + u32 recv_msg; 385 + 386 + ret = bcm74110_mbox_tx_msg(mbox, msg); 387 + if (ret) 388 + return ret; 389 + 390 + ret = bcm74110_rx_pop_init_msg_block(mbox, BCM_MSG_GET_FIELD(msg, FUNC), 391 + &recv_msg); 392 + if (ret) 393 + return ret; 394 + 395 + /* 396 + * Modify tx message to verify rx ack. 397 + * Flip RPLY/REQ for synchronous messages 398 + */ 399 + if (BCM_MSG_GET_FIELD(msg, REQ) == 1) { 400 + BCM_MSG_SET_FIELD(msg, RPLY, 1); 401 + BCM_MSG_SET_FIELD(msg, REQ, 0); 402 + } 403 + 404 + if (msg != recv_msg) { 405 + dev_err(&mbox->pdev->dev, "Found ack, but ack is invalid\n"); 406 + return -EINVAL; 407 + } 408 + 409 + return 0; 410 + } 411 + 412 + /* Each index points to 0x100 of HAB MEM. IDX size counts from 0 */ 413 + #define BCM_MBOX_HAB_MEM_IDX_START 0x30 414 + #define BCM_MBOX_HAB_MEM_IDX_SIZE 0x0 415 + static int bcm74110_mbox_shmem_init(struct bcm74110_mbox *mbox) 416 + { 417 + u32 msg = 0; 418 + int ret; 419 + 420 + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, 421 + BCM_MSG_FUNC_SHMEM_STOP, 422 + 0, BCM_MSG_SVC_INIT); 423 + ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg); 424 + if (ret) 425 + return -EINVAL; 426 + 427 + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, 428 + BCM_MSG_FUNC_SHMEM_TX, 429 + BCM_MBOX_HAB_MEM_IDX_START, 430 + BCM_MBOX_HAB_MEM_IDX_SIZE); 431 + ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg); 432 + if (ret) 433 + return -EINVAL; 434 + 435 + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, 436 + BCM_MSG_FUNC_SHMEM_RX, 437 + BCM_MBOX_HAB_MEM_IDX_START, 438 + BCM_MBOX_HAB_MEM_IDX_SIZE); 439 + ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg); 440 + if (ret) 441 + return -EINVAL; 442 + 443 + return 0; 444 + } 445 + 446 + static int bcm74110_mbox_init(struct bcm74110_mbox *mbox) 447 + { 448 + int ret = 0; 449 + 450 + /* Disable queues tx/rx */ 451 + bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL); 452 + 453 + /* Clear status & restart tx/rx */ 454 + bcm74110_tx_writel(mbox, BCM_MBOX_CTRL_EN | BCM_MBOX_CTRL_CLR, 455 + BCM_MBOX_CTRL); 456 + 457 + /* Unmask irq */ 458 + bcm74110_irq_writel(mbox, BCM_MBOX_IRQ_NOT_EMPTY, BCM_MBOX_IRQ_MASK_CLEAR); 459 + 460 + ret = bcm74110_mbox_link_training(mbox); 461 + if (ret) { 462 + dev_err(&mbox->pdev->dev, "Training failed\n"); 463 + return ret; 464 + } 465 + 466 + return bcm74110_mbox_shmem_init(mbox); 467 + } 468 + 469 + static int bcm74110_mbox_send_data(struct mbox_chan *chan, void *data) 470 + { 471 + struct bcm74110_mbox_chan *chan_priv = chan->con_priv; 472 + u32 msg; 473 + 474 + switch (chan_priv->type) { 475 + case BCM_MSG_SVC_PMC: 476 + case BCM_MSG_SVC_SCMI: 477 + case BCM_MSG_SVC_DPFE: 478 + msg = bcm74110_mbox_create_msg(1, 0, chan_priv->type, 0, 479 + 128 + 28, chan_priv->slot); 480 + break; 481 + default: 482 + return -EINVAL; 483 + } 484 + 485 + return bcm74110_mbox_tx_msg(chan_priv->mbox, msg); 486 + } 487 + 488 + static int bcm74110_mbox_chan_startup(struct mbox_chan *chan) 489 + { 490 + struct bcm74110_mbox_chan *chan_priv = chan->con_priv; 491 + 492 + chan_priv->en = true; 493 + 494 + return 0; 495 + } 496 + 497 + static void bcm74110_mbox_chan_shutdown(struct mbox_chan *chan) 498 + { 499 + struct bcm74110_mbox_chan *chan_priv = chan->con_priv; 500 + 501 + chan_priv->en = false; 502 + } 503 + 504 + static const struct mbox_chan_ops bcm74110_mbox_chan_ops = { 505 + .send_data = bcm74110_mbox_send_data, 506 + .startup = bcm74110_mbox_chan_startup, 507 + .shutdown = bcm74110_mbox_chan_shutdown, 508 + }; 509 + 510 + static void bcm74110_mbox_shutdown(struct platform_device *pdev) 511 + { 512 + struct bcm74110_mbox *mbox = dev_get_drvdata(&pdev->dev); 513 + u32 msg; 514 + 515 + msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT, 516 + BCM_MSG_FUNC_LINK_STOP, 517 + 0, 0); 518 + 519 + bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg); 520 + 521 + /* Even if we don't receive an ACK, let's shut it down */ 522 + 523 + bcm74110_mbox_mask_and_clear(mbox); 524 + 525 + /* Disable queues tx/rx */ 526 + bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL); 527 + 528 + /* Flush queues */ 529 + bcm74110_rx_flush_msg(mbox); 530 + } 531 + 532 + static struct mbox_chan *bcm74110_mbox_of_xlate(struct mbox_controller *cntrl, 533 + const struct of_phandle_args *p) 534 + { 535 + struct bcm74110_mbox *mbox
= bcm74110_mbox_from_cntrl(cntrl); 536 + struct device *dev = &mbox->pdev->dev; 537 + struct bcm74110_mbox_chan *chan_priv; 538 + int slot, type; 539 + 540 + if (p->args_count != 2) { 541 + dev_err(dev, "Invalid arguments\n"); 542 + return ERR_PTR(-EINVAL); 543 + } 544 + 545 + type = p->args[0]; 546 + slot = p->args[1]; 547 + 548 + switch (type) { 549 + case BCM_MSG_SVC_PMC: 550 + case BCM_MSG_SVC_SCMI: 551 + case BCM_MSG_SVC_DPFE: 552 + if (slot > BCM_MBOX_HAB_MEM_IDX_SIZE) { 553 + dev_err(dev, "Not enough shared memory\n"); 554 + return ERR_PTR(-EINVAL); 555 + } 556 + chan_priv = cntrl->chans[type].con_priv; 557 + chan_priv->slot = slot; 558 + chan_priv->type = type; 559 + break; 560 + default: 561 + dev_err(dev, "Invalid channel type: %d\n", type); 562 + return ERR_PTR(-EINVAL); 563 + } 564 + 565 + return &cntrl->chans[type]; 566 + } 567 + 568 + static int bcm74110_mbox_probe(struct platform_device *pdev) 569 + { 570 + struct device *dev = &pdev->dev; 571 + struct bcm74110_mbox *mbox; 572 + int i, ret; 573 + 574 + mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL); 575 + if (!mbox) 576 + return -ENOMEM; 577 + 578 + mbox->pdev = pdev; 579 + platform_set_drvdata(pdev, mbox); 580 + 581 + mbox->base = devm_platform_ioremap_resource(pdev, 0); 582 + if (IS_ERR(mbox->base)) 583 + return dev_err_probe(dev, PTR_ERR(mbox->base), "Failed to iomap\n"); 584 + 585 + ret = of_property_read_u32(dev->of_node, "brcm,tx", &mbox->tx_chan); 586 + if (ret) 587 + return dev_err_probe(dev, ret, "Failed to find tx channel\n"); 588 + 589 + ret = of_property_read_u32(dev->of_node, "brcm,rx", &mbox->rx_chan); 590 + if (ret) 591 + return dev_err_probe(dev, ret, "Failed to find rx channel\n"); 592 + 593 + mbox->rx_irq = platform_get_irq(pdev, 0); 594 + if (mbox->rx_irq < 0) 595 + return mbox->rx_irq; 596 + 597 + INIT_LIST_HEAD(&mbox->rx_svc_init_list); 598 + spin_lock_init(&mbox->rx_svc_list_lock); 599 + bcm74110_mbox_mask_and_clear(mbox); 600 + 601 + ret = devm_request_irq(dev, mbox->rx_irq, bcm74110_mbox_isr, 602 + IRQF_NO_SUSPEND, pdev->name, mbox); 603 + if (ret) 604 + return dev_err_probe(dev, ret, "Failed to request irq\n"); 605 + 606 + mbox->controller.ops = &bcm74110_mbox_chan_ops; 607 + mbox->controller.dev = dev; 608 + mbox->controller.num_chans = BCM_MSG_SVC_MAX; 609 + mbox->controller.of_xlate = &bcm74110_mbox_of_xlate; 610 + mbox->controller.chans = devm_kcalloc(dev, BCM_MSG_SVC_MAX, 611 + sizeof(*mbox->controller.chans), 612 + GFP_KERNEL); 613 + if (!mbox->controller.chans) 614 + return -ENOMEM; 615 + 616 + mbox->mbox_chan = devm_kcalloc(dev, BCM_MSG_SVC_MAX, 617 + sizeof(*mbox->mbox_chan), 618 + GFP_KERNEL); 619 + if (!mbox->mbox_chan) 620 + return -ENOMEM; 621 + 622 + for (i = 0; i < BCM_MSG_SVC_MAX; i++) { 623 + mbox->mbox_chan[i].mbox = mbox; 624 + mbox->controller.chans[i].con_priv = &mbox->mbox_chan[i]; 625 + } 626 + 627 + ret = devm_mbox_controller_register(dev, &mbox->controller); 628 + if (ret) 629 + return ret; 630 + 631 + ret = bcm74110_mbox_init(mbox); 632 + if (ret) 633 + return ret; 634 + 635 + return 0; 636 + } 637 + 638 + static const struct of_device_id bcm74110_mbox_of_match[] = { 639 + { .compatible = "brcm,bcm74110-mbox", }, 640 + { /* sentinel */ }, 641 + }; 642 + MODULE_DEVICE_TABLE(of, bcm74110_mbox_of_match); 643 + 644 + static struct platform_driver bcm74110_mbox_driver = { 645 + .driver = { 646 + .name = "bcm74110-mbox", 647 + .of_match_table = bcm74110_mbox_of_match, 648 + }, 649 + .probe = bcm74110_mbox_probe, 650 + .shutdown = bcm74110_mbox_shutdown, 651 + }; 652 + 
module_platform_driver(bcm74110_mbox_driver); 653 + 654 + MODULE_AUTHOR("Justin Chen <justin.chen@broadcom.com>"); 655 + MODULE_DESCRIPTION("BCM74110 mailbox driver"); 656 + MODULE_LICENSE("GPL");
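A note on the new driver above: its of_xlate expects a two-cell specifier (service type, then shared-memory slot), and the payload pointer handed to send_data is unused because the 32-bit doorbell word is built from the channel's stored type/slot. The sketch below shows how a hypothetical in-kernel consumer might attach through the generic mailbox client API; the client names and probe flow are illustrative, not part of the patch.

#include <linux/mailbox_client.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static void my_rx_callback(struct mbox_client *cl, void *data)
{
        /* bcm74110 passes NULL here; the payload lives in shared memory */
        dev_dbg(cl->dev, "mailbox notification\n");
}

static int my_client_probe(struct platform_device *pdev)
{
        struct mbox_client *cl;
        struct mbox_chan *chan;
        int ret;

        cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
        if (!cl)
                return -ENOMEM;

        cl->dev = &pdev->dev;
        cl->rx_callback = my_rx_callback;
        cl->tx_block = false;

        /* resolves the (type, slot) cells via bcm74110_mbox_of_xlate() */
        chan = mbox_request_channel(cl, 0);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        ret = mbox_send_message(chan, NULL);
        return ret < 0 ? ret : 0;       /* send returns a token on success */
}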
+5 -5
drivers/mailbox/mtk-cmdq-mailbox.c
··· 390 390 391 391 task = kzalloc(sizeof(*task), GFP_ATOMIC); 392 392 if (!task) { 393 - __pm_runtime_put_autosuspend(cmdq->mbox.dev); 393 + pm_runtime_put_autosuspend(cmdq->mbox.dev); 394 394 return -ENOMEM; 395 395 } 396 396 ··· 440 440 list_move_tail(&task->list_entry, &thread->task_busy_list); 441 441 442 442 pm_runtime_mark_last_busy(cmdq->mbox.dev); 443 - __pm_runtime_put_autosuspend(cmdq->mbox.dev); 443 + pm_runtime_put_autosuspend(cmdq->mbox.dev); 444 444 445 445 return 0; 446 446 } ··· 488 488 spin_unlock_irqrestore(&thread->chan->lock, flags); 489 489 490 490 pm_runtime_mark_last_busy(cmdq->mbox.dev); 491 - __pm_runtime_put_autosuspend(cmdq->mbox.dev); 491 + pm_runtime_put_autosuspend(cmdq->mbox.dev); 492 492 } 493 493 494 494 static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout) ··· 528 528 out: 529 529 spin_unlock_irqrestore(&thread->chan->lock, flags); 530 530 pm_runtime_mark_last_busy(cmdq->mbox.dev); 531 - __pm_runtime_put_autosuspend(cmdq->mbox.dev); 531 + pm_runtime_put_autosuspend(cmdq->mbox.dev); 532 532 533 533 return 0; 534 534 ··· 543 543 return -EFAULT; 544 544 } 545 545 pm_runtime_mark_last_busy(cmdq->mbox.dev); 546 - __pm_runtime_put_autosuspend(cmdq->mbox.dev); 546 + pm_runtime_put_autosuspend(cmdq->mbox.dev); 547 547 return 0; 548 548 } 549 549
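The mtk-cmdq hunks above switch the __pm_runtime_put_autosuspend() call sites back to pm_runtime_put_autosuspend(); the surrounding code is the standard runtime-PM autosuspend bracket. A generic sketch of that idiom (function name illustrative):

#include <linux/pm_runtime.h>

static int my_dev_do_work(struct device *dev)
{
        int ret;

        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;

        /* ... program the hardware ... */

        pm_runtime_mark_last_busy(dev);  /* restart the autosuspend timer */
        pm_runtime_put_autosuspend(dev); /* drop usage count, suspend later */
        return 0;
}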
+98 -4
drivers/mailbox/pcc.c
··· 306 306 pcc_chan_reg_read_modify_write(&pchan->db); 307 307 } 308 308 309 + static void *write_response(struct pcc_chan_info *pchan) 310 + { 311 + struct pcc_header pcc_header; 312 + void *buffer; 313 + int data_len; 314 + 315 + memcpy_fromio(&pcc_header, pchan->chan.shmem, 316 + sizeof(pcc_header)); 317 + data_len = pcc_header.length - sizeof(u32) + sizeof(struct pcc_header); 318 + 319 + buffer = pchan->chan.rx_alloc(pchan->chan.mchan->cl, data_len); 320 + if (buffer != NULL) 321 + memcpy_fromio(buffer, pchan->chan.shmem, data_len); 322 + return buffer; 323 + } 324 + 309 325 /** 310 326 * pcc_mbox_irq - PCC mailbox interrupt handler 311 327 * @irq: interrupt number ··· 333 317 { 334 318 struct pcc_chan_info *pchan; 335 319 struct mbox_chan *chan = p; 320 + struct pcc_header *pcc_header = chan->active_req; 321 + void *handle = NULL; 336 322 337 323 pchan = chan->con_priv; 338 324 ··· 358 340 * required to avoid any possible race in updatation of this flag. 359 341 */ 360 342 pchan->chan_in_use = false; 361 - mbox_chan_received_data(chan, NULL); 343 + 344 + if (pchan->chan.rx_alloc) 345 + handle = write_response(pchan); 346 + 347 + if (chan->active_req) { 348 + pcc_header = chan->active_req; 349 + if (pcc_header->flags & PCC_CMD_COMPLETION_NOTIFY) 350 + mbox_chan_txdone(chan, 0); 351 + } 352 + 353 + mbox_chan_received_data(chan, handle); 362 354 363 355 pcc_chan_acknowledge(pchan); 364 356 ··· 412 384 pcc_mchan = &pchan->chan; 413 385 pcc_mchan->shmem = acpi_os_ioremap(pcc_mchan->shmem_base_addr, 414 386 pcc_mchan->shmem_size); 415 - if (pcc_mchan->shmem) 416 - return pcc_mchan; 387 + if (!pcc_mchan->shmem) 388 + goto err; 417 389 390 + pcc_mchan->manage_writes = false; 391 + 392 + /* This indicates that the channel is ready to accept messages. 393 + * This needs to happen after the channel has registered 394 + * its callback. There is no access point to do that in 395 + * the mailbox API. That implies that the mailbox client must 396 + * have set the allocate callback function prior to 397 + * sending any messages. 398 + */ 399 + if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE) 400 + pcc_chan_reg_read_modify_write(&pchan->cmd_update); 401 + 402 + return pcc_mchan; 403 + 404 + err: 418 405 mbox_free_channel(chan); 419 406 return ERR_PTR(-ENXIO); 420 407 } ··· 460 417 } 461 418 EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); 462 419 420 + static int pcc_write_to_buffer(struct mbox_chan *chan, void *data) 421 + { 422 + struct pcc_chan_info *pchan = chan->con_priv; 423 + struct pcc_mbox_chan *pcc_mbox_chan = &pchan->chan; 424 + struct pcc_header *pcc_header = data; 425 + 426 + if (!pchan->chan.manage_writes) 427 + return 0; 428 + 429 + /* The PCC header length includes the command field 430 + * but not the other values from the header. 431 + */ 432 + int len = pcc_header->length - sizeof(u32) + sizeof(struct pcc_header); 433 + u64 val; 434 + 435 + pcc_chan_reg_read(&pchan->cmd_complete, &val); 436 + if (!val) { 437 + pr_info("%s pchan->cmd_complete not set", __func__); 438 + return -1; 439 + } 440 + memcpy_toio(pcc_mbox_chan->shmem, data, len); 441 + return 0; 442 + } 443 + 444 + 463 445 /** 464 - * pcc_send_data - Called from Mailbox Controller code. Used 446 + * pcc_send_data - Called from Mailbox Controller code. If 447 + * pchan->chan.rx_alloc is set, then the command complete 448 + * flag is checked and the data is written to the shared 449 + * buffer io memory. 450 + * 451 + * If pchan->chan.rx_alloc is not set, then it is used 465 452 * here only to ring the channel doorbell. 
The PCC client 466 453 * specific read/write is done in the client driver in 467 454 * order to maintain atomicity over PCC channel once ··· 507 434 int ret; 508 435 struct pcc_chan_info *pchan = chan->con_priv; 509 436 437 + ret = pcc_write_to_buffer(chan, data); 438 + if (ret) 439 + return ret; 440 + 510 441 ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update); 511 442 if (ret) 512 443 return ret; 513 444 514 445 ret = pcc_chan_reg_read_modify_write(&pchan->db); 446 + 515 447 if (!ret && pchan->plat_irq > 0) 516 448 pchan->chan_in_use = true; 517 449 518 450 return ret; 519 451 } 452 + 453 + 454 + static bool pcc_last_tx_done(struct mbox_chan *chan) 455 + { 456 + struct pcc_chan_info *pchan = chan->con_priv; 457 + u64 val; 458 + 459 + pcc_chan_reg_read(&pchan->cmd_complete, &val); 460 + if (!val) 461 + return false; 462 + else 463 + return true; 464 + } 465 + 466 + 520 467 521 468 /** 522 469 * pcc_startup - Called from Mailbox Controller code. Used here ··· 583 490 .send_data = pcc_send_data, 584 491 .startup = pcc_startup, 585 492 .shutdown = pcc_shutdown, 493 + .last_tx_done = pcc_last_tx_done, 586 494 }; 587 495 588 496 /**
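The pcc.c changes above add an optional managed data path: when a client installs an rx_alloc hook on its pcc_mbox_chan, the interrupt handler copies the response out of shared memory into a freshly allocated buffer and hands it to mbox_chan_received_data(), while pcc_send_data() (gated on manage_writes) checks cmd_complete and writes the command into the shared buffer itself. Per the comment added in pcc_mbox_request_channel(), the hook must be installed before the first message. A hypothetical client sketch; the hook body and function names are assumptions, only the rx_alloc field comes from the hunk:

#include <acpi/pcc.h>
#include <linux/mailbox_client.h>
#include <linux/slab.h>

static void *my_pcc_rx_alloc(struct mbox_client *cl, int len)
{
        /* called from pcc_mbox_irq(), so no sleeping allocation */
        return kmalloc(len, GFP_ATOMIC);
}

static int my_pcc_attach(struct mbox_client *cl, int subspace_id)
{
        struct pcc_mbox_chan *pchan;

        pchan = pcc_mbox_request_channel(cl, subspace_id);
        if (IS_ERR(pchan))
                return PTR_ERR(pchan);

        /* install before sending anything; see the comment in the hunk */
        pchan->rx_alloc = my_pcc_rx_alloc;
        return 0;
}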
+1 -2
drivers/mailbox/qcom-ipcc.c
··· 312 312 if (!name) 313 313 return -ENOMEM; 314 314 315 - ipcc->irq_domain = irq_domain_create_tree(of_fwnode_handle(pdev->dev.of_node), 316 - &qcom_ipcc_irq_ops, ipcc); 315 + ipcc->irq_domain = irq_domain_create_tree(dev_fwnode(&pdev->dev), &qcom_ipcc_irq_ops, ipcc); 317 316 if (!ipcc->irq_domain) 318 317 return -ENOMEM; 319 318
+21 -21
drivers/md/dm-raid.c
··· 438 438 /* Return true, if raid set in @rs is recovering */ 439 439 static bool rs_is_recovering(struct raid_set *rs) 440 440 { 441 - return rs->md.recovery_cp < rs->md.dev_sectors; 441 + return rs->md.resync_offset < rs->md.dev_sectors; 442 442 } 443 443 444 444 /* Return true, if raid set in @rs is reshaping */ ··· 768 768 rs->md.layout = raid_type->algorithm; 769 769 rs->md.new_layout = rs->md.layout; 770 770 rs->md.delta_disks = 0; 771 - rs->md.recovery_cp = MaxSector; 771 + rs->md.resync_offset = MaxSector; 772 772 773 773 for (i = 0; i < raid_devs; i++) 774 774 md_rdev_init(&rs->dev[i].rdev); ··· 912 912 rs->md.external = 0; 913 913 rs->md.persistent = 1; 914 914 rs->md.major_version = 2; 915 - } else if (rebuild && !rs->md.recovery_cp) { 915 + } else if (rebuild && !rs->md.resync_offset) { 916 916 /* 917 917 * Without metadata, we will not be able to tell if the array 918 918 * is in-sync or not - we must assume it is not. Therefore, ··· 1695 1695 { 1696 1696 /* raid0 does not recover */ 1697 1697 if (rs_is_raid0(rs)) 1698 - rs->md.recovery_cp = MaxSector; 1698 + rs->md.resync_offset = MaxSector; 1699 1699 /* 1700 1700 * A raid6 set has to be recovered either 1701 1701 * completely or for the grown part to 1702 1702 * ensure proper parity and Q-Syndrome 1703 1703 */ 1704 1704 else if (rs_is_raid6(rs)) 1705 - rs->md.recovery_cp = dev_sectors; 1705 + rs->md.resync_offset = dev_sectors; 1706 1706 /* 1707 1707 * Other raid set types may skip recovery 1708 1708 * depending on the 'nosync' flag. 1709 1709 */ 1710 1710 else 1711 - rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) 1711 + rs->md.resync_offset = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) 1712 1712 ? MaxSector : dev_sectors; 1713 1713 } 1714 1714 ··· 2143 2143 sb->events = cpu_to_le64(mddev->events); 2144 2144 2145 2145 sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset); 2146 - sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp); 2146 + sb->array_resync_offset = cpu_to_le64(mddev->resync_offset); 2147 2147 2148 2148 sb->level = cpu_to_le32(mddev->level); 2149 2149 sb->layout = cpu_to_le32(mddev->layout); ··· 2334 2334 } 2335 2335 2336 2336 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) 2337 - mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset); 2337 + mddev->resync_offset = le64_to_cpu(sb->array_resync_offset); 2338 2338 2339 2339 /* 2340 2340 * During load, we set FirstUse if a new superblock was written. 2341 2341 * There are two reasons we might not have a superblock: 2342 2342 * 1) The raid set is brand new - in which case, all of the 2343 2343 * devices must have their In_sync bit set. Also, 2344 - * recovery_cp must be 0, unless forced. 2344 + * resync_offset must be 0, unless forced. 2345 2345 * 2) This is a new device being added to an old raid set 2346 2346 * and the new device needs to be rebuilt - in which 2347 2347 * case the In_sync bit will /not/ be set and 2348 - * recovery_cp must be MaxSector. 2348 + * resync_offset must be MaxSector. 2349 2349 * 3) This is/are a new device(s) being added to an old 2350 2350 * raid set during takeover to a higher raid level 2351 2351 * to provide capacity for redundancy or during reshape ··· 2390 2390 new_devs > 1 ? 
"s" : ""); 2391 2391 return -EINVAL; 2392 2392 } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) { 2393 - DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)", 2394 - (unsigned long long) mddev->recovery_cp); 2393 + DMERR("'rebuild' specified while raid set is not in-sync (resync_offset=%llu)", 2394 + (unsigned long long) mddev->resync_offset); 2395 2395 return -EINVAL; 2396 2396 } else if (rs_is_reshaping(rs)) { 2397 2397 DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)", ··· 2700 2700 } 2701 2701 out: 2702 2702 /* 2703 - * Raise recovery_cp in case data_offset != 0 to 2703 + * Raise resync_offset in case data_offset != 0 to 2704 2704 * avoid false recovery positives in the constructor. 2705 2705 */ 2706 - if (rs->md.recovery_cp < rs->md.dev_sectors) 2707 - rs->md.recovery_cp += rs->dev[0].rdev.data_offset; 2706 + if (rs->md.resync_offset < rs->md.dev_sectors) 2707 + rs->md.resync_offset += rs->dev[0].rdev.data_offset; 2708 2708 2709 2709 /* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */ 2710 2710 rdev_for_each(rdev, &rs->md) { ··· 2759 2759 } 2760 2760 2761 2761 clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags); 2762 - mddev->recovery_cp = MaxSector; 2762 + mddev->resync_offset = MaxSector; 2763 2763 2764 2764 while (d--) { 2765 2765 rdev = &rs->dev[d].rdev; ··· 2767 2767 if (test_bit(d, (void *) rs->rebuild_disks)) { 2768 2768 clear_bit(In_sync, &rdev->flags); 2769 2769 clear_bit(Faulty, &rdev->flags); 2770 - mddev->recovery_cp = rdev->recovery_offset = 0; 2770 + mddev->resync_offset = rdev->recovery_offset = 0; 2771 2771 /* Bitmap has to be created when we do an "up" takeover */ 2772 2772 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); 2773 2773 } ··· 3225 3225 if (r) 3226 3226 goto bad; 3227 3227 3228 - rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors); 3228 + rs_setup_recovery(rs, rs->md.resync_offset < rs->md.dev_sectors ? rs->md.resync_offset : rs->md.dev_sectors); 3229 3229 } else { 3230 3230 /* This is no size change or it is shrinking, update size and record in superblocks */ 3231 3231 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false); ··· 3449 3449 3450 3450 } else { 3451 3451 if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery)) 3452 - r = mddev->recovery_cp; 3452 + r = mddev->resync_offset; 3453 3453 else 3454 3454 r = mddev->curr_resync_completed; 3455 3455 ··· 4077 4077 } 4078 4078 4079 4079 /* Check for any resize/reshape on @rs and adjust/initiate */ 4080 - if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) { 4080 + if (mddev->resync_offset && mddev->resync_offset < MaxSector) { 4081 4081 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4082 - mddev->resync_min = mddev->recovery_cp; 4082 + mddev->resync_min = mddev->resync_offset; 4083 4083 if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) 4084 4084 mddev->resync_max_sectors = mddev->dev_sectors; 4085 4085 }
+4 -4
drivers/md/md-bitmap.c
··· 1987 1987 1988 1988 md_bitmap_set_memory_bits(bitmap, sec, 1); 1989 1989 md_bitmap_file_set_bit(bitmap, sec); 1990 - if (sec < bitmap->mddev->recovery_cp) 1990 + if (sec < bitmap->mddev->resync_offset) 1991 1991 /* We are asserting that the array is dirty, 1992 - * so move the recovery_cp address back so 1992 + * so move the resync_offset address back so 1993 1993 * that it is obvious that it is dirty 1994 1994 */ 1995 - bitmap->mddev->recovery_cp = sec; 1995 + bitmap->mddev->resync_offset = sec; 1996 1996 } 1997 1997 } 1998 1998 ··· 2258 2258 || bitmap->events_cleared == mddev->events) 2259 2259 /* no need to keep dirty bits to optimise a 2260 2260 * re-add of a missing device */ 2261 - start = mddev->recovery_cp; 2261 + start = mddev->resync_offset; 2262 2262 2263 2263 mutex_lock(&mddev->bitmap_info.mutex); 2264 2264 err = md_bitmap_init_from_disk(bitmap, start);
+8 -8
drivers/md/md-cluster.c
··· 337 337 md_wakeup_thread(mddev->sync_thread); 338 338 339 339 if (hi > 0) { 340 - if (lo < mddev->recovery_cp) 341 - mddev->recovery_cp = lo; 340 + if (lo < mddev->resync_offset) 341 + mddev->resync_offset = lo; 342 342 /* wake up thread to continue resync in case resync 343 343 * is not finished */ 344 - if (mddev->recovery_cp != MaxSector) { 344 + if (mddev->resync_offset != MaxSector) { 345 345 /* 346 346 * clear the REMOTE flag since we will launch 347 347 * resync thread in current node. ··· 863 863 lockres_free(bm_lockres); 864 864 continue; 865 865 } 866 - if ((hi > 0) && (lo < mddev->recovery_cp)) { 866 + if ((hi > 0) && (lo < mddev->resync_offset)) { 867 867 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 868 - mddev->recovery_cp = lo; 868 + mddev->resync_offset = lo; 869 869 md_check_recovery(mddev); 870 870 } 871 871 ··· 1027 1027 * Also, we should send BITMAP_NEEDS_SYNC message in 1028 1028 * case reshaping is interrupted. 1029 1029 */ 1030 - if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) || 1030 + if ((cinfo->slot_number > 0 && mddev->resync_offset != MaxSector) || 1031 1031 (mddev->reshape_position != MaxSector && 1032 1032 test_bit(MD_CLOSING, &mddev->flags))) 1033 1033 resync_bitmap(mddev); ··· 1605 1605 pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn); 1606 1606 goto out; 1607 1607 } 1608 - if ((hi > 0) && (lo < mddev->recovery_cp)) 1609 - mddev->recovery_cp = lo; 1608 + if ((hi > 0) && (lo < mddev->resync_offset)) 1609 + mddev->resync_offset = lo; 1610 1610 } 1611 1611 out: 1612 1612 return err;
+44 -29
drivers/md/md.c
··· 637 637 return; 638 638 639 639 /* 640 + * If array is freed by stopping array, MD_DELETED is set by 641 + * do_md_stop(), MD_DELETED is still set here in case mddev is freed 642 + * directly by closing a mddev that is created by create_on_open. 643 + */ 644 + set_bit(MD_DELETED, &mddev->flags); 645 + /* 640 646 * Call queue_work inside the spinlock so that flush_workqueue() after 641 647 * mddev_find will succeed in waiting for the work to be done. 642 648 */ ··· 1415 1409 mddev->layout = -1; 1416 1410 1417 1411 if (sb->state & (1<<MD_SB_CLEAN)) 1418 - mddev->recovery_cp = MaxSector; 1412 + mddev->resync_offset = MaxSector; 1419 1413 else { 1420 1414 if (sb->events_hi == sb->cp_events_hi && 1421 1415 sb->events_lo == sb->cp_events_lo) { 1422 - mddev->recovery_cp = sb->recovery_cp; 1416 + mddev->resync_offset = sb->resync_offset; 1423 1417 } else 1424 - mddev->recovery_cp = 0; 1418 + mddev->resync_offset = 0; 1425 1419 } 1426 1420 1427 1421 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); ··· 1547 1541 mddev->minor_version = sb->minor_version; 1548 1542 if (mddev->in_sync) 1549 1543 { 1550 - sb->recovery_cp = mddev->recovery_cp; 1544 + sb->resync_offset = mddev->resync_offset; 1551 1545 sb->cp_events_hi = (mddev->events>>32); 1552 1546 sb->cp_events_lo = (u32)mddev->events; 1553 - if (mddev->recovery_cp == MaxSector) 1547 + if (mddev->resync_offset == MaxSector) 1554 1548 sb->state = (1<< MD_SB_CLEAN); 1555 1549 } else 1556 - sb->recovery_cp = 0; 1550 + sb->resync_offset = 0; 1557 1551 1558 1552 sb->layout = mddev->layout; 1559 1553 sb->chunk_size = mddev->chunk_sectors << 9; ··· 1901 1895 mddev->bitmap_info.default_space = (4096-1024) >> 9; 1902 1896 mddev->reshape_backwards = 0; 1903 1897 1904 - mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1898 + mddev->resync_offset = le64_to_cpu(sb->resync_offset); 1905 1899 memcpy(mddev->uuid, sb->set_uuid, 16); 1906 1900 1907 1901 mddev->max_disks = (4096-256)/2; ··· 2087 2081 sb->utime = cpu_to_le64((__u64)mddev->utime); 2088 2082 sb->events = cpu_to_le64(mddev->events); 2089 2083 if (mddev->in_sync) 2090 - sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 2084 + sb->resync_offset = cpu_to_le64(mddev->resync_offset); 2091 2085 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) 2092 2086 sb->resync_offset = cpu_to_le64(MaxSector); 2093 2087 else ··· 2767 2761 /* If this is just a dirty<->clean transition, and the array is clean 2768 2762 * and 'events' is odd, we can roll back to the previous clean state */ 2769 2763 if (nospares 2770 - && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2764 + && (mddev->in_sync && mddev->resync_offset == MaxSector) 2771 2765 && mddev->can_decrease_events 2772 2766 && mddev->events != 1) { 2773 2767 mddev->events--; ··· 4303 4297 static ssize_t 4304 4298 resync_start_show(struct mddev *mddev, char *page) 4305 4299 { 4306 - if (mddev->recovery_cp == MaxSector) 4300 + if (mddev->resync_offset == MaxSector) 4307 4301 return sprintf(page, "none\n"); 4308 - return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 4302 + return sprintf(page, "%llu\n", (unsigned long long)mddev->resync_offset); 4309 4303 } 4310 4304 4311 4305 static ssize_t ··· 4331 4325 err = -EBUSY; 4332 4326 4333 4327 if (!err) { 4334 - mddev->recovery_cp = n; 4328 + mddev->resync_offset = n; 4335 4329 if (mddev->pers) 4336 4330 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 4337 4331 } ··· 6423 6417 mddev->external_size = 0; 6424 6418 mddev->dev_sectors = 0; 6425 6419 mddev->raid_disks = 0; 6426 - mddev->recovery_cp = 0; 6420 
+ mddev->resync_offset = 0; 6427 6421 mddev->resync_min = 0; 6428 6422 mddev->resync_max = MaxSector; 6429 6423 mddev->reshape_position = MaxSector; ··· 7368 7362 * openned 7369 7363 */ 7370 7364 if (info->state & (1<<MD_SB_CLEAN)) 7371 - mddev->recovery_cp = MaxSector; 7365 + mddev->resync_offset = MaxSector; 7372 7366 else 7373 - mddev->recovery_cp = 0; 7367 + mddev->resync_offset = 0; 7374 7368 mddev->persistent = ! info->not_persistent; 7375 7369 mddev->external = 0; 7376 7370 ··· 8309 8303 seq_printf(seq, "\tresync=REMOTE"); 8310 8304 return 1; 8311 8305 } 8312 - if (mddev->recovery_cp < MaxSector) { 8306 + if (mddev->resync_offset < MaxSector) { 8313 8307 seq_printf(seq, "\tresync=PENDING"); 8314 8308 return 1; 8315 8309 } ··· 8952 8946 return mddev->resync_min; 8953 8947 case ACTION_RESYNC: 8954 8948 if (!mddev->bitmap) 8955 - return mddev->recovery_cp; 8949 + return mddev->resync_offset; 8956 8950 return 0; 8957 8951 case ACTION_RESHAPE: 8958 8952 /* ··· 9190 9184 atomic_read(&mddev->recovery_active) == 0); 9191 9185 mddev->curr_resync_completed = j; 9192 9186 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 9193 - j > mddev->recovery_cp) 9194 - mddev->recovery_cp = j; 9187 + j > mddev->resync_offset) 9188 + mddev->resync_offset = j; 9195 9189 update_time = jiffies; 9196 9190 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 9197 9191 sysfs_notify_dirent_safe(mddev->sysfs_completed); ··· 9311 9305 mddev->curr_resync > MD_RESYNC_ACTIVE) { 9312 9306 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 9313 9307 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 9314 - if (mddev->curr_resync >= mddev->recovery_cp) { 9308 + if (mddev->curr_resync >= mddev->resync_offset) { 9315 9309 pr_debug("md: checkpointing %s of %s.\n", 9316 9310 desc, mdname(mddev)); 9317 9311 if (test_bit(MD_RECOVERY_ERROR, 9318 9312 &mddev->recovery)) 9319 - mddev->recovery_cp = 9313 + mddev->resync_offset = 9320 9314 mddev->curr_resync_completed; 9321 9315 else 9322 - mddev->recovery_cp = 9316 + mddev->resync_offset = 9323 9317 mddev->curr_resync; 9324 9318 } 9325 9319 } else 9326 - mddev->recovery_cp = MaxSector; 9320 + mddev->resync_offset = MaxSector; 9327 9321 } else { 9328 9322 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 9329 9323 mddev->curr_resync = MaxSector; ··· 9427 9421 9428 9422 static bool rdev_addable(struct md_rdev *rdev) 9429 9423 { 9424 + struct mddev *mddev; 9425 + 9426 + mddev = READ_ONCE(rdev->mddev); 9427 + if (!mddev) 9428 + return false; 9429 + 9430 9430 /* rdev is already used, don't add it again. */ 9431 9431 if (test_bit(Candidate, &rdev->flags) || rdev->raid_disk >= 0 || 9432 9432 test_bit(Faulty, &rdev->flags)) ··· 9443 9431 return true; 9444 9432 9445 9433 /* Allow to add if array is read-write. */ 9446 - if (md_is_rdwr(rdev->mddev)) 9434 + if (md_is_rdwr(mddev)) 9447 9435 return true; 9448 9436 9449 9437 /* ··· 9545 9533 } 9546 9534 9547 9535 /* Check if resync is in progress. */ 9548 - if (mddev->recovery_cp < MaxSector) { 9536 + if (mddev->resync_offset < MaxSector) { 9549 9537 remove_spares(mddev, NULL); 9550 9538 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9551 9539 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); ··· 9726 9714 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 9727 9715 (mddev->external == 0 && mddev->safemode == 1) || 9728 9716 (mddev->safemode == 2 9729 - && !mddev->in_sync && mddev->recovery_cp == MaxSector) 9717 + && !mddev->in_sync && mddev->resync_offset == MaxSector) 9730 9718 )) 9731 9719 return; 9732 9720 ··· 9783 9771 * remove disk. 
9784 9772 */ 9785 9773 rdev_for_each_safe(rdev, tmp, mddev) { 9786 - if (test_and_clear_bit(ClusterRemove, &rdev->flags) && 9787 - rdev->raid_disk < 0) 9774 + if (rdev->raid_disk < 0 && 9775 + test_and_clear_bit(ClusterRemove, &rdev->flags)) 9788 9776 md_kick_rdev_from_array(rdev); 9789 9777 } 9790 9778 } ··· 10090 10078 10091 10079 /* Check for change of roles in the active devices */ 10092 10080 rdev_for_each_safe(rdev2, tmp, mddev) { 10093 - if (test_bit(Faulty, &rdev2->flags)) 10081 + if (test_bit(Faulty, &rdev2->flags)) { 10082 + if (test_bit(ClusterRemove, &rdev2->flags)) 10083 + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 10094 10084 continue; 10085 + } 10095 10086 10096 10087 /* Check if the roles changed */ 10097 10088 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
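Besides the recovery_cp rename, the md.c hunks close two races: the ClusterRemove handling now tests the cheap rdev->raid_disk condition before the bit-clearing test_and_clear_bit() (and re-arms recovery when a Faulty device still carries ClusterRemove), and rdev_addable() snapshots rdev->mddev with READ_ONCE() because a concurrent unbind can clear that field. The latter is the general snapshot-then-check idiom; a generic sketch with made-up types:

#include <linux/compiler.h>
#include <linux/types.h>

struct my_parent { bool usable; };
struct my_obj   { struct my_parent *parent; };

static bool my_obj_parent_usable(struct my_obj *obj)
{
        /* one snapshot: a concurrent writer may set obj->parent to NULL */
        struct my_parent *parent = READ_ONCE(obj->parent);

        if (!parent)
                return false;   /* detached under us */

        /* only dereference the snapshot, never re-read the field */
        return parent->usable;
}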
+1 -1
drivers/md/md.h
··· 523 523 unsigned long normal_io_events; /* IO event timestamp */ 524 524 atomic_t recovery_active; /* blocks scheduled, but not written */ 525 525 wait_queue_head_t recovery_wait; 526 - sector_t recovery_cp; 526 + sector_t resync_offset; 527 527 sector_t resync_min; /* user requested sync 528 528 * starts here */ 529 529 sector_t resync_max; /* resync should pause
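The md.h hunk is the anchor for the rename running through the dm-raid, md-bitmap, md-cluster, raid0/1/10, raid5 and ppl hunks here: mddev->recovery_cp becomes mddev->resync_offset with unchanged semantics. As the call sites above use it, MaxSector still means "in sync" and any smaller value is the next sector to resync; a minimal reader, assuming drivers/md scope:

#include "md.h"  /* drivers/md private header: struct mddev, MaxSector */

static inline bool my_array_in_sync(struct mddev *mddev)
{
        return mddev->resync_offset == MaxSector;
}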
+3 -3
drivers/md/raid0.c
··· 674 674 mddev->raid_disks--; 675 675 mddev->delta_disks = -1; 676 676 /* make sure it will be not marked as dirty */ 677 - mddev->recovery_cp = MaxSector; 677 + mddev->resync_offset = MaxSector; 678 678 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); 679 679 680 680 create_strip_zones(mddev, &priv_conf); ··· 717 717 mddev->raid_disks += mddev->delta_disks; 718 718 mddev->degraded = 0; 719 719 /* make sure it will be not marked as dirty */ 720 - mddev->recovery_cp = MaxSector; 720 + mddev->resync_offset = MaxSector; 721 721 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); 722 722 723 723 create_strip_zones(mddev, &priv_conf); ··· 760 760 mddev->delta_disks = 1 - mddev->raid_disks; 761 761 mddev->raid_disks = 1; 762 762 /* make sure it will be not marked as dirty */ 763 - mddev->recovery_cp = MaxSector; 763 + mddev->resync_offset = MaxSector; 764 764 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); 765 765 766 766 create_strip_zones(mddev, &priv_conf);
+1 -1
drivers/md/raid1-10.c
··· 283 283 static inline bool raid1_should_read_first(struct mddev *mddev, 284 284 sector_t this_sector, int len) 285 285 { 286 - if ((mddev->recovery_cp < this_sector + len)) 286 + if ((mddev->resync_offset < this_sector + len)) 287 287 return true; 288 288 289 289 if (mddev_is_clustered(mddev) &&
+35 -59
drivers/md/raid1.c
··· 127 127 return get_resync_pages(bio)->raid_bio; 128 128 } 129 129 130 - static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 130 + static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r1conf *conf) 131 131 { 132 - struct pool_info *pi = data; 133 - int size = offsetof(struct r1bio, bios[pi->raid_disks]); 132 + int size = offsetof(struct r1bio, bios[conf->raid_disks * 2]); 134 133 135 134 /* allocate a r1bio with room for raid_disks entries in the bios array */ 136 135 return kzalloc(size, gfp_flags); ··· 144 145 145 146 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) 146 147 { 147 - struct pool_info *pi = data; 148 + struct r1conf *conf = data; 148 149 struct r1bio *r1_bio; 149 150 struct bio *bio; 150 151 int need_pages; 151 152 int j; 152 153 struct resync_pages *rps; 153 154 154 - r1_bio = r1bio_pool_alloc(gfp_flags, pi); 155 + r1_bio = r1bio_pool_alloc(gfp_flags, conf); 155 156 if (!r1_bio) 156 157 return NULL; 157 158 158 - rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages), 159 + rps = kmalloc_array(conf->raid_disks * 2, sizeof(struct resync_pages), 159 160 gfp_flags); 160 161 if (!rps) 161 162 goto out_free_r1bio; ··· 163 164 /* 164 165 * Allocate bios : 1 for reading, n-1 for writing 165 166 */ 166 - for (j = pi->raid_disks ; j-- ; ) { 167 + for (j = conf->raid_disks * 2; j-- ; ) { 167 168 bio = bio_kmalloc(RESYNC_PAGES, gfp_flags); 168 169 if (!bio) 169 170 goto out_free_bio; ··· 176 177 * If this is a user-requested check/repair, allocate 177 178 * RESYNC_PAGES for each bio. 178 179 */ 179 - if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) 180 - need_pages = pi->raid_disks; 180 + if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery)) 181 + need_pages = conf->raid_disks * 2; 181 182 else 182 183 need_pages = 1; 183 - for (j = 0; j < pi->raid_disks; j++) { 184 + for (j = 0; j < conf->raid_disks * 2; j++) { 184 185 struct resync_pages *rp = &rps[j]; 185 186 186 187 bio = r1_bio->bios[j]; ··· 206 207 resync_free_pages(&rps[j]); 207 208 208 209 out_free_bio: 209 - while (++j < pi->raid_disks) { 210 + while (++j < conf->raid_disks * 2) { 210 211 bio_uninit(r1_bio->bios[j]); 211 212 kfree(r1_bio->bios[j]); 212 213 } ··· 219 220 220 221 static void r1buf_pool_free(void *__r1_bio, void *data) 221 222 { 222 - struct pool_info *pi = data; 223 + struct r1conf *conf = data; 223 224 int i; 224 225 struct r1bio *r1bio = __r1_bio; 225 226 struct resync_pages *rp = NULL; 226 227 227 - for (i = pi->raid_disks; i--; ) { 228 + for (i = conf->raid_disks * 2; i--; ) { 228 229 rp = get_resync_pages(r1bio->bios[i]); 229 230 resync_free_pages(rp); 230 231 bio_uninit(r1bio->bios[i]); ··· 254 255 struct r1conf *conf = r1_bio->mddev->private; 255 256 256 257 put_all_bios(conf, r1_bio); 257 - mempool_free(r1_bio, &conf->r1bio_pool); 258 + mempool_free(r1_bio, conf->r1bio_pool); 258 259 } 259 260 260 261 static void put_buf(struct r1bio *r1_bio) ··· 1304 1305 struct r1conf *conf = mddev->private; 1305 1306 struct r1bio *r1_bio; 1306 1307 1307 - r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO); 1308 - /* Ensure no bio records IO_BLOCKED */ 1309 - memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0])); 1308 + r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); 1309 + memset(r1_bio, 0, offsetof(struct r1bio, bios[conf->raid_disks * 2])); 1310 1310 init_r1bio(r1_bio, mddev, bio); 1311 1311 return r1_bio; 1312 1312 } ··· 2745 2747 BUG_ON(mempool_initialized(&conf->r1buf_pool)); 2746 2748 2747 2749 return mempool_init(&conf->r1buf_pool, buffs, 
r1buf_pool_alloc, 2748 - r1buf_pool_free, conf->poolinfo); 2750 + r1buf_pool_free, conf); 2749 2751 } 2750 2752 2751 2753 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) ··· 2755 2757 struct bio *bio; 2756 2758 int i; 2757 2759 2758 - for (i = conf->poolinfo->raid_disks; i--; ) { 2760 + for (i = conf->raid_disks * 2; i--; ) { 2759 2761 bio = r1bio->bios[i]; 2760 2762 rps = bio->bi_private; 2761 2763 bio_reset(bio, NULL, 0); ··· 2820 2822 } 2821 2823 2822 2824 if (mddev->bitmap == NULL && 2823 - mddev->recovery_cp == MaxSector && 2825 + mddev->resync_offset == MaxSector && 2824 2826 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 2825 2827 conf->fullsync == 0) { 2826 2828 *skipped = 1; ··· 3083 3085 int i; 3084 3086 struct raid1_info *disk; 3085 3087 struct md_rdev *rdev; 3088 + size_t r1bio_size; 3086 3089 int err = -ENOMEM; 3087 3090 3088 3091 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL); ··· 3120 3121 if (!conf->tmppage) 3121 3122 goto abort; 3122 3123 3123 - conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL); 3124 - if (!conf->poolinfo) 3125 - goto abort; 3126 - conf->poolinfo->raid_disks = mddev->raid_disks * 2; 3127 - err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc, 3128 - rbio_pool_free, conf->poolinfo); 3129 - if (err) 3124 + r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]); 3125 + conf->r1bio_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS, r1bio_size); 3126 + if (!conf->r1bio_pool) 3130 3127 goto abort; 3131 3128 3132 3129 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0); 3133 3130 if (err) 3134 3131 goto abort; 3135 - 3136 - conf->poolinfo->mddev = mddev; 3137 3132 3138 3133 err = -EINVAL; 3139 3134 spin_lock_init(&conf->device_lock); ··· 3191 3198 3192 3199 abort: 3193 3200 if (conf) { 3194 - mempool_exit(&conf->r1bio_pool); 3201 + mempool_destroy(conf->r1bio_pool); 3195 3202 kfree(conf->mirrors); 3196 3203 safe_put_page(conf->tmppage); 3197 - kfree(conf->poolinfo); 3198 3204 kfree(conf->nr_pending); 3199 3205 kfree(conf->nr_waiting); 3200 3206 kfree(conf->nr_queued); ··· 3274 3282 } 3275 3283 3276 3284 if (conf->raid_disks - mddev->degraded == 1) 3277 - mddev->recovery_cp = MaxSector; 3285 + mddev->resync_offset = MaxSector; 3278 3286 3279 - if (mddev->recovery_cp != MaxSector) 3287 + if (mddev->resync_offset != MaxSector) 3280 3288 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n", 3281 3289 mdname(mddev)); 3282 3290 pr_info("md/raid1:%s: active with %d out of %d mirrors\n", ··· 3303 3311 { 3304 3312 struct r1conf *conf = priv; 3305 3313 3306 - mempool_exit(&conf->r1bio_pool); 3314 + mempool_destroy(conf->r1bio_pool); 3307 3315 kfree(conf->mirrors); 3308 3316 safe_put_page(conf->tmppage); 3309 - kfree(conf->poolinfo); 3310 3317 kfree(conf->nr_pending); 3311 3318 kfree(conf->nr_waiting); 3312 3319 kfree(conf->nr_queued); ··· 3336 3345 3337 3346 md_set_array_sectors(mddev, newsize); 3338 3347 if (sectors > mddev->dev_sectors && 3339 - mddev->recovery_cp > mddev->dev_sectors) { 3340 - mddev->recovery_cp = mddev->dev_sectors; 3348 + mddev->resync_offset > mddev->dev_sectors) { 3349 + mddev->resync_offset = mddev->dev_sectors; 3341 3350 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3342 3351 } 3343 3352 mddev->dev_sectors = sectors; ··· 3358 3367 * At the same time, we "pack" the devices so that all the missing 3359 3368 * devices have the higher raid_disk numbers. 
3360 3369 */ 3361 - mempool_t newpool, oldpool; 3362 - struct pool_info *newpoolinfo; 3370 + mempool_t *newpool, *oldpool; 3371 + size_t new_r1bio_size; 3363 3372 struct raid1_info *newmirrors; 3364 3373 struct r1conf *conf = mddev->private; 3365 3374 int cnt, raid_disks; 3366 3375 unsigned long flags; 3367 3376 int d, d2; 3368 - int ret; 3369 - 3370 - memset(&newpool, 0, sizeof(newpool)); 3371 - memset(&oldpool, 0, sizeof(oldpool)); 3372 3377 3373 3378 /* Cannot change chunk_size, layout, or level */ 3374 3379 if (mddev->chunk_sectors != mddev->new_chunk_sectors || ··· 3390 3403 return -EBUSY; 3391 3404 } 3392 3405 3393 - newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL); 3394 - if (!newpoolinfo) 3406 + new_r1bio_size = offsetof(struct r1bio, bios[raid_disks * 2]); 3407 + newpool = mempool_create_kmalloc_pool(NR_RAID_BIOS, new_r1bio_size); 3408 + if (!newpool) { 3395 3409 return -ENOMEM; 3396 - newpoolinfo->mddev = mddev; 3397 - newpoolinfo->raid_disks = raid_disks * 2; 3398 - 3399 - ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc, 3400 - rbio_pool_free, newpoolinfo); 3401 - if (ret) { 3402 - kfree(newpoolinfo); 3403 - return ret; 3404 3410 } 3405 3411 newmirrors = kzalloc(array3_size(sizeof(struct raid1_info), 3406 3412 raid_disks, 2), 3407 3413 GFP_KERNEL); 3408 3414 if (!newmirrors) { 3409 - kfree(newpoolinfo); 3410 - mempool_exit(&newpool); 3415 + mempool_destroy(newpool); 3411 3416 return -ENOMEM; 3412 3417 } 3413 3418 ··· 3408 3429 /* ok, everything is stopped */ 3409 3430 oldpool = conf->r1bio_pool; 3410 3431 conf->r1bio_pool = newpool; 3411 - init_waitqueue_head(&conf->r1bio_pool.wait); 3412 3432 3413 3433 for (d = d2 = 0; d < conf->raid_disks; d++) { 3414 3434 struct md_rdev *rdev = conf->mirrors[d].rdev; ··· 3424 3446 } 3425 3447 kfree(conf->mirrors); 3426 3448 conf->mirrors = newmirrors; 3427 - kfree(conf->poolinfo); 3428 - conf->poolinfo = newpoolinfo; 3429 3449 3430 3450 spin_lock_irqsave(&conf->device_lock, flags); 3431 3451 mddev->degraded += (raid_disks - conf->raid_disks); ··· 3437 3461 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3438 3462 md_wakeup_thread(mddev->thread); 3439 3463 3440 - mempool_exit(&oldpool); 3464 + mempool_destroy(oldpool); 3441 3465 return 0; 3442 3466 } 3443 3467
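The raid1.c rework above drops struct pool_info entirely: the r1bio pool becomes a plain kmalloc-backed mempool of fixed-size elements (the flexible bios[] array sized for raid_disks * 2), which lets raid1_reshape() build a new pool and swap the pointer instead of re-initializing in place. A generic sketch of that shape, with illustrative names:

#include <linux/mempool.h>
#include <linux/stddef.h>

struct my_elem {
        int nr;
        void *slots[];
};

static mempool_t *my_pool_create(int min_nr, int nr_slots)
{
        /* element size covers the flexible array, as r1bio_size does above */
        return mempool_create_kmalloc_pool(min_nr,
                        offsetof(struct my_elem, slots[nr_slots]));
}

static int my_pool_resize(mempool_t **poolp, int min_nr, int new_slots)
{
        mempool_t *newpool, *oldpool;

        newpool = my_pool_create(min_nr, new_slots);
        if (!newpool)
                return -ENOMEM;

        /* callers quiesce users first, as raid1_reshape() does */
        oldpool = *poolp;
        *poolp = newpool;
        mempool_destroy(oldpool);
        return 0;
}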
+1 -21
drivers/md/raid1.h
··· 49 49 sector_t seq_start; 50 50 }; 51 51 52 - /* 53 - * memory pools need a pointer to the mddev, so they can force an unplug 54 - * when memory is tight, and a count of the number of drives that the 55 - * pool was allocated for, so they know how much to allocate and free. 56 - * mddev->raid_disks cannot be used, as it can change while a pool is active 57 - * These two datums are stored in a kmalloced struct. 58 - * The 'raid_disks' here is twice the raid_disks in r1conf. 59 - * This allows space for each 'real' device can have a replacement in the 60 - * second half of the array. 61 - */ 62 - 63 - struct pool_info { 64 - struct mddev *mddev; 65 - int raid_disks; 66 - }; 67 - 68 52 struct r1conf { 69 53 struct mddev *mddev; 70 54 struct raid1_info *mirrors; /* twice 'raid_disks' to ··· 98 114 */ 99 115 int recovery_disabled; 100 116 101 - /* poolinfo contains information about the content of the 102 - * mempools - it changes when the array grows or shrinks 103 - */ 104 - struct pool_info *poolinfo; 105 - mempool_t r1bio_pool; 117 + mempool_t *r1bio_pool; 106 118 mempool_t r1buf_pool; 107 119 108 120 struct bio_set bio_split;
+8 -8
drivers/md/raid10.c
··· 2117 2117 int last = conf->geo.raid_disks - 1; 2118 2118 struct raid10_info *p; 2119 2119 2120 - if (mddev->recovery_cp < MaxSector) 2120 + if (mddev->resync_offset < MaxSector) 2121 2121 /* only hot-add to in-sync arrays, as recovery is 2122 2122 * very different from resync 2123 2123 */ ··· 3185 3185 * of a clean array, like RAID1 does. 3186 3186 */ 3187 3187 if (mddev->bitmap == NULL && 3188 - mddev->recovery_cp == MaxSector && 3188 + mddev->resync_offset == MaxSector && 3189 3189 mddev->reshape_position == MaxSector && 3190 3190 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 3191 3191 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && ··· 4145 4145 disk->recovery_disabled = mddev->recovery_disabled - 1; 4146 4146 } 4147 4147 4148 - if (mddev->recovery_cp != MaxSector) 4148 + if (mddev->resync_offset != MaxSector) 4149 4149 pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n", 4150 4150 mdname(mddev)); 4151 4151 pr_info("md/raid10:%s: active with %d out of %d devices\n", ··· 4245 4245 4246 4246 md_set_array_sectors(mddev, size); 4247 4247 if (sectors > mddev->dev_sectors && 4248 - mddev->recovery_cp > oldsize) { 4249 - mddev->recovery_cp = oldsize; 4248 + mddev->resync_offset > oldsize) { 4249 + mddev->resync_offset = oldsize; 4250 4250 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4251 4251 } 4252 4252 calc_sectors(conf, sectors); ··· 4275 4275 mddev->delta_disks = mddev->raid_disks; 4276 4276 mddev->raid_disks *= 2; 4277 4277 /* make sure it will be not marked as dirty */ 4278 - mddev->recovery_cp = MaxSector; 4278 + mddev->resync_offset = MaxSector; 4279 4279 mddev->dev_sectors = size; 4280 4280 4281 4281 conf = setup_conf(mddev); ··· 5087 5087 return; 5088 5088 5089 5089 if (mddev->delta_disks > 0) { 5090 - if (mddev->recovery_cp > mddev->resync_max_sectors) { 5091 - mddev->recovery_cp = mddev->resync_max_sectors; 5090 + if (mddev->resync_offset > mddev->resync_max_sectors) { 5091 + mddev->resync_offset = mddev->resync_max_sectors; 5092 5092 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5093 5093 } 5094 5094 mddev->resync_max_sectors = mddev->array_sectors;
+3 -3
drivers/md/raid5-ppl.c
··· 1163 1163 le64_to_cpu(pplhdr->generation)); 1164 1164 1165 1165 /* attempt to recover from log if we are starting a dirty array */ 1166 - if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector) 1166 + if (pplhdr && !mddev->pers && mddev->resync_offset != MaxSector) 1167 1167 ret = ppl_recover(log, pplhdr, pplhdr_offset); 1168 1168 1169 1169 /* write empty header if we are starting the array */ ··· 1422 1422 1423 1423 if (ret) { 1424 1424 goto err; 1425 - } else if (!mddev->pers && mddev->recovery_cp == 0 && 1425 + } else if (!mddev->pers && mddev->resync_offset == 0 && 1426 1426 ppl_conf->recovered_entries > 0 && 1427 1427 ppl_conf->mismatch_count == 0) { 1428 1428 /* 1429 1429 * If we are starting a dirty array and the recovery succeeds 1430 1430 * without any issues, set the array as clean. 1431 1431 */ 1432 - mddev->recovery_cp = MaxSector; 1432 + mddev->resync_offset = MaxSector; 1433 1433 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 1434 1434 } else if (mddev->pers && ppl_conf->mismatch_count > 0) { 1435 1435 /* no mismatch allowed when enabling PPL for a running array */
+15 -15
drivers/md/raid5.c
··· 3740 3740 && !test_bit(Faulty, &rdev->flags) 3741 3741 && !test_bit(In_sync, &rdev->flags) 3742 3742 && (rdev->recovery_offset <= sh->sector 3743 - || rdev->mddev->recovery_cp <= sh->sector)) 3743 + || rdev->mddev->resync_offset <= sh->sector)) 3744 3744 rv = 1; 3745 3745 return rv; 3746 3746 } ··· 3832 3832 * is missing/faulty, then we need to read everything we can. 3833 3833 */ 3834 3834 if (!force_rcw && 3835 - sh->sector < sh->raid_conf->mddev->recovery_cp) 3835 + sh->sector < sh->raid_conf->mddev->resync_offset) 3836 3836 /* reconstruct-write isn't being forced */ 3837 3837 return 0; 3838 3838 for (i = 0; i < s->failed && i < 2; i++) { ··· 4097 4097 int disks) 4098 4098 { 4099 4099 int rmw = 0, rcw = 0, i; 4100 - sector_t recovery_cp = conf->mddev->recovery_cp; 4100 + sector_t resync_offset = conf->mddev->resync_offset; 4101 4101 4102 4102 /* Check whether resync is now happening or should start. 4103 4103 * If yes, then the array is dirty (after unclean shutdown or ··· 4107 4107 * generate correct data from the parity. 4108 4108 */ 4109 4109 if (conf->rmw_level == PARITY_DISABLE_RMW || 4110 - (recovery_cp < MaxSector && sh->sector >= recovery_cp && 4110 + (resync_offset < MaxSector && sh->sector >= resync_offset && 4111 4111 s->failed == 0)) { 4112 4112 /* Calculate the real rcw later - for now make it 4113 4113 * look like rcw is cheaper 4114 4114 */ 4115 4115 rcw = 1; rmw = 2; 4116 - pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", 4117 - conf->rmw_level, (unsigned long long)recovery_cp, 4116 + pr_debug("force RCW rmw_level=%u, resync_offset=%llu sh->sector=%llu\n", 4117 + conf->rmw_level, (unsigned long long)resync_offset, 4118 4118 (unsigned long long)sh->sector); 4119 4119 } else for (i = disks; i--; ) { 4120 4120 /* would I have to read this buffer for read_modify_write */ ··· 4770 4770 if (test_bit(STRIPE_SYNCING, &sh->state)) { 4771 4771 /* If there is a failed device being replaced, 4772 4772 * we must be recovering. 4773 - * else if we are after recovery_cp, we must be syncing 4773 + * else if we are after resync_offset, we must be syncing 4774 4774 * else if MD_RECOVERY_REQUESTED is set, we also are syncing. 4775 4775 * else we can only be replacing 4776 4776 * sync and recovery both need to read all devices, and so 4777 4777 * use the same flag. 
4778 4778 */ 4779 4779 if (do_recovery || 4780 - sh->sector >= conf->mddev->recovery_cp || 4780 + sh->sector >= conf->mddev->resync_offset || 4781 4781 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) 4782 4782 s->syncing = 1; 4783 4783 else ··· 7780 7780 int first = 1; 7781 7781 int ret = -EIO; 7782 7782 7783 - if (mddev->recovery_cp != MaxSector) 7783 + if (mddev->resync_offset != MaxSector) 7784 7784 pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", 7785 7785 mdname(mddev)); 7786 7786 ··· 7921 7921 mdname(mddev)); 7922 7922 mddev->ro = 1; 7923 7923 set_disk_ro(mddev->gendisk, 1); 7924 - } else if (mddev->recovery_cp == MaxSector) 7924 + } else if (mddev->resync_offset == MaxSector) 7925 7925 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); 7926 7926 } 7927 7927 ··· 7988 7988 mddev->resync_max_sectors = mddev->dev_sectors; 7989 7989 7990 7990 if (mddev->degraded > dirty_parity_disks && 7991 - mddev->recovery_cp != MaxSector) { 7991 + mddev->resync_offset != MaxSector) { 7992 7992 if (test_bit(MD_HAS_PPL, &mddev->flags)) 7993 7993 pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n", 7994 7994 mdname(mddev)); ··· 8328 8328 8329 8329 md_set_array_sectors(mddev, newsize); 8330 8330 if (sectors > mddev->dev_sectors && 8331 - mddev->recovery_cp > mddev->dev_sectors) { 8332 - mddev->recovery_cp = mddev->dev_sectors; 8331 + mddev->resync_offset > mddev->dev_sectors) { 8332 + mddev->resync_offset = mddev->dev_sectors; 8333 8333 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8334 8334 } 8335 8335 mddev->dev_sectors = sectors; ··· 8423 8423 return -EINVAL; 8424 8424 8425 8425 /* raid5 can't handle concurrent reshape and recovery */ 8426 - if (mddev->recovery_cp < MaxSector) 8426 + if (mddev->resync_offset < MaxSector) 8427 8427 return -EBUSY; 8428 8428 for (i = 0; i < conf->raid_disks; i++) 8429 8429 if (conf->disks[i].replacement) ··· 8648 8648 mddev->raid_disks += 1; 8649 8649 mddev->delta_disks = 1; 8650 8650 /* make sure it will be not marked as dirty */ 8651 - mddev->recovery_cp = MaxSector; 8651 + mddev->resync_offset = MaxSector; 8652 8652 8653 8653 return setup_conf(mddev); 8654 8654 }
+1 -1
drivers/media/dvb-frontends/cxd2820r_core.c
··· 651 651 priv->gpio_chip.parent = &client->dev; 652 652 priv->gpio_chip.owner = THIS_MODULE; 653 653 priv->gpio_chip.direction_output = cxd2820r_gpio_direction_output; 654 - priv->gpio_chip.set_rv = cxd2820r_gpio_set; 654 + priv->gpio_chip.set = cxd2820r_gpio_set; 655 655 priv->gpio_chip.get = cxd2820r_gpio_get; 656 656 priv->gpio_chip.base = -1; /* Dynamic allocation */ 657 657 priv->gpio_chip.ngpio = GPIO_COUNT;
+1 -1
drivers/media/i2c/ds90ub913.c
··· 235 235 gc->ngpio = UB913_NUM_GPIOS; 236 236 gc->get_direction = ub913_gpio_get_direction; 237 237 gc->direction_output = ub913_gpio_direction_out; 238 - gc->set_rv = ub913_gpio_set; 238 + gc->set = ub913_gpio_set; 239 239 gc->of_xlate = ub913_gpio_of_xlate; 240 240 gc->of_gpio_n_cells = 2; 241 241
+1 -1
drivers/media/i2c/ds90ub953.c
··· 361 361 gc->direction_input = ub953_gpio_direction_in; 362 362 gc->direction_output = ub953_gpio_direction_out; 363 363 gc->get = ub953_gpio_get; 364 - gc->set_rv = ub953_gpio_set; 364 + gc->set = ub953_gpio_set; 365 365 gc->of_xlate = ub953_gpio_of_xlate; 366 366 gc->of_gpio_n_cells = 2; 367 367
+1 -1
drivers/media/i2c/max9286.c
··· 1220 1220 gpio->owner = THIS_MODULE; 1221 1221 gpio->ngpio = 2; 1222 1222 gpio->base = -1; 1223 - gpio->set_rv = max9286_gpiochip_set; 1223 + gpio->set = max9286_gpiochip_set; 1224 1224 gpio->get = max9286_gpiochip_get; 1225 1225 gpio->can_sleep = true; 1226 1226
+1 -1
drivers/media/i2c/max96717.c
··· 355 355 gc->get_direction = max96717_gpio_get_direction; 356 356 gc->direction_input = max96717_gpio_direction_in; 357 357 gc->direction_output = max96717_gpio_direction_out; 358 - gc->set_rv = max96717_gpiochip_set; 358 + gc->set = max96717_gpiochip_set; 359 359 gc->get = max96717_gpiochip_get; 360 360 361 361 /* Disable GPIO forwarding */
+1 -1
drivers/media/pci/solo6x10/solo6x10-gpio.c
··· 158 158 159 159 solo_dev->gpio_dev.get_direction = solo_gpiochip_get_direction; 160 160 solo_dev->gpio_dev.get = solo_gpiochip_get; 161 - solo_dev->gpio_dev.set_rv = solo_gpiochip_set; 161 + solo_dev->gpio_dev.set = solo_gpiochip_set; 162 162 163 163 ret = gpiochip_add_data(&solo_dev->gpio_dev, solo_dev); 164 164
+1 -1
drivers/mfd/sm501.c
··· 965 965 .ngpio = 32, 966 966 .direction_input = sm501_gpio_input, 967 967 .direction_output = sm501_gpio_output, 968 - .set_rv = sm501_gpio_set, 968 + .set = sm501_gpio_set, 969 969 .get = sm501_gpio_get, 970 970 }; 971 971
+1 -1
drivers/mfd/tps65010.c
··· 620 620 tps->chip.parent = &client->dev; 621 621 tps->chip.owner = THIS_MODULE; 622 622 623 - tps->chip.set_rv = tps65010_gpio_set; 623 + tps->chip.set = tps65010_gpio_set; 624 624 tps->chip.direction_output = tps65010_output; 625 625 626 626 /* NOTE: only partial support for inputs; nyet IRQs */
+1 -1
drivers/mfd/ucb1x00-core.c
··· 570 570 ucb->gpio.owner = THIS_MODULE; 571 571 ucb->gpio.base = pdata->gpio_base; 572 572 ucb->gpio.ngpio = 10; 573 - ucb->gpio.set_rv = ucb1x00_gpio_set; 573 + ucb->gpio.set = ucb1x00_gpio_set; 574 574 ucb->gpio.get = ucb1x00_gpio_get; 575 575 ucb->gpio.direction_input = ucb1x00_gpio_direction_input; 576 576 ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
+1 -1
drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
··· 438 438 gchip->direction_output = pci1xxxx_gpio_direction_output; 439 439 gchip->get_direction = pci1xxxx_gpio_get_direction; 440 440 gchip->get = pci1xxxx_gpio_get; 441 - gchip->set_rv = pci1xxxx_gpio_set; 441 + gchip->set = pci1xxxx_gpio_set; 442 442 gchip->set_config = pci1xxxx_gpio_set_config; 443 443 gchip->dbg_show = NULL; 444 444 gchip->base = -1;
+1 -1
drivers/misc/ti_fpc202.c
··· 333 333 priv->gpio.base = -1; 334 334 priv->gpio.direction_input = fpc202_gpio_direction_input; 335 335 priv->gpio.direction_output = fpc202_gpio_direction_output; 336 - priv->gpio.set_rv = fpc202_gpio_set; 336 + priv->gpio.set = fpc202_gpio_set; 337 337 priv->gpio.get = fpc202_gpio_get; 338 338 priv->gpio.ngpio = FPC202_GPIO_COUNT; 339 339 priv->gpio.parent = dev;
+2 -2
drivers/net/can/spi/mcp251x.c
··· 607 607 gpio->get_direction = mcp251x_gpio_get_direction; 608 608 gpio->get = mcp251x_gpio_get; 609 609 gpio->get_multiple = mcp251x_gpio_get_multiple; 610 - gpio->set_rv = mcp251x_gpio_set; 611 - gpio->set_multiple_rv = mcp251x_gpio_set_multiple; 610 + gpio->set = mcp251x_gpio_set; 611 + gpio->set_multiple = mcp251x_gpio_set_multiple; 612 612 gpio->base = -1; 613 613 gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names); 614 614 gpio->names = mcp251x_gpio_names;
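The one-line .set_rv/.set_multiple_rv renames in the media, mfd and misc hunks above, this CAN hunk, and the DSA hunks below are the tail of the gpiolib setter conversion: the int-returning callbacks, temporarily parked under *_rv names during the transition, now occupy the canonical slots. The expected callback shapes, sketched with register writes elided:

#include <linux/gpio/driver.h>

static int my_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
        /* write the level; a bus error can now propagate to gpiolib */
        return 0;
}

static int my_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
                                unsigned long *bits)
{
        return 0;
}

static void my_gpio_setup(struct gpio_chip *gc)
{
        gc->set = my_gpio_set;                   /* was gc->set_rv */
        gc->set_multiple = my_gpio_set_multiple; /* was gc->set_multiple_rv */
}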
+11 -9
drivers/net/dsa/microchip/ksz8.c
··· 36 36 37 37 static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) 38 38 { 39 - regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0); 39 + ksz_rmw8(dev, addr, bits, set ? bits : 0); 40 40 } 41 41 42 42 static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, 43 43 bool set) 44 44 { 45 - regmap_update_bits(ksz_regmap_8(dev), 46 - dev->dev_ops->get_port_addr(port, offset), 47 - bits, set ? bits : 0); 45 + ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset), bits, 46 + set ? bits : 0); 48 47 } 49 48 50 49 /** ··· 1954 1955 ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true); 1955 1956 1956 1957 /* Enable aggressive back off algorithm in half duplex mode. */ 1957 - regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1, 1958 - SW_AGGR_BACKOFF, SW_AGGR_BACKOFF); 1958 + ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF); 1959 + if (ret) 1960 + return ret; 1959 1961 1960 1962 /* 1961 1963 * Make sure unicast VLAN boundary is set as default and 1962 1964 * enable no excessive collision drop. 1963 1965 */ 1964 - regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2, 1965 - UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, 1966 - UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); 1966 + ret = ksz_rmw8(dev, REG_SW_CTRL_2, 1967 + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP, 1968 + UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP); 1969 + if (ret) 1970 + return ret; 1967 1971 1968 1972 ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false); 1969 1973
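The ksz8 hunk replaces open-coded regmap_update_bits() calls with ksz_rmw8() and starts propagating its result, so register I/O failures now unwind setup instead of leaving the switch half-configured. Assuming ksz_rmw8() is essentially a thin read-modify-write wrapper over the 8-bit regmap (the real helper may add error logging), it would look roughly like:

	/* Sketch under that assumption; not copied from the ksz driver. */
	static int ksz_rmw8(struct ksz_device *dev, u32 addr, u8 mask, u8 val)
	{
		return regmap_update_bits(ksz_regmap_8(dev), addr, mask, val);
	}

Returning the regmap error code is what makes the new `ret = ksz_rmw8(...); if (ret) return ret;` pattern above possible.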
+1
drivers/net/dsa/microchip/ksz_common.c
··· 1447 1447 regmap_reg_range(0x3f, 0x3f), 1448 1448 1449 1449 /* advanced control registers */ 1450 + regmap_reg_range(0x43, 0x43), 1450 1451 regmap_reg_range(0x60, 0x6f), 1451 1452 regmap_reg_range(0x70, 0x75), 1452 1453 regmap_reg_range(0x76, 0x78),
+1 -1
drivers/net/dsa/mt7530.c
··· 2187 2187 gc->direction_input = mt7530_gpio_direction_input; 2188 2188 gc->direction_output = mt7530_gpio_direction_output; 2189 2189 gc->get = mt7530_gpio_get; 2190 - gc->set_rv = mt7530_gpio_set; 2190 + gc->set = mt7530_gpio_set; 2191 2191 gc->base = -1; 2192 2192 gc->ngpio = 15; 2193 2193 gc->can_sleep = true;
+1 -1
drivers/net/dsa/vitesse-vsc73xx-core.c
··· 2317 2317 vsc->gc.parent = vsc->dev; 2318 2318 vsc->gc.base = -1; 2319 2319 vsc->gc.get = vsc73xx_gpio_get; 2320 - vsc->gc.set_rv = vsc73xx_gpio_set; 2320 + vsc->gc.set = vsc73xx_gpio_set; 2321 2321 vsc->gc.direction_input = vsc73xx_gpio_direction_input; 2322 2322 vsc->gc.direction_output = vsc73xx_gpio_direction_output; 2323 2323 vsc->gc.get_direction = vsc73xx_gpio_get_direction;
+17 -4
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 926 926 927 927 static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping, 928 928 struct bnxt_rx_ring_info *rxr, 929 + unsigned int *offset, 929 930 gfp_t gfp) 930 931 { 931 932 netmem_ref netmem; 932 933 933 - netmem = page_pool_alloc_netmems(rxr->page_pool, gfp); 934 + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 935 + netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset, BNXT_RX_PAGE_SIZE, gfp); 936 + } else { 937 + netmem = page_pool_alloc_netmems(rxr->page_pool, gfp); 938 + *offset = 0; 939 + } 934 940 if (!netmem) 935 941 return 0; 936 942 937 - *mapping = page_pool_get_dma_addr_netmem(netmem); 943 + *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset; 938 944 return netmem; 939 945 } 940 946 ··· 1035 1029 dma_addr_t mapping; 1036 1030 netmem_ref netmem; 1037 1031 1038 - netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp); 1032 + netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp); 1039 1033 if (!netmem) 1040 1034 return -ENOMEM; 1041 1035 ··· 3825 3819 if (BNXT_RX_PAGE_MODE(bp)) 3826 3820 pp.pool_size += bp->rx_ring_size / rx_size_fac; 3827 3821 pp.nid = numa_node; 3828 - pp.napi = &rxr->bnapi->napi; 3829 3822 pp.netdev = bp->dev; 3830 3823 pp.dev = &bp->pdev->dev; 3831 3824 pp.dma_dir = bp->rx_dir; ··· 3854 3849 page_pool_destroy(rxr->page_pool); 3855 3850 rxr->page_pool = NULL; 3856 3851 return PTR_ERR(pool); 3852 + } 3853 + 3854 + static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr) 3855 + { 3856 + page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi); 3857 + page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi); 3857 3858 } 3858 3859 3859 3860 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) ··· 3900 3889 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); 3901 3890 if (rc) 3902 3891 return rc; 3892 + bnxt_enable_rx_page_pool(rxr); 3903 3893 3904 3894 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); 3905 3895 if (rc < 0) ··· 16045 16033 goto err_reset; 16046 16034 } 16047 16035 16036 + bnxt_enable_rx_page_pool(rxr); 16048 16037 napi_enable_locked(&bnapi->napi); 16049 16038 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 16050 16039
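Two things happen in the bnxt hunk: when PAGE_SIZE exceeds BNXT_RX_PAGE_SIZE, one page now backs several RX buffers as page_pool fragments (hence the offset added to the DMA address), and direct NAPI-context recycling is no longer requested at pool-creation time but enabled explicitly once the pool is bound to its NAPI, including after a queue restart. A condensed sketch of the latter pattern, using only calls visible in the hunk plus hypothetical sizes:

	struct page_pool_params pp = {
		.pool_size = 256,		/* hypothetical */
		.dev	   = dev,
		.dma_dir   = DMA_FROM_DEVICE,
		/* .napi deliberately left unset at creation time */
	};
	struct page_pool *pool = page_pool_create(&pp);

	/* Later, once the queue's NAPI context is (re)initialised: */
	page_pool_enable_direct_recycling(pool, napi);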
+5 -9
drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
··· 53 53 { 54 54 int ret; 55 55 56 - ASSERT_RTNL(); 56 + if (test_and_set_bit(HBG_NIC_STATE_RESETTING, &priv->state)) 57 + return -EBUSY; 57 58 58 59 if (netif_running(priv->netdev)) { 60 + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); 59 61 dev_warn(&priv->pdev->dev, 60 62 "failed to reset because port is up\n"); 61 63 return -EBUSY; ··· 66 64 netif_device_detach(priv->netdev); 67 65 68 66 priv->reset_type = type; 69 - set_bit(HBG_NIC_STATE_RESETTING, &priv->state); 70 67 clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); 71 68 ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET); 72 69 if (ret) { ··· 85 84 type != priv->reset_type) 86 85 return 0; 87 86 88 - ASSERT_RTNL(); 89 - 90 - clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); 91 87 ret = hbg_rebuild(priv); 92 88 if (ret) { 93 89 priv->stats.reset_fail_cnt++; 94 90 set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state); 91 + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); 95 92 dev_err(&priv->pdev->dev, "failed to rebuild after reset\n"); 96 93 return ret; 97 94 } 98 95 99 96 netif_device_attach(priv->netdev); 97 + clear_bit(HBG_NIC_STATE_RESETTING, &priv->state); 100 98 101 99 dev_info(&priv->pdev->dev, "reset done\n"); 102 100 return ret; 103 101 } 104 102 105 - /* must be protected by rtnl lock */ 106 103 int hbg_reset(struct hbg_priv *priv) 107 104 { 108 105 int ret; 109 106 110 - ASSERT_RTNL(); 111 107 ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION); 112 108 if (ret) 113 109 return ret; ··· 169 171 struct net_device *netdev = pci_get_drvdata(pdev); 170 172 struct hbg_priv *priv = netdev_priv(netdev); 171 173 172 - rtnl_lock(); 173 174 hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR); 174 175 } 175 176 ··· 178 181 struct hbg_priv *priv = netdev_priv(netdev); 179 182 180 183 hbg_reset_done(priv, HBG_RESET_TYPE_FLR); 181 - rtnl_unlock(); 182 184 } 183 185 184 186 static const struct pci_error_handlers hbg_pci_err_handler = {
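The hbg_err rework stops relying on the RTNL for reset serialization (the ASSERT_RTNL()s and the rtnl_lock()/rtnl_unlock() pair in the FLR path go away) and lets the RESETTING bit do the job: test_and_set_bit() is an atomic test-and-lock, so a concurrent reset backs off with -EBUSY, and the bit is cleared only after rebuild finishes or on the early-exit paths. The generic shape of the idiom, with hypothetical names:

	if (test_and_set_bit(MY_STATE_RESETTING, &priv->state))
		return -EBUSY;		/* a reset is already in flight */

	err = my_do_reset(priv);	/* hypothetical reset body */

	/* Publish completion only after all rebuild work is done. */
	clear_bit(MY_STATE_RESETTING, &priv->state);
	return err;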
+13 -2
drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
··· 12 12 13 13 #define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000) 14 14 #define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000) 15 + #define HBG_MAC_LINK_WAIT_TIMEOUT_US (500 * 1000) 16 + #define HBG_MAC_LINK_WAIT_INTERVAL_US (5 * 1000) 15 17 /* little endian or big endian. 16 18 * ctrl means packet description, data means skb packet data 17 19 */ ··· 230 228 231 229 void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex) 232 230 { 231 + u32 link_status; 232 + int ret; 233 + 233 234 hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE); 234 235 235 236 hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR, ··· 244 239 245 240 hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE); 246 241 247 - if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR, 248 - HBG_REG_AN_NEG_STATE_NP_LINK_OK_B)) 242 + /* wait MAC link up */ 243 + ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR, 244 + link_status, 245 + FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B, 246 + link_status), 247 + HBG_MAC_LINK_WAIT_INTERVAL_US, 248 + HBG_MAC_LINK_WAIT_TIMEOUT_US); 249 + if (ret) 249 250 hbg_np_link_fail_task_schedule(priv); 250 251 } 251 252
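The hbg_hw change turns a single read of the autoneg link-OK bit into a bounded poll. readl_poll_timeout() from <linux/iopoll.h> takes the MMIO address, a variable that receives each read, a completion condition, a sleep interval and a timeout (both in microseconds), and returns -ETIMEDOUT if the condition never becomes true. The general idiom, with hypothetical register and bit names:

	#include <linux/iopoll.h>

	u32 val;
	int ret;

	/* Poll MY_REG every 5 ms until MY_READY_BIT is set, 500 ms max. */
	ret = readl_poll_timeout(base + MY_REG, val, val & MY_READY_BIT,
				 5 * 1000, 500 * 1000);
	if (ret)
		my_handle_timeout();	/* -ETIMEDOUT: schedule recovery */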
+6 -1
drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
··· 29 29 30 30 static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring) 31 31 { 32 - return (ring->ntu + ring->len - ring->ntc) % ring->len; 32 + u32 len = READ_ONCE(ring->len); 33 + 34 + if (!len) 35 + return 0; 36 + 37 + return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len; 33 38 } 34 39 35 40 netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev);
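A worked example of the occupancy arithmetic in hbg_get_queue_used_num(): with len = 8, ntu = 2 (next to use) and ntc = 6 (next to clean), (2 + 8 - 6) % 8 = 4 descriptors are outstanding. The new guard and READ_ONCE() annotations matter because the helper can run locklessly against ring teardown:

	u32 len = READ_ONCE(ring->len);	/* 0 once the ring is torn down */

	if (!len)
		return 0;	/* avoids a modulo-by-zero on teardown */
	/* e.g. ntu = 2, ntc = 6, len = 8: (2 + 8 - 6) % 8 = 4 in use */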
+1
drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
··· 543 543 544 544 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; 545 545 attrs.phys.port_number = adapter->hw.bus.func; 546 + attrs.no_phys_port_name = 1; 546 547 ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id); 547 548 548 549 devlink_port_attrs_set(devlink_port, &attrs);
+2 -11
drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
··· 330 330 if (IS_ERR(plat_dat)) 331 331 return PTR_ERR(plat_dat); 332 332 333 - ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks); 333 + ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &plat_dat->clks); 334 334 if (ret < 0) 335 - return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n"); 335 + return dev_err_probe(&pdev->dev, ret, "Failed to retrieve and enable all required clocks\n"); 336 336 plat_dat->num_clks = ret; 337 - 338 - ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks); 339 - if (ret) 340 - return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n"); 341 337 342 338 plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat, 343 339 data->stmmac_clk_name); ··· 342 346 ret = data->probe(pdev, plat_dat, &stmmac_res); 343 347 if (ret < 0) { 344 348 dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n"); 345 - clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks); 346 349 return ret; 347 350 } 348 351 ··· 365 370 static void dwc_eth_dwmac_remove(struct platform_device *pdev) 366 371 { 367 372 const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev); 368 - struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev); 369 373 370 374 stmmac_dvr_remove(&pdev->dev); 371 375 372 376 if (data->remove) 373 377 data->remove(pdev); 374 - 375 - if (plat_dat) 376 - clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks); 377 378 } 378 379 379 380 static const struct of_device_id dwc_eth_dwmac_match[] = {
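The dwc-qos-eth conversion folds the devm_clk_bulk_get_all() + clk_bulk_prepare_enable() pair into devm_clk_bulk_get_all_enabled(), which (as the hunk shows) returns the number of clocks on success and registers devres cleanup, so the manual clk_bulk_disable_unprepare() calls in the probe error path and in remove become unnecessary. In outline:

	/* After: one devres-managed call; disable/put happen on detach. */
	ret = devm_clk_bulk_get_all_enabled(dev, &clks);
	if (ret < 0)
		return dev_err_probe(dev, ret, "clock setup failed\n");
	num_clks = ret;		/* count of acquired-and-enabled clocks */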
+5 -1
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
··· 1788 1788 1789 1789 static void rk_gmac_remove(struct platform_device *pdev) 1790 1790 { 1791 - struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev); 1791 + struct stmmac_priv *priv = netdev_priv(platform_get_drvdata(pdev)); 1792 + struct rk_priv_data *bsp_priv = priv->plat->bsp_priv; 1792 1793 1793 1794 stmmac_dvr_remove(&pdev->dev); 1794 1795 1795 1796 rk_gmac_powerdown(bsp_priv); 1797 + 1798 + if (priv->plat->phy_node && bsp_priv->integrated_phy) 1799 + clk_put(bsp_priv->clk_phy); 1796 1800 } 1797 1801 1798 1802 static const struct of_device_id rk_gmac_dwmac_match[] = {
+14
drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
··· 211 211 struct stmmac_resources stmmac_res; 212 212 struct plat_stmmacenet_data *plat; 213 213 struct thead_dwmac *dwmac; 214 + struct clk *apb_clk; 214 215 void __iomem *apb; 215 216 int ret; 216 217 ··· 224 223 if (IS_ERR(plat)) 225 224 return dev_err_probe(&pdev->dev, PTR_ERR(plat), 226 225 "dt configuration failed\n"); 226 + 227 + /* 228 + * The APB clock is essential for accessing glue registers. However, 229 + * old devicetrees don't describe it correctly. We continue to probe 230 + * and emit a warning if it isn't present. 231 + */ 232 + apb_clk = devm_clk_get_enabled(&pdev->dev, "apb"); 233 + if (PTR_ERR(apb_clk) == -ENOENT) 234 + dev_warn(&pdev->dev, 235 + "cannot get apb clock, link may break after speed changes\n"); 236 + else if (IS_ERR(apb_clk)) 237 + return dev_err_probe(&pdev->dev, PTR_ERR(apb_clk), 238 + "failed to get apb clock\n"); 227 239 228 240 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 229 241 if (!dwmac)
+2 -1
drivers/net/ethernet/ti/icssg/icss_iep.c
··· 621 621 622 622 static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on) 623 623 { 624 - u32 val, cap, ret = 0; 624 + u32 val, cap; 625 + int ret = 0; 625 626 626 627 mutex_lock(&iep->ptp_clk_mutex); 627 628
+6
drivers/net/ethernet/ti/icssg/icssg_prueth.c
··· 50 50 /* CTRLMMR_ICSSG_RGMII_CTRL register bits */ 51 51 #define ICSSG_CTRL_RGMII_ID_MODE BIT(24) 52 52 53 + static void emac_adjust_link(struct net_device *ndev); 54 + 53 55 static int emac_get_tx_ts(struct prueth_emac *emac, 54 56 struct emac_tx_ts_response *rsp) 55 57 { ··· 231 229 ret = icssg_config(prueth, emac, slice); 232 230 if (ret) 233 231 goto disable_class; 232 + 233 + mutex_lock(&emac->ndev->phydev->lock); 234 + emac_adjust_link(emac->ndev); 235 + mutex_unlock(&emac->ndev->phydev->lock); 234 236 } 235 237 236 238 ret = prueth_emac_start(prueth);
+1 -1
drivers/net/hamradio/bpqether.c
··· 138 138 139 139 static inline int dev_is_ethdev(struct net_device *dev) 140 140 { 141 - return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5); 141 + return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev); 142 142 } 143 143 144 144 /* ------------------------------------------------------------------------ */
+3
drivers/net/hyperv/hyperv_net.h
··· 1061 1061 struct net_device __rcu *vf_netdev; 1062 1062 struct netvsc_vf_pcpu_stats __percpu *vf_stats; 1063 1063 struct delayed_work vf_takeover; 1064 + struct delayed_work vfns_work; 1064 1065 1065 1066 /* 1: allocated, serial number is valid. 0: not allocated */ 1066 1067 u32 vf_alloc; ··· 1075 1074 /* Used to temporarily save the config info across hibernation */ 1076 1075 struct netvsc_device_info *saved_netvsc_dev_info; 1077 1076 }; 1077 + 1078 + void netvsc_vfns_work(struct work_struct *w); 1078 1079 1079 1080 /* Azure hosts don't support non-TCP port numbers in hashing for fragmented 1080 1081 * packets. We can use ethtool to change UDP hash level when necessary.
+28 -1
drivers/net/hyperv/netvsc_drv.c
··· 2522 2522 spin_lock_init(&net_device_ctx->lock); 2523 2523 INIT_LIST_HEAD(&net_device_ctx->reconfig_events); 2524 2524 INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); 2525 + INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work); 2525 2526 2526 2527 net_device_ctx->vf_stats 2527 2528 = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); ··· 2667 2666 cancel_delayed_work_sync(&ndev_ctx->dwork); 2668 2667 2669 2668 rtnl_lock(); 2669 + cancel_delayed_work_sync(&ndev_ctx->vfns_work); 2670 + 2670 2671 nvdev = rtnl_dereference(ndev_ctx->nvdev); 2671 2672 if (nvdev) { 2672 2673 cancel_work_sync(&nvdev->subchan_work); ··· 2710 2707 cancel_delayed_work_sync(&ndev_ctx->dwork); 2711 2708 2712 2709 rtnl_lock(); 2710 + cancel_delayed_work_sync(&ndev_ctx->vfns_work); 2713 2711 2714 2712 nvdev = rtnl_dereference(ndev_ctx->nvdev); 2715 2713 if (nvdev == NULL) { ··· 2804 2800 } 2805 2801 } 2806 2802 2803 + void netvsc_vfns_work(struct work_struct *w) 2804 + { 2805 + struct net_device_context *ndev_ctx = 2806 + container_of(w, struct net_device_context, vfns_work.work); 2807 + struct net_device *ndev; 2808 + 2809 + if (!rtnl_trylock()) { 2810 + schedule_delayed_work(&ndev_ctx->vfns_work, 1); 2811 + return; 2812 + } 2813 + 2814 + ndev = hv_get_drvdata(ndev_ctx->device_ctx); 2815 + if (!ndev) 2816 + goto out; 2817 + 2818 + netvsc_event_set_vf_ns(ndev); 2819 + 2820 + out: 2821 + rtnl_unlock(); 2822 + } 2823 + 2807 2824 /* 2808 2825 * On Hyper-V, every VF interface is matched with a corresponding 2809 2826 * synthetic interface. The synthetic interface is presented first ··· 2835 2810 unsigned long event, void *ptr) 2836 2811 { 2837 2812 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); 2813 + struct net_device_context *ndev_ctx; 2838 2814 int ret = 0; 2839 2815 2840 2816 if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) { 2841 - netvsc_event_set_vf_ns(event_dev); 2817 + ndev_ctx = netdev_priv(event_dev); 2818 + schedule_delayed_work(&ndev_ctx->vfns_work, 0); 2842 2819 return NOTIFY_DONE; 2843 2820 } 2844 2821
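The netvsc change moves netvsc_event_set_vf_ns() out of the netdev notifier and into delayed work built on the rtnl_trylock()-and-reschedule idiom, which avoids blocking (or deadlocking) in contexts where the RTNL may already be held. The general shape, with a hypothetical my_ctx:

	static void my_work_fn(struct work_struct *w)
	{
		struct my_ctx *ctx = container_of(w, struct my_ctx, work.work);

		if (!rtnl_trylock()) {
			/* RTNL busy: retry one jiffy later, never block. */
			schedule_delayed_work(&ctx->work, 1);
			return;
		}
		my_rtnl_protected_thing(ctx);	/* hypothetical */
		rtnl_unlock();
	}

Note the matching cancel_delayed_work_sync() calls added under rtnl_lock() in the teardown paths, so a queued work item cannot outlive the device.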
+7 -3
drivers/net/netdevsim/netdev.c
··· 710 710 static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq) 711 711 { 712 712 hrtimer_cancel(&rq->napi_timer); 713 - local_bh_disable(); 714 - dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen); 715 - local_bh_enable(); 713 + 714 + if (rq->skb_queue.qlen) { 715 + local_bh_disable(); 716 + dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen); 717 + local_bh_enable(); 718 + } 719 + 716 720 skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE); 717 721 kfree(rq); 718 722 }
+1
drivers/net/phy/mdio_bus.c
··· 91 91 if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev) 92 92 return -EINVAL; 93 93 94 + gpiod_put(mdiodev->reset_gpio); 94 95 reset_control_put(mdiodev->reset_ctrl); 95 96 96 97 mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
-3
drivers/net/phy/mdio_bus_provider.c
··· 443 443 if (!mdiodev) 444 444 continue; 445 445 446 - if (mdiodev->reset_gpio) 447 - gpiod_put(mdiodev->reset_gpio); 448 - 449 446 mdiodev->device_remove(mdiodev); 450 447 mdiodev->device_free(mdiodev); 451 448 }
+13 -10
drivers/net/phy/nxp-c45-tja11xx.c
··· 1965 1965 return macsec_ability; 1966 1966 } 1967 1967 1968 + static bool tja11xx_phy_id_compare(struct phy_device *phydev, 1969 + const struct phy_driver *phydrv) 1970 + { 1971 + u32 id = phydev->is_c45 ? phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] : 1972 + phydev->phy_id; 1973 + 1974 + return phy_id_compare(id, phydrv->phy_id, phydrv->phy_id_mask); 1975 + } 1976 + 1968 1977 static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev, 1969 1978 const struct phy_driver *phydrv) 1970 1979 { 1971 - if (!phy_id_compare(phydev->phy_id, phydrv->phy_id, 1972 - phydrv->phy_id_mask)) 1973 - return 0; 1974 - 1975 - return !nxp_c45_macsec_ability(phydev); 1980 + return tja11xx_phy_id_compare(phydev, phydrv) && 1981 + !nxp_c45_macsec_ability(phydev); 1976 1982 } 1977 1983 1978 1984 static int tja11xx_macsec_match_phy_device(struct phy_device *phydev, 1979 1985 const struct phy_driver *phydrv) 1980 1986 { 1981 - if (!phy_id_compare(phydev->phy_id, phydrv->phy_id, 1982 - phydrv->phy_id_mask)) 1983 - return 0; 1984 - 1985 - return nxp_c45_macsec_ability(phydev); 1987 + return tja11xx_phy_id_compare(phydev, phydrv) && 1988 + nxp_c45_macsec_ability(phydev); 1986 1989 } 1987 1990 1988 1991 static const struct nxp_c45_regmap tja1120_regmap = {
+1 -1
drivers/net/phy/qcom/qca807x.c
··· 427 427 gc->get_direction = qca807x_gpio_get_direction; 428 428 gc->direction_output = qca807x_gpio_dir_out; 429 429 gc->get = qca807x_gpio_get; 430 - gc->set_rv = qca807x_gpio_set; 430 + gc->set = qca807x_gpio_set; 431 431 432 432 return devm_gpiochip_add_data(dev, gc, priv); 433 433 }
+1
drivers/net/usb/asix_devices.c
··· 676 676 priv->mdio->read = &asix_mdio_bus_read; 677 677 priv->mdio->write = &asix_mdio_bus_write; 678 678 priv->mdio->name = "Asix MDIO Bus"; 679 + priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR)); 679 680 /* mii bus name is usb-<usb bus number>-<usb device number> */ 680 681 snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", 681 682 dev->udev->bus->busnum, dev->udev->devnum);
+1
drivers/net/usb/qmi_wwan.c
··· 1361 1361 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */ 1362 1362 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ 1363 1363 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990A */ 1364 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1077, 2)}, /* Telit FN990A w/audio */ 1364 1365 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990A */ 1365 1366 {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */ 1366 1367 {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
+1 -1
drivers/net/wan/lapbether.c
··· 81 81 82 82 static __inline__ int dev_is_ethdev(struct net_device *dev) 83 83 { 84 - return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5); 84 + return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev); 85 85 } 86 86 87 87 /* ------------------------------------------------------------------------ */
+2 -2
drivers/nvme/host/auth.c
··· 742 742 "%s: qid %d failed to generate digest, error %d\n", 743 743 __func__, chap->qid, ret); 744 744 goto out_free_psk; 745 - }; 745 + } 746 746 dev_dbg(ctrl->device, "%s: generated digest %s\n", 747 747 __func__, digest); 748 748 ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len, ··· 752 752 "%s: qid %d failed to derive TLS psk, error %d\n", 753 753 __func__, chap->qid, ret); 754 754 goto out_free_digest; 755 - }; 755 + } 756 756 757 757 tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring, 758 758 ctrl->opts->host->nqn,
+16
drivers/nvme/host/core.c
··· 3158 3158 return ctrl->opts && ctrl->opts->discovery_nqn; 3159 3159 } 3160 3160 3161 + static inline bool nvme_admin_ctrl(struct nvme_ctrl *ctrl) 3162 + { 3163 + return ctrl->cntrltype == NVME_CTRL_ADMIN; 3164 + } 3165 + 3161 3166 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, 3162 3167 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 3163 3168 { ··· 3674 3669 ret = nvme_init_identify(ctrl); 3675 3670 if (ret) 3676 3671 return ret; 3672 + 3673 + if (nvme_admin_ctrl(ctrl)) { 3674 + /* 3675 + * An admin controller has one admin queue, but no I/O queues. 3676 + * Override queue_count so it only creates an admin queue. 3677 + */ 3678 + dev_dbg(ctrl->device, 3679 + "Subsystem %s is an administrative controller", 3680 + ctrl->subsys->subnqn); 3681 + ctrl->queue_count = 1; 3682 + } 3677 3683 3678 3684 ret = nvme_configure_apst(ctrl); 3679 3685 if (ret < 0)
+2 -2
drivers/nvme/host/fc.c
··· 1363 1363 * down, and the related FC-NVME Association ID and Connection IDs 1364 1364 * become invalid. 1365 1365 * 1366 - * The behavior of the fc-nvme initiator is such that it's 1366 + * The behavior of the fc-nvme initiator is such that its 1367 1367 * understanding of the association and connections will implicitly 1368 1368 * be torn down. The action is implicit as it may be due to a loss of 1369 1369 * connectivity with the fc-nvme target, so you may never get a ··· 2777 2777 * as WRITE ZEROES will return a non-zero rq payload_bytes yet 2778 2778 * there is no actual payload to be transferred. 2779 2779 * To get it right, key data transmission on there being 1 or 2780 - * more physical segments in the sg list. If there is no 2780 + * more physical segments in the sg list. If there are no 2781 2781 * physical segments, there is no payload. 2782 2782 */ 2783 2783 if (blk_rq_nr_phys_segments(rq)) {
+1 -1
drivers/nvme/host/pci.c
··· 935 935 936 936 nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped); 937 937 if (unlikely(iter->status)) 938 - nvme_free_sgls(req); 938 + nvme_unmap_data(req); 939 939 return iter->status; 940 940 } 941 941
+1 -1
drivers/nvme/host/tcp.c
··· 2179 2179 2180 2180 /* 2181 2181 * Only start IO queues for which we have allocated the tagset 2182 - * and limitted it to the available queues. On reconnects, the 2182 + * and limited it to the available queues. On reconnects, the 2183 2183 * queue number might have changed. 2184 2184 */ 2185 2185 nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
+9 -9
drivers/nvme/target/core.c
··· 1960 1960 if (!nvmet_wq) 1961 1961 goto out_free_buffered_work_queue; 1962 1962 1963 - error = nvmet_init_discovery(); 1963 + error = nvmet_init_debugfs(); 1964 1964 if (error) 1965 1965 goto out_free_nvmet_work_queue; 1966 1966 1967 - error = nvmet_init_debugfs(); 1968 - if (error) 1969 - goto out_exit_discovery; 1970 - 1971 - error = nvmet_init_configfs(); 1967 + error = nvmet_init_discovery(); 1972 1968 if (error) 1973 1969 goto out_exit_debugfs; 1974 1970 1971 + error = nvmet_init_configfs(); 1972 + if (error) 1973 + goto out_exit_discovery; 1974 + 1975 1975 return 0; 1976 1976 1977 - out_exit_debugfs: 1978 - nvmet_exit_debugfs(); 1979 1977 out_exit_discovery: 1980 1978 nvmet_exit_discovery(); 1979 + out_exit_debugfs: 1980 + nvmet_exit_debugfs(); 1981 1981 out_free_nvmet_work_queue: 1982 1982 destroy_workqueue(nvmet_wq); 1983 1983 out_free_buffered_work_queue: ··· 1992 1992 static void __exit nvmet_exit(void) 1993 1993 { 1994 1994 nvmet_exit_configfs(); 1995 - nvmet_exit_debugfs(); 1996 1995 nvmet_exit_discovery(); 1996 + nvmet_exit_debugfs(); 1997 1997 ida_destroy(&cntlid_ida); 1998 1998 destroy_workqueue(nvmet_wq); 1999 1999 destroy_workqueue(buffered_io_wq);
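Besides registering debugfs before discovery, the nvmet_init() hunk restores the invariant that error unwinding runs in exact reverse order of initialisation, which the old label ordering violated. The standard shape of the idiom, with generic placeholders:

	err = init_a();
	if (err)
		goto out;
	err = init_b();
	if (err)
		goto out_exit_a;
	err = init_c();
	if (err)
		goto out_exit_b;
	return 0;

	out_exit_b:
		exit_b();
	out_exit_a:
		exit_a();
	out:
		return err;

The module exit path then mirrors the same order: exit_c(), exit_b(), exit_a(), exactly as the reordered nvmet_exit() now does.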
+3 -3
drivers/nvme/target/fc.c
··· 459 459 * down, and the related FC-NVME Association ID and Connection IDs 460 460 * become invalid. 461 461 * 462 - * The behavior of the fc-nvme target is such that it's 462 + * The behavior of the fc-nvme target is such that its 463 463 * understanding of the association and connections will implicitly 464 464 * be torn down. The action is implicit as it may be due to a loss of 465 465 * connectivity with the fc-nvme host, so the target may never get a ··· 2313 2313 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); 2314 2314 if (ret) { 2315 2315 /* 2316 - * should be ok to set w/o lock as its in the thread of 2316 + * should be ok to set w/o lock as it's in the thread of 2317 2317 * execution (not an async timer routine) and doesn't 2318 2318 * contend with any clearing action 2319 2319 */ ··· 2629 2629 * and the api of the FC LLDD which may issue a hw command to send the 2630 2630 * response, but the LLDD may not get the hw completion for that command 2631 2631 * and upcall the nvmet_fc layer before a new command may be 2632 - * asynchronously received - its possible for a command to be received 2632 + * asynchronously received - it's possible for a command to be received 2633 2633 * before the LLDD and nvmet_fc have recycled the job structure. It gives 2634 2634 * the appearance of more commands received than fits in the sq. 2635 2635 * To alleviate this scenario, a temporary queue is maintained in the
+2
drivers/nvme/target/passthru.c
··· 533 533 case NVME_FEAT_HOST_ID: 534 534 req->execute = nvmet_execute_get_features; 535 535 return NVME_SC_SUCCESS; 536 + case NVME_FEAT_FDP: 537 + return nvmet_setup_passthru_command(req); 536 538 default: 537 539 return nvmet_passthru_get_set_features(req); 538 540 }
+3 -3
drivers/nvme/target/rdma.c
··· 1731 1731 * We registered an ib_client to handle device removal for queues, 1732 1732 * so we only need to handle the listening port cm_ids. In this case 1733 1733 * we nullify the priv to prevent double cm_id destruction and destroying 1734 - * the cm_id implicitely by returning a non-zero rc to the callout. 1734 + * the cm_id implicitly by returning a non-zero rc to the callout. 1735 1735 */ 1736 1736 static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, 1737 1737 struct nvmet_rdma_queue *queue) ··· 1742 1742 /* 1743 1743 * This is a queue cm_id. we have registered 1744 1744 * an ib_client to handle queues removal 1745 - * so don't interfear and just return. 1745 + * so don't interfere and just return. 1746 1746 */ 1747 1747 return 0; 1748 1748 } ··· 1760 1760 1761 1761 /* 1762 1762 * We need to return 1 so that the core will destroy 1763 - * it's own ID. What a great API design.. 1763 + * its own ID. What a great API design.. 1764 1764 */ 1765 1765 return 1; 1766 1766 }
+3 -1
drivers/pci/controller/vmd.c
··· 280 280 static void vmd_msi_free(struct irq_domain *domain, unsigned int virq, 281 281 unsigned int nr_irqs) 282 282 { 283 + struct irq_data *irq_data; 283 284 struct vmd_irq *vmdirq; 284 285 285 286 for (int i = 0; i < nr_irqs; ++i) { 286 - vmdirq = irq_get_chip_data(virq + i); 287 + irq_data = irq_domain_get_irq_data(domain, virq + i); 288 + vmdirq = irq_data->chip_data; 287 289 288 290 synchronize_srcu(&vmdirq->irq->srcu); 289 291
+1 -1
drivers/pinctrl/actions/pinctrl-owl.c
··· 962 962 pctrl->chip.direction_input = owl_gpio_direction_input; 963 963 pctrl->chip.direction_output = owl_gpio_direction_output; 964 964 pctrl->chip.get = owl_gpio_get; 965 - pctrl->chip.set_rv = owl_gpio_set; 965 + pctrl->chip.set = owl_gpio_set; 966 966 pctrl->chip.request = owl_gpio_request; 967 967 pctrl->chip.free = owl_gpio_free; 968 968
+2 -2
drivers/pinctrl/bcm/pinctrl-bcm2835.c
··· 397 397 .direction_output = bcm2835_gpio_direction_output, 398 398 .get_direction = bcm2835_gpio_get_direction, 399 399 .get = bcm2835_gpio_get, 400 - .set_rv = bcm2835_gpio_set, 400 + .set = bcm2835_gpio_set, 401 401 .set_config = gpiochip_generic_config, 402 402 .base = -1, 403 403 .ngpio = BCM2835_NUM_GPIOS, ··· 414 414 .direction_output = bcm2835_gpio_direction_output, 415 415 .get_direction = bcm2835_gpio_get_direction, 416 416 .get = bcm2835_gpio_get, 417 - .set_rv = bcm2835_gpio_set, 417 + .set = bcm2835_gpio_set, 418 418 .set_config = gpiochip_generic_config, 419 419 .base = -1, 420 420 .ngpio = BCM2711_NUM_GPIOS,
+1 -1
drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
··· 865 865 gc->direction_input = iproc_gpio_direction_input; 866 866 gc->direction_output = iproc_gpio_direction_output; 867 867 gc->get_direction = iproc_gpio_get_direction; 868 - gc->set_rv = iproc_gpio_set; 868 + gc->set = iproc_gpio_set; 869 869 gc->get = iproc_gpio_get; 870 870 871 871 chip->pinmux_is_supported = of_property_read_bool(dev->of_node,
+1 -1
drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
··· 656 656 gc->direction_input = nsp_gpio_direction_input; 657 657 gc->direction_output = nsp_gpio_direction_output; 658 658 gc->get_direction = nsp_gpio_get_direction; 659 - gc->set_rv = nsp_gpio_set; 659 + gc->set = nsp_gpio_set; 660 660 gc->get = nsp_gpio_get; 661 661 662 662 /* optional GPIO interrupt support */
+1 -1
drivers/pinctrl/cirrus/pinctrl-cs42l43.c
··· 555 555 priv->gpio_chip.direction_output = cs42l43_gpio_direction_out; 556 556 priv->gpio_chip.add_pin_ranges = cs42l43_gpio_add_pin_ranges; 557 557 priv->gpio_chip.get = cs42l43_gpio_get; 558 - priv->gpio_chip.set_rv = cs42l43_gpio_set; 558 + priv->gpio_chip.set = cs42l43_gpio_set; 559 559 priv->gpio_chip.label = dev_name(priv->dev); 560 560 priv->gpio_chip.parent = priv->dev; 561 561 priv->gpio_chip.can_sleep = true;
+1 -1
drivers/pinctrl/cirrus/pinctrl-lochnagar.c
··· 1161 1161 priv->gpio_chip.request = gpiochip_generic_request; 1162 1162 priv->gpio_chip.free = gpiochip_generic_free; 1163 1163 priv->gpio_chip.direction_output = lochnagar_gpio_direction_out; 1164 - priv->gpio_chip.set_rv = lochnagar_gpio_set; 1164 + priv->gpio_chip.set = lochnagar_gpio_set; 1165 1165 priv->gpio_chip.can_sleep = true; 1166 1166 priv->gpio_chip.parent = dev; 1167 1167 priv->gpio_chip.base = -1;
+1 -1
drivers/pinctrl/intel/pinctrl-baytrail.c
··· 1231 1231 .direction_input = byt_gpio_direction_input, 1232 1232 .direction_output = byt_gpio_direction_output, 1233 1233 .get = byt_gpio_get, 1234 - .set_rv = byt_gpio_set, 1234 + .set = byt_gpio_set, 1235 1235 .set_config = gpiochip_generic_config, 1236 1236 .dbg_show = byt_gpio_dbg_show, 1237 1237 };
+1 -1
drivers/pinctrl/intel/pinctrl-cherryview.c
··· 1168 1168 .direction_input = chv_gpio_direction_input, 1169 1169 .direction_output = chv_gpio_direction_output, 1170 1170 .get = chv_gpio_get, 1171 - .set_rv = chv_gpio_set, 1171 + .set = chv_gpio_set, 1172 1172 }; 1173 1173 1174 1174 static void chv_gpio_irq_ack(struct irq_data *d)
+1 -1
drivers/pinctrl/intel/pinctrl-intel.c
··· 1114 1114 .direction_input = intel_gpio_direction_input, 1115 1115 .direction_output = intel_gpio_direction_output, 1116 1116 .get = intel_gpio_get, 1117 - .set_rv = intel_gpio_set, 1117 + .set = intel_gpio_set, 1118 1118 .set_config = gpiochip_generic_config, 1119 1119 }; 1120 1120
+1 -1
drivers/pinctrl/intel/pinctrl-lynxpoint.c
··· 777 777 gc->direction_input = lp_gpio_direction_input; 778 778 gc->direction_output = lp_gpio_direction_output; 779 779 gc->get = lp_gpio_get; 780 - gc->set_rv = lp_gpio_set; 780 + gc->set = lp_gpio_set; 781 781 gc->set_config = gpiochip_generic_config; 782 782 gc->get_direction = lp_gpio_get_direction; 783 783 gc->base = -1;
+1 -1
drivers/pinctrl/mediatek/pinctrl-airoha.c
··· 2418 2418 gc->free = gpiochip_generic_free; 2419 2419 gc->direction_input = pinctrl_gpio_direction_input; 2420 2420 gc->direction_output = airoha_gpio_direction_output; 2421 - gc->set_rv = airoha_gpio_set; 2421 + gc->set = airoha_gpio_set; 2422 2422 gc->get = airoha_gpio_get; 2423 2423 gc->base = -1; 2424 2424 gc->ngpio = AIROHA_NUM_PINS;
+1 -1
drivers/pinctrl/mediatek/pinctrl-moore.c
··· 569 569 chip->direction_input = pinctrl_gpio_direction_input; 570 570 chip->direction_output = mtk_gpio_direction_output; 571 571 chip->get = mtk_gpio_get; 572 - chip->set_rv = mtk_gpio_set; 572 + chip->set = mtk_gpio_set; 573 573 chip->to_irq = mtk_gpio_to_irq; 574 574 chip->set_config = mtk_gpio_set_config; 575 575 chip->base = -1;
+1 -1
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
··· 898 898 .direction_input = pinctrl_gpio_direction_input, 899 899 .direction_output = mtk_gpio_direction_output, 900 900 .get = mtk_gpio_get, 901 - .set_rv = mtk_gpio_set, 901 + .set = mtk_gpio_set, 902 902 .to_irq = mtk_gpio_to_irq, 903 903 .set_config = mtk_gpio_set_config, 904 904 };
+1 -1
drivers/pinctrl/mediatek/pinctrl-paris.c
··· 949 949 chip->direction_input = mtk_gpio_direction_input; 950 950 chip->direction_output = mtk_gpio_direction_output; 951 951 chip->get = mtk_gpio_get; 952 - chip->set_rv = mtk_gpio_set; 952 + chip->set = mtk_gpio_set; 953 953 chip->to_irq = mtk_gpio_to_irq; 954 954 chip->set_config = mtk_gpio_set_config; 955 955 chip->base = -1;
+1 -1
drivers/pinctrl/meson/pinctrl-amlogic-a4.c
··· 888 888 .request = gpiochip_generic_request, 889 889 .free = gpiochip_generic_free, 890 890 .set_config = gpiochip_generic_config, 891 - .set_rv = aml_gpio_set, 891 + .set = aml_gpio_set, 892 892 .get = aml_gpio_get, 893 893 .direction_input = aml_gpio_direction_input, 894 894 .direction_output = aml_gpio_direction_output,
+1 -1
drivers/pinctrl/meson/pinctrl-meson.c
··· 616 616 pc->chip.direction_input = meson_gpio_direction_input; 617 617 pc->chip.direction_output = meson_gpio_direction_output; 618 618 pc->chip.get = meson_gpio_get; 619 - pc->chip.set_rv = meson_gpio_set; 619 + pc->chip.set = meson_gpio_set; 620 620 pc->chip.base = -1; 621 621 pc->chip.ngpio = pc->data->num_pins; 622 622 pc->chip.can_sleep = false;
+1 -1
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
··· 518 518 static const struct gpio_chip armada_37xx_gpiolib_chip = { 519 519 .request = gpiochip_generic_request, 520 520 .free = gpiochip_generic_free, 521 - .set_rv = armada_37xx_gpio_set, 521 + .set = armada_37xx_gpio_set, 522 522 .get = armada_37xx_gpio_get, 523 523 .get_direction = armada_37xx_gpio_get_direction, 524 524 .direction_input = armada_37xx_gpio_direction_input,
+1 -1
drivers/pinctrl/nomadik/pinctrl-abx500.c
··· 536 536 .direction_input = abx500_gpio_direction_input, 537 537 .get = abx500_gpio_get, 538 538 .direction_output = abx500_gpio_direction_output, 539 - .set_rv = abx500_gpio_set, 539 + .set = abx500_gpio_set, 540 540 .to_irq = abx500_gpio_to_irq, 541 541 .dbg_show = abx500_gpio_dbg_show, 542 542 };
+1 -1
drivers/pinctrl/nuvoton/pinctrl-ma35.c
··· 526 526 bank->chip.direction_input = ma35_gpio_core_direction_in; 527 527 bank->chip.direction_output = ma35_gpio_core_direction_out; 528 528 bank->chip.get = ma35_gpio_core_get; 529 - bank->chip.set_rv = ma35_gpio_core_set; 529 + bank->chip.set = ma35_gpio_core_set; 530 530 bank->chip.base = -1; 531 531 bank->chip.ngpio = bank->nr_pins; 532 532 bank->chip.can_sleep = false;
+1 -1
drivers/pinctrl/pinctrl-amd.c
··· 1187 1187 gpio_dev->gc.direction_input = amd_gpio_direction_input; 1188 1188 gpio_dev->gc.direction_output = amd_gpio_direction_output; 1189 1189 gpio_dev->gc.get = amd_gpio_get_value; 1190 - gpio_dev->gc.set_rv = amd_gpio_set_value; 1190 + gpio_dev->gc.set = amd_gpio_set_value; 1191 1191 gpio_dev->gc.set_config = amd_gpio_set_config; 1192 1192 gpio_dev->gc.dbg_show = amd_gpio_dbg_show; 1193 1193
+1 -1
drivers/pinctrl/pinctrl-amdisp.c
··· 151 151 gc->direction_input = amdisp_gpio_direction_input; 152 152 gc->direction_output = amdisp_gpio_direction_output; 153 153 gc->get = amdisp_gpio_get; 154 - gc->set_rv = amdisp_gpio_set; 154 + gc->set = amdisp_gpio_set; 155 155 gc->base = -1; 156 156 gc->ngpio = ARRAY_SIZE(amdisp_range_pins); 157 157
+1 -1
drivers/pinctrl/pinctrl-apple-gpio.c
··· 378 378 pctl->gpio_chip.direction_input = apple_gpio_direction_input; 379 379 pctl->gpio_chip.direction_output = apple_gpio_direction_output; 380 380 pctl->gpio_chip.get = apple_gpio_get; 381 - pctl->gpio_chip.set_rv = apple_gpio_set; 381 + pctl->gpio_chip.set = apple_gpio_set; 382 382 pctl->gpio_chip.base = -1; 383 383 pctl->gpio_chip.ngpio = pctl->pinctrl_desc.npins; 384 384 pctl->gpio_chip.parent = pctl->dev;
+1 -1
drivers/pinctrl/pinctrl-as3722.c
··· 529 529 .request = gpiochip_generic_request, 530 530 .free = gpiochip_generic_free, 531 531 .get = as3722_gpio_get, 532 - .set_rv = as3722_gpio_set, 532 + .set = as3722_gpio_set, 533 533 .direction_input = pinctrl_gpio_direction_input, 534 534 .direction_output = as3722_gpio_direction_output, 535 535 .to_irq = as3722_gpio_to_irq,
+2 -2
drivers/pinctrl/pinctrl-at91-pio4.c
··· 442 442 .get = atmel_gpio_get, 443 443 .get_multiple = atmel_gpio_get_multiple, 444 444 .direction_output = atmel_gpio_direction_output, 445 - .set_rv = atmel_gpio_set, 446 - .set_multiple_rv = atmel_gpio_set_multiple, 445 + .set = atmel_gpio_set, 446 + .set_multiple = atmel_gpio_set_multiple, 447 447 .to_irq = atmel_gpio_to_irq, 448 448 .base = 0, 449 449 };
+2 -2
drivers/pinctrl/pinctrl-at91.c
··· 1801 1801 .direction_input = at91_gpio_direction_input, 1802 1802 .get = at91_gpio_get, 1803 1803 .direction_output = at91_gpio_direction_output, 1804 - .set_rv = at91_gpio_set, 1805 - .set_multiple_rv = at91_gpio_set_multiple, 1804 + .set = at91_gpio_set, 1805 + .set_multiple = at91_gpio_set_multiple, 1806 1806 .dbg_show = at91_gpio_dbg_show, 1807 1807 .can_sleep = false, 1808 1808 .ngpio = MAX_NB_GPIO_PER_BANK,
+2 -2
drivers/pinctrl/pinctrl-aw9523.c
··· 785 785 gc->direction_output = aw9523_direction_output; 786 786 gc->get = aw9523_gpio_get; 787 787 gc->get_multiple = aw9523_gpio_get_multiple; 788 - gc->set_rv = aw9523_gpio_set; 789 - gc->set_multiple_rv = aw9523_gpio_set_multiple; 788 + gc->set = aw9523_gpio_set; 789 + gc->set_multiple = aw9523_gpio_set_multiple; 790 790 gc->set_config = gpiochip_generic_config; 791 791 gc->parent = dev; 792 792 gc->owner = THIS_MODULE;
+2 -2
drivers/pinctrl/pinctrl-axp209.c
··· 192 192 static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset, 193 193 int value) 194 194 { 195 - return chip->set_rv(chip, offset, value); 195 + return chip->set(chip, offset, value); 196 196 } 197 197 198 198 static int axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset, ··· 463 463 pctl->chip.owner = THIS_MODULE; 464 464 pctl->chip.get = axp20x_gpio_get; 465 465 pctl->chip.get_direction = axp20x_gpio_get_direction; 466 - pctl->chip.set_rv = axp20x_gpio_set; 466 + pctl->chip.set = axp20x_gpio_set; 467 467 pctl->chip.direction_input = pinctrl_gpio_direction_input; 468 468 pctl->chip.direction_output = axp20x_gpio_output; 469 469
+2 -2
drivers/pinctrl/pinctrl-cy8c95x0.c
··· 939 939 gc->direction_input = cy8c95x0_gpio_direction_input; 940 940 gc->direction_output = cy8c95x0_gpio_direction_output; 941 941 gc->get = cy8c95x0_gpio_get_value; 942 - gc->set_rv = cy8c95x0_gpio_set_value; 942 + gc->set = cy8c95x0_gpio_set_value; 943 943 gc->get_direction = cy8c95x0_gpio_get_direction; 944 944 gc->get_multiple = cy8c95x0_gpio_get_multiple; 945 - gc->set_multiple_rv = cy8c95x0_gpio_set_multiple; 945 + gc->set_multiple = cy8c95x0_gpio_set_multiple; 946 946 gc->set_config = gpiochip_generic_config; 947 947 gc->can_sleep = true; 948 948 gc->add_pin_ranges = cy8c95x0_add_pin_ranges;
+1 -1
drivers/pinctrl/pinctrl-da9062.c
··· 233 233 static const struct gpio_chip reference_gc = { 234 234 .owner = THIS_MODULE, 235 235 .get = da9062_gpio_get, 236 - .set_rv = da9062_gpio_set, 236 + .set = da9062_gpio_set, 237 237 .get_direction = da9062_gpio_get_direction, 238 238 .direction_input = da9062_gpio_direction_input, 239 239 .direction_output = da9062_gpio_direction_output,
+1 -1
drivers/pinctrl/pinctrl-digicolor.c
··· 248 248 chip->direction_input = dc_gpio_direction_input; 249 249 chip->direction_output = dc_gpio_direction_output; 250 250 chip->get = dc_gpio_get; 251 - chip->set_rv = dc_gpio_set; 251 + chip->set = dc_gpio_set; 252 252 chip->base = -1; 253 253 chip->ngpio = PINS_COUNT; 254 254
+1 -1
drivers/pinctrl/pinctrl-ingenic.c
··· 4451 4451 jzgc->gc.fwnode = fwnode; 4452 4452 jzgc->gc.owner = THIS_MODULE; 4453 4453 4454 - jzgc->gc.set_rv = ingenic_gpio_set; 4454 + jzgc->gc.set = ingenic_gpio_set; 4455 4455 jzgc->gc.get = ingenic_gpio_get; 4456 4456 jzgc->gc.direction_input = pinctrl_gpio_direction_input; 4457 4457 jzgc->gc.direction_output = ingenic_gpio_direction_output;
+1 -1
drivers/pinctrl/pinctrl-keembay.c
··· 1481 1481 gc->direction_input = keembay_gpio_set_direction_in; 1482 1482 gc->direction_output = keembay_gpio_set_direction_out; 1483 1483 gc->get = keembay_gpio_get; 1484 - gc->set_rv = keembay_gpio_set; 1484 + gc->set = keembay_gpio_set; 1485 1485 gc->set_config = gpiochip_generic_config; 1486 1486 gc->base = -1; 1487 1487 gc->ngpio = kpc->npins;
+2 -2
drivers/pinctrl/pinctrl-mcp23s08.c
··· 632 632 mcp->chip.get = mcp23s08_get; 633 633 mcp->chip.get_multiple = mcp23s08_get_multiple; 634 634 mcp->chip.direction_output = mcp23s08_direction_output; 635 - mcp->chip.set_rv = mcp23s08_set; 636 - mcp->chip.set_multiple_rv = mcp23s08_set_multiple; 635 + mcp->chip.set = mcp23s08_set; 636 + mcp->chip.set_multiple = mcp23s08_set_multiple; 637 637 638 638 mcp->chip.base = base; 639 639 mcp->chip.can_sleep = true;
+1 -1
drivers/pinctrl/pinctrl-microchip-sgpio.c
··· 858 858 gc->direction_input = microchip_sgpio_direction_input; 859 859 gc->direction_output = microchip_sgpio_direction_output; 860 860 gc->get = microchip_sgpio_get_value; 861 - gc->set_rv = microchip_sgpio_set_value; 861 + gc->set = microchip_sgpio_set_value; 862 862 gc->request = gpiochip_generic_request; 863 863 gc->free = gpiochip_generic_free; 864 864 gc->of_xlate = microchip_sgpio_of_xlate;
+1 -1
drivers/pinctrl/pinctrl-ocelot.c
··· 1997 1997 static const struct gpio_chip ocelot_gpiolib_chip = { 1998 1998 .request = gpiochip_generic_request, 1999 1999 .free = gpiochip_generic_free, 2000 - .set_rv = ocelot_gpio_set, 2000 + .set = ocelot_gpio_set, 2001 2001 .get = ocelot_gpio_get, 2002 2002 .get_direction = ocelot_gpio_get_direction, 2003 2003 .direction_input = pinctrl_gpio_direction_input,
+1 -1
drivers/pinctrl/pinctrl-pic32.c
··· 2120 2120 .direction_input = pic32_gpio_direction_input, \ 2121 2121 .direction_output = pic32_gpio_direction_output, \ 2122 2122 .get = pic32_gpio_get, \ 2123 - .set_rv = pic32_gpio_set, \ 2123 + .set = pic32_gpio_set, \ 2124 2124 .ngpio = _npins, \ 2125 2125 .base = GPIO_BANK_START(_bank), \ 2126 2126 .owner = THIS_MODULE, \
+1 -1
drivers/pinctrl/pinctrl-pistachio.c
··· 1331 1331 .direction_input = pistachio_gpio_direction_input, \ 1332 1332 .direction_output = pistachio_gpio_direction_output, \ 1333 1333 .get = pistachio_gpio_get, \ 1334 - .set_rv = pistachio_gpio_set, \ 1334 + .set = pistachio_gpio_set, \ 1335 1335 .base = _pin_base, \ 1336 1336 .ngpio = _npins, \ 1337 1337 }, \
+1 -1
drivers/pinctrl/pinctrl-rk805.c
··· 378 378 .free = gpiochip_generic_free, 379 379 .get_direction = rk805_gpio_get_direction, 380 380 .get = rk805_gpio_get, 381 - .set_rv = rk805_gpio_set, 381 + .set = rk805_gpio_set, 382 382 .direction_input = pinctrl_gpio_direction_input, 383 383 .direction_output = rk805_gpio_direction_output, 384 384 .can_sleep = true,
+1 -1
drivers/pinctrl/pinctrl-rp1.c
··· 851 851 .direction_output = rp1_gpio_direction_output, 852 852 .get_direction = rp1_gpio_get_direction, 853 853 .get = rp1_gpio_get, 854 - .set_rv = rp1_gpio_set, 854 + .set = rp1_gpio_set, 855 855 .base = -1, 856 856 .set_config = rp1_gpio_set_config, 857 857 .ngpio = RP1_NUM_GPIOS,
+1 -1
drivers/pinctrl/pinctrl-st.c
··· 1467 1467 .request = gpiochip_generic_request, 1468 1468 .free = gpiochip_generic_free, 1469 1469 .get = st_gpio_get, 1470 - .set_rv = st_gpio_set, 1470 + .set = st_gpio_set, 1471 1471 .direction_input = pinctrl_gpio_direction_input, 1472 1472 .direction_output = st_gpio_direction_output, 1473 1473 .get_direction = st_gpio_get_direction,
+1 -1
drivers/pinctrl/pinctrl-stmfx.c
··· 697 697 pctl->gpio_chip.direction_input = stmfx_gpio_direction_input; 698 698 pctl->gpio_chip.direction_output = stmfx_gpio_direction_output; 699 699 pctl->gpio_chip.get = stmfx_gpio_get; 700 - pctl->gpio_chip.set_rv = stmfx_gpio_set; 700 + pctl->gpio_chip.set = stmfx_gpio_set; 701 701 pctl->gpio_chip.set_config = gpiochip_generic_config; 702 702 pctl->gpio_chip.base = -1; 703 703 pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
+2 -2
drivers/pinctrl/pinctrl-sx150x.c
··· 1176 1176 pctl->gpio.direction_input = sx150x_gpio_direction_input; 1177 1177 pctl->gpio.direction_output = sx150x_gpio_direction_output; 1178 1178 pctl->gpio.get = sx150x_gpio_get; 1179 - pctl->gpio.set_rv = sx150x_gpio_set; 1179 + pctl->gpio.set = sx150x_gpio_set; 1180 1180 pctl->gpio.set_config = gpiochip_generic_config; 1181 1181 pctl->gpio.parent = dev; 1182 1182 pctl->gpio.can_sleep = true; ··· 1191 1191 * would require locking that is not in place at this time. 1192 1192 */ 1193 1193 if (pctl->data->model != SX150X_789) 1194 - pctl->gpio.set_multiple_rv = sx150x_gpio_set_multiple; 1194 + pctl->gpio.set_multiple = sx150x_gpio_set_multiple; 1195 1195 1196 1196 /* Add Interrupt support if an irq is specified */ 1197 1197 if (client->irq > 0) {
+1 -1
drivers/pinctrl/pinctrl-xway.c
··· 1354 1354 .direction_input = xway_gpio_dir_in, 1355 1355 .direction_output = xway_gpio_dir_out, 1356 1356 .get = xway_gpio_get, 1357 - .set_rv = xway_gpio_set, 1357 + .set = xway_gpio_set, 1358 1358 .request = gpiochip_generic_request, 1359 1359 .free = gpiochip_generic_free, 1360 1360 .to_irq = xway_gpio_to_irq,
+1 -1
drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
··· 398 398 .direction_input = lpi_gpio_direction_input, 399 399 .direction_output = lpi_gpio_direction_output, 400 400 .get = lpi_gpio_get, 401 - .set_rv = lpi_gpio_set, 401 + .set = lpi_gpio_set, 402 402 .request = gpiochip_generic_request, 403 403 .free = gpiochip_generic_free, 404 404 .dbg_show = lpi_gpio_dbg_show,
+1 -1
drivers/pinctrl/qcom/pinctrl-msm.c
··· 792 792 .direction_output = msm_gpio_direction_output, 793 793 .get_direction = msm_gpio_get_direction, 794 794 .get = msm_gpio_get, 795 - .set_rv = msm_gpio_set, 795 + .set = msm_gpio_set, 796 796 .request = gpiochip_generic_request, 797 797 .free = gpiochip_generic_free, 798 798 .dbg_show = msm_gpio_dbg_show,
+1 -1
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
··· 802 802 .direction_input = pmic_gpio_direction_input, 803 803 .direction_output = pmic_gpio_direction_output, 804 804 .get = pmic_gpio_get, 805 - .set_rv = pmic_gpio_set, 805 + .set = pmic_gpio_set, 806 806 .request = gpiochip_generic_request, 807 807 .free = gpiochip_generic_free, 808 808 .of_xlate = pmic_gpio_of_xlate,
+1 -1
drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
··· 638 638 .direction_input = pmic_mpp_direction_input, 639 639 .direction_output = pmic_mpp_direction_output, 640 640 .get = pmic_mpp_get, 641 - .set_rv = pmic_mpp_set, 641 + .set = pmic_mpp_set, 642 642 .request = gpiochip_generic_request, 643 643 .free = gpiochip_generic_free, 644 644 .of_xlate = pmic_mpp_of_xlate,
+1 -1
drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
··· 597 597 .direction_input = pm8xxx_gpio_direction_input, 598 598 .direction_output = pm8xxx_gpio_direction_output, 599 599 .get = pm8xxx_gpio_get, 600 - .set_rv = pm8xxx_gpio_set, 600 + .set = pm8xxx_gpio_set, 601 601 .of_xlate = pm8xxx_gpio_of_xlate, 602 602 .dbg_show = pm8xxx_gpio_dbg_show, 603 603 .owner = THIS_MODULE,
+1 -1
drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
··· 634 634 .direction_input = pm8xxx_mpp_direction_input, 635 635 .direction_output = pm8xxx_mpp_direction_output, 636 636 .get = pm8xxx_mpp_get, 637 - .set_rv = pm8xxx_mpp_set, 637 + .set = pm8xxx_mpp_set, 638 638 .of_xlate = pm8xxx_mpp_of_xlate, 639 639 .dbg_show = pm8xxx_mpp_dbg_show, 640 640 .owner = THIS_MODULE,
+1 -1
drivers/pinctrl/renesas/gpio.c
··· 234 234 gc->direction_input = gpio_pin_direction_input; 235 235 gc->get = gpio_pin_get; 236 236 gc->direction_output = gpio_pin_direction_output; 237 - gc->set_rv = gpio_pin_set; 237 + gc->set = gpio_pin_set; 238 238 gc->to_irq = gpio_pin_to_irq; 239 239 240 240 gc->label = pfc->info->name;
+1 -1
drivers/pinctrl/renesas/pinctrl-rza1.c
··· 846 846 .direction_input = rza1_gpio_direction_input, 847 847 .direction_output = rza1_gpio_direction_output, 848 848 .get = rza1_gpio_get, 849 - .set_rv = rza1_gpio_set, 849 + .set = rza1_gpio_set, 850 850 }; 851 851 /* ---------------------------------------------------------------------------- 852 852 * pinctrl operations
+1 -1
drivers/pinctrl/renesas/pinctrl-rza2.c
··· 237 237 .direction_input = rza2_chip_direction_input, 238 238 .direction_output = rza2_chip_direction_output, 239 239 .get = rza2_chip_get, 240 - .set_rv = rza2_chip_set, 240 + .set = rza2_chip_set, 241 241 }; 242 242 243 243 static int rza2_gpio_register(struct rza2_pinctrl_priv *priv)
+1 -1
drivers/pinctrl/renesas/pinctrl-rzg2l.c
··· 2795 2795 chip->direction_input = rzg2l_gpio_direction_input; 2796 2796 chip->direction_output = rzg2l_gpio_direction_output; 2797 2797 chip->get = rzg2l_gpio_get; 2798 - chip->set_rv = rzg2l_gpio_set; 2798 + chip->set = rzg2l_gpio_set; 2799 2799 chip->label = name; 2800 2800 chip->parent = pctrl->dev; 2801 2801 chip->owner = THIS_MODULE;
+1 -1
drivers/pinctrl/renesas/pinctrl-rzv2m.c
··· 957 957 chip->direction_input = rzv2m_gpio_direction_input; 958 958 chip->direction_output = rzv2m_gpio_direction_output; 959 959 chip->get = rzv2m_gpio_get; 960 - chip->set_rv = rzv2m_gpio_set; 960 + chip->set = rzv2m_gpio_set; 961 961 chip->label = name; 962 962 chip->parent = pctrl->dev; 963 963 chip->owner = THIS_MODULE;
+1 -1
drivers/pinctrl/samsung/pinctrl-samsung.c
··· 1067 1067 static const struct gpio_chip samsung_gpiolib_chip = { 1068 1068 .request = gpiochip_generic_request, 1069 1069 .free = gpiochip_generic_free, 1070 - .set_rv = samsung_gpio_set, 1070 + .set = samsung_gpio_set, 1071 1071 .get = samsung_gpio_get, 1072 1072 .direction_input = samsung_gpio_direction_input, 1073 1073 .direction_output = samsung_gpio_direction_output,
+1 -1
drivers/pinctrl/spear/pinctrl-plgpio.c
··· 582 582 plgpio->chip.direction_input = plgpio_direction_input; 583 583 plgpio->chip.direction_output = plgpio_direction_output; 584 584 plgpio->chip.get = plgpio_get_value; 585 - plgpio->chip.set_rv = plgpio_set_value; 585 + plgpio->chip.set = plgpio_set_value; 586 586 plgpio->chip.label = dev_name(&pdev->dev); 587 587 plgpio->chip.parent = &pdev->dev; 588 588 plgpio->chip.owner = THIS_MODULE;
+1 -1
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
··· 1302 1302 sfp->gc.direction_input = starfive_gpio_direction_input; 1303 1303 sfp->gc.direction_output = starfive_gpio_direction_output; 1304 1304 sfp->gc.get = starfive_gpio_get; 1305 - sfp->gc.set_rv = starfive_gpio_set; 1305 + sfp->gc.set = starfive_gpio_set; 1306 1306 sfp->gc.set_config = starfive_gpio_set_config; 1307 1307 sfp->gc.add_pin_ranges = starfive_gpio_add_pin_ranges; 1308 1308 sfp->gc.base = -1;
+1 -1
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
··· 935 935 sfp->gc.direction_input = jh7110_gpio_direction_input; 936 936 sfp->gc.direction_output = jh7110_gpio_direction_output; 937 937 sfp->gc.get = jh7110_gpio_get; 938 - sfp->gc.set_rv = jh7110_gpio_set; 938 + sfp->gc.set = jh7110_gpio_set; 939 939 sfp->gc.set_config = jh7110_gpio_set_config; 940 940 sfp->gc.add_pin_ranges = jh7110_gpio_add_pin_ranges; 941 941 sfp->gc.base = info->gc_base;
+1 -1
drivers/pinctrl/stm32/pinctrl-stm32.c
··· 433 433 .request = stm32_gpio_request, 434 434 .free = stm32_gpio_free, 435 435 .get = stm32_gpio_get, 436 - .set_rv = stm32_gpio_set, 436 + .set = stm32_gpio_set, 437 437 .direction_input = pinctrl_gpio_direction_input, 438 438 .direction_output = stm32_gpio_direction_output, 439 439 .to_irq = stm32_gpio_to_irq,
+1 -1
drivers/pinctrl/sunplus/sppctl.c
··· 547 547 gchip->direction_input = sppctl_gpio_direction_input; 548 548 gchip->direction_output = sppctl_gpio_direction_output; 549 549 gchip->get = sppctl_gpio_get; 550 - gchip->set_rv = sppctl_gpio_set; 550 + gchip->set = sppctl_gpio_set; 551 551 gchip->set_config = sppctl_gpio_set_config; 552 552 gchip->dbg_show = IS_ENABLED(CONFIG_DEBUG_FS) ? 553 553 sppctl_gpio_dbg_show : NULL;
+1 -1
drivers/pinctrl/sunxi/pinctrl-sunxi.c
··· 1604 1604 pctl->chip->direction_input = sunxi_pinctrl_gpio_direction_input; 1605 1605 pctl->chip->direction_output = sunxi_pinctrl_gpio_direction_output; 1606 1606 pctl->chip->get = sunxi_pinctrl_gpio_get; 1607 - pctl->chip->set_rv = sunxi_pinctrl_gpio_set; 1607 + pctl->chip->set = sunxi_pinctrl_gpio_set; 1608 1608 pctl->chip->of_xlate = sunxi_pinctrl_gpio_of_xlate; 1609 1609 pctl->chip->to_irq = sunxi_pinctrl_gpio_to_irq; 1610 1610 pctl->chip->of_gpio_n_cells = 3;
+1 -1
drivers/pinctrl/vt8500/pinctrl-wmt.c
··· 549 549 .direction_input = pinctrl_gpio_direction_input, 550 550 .direction_output = wmt_gpio_direction_output, 551 551 .get = wmt_gpio_get_value, 552 - .set_rv = wmt_gpio_set_value, 552 + .set = wmt_gpio_set_value, 553 553 .can_sleep = false, 554 554 }; 555 555
+2 -2
drivers/platform/cznic/turris-omnia-mcu-gpio.c
··· 1024 1024 mcu->gc.direction_output = omnia_gpio_direction_output; 1025 1025 mcu->gc.get = omnia_gpio_get; 1026 1026 mcu->gc.get_multiple = omnia_gpio_get_multiple; 1027 - mcu->gc.set_rv = omnia_gpio_set; 1028 - mcu->gc.set_multiple_rv = omnia_gpio_set_multiple; 1027 + mcu->gc.set = omnia_gpio_set; 1028 + mcu->gc.set_multiple = omnia_gpio_set_multiple; 1029 1029 mcu->gc.init_valid_mask = omnia_gpio_init_valid_mask; 1030 1030 mcu->gc.can_sleep = true; 1031 1031 mcu->gc.names = omnia_mcu_gpio_names;
+1 -1
drivers/platform/x86/barco-p50-gpio.c
··· 316 316 p50->gc.base = -1; 317 317 p50->gc.get_direction = p50_gpio_get_direction; 318 318 p50->gc.get = p50_gpio_get; 319 - p50->gc.set_rv = p50_gpio_set; 319 + p50->gc.set = p50_gpio_set; 320 320 321 321 322 322 /* reset mbox */
+1 -1
drivers/platform/x86/intel/int0002_vgpio.c
··· 193 193 chip->parent = dev; 194 194 chip->owner = THIS_MODULE; 195 195 chip->get = int0002_gpio_get; 196 - chip->set_rv = int0002_gpio_set; 196 + chip->set = int0002_gpio_set; 197 197 chip->direction_input = int0002_gpio_get; 198 198 chip->direction_output = int0002_gpio_direction_output; 199 199 chip->base = -1;
+2 -2
drivers/platform/x86/portwell-ec.c
··· 86 86 return pwec_read(PORTWELL_GPIO_VAL_REG) & BIT(offset) ? 1 : 0; 87 87 } 88 88 89 - static int pwec_gpio_set_rv(struct gpio_chip *chip, unsigned int offset, int val) 89 + static int pwec_gpio_set(struct gpio_chip *chip, unsigned int offset, int val) 90 90 { 91 91 u8 tmp = pwec_read(PORTWELL_GPIO_VAL_REG); 92 92 ··· 130 130 .direction_input = pwec_gpio_direction_input, 131 131 .direction_output = pwec_gpio_direction_output, 132 132 .get = pwec_gpio_get, 133 - .set_rv = pwec_gpio_set_rv, 133 + .set = pwec_gpio_set, 134 134 .base = -1, 135 135 .ngpio = PORTWELL_GPIO_PINS, 136 136 };
+1 -1
drivers/platform/x86/silicom-platform.c
··· 466 466 .direction_input = silicom_gpio_direction_input, 467 467 .direction_output = silicom_gpio_direction_output, 468 468 .get = silicom_gpio_get, 469 - .set_rv = silicom_gpio_set, 469 + .set = silicom_gpio_set, 470 470 .base = -1, 471 471 .ngpio = ARRAY_SIZE(plat_0222_gpio_channels), 472 472 .names = plat_0222_gpio_names,
+5
drivers/ptp/ptp_private.h
··· 24 24 #define PTP_DEFAULT_MAX_VCLOCKS 20 25 25 #define PTP_MAX_CHANNELS 2048 26 26 27 + enum { 28 + PTP_LOCK_PHYSICAL = 0, 29 + PTP_LOCK_VIRTUAL, 30 + }; 31 + 27 32 struct timestamp_event_queue { 28 33 struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS]; 29 34 int head;
+7
drivers/ptp/ptp_vclock.c
··· 154 154 return PTP_VCLOCK_REFRESH_INTERVAL; 155 155 } 156 156 157 + static void ptp_vclock_set_subclass(struct ptp_clock *ptp) 158 + { 159 + lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL); 160 + } 161 + 157 162 static const struct ptp_clock_info ptp_vclock_info = { 158 163 .owner = THIS_MODULE, 159 164 .name = "ptp virtual clock", ··· 217 212 kfree(vclock); 218 213 return NULL; 219 214 } 215 + 216 + ptp_vclock_set_subclass(vclock->clock); 220 217 221 218 timecounter_init(&vclock->tc, &vclock->cc, 0); 222 219 ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);
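The PTP hunks give a virtual clock's rwsem its own lockdep subclass: vclock operations take clock.rwsem while the backing physical clock's rwsem (which keeps the default subclass 0, PTP_LOCK_PHYSICAL in the new enum) may already be held, and without the annotation lockdep would report that as recursive locking of a single class. A sketch of the pattern for any parent/child pair of same-class locks:

	/* Always acquired parent-then-child; tell lockdep they differ. */
	lockdep_set_subclass(&parent->rwsem, PTP_LOCK_PHYSICAL);	/* 0 */
	lockdep_set_subclass(&child->rwsem, PTP_LOCK_VIRTUAL);		/* 1 */

	down_write(&parent->rwsem);
	down_write(&child->rwsem);	/* distinct subclass: no splat */
	/* ... critical section ... */
	up_write(&child->rwsem);
	up_write(&parent->rwsem);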
+1 -1
drivers/pwm/pwm-pca9685.c
··· 323 323 pca->gpio.direction_input = pca9685_pwm_gpio_direction_input; 324 324 pca->gpio.direction_output = pca9685_pwm_gpio_direction_output; 325 325 pca->gpio.get = pca9685_pwm_gpio_get; 326 - pca->gpio.set_rv = pca9685_pwm_gpio_set; 326 + pca->gpio.set = pca9685_pwm_gpio_set; 327 327 pca->gpio.base = -1; 328 328 pca->gpio.ngpio = PCA9685_MAXCHAN; 329 329 pca->gpio.can_sleep = true;
+1 -1
drivers/regulator/core.c
··· 3884 3884 new_delta = ret; 3885 3885 3886 3886 /* check that voltage is converging quickly enough */ 3887 - if (new_delta - delta > rdev->constraints->max_uV_step) { 3887 + if (delta - new_delta < rdev->constraints->max_uV_step) { 3888 3888 ret = -EWOULDBLOCK; 3889 3889 goto out; 3890 3890 }
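The regulator fix inverts a test that could never fire while converging: new_delta - delta is negative whenever the delta shrinks, so the old comparison only tripped if the delta grew. The corrected check returns -EWOULDBLOCK as soon as a step fails to shrink the delta by at least max_uV_step. A standalone illustration; the step size and per-step deltas below are invented:

#include <stdio.h>

int main(void)
{
        int max_uV_step = 100;                  /* constraint, in uV */
        int deltas[] = { 1000, 850, 790 };      /* remaining delta per step */

        for (int i = 1; i < 3; i++) {
                int shrunk = deltas[i - 1] - deltas[i];

                if (shrunk < max_uV_step) {     /* the fixed condition */
                        printf("step %d shrank only %d uV -> -EWOULDBLOCK\n",
                               i, shrunk);
                        return 1;
                }
                printf("step %d ok, shrank %d uV\n", i, shrunk);
        }
        return 0;
}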
+1 -1
drivers/regulator/rpi-panel-attiny-regulator.c
··· 351 351 state->gc.base = -1; 352 352 state->gc.ngpio = NUM_GPIO; 353 353 354 - state->gc.set_rv = attiny_gpio_set; 354 + state->gc.set = attiny_gpio_set; 355 355 state->gc.get_direction = attiny_gpio_get_direction; 356 356 state->gc.can_sleep = true; 357 357
+2 -2
drivers/soc/fsl/qe/gpio.c
··· 321 321 gc->direction_input = qe_gpio_dir_in; 322 322 gc->direction_output = qe_gpio_dir_out; 323 323 gc->get = qe_gpio_get; 324 - gc->set_rv = qe_gpio_set; 325 - gc->set_multiple_rv = qe_gpio_set_multiple; 324 + gc->set = qe_gpio_set; 325 + gc->set_multiple = qe_gpio_set_multiple; 326 326 327 327 ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc); 328 328 if (ret)
+1 -1
drivers/soc/renesas/pwc-rzv2m.c
··· 64 64 .label = "gpio_rzv2m_pwc", 65 65 .owner = THIS_MODULE, 66 66 .get = rzv2m_pwc_gpio_get, 67 - .set_rv = rzv2m_pwc_gpio_set, 67 + .set = rzv2m_pwc_gpio_set, 68 68 .direction_output = rzv2m_pwc_gpio_direction_output, 69 69 .can_sleep = false, 70 70 .ngpio = 2,
+1 -1
drivers/spi/spi-cs42l43.c
··· 295 295 struct spi_board_info *info; 296 296 297 297 if (spkid >= 0) { 298 - props = devm_kmalloc(priv->dev, sizeof(*props), GFP_KERNEL); 298 + props = devm_kcalloc(priv->dev, 2, sizeof(*props), GFP_KERNEL); 299 299 if (!props) 300 300 return NULL; 301 301
+1 -1
drivers/spi/spi-xcomm.c
··· 70 70 return 0; 71 71 72 72 spi_xcomm->gc.get_direction = spi_xcomm_gpio_get_direction; 73 - spi_xcomm->gc.set_rv = spi_xcomm_gpio_set_value; 73 + spi_xcomm->gc.set = spi_xcomm_gpio_set_value; 74 74 spi_xcomm->gc.can_sleep = 1; 75 75 spi_xcomm->gc.base = -1; 76 76 spi_xcomm->gc.ngpio = 1;
+2 -2
drivers/ssb/driver_gpio.c
··· 225 225 chip->request = ssb_gpio_chipco_request; 226 226 chip->free = ssb_gpio_chipco_free; 227 227 chip->get = ssb_gpio_chipco_get_value; 228 - chip->set_rv = ssb_gpio_chipco_set_value; 228 + chip->set = ssb_gpio_chipco_set_value; 229 229 chip->direction_input = ssb_gpio_chipco_direction_input; 230 230 chip->direction_output = ssb_gpio_chipco_direction_output; 231 231 #if IS_ENABLED(CONFIG_SSB_EMBEDDED) ··· 422 422 chip->label = "ssb_extif_gpio"; 423 423 chip->owner = THIS_MODULE; 424 424 chip->get = ssb_gpio_extif_get_value; 425 - chip->set_rv = ssb_gpio_extif_set_value; 425 + chip->set = ssb_gpio_extif_set_value; 426 426 chip->direction_input = ssb_gpio_extif_direction_input; 427 427 chip->direction_output = ssb_gpio_extif_direction_output; 428 428 #if IS_ENABLED(CONFIG_SSB_EMBEDDED)
+1 -1
drivers/staging/greybus/gpio.c
··· 551 551 gpio->direction_input = gb_gpio_direction_input; 552 552 gpio->direction_output = gb_gpio_direction_output; 553 553 gpio->get = gb_gpio_get; 554 - gpio->set_rv = gb_gpio_set; 554 + gpio->set = gb_gpio_set; 555 555 gpio->set_config = gb_gpio_set_config; 556 556 gpio->base = -1; /* Allocate base dynamically */ 557 557 gpio->ngpio = ggc->line_max + 1;
+1 -1
drivers/tty/serial/max310x.c
··· 1414 1414 s->gpio.direction_input = max310x_gpio_direction_input; 1415 1415 s->gpio.get = max310x_gpio_get; 1416 1416 s->gpio.direction_output= max310x_gpio_direction_output; 1417 - s->gpio.set_rv = max310x_gpio_set; 1417 + s->gpio.set = max310x_gpio_set; 1418 1418 s->gpio.set_config = max310x_gpio_set_config; 1419 1419 s->gpio.base = -1; 1420 1420 s->gpio.ngpio = devtype->nr * 4;
+1 -1
drivers/tty/serial/sc16is7xx.c
··· 1425 1425 s->gpio.direction_input = sc16is7xx_gpio_direction_input; 1426 1426 s->gpio.get = sc16is7xx_gpio_get; 1427 1427 s->gpio.direction_output = sc16is7xx_gpio_direction_output; 1428 - s->gpio.set_rv = sc16is7xx_gpio_set; 1428 + s->gpio.set = sc16is7xx_gpio_set; 1429 1429 s->gpio.base = -1; 1430 1430 s->gpio.ngpio = s->devtype->nr_gpio; 1431 1431 s->gpio.can_sleep = 1;
+1 -1
drivers/usb/serial/cp210x.c
··· 1962 1962 priv->gc.direction_input = cp210x_gpio_direction_input; 1963 1963 priv->gc.direction_output = cp210x_gpio_direction_output; 1964 1964 priv->gc.get = cp210x_gpio_get; 1965 - priv->gc.set_rv = cp210x_gpio_set; 1965 + priv->gc.set = cp210x_gpio_set; 1966 1966 priv->gc.set_config = cp210x_gpio_set_config; 1967 1967 priv->gc.init_valid_mask = cp210x_gpio_init_valid_mask; 1968 1968 priv->gc.owner = THIS_MODULE;
+2 -2
drivers/usb/serial/ftdi_sio.c
··· 2150 2150 priv->gc.direction_output = ftdi_gpio_direction_output; 2151 2151 priv->gc.init_valid_mask = ftdi_gpio_init_valid_mask; 2152 2152 priv->gc.get = ftdi_gpio_get; 2153 - priv->gc.set_rv = ftdi_gpio_set; 2153 + priv->gc.set = ftdi_gpio_set; 2154 2154 priv->gc.get_multiple = ftdi_gpio_get_multiple; 2155 - priv->gc.set_multiple_rv = ftdi_gpio_set_multiple; 2155 + priv->gc.set_multiple = ftdi_gpio_set_multiple; 2156 2156 priv->gc.owner = THIS_MODULE; 2157 2157 priv->gc.parent = &serial->interface->dev; 2158 2158 priv->gc.base = -1;
+1 -1
drivers/video/fbdev/via/via-gpio.c
··· 145 145 .label = "VIAFB onboard GPIO", 146 146 .owner = THIS_MODULE, 147 147 .direction_output = via_gpio_dir_out, 148 - .set_rv = via_gpio_set, 148 + .set = via_gpio_set, 149 149 .direction_input = via_gpio_dir_input, 150 150 .get = via_gpio_get, 151 151 .base = -1,
+6 -5
fs/btrfs/extent_io.c
··· 4331 4331 unsigned long end = index + (PAGE_SIZE >> fs_info->nodesize_bits) - 1; 4332 4332 int ret; 4333 4333 4334 - xa_lock_irq(&fs_info->buffer_tree); 4334 + rcu_read_lock(); 4335 4335 xa_for_each_range(&fs_info->buffer_tree, index, eb, start, end) { 4336 4336 /* 4337 4337 * The same as try_release_extent_buffer(), to ensure the eb 4338 4338 * won't disappear out from under us. 4339 4339 */ 4340 4340 spin_lock(&eb->refs_lock); 4341 + rcu_read_unlock(); 4342 + 4341 4343 if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { 4342 4344 spin_unlock(&eb->refs_lock); 4345 + rcu_read_lock(); 4343 4346 continue; 4344 4347 } 4345 4348 ··· 4361 4358 * check the folio private at the end. And 4362 4359 * release_extent_buffer() will release the refs_lock. 4363 4360 */ 4364 - xa_unlock_irq(&fs_info->buffer_tree); 4365 4361 release_extent_buffer(eb); 4366 - xa_lock_irq(&fs_info->buffer_tree); 4362 + rcu_read_lock(); 4367 4363 } 4368 - xa_unlock_irq(&fs_info->buffer_tree); 4364 + rcu_read_unlock(); 4369 4365 4370 4366 /* 4371 4367 * Finally to check if we have cleared folio private, as if we have ··· 4377 4375 ret = 0; 4378 4376 spin_unlock(&folio->mapping->i_private_lock); 4379 4377 return ret; 4380 - 4381 4378 } 4382 4379 4383 4380 int try_release_extent_buffer(struct folio *folio)
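The extent_io hunk swaps the irq-disabling xarray lock for plain RCU: the buffer tree is only traversed under rcu_read_lock(), and each extent buffer is stabilized with its own refs_lock before the RCU section is left, so releasing a buffer no longer has to drop and retake the tree lock. A condensed sketch of the pattern with generic placeholder types, not the btrfs ones:

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct demo_buf {
        spinlock_t lock;
        refcount_t refs;
};

/* Hypothetical walk mirroring the hunk above: traverse under RCU, pin
 * each entry with its own spinlock, and leave the RCU section before
 * doing anything that might free the entry. */
static void demo_release_range(struct xarray *xa, unsigned long start,
                               unsigned long end)
{
        struct demo_buf *buf;
        unsigned long index;

        rcu_read_lock();
        xa_for_each_range(xa, index, buf, start, end) {
                spin_lock(&buf->lock);  /* buf cannot vanish while held */
                rcu_read_unlock();

                if (refcount_read(&buf->refs) != 1) {
                        spin_unlock(&buf->lock);
                        rcu_read_lock();        /* resume the walk */
                        continue;
                }
                /* ... release buf here; that path drops buf->lock ... */
                rcu_read_lock();
        }
        rcu_read_unlock();
}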
+5 -3
fs/btrfs/inode.c
··· 401 401 402 402 while (index <= end_index) { 403 403 folio = filemap_get_folio(inode->vfs_inode.i_mapping, index); 404 - index++; 405 - if (IS_ERR(folio)) 404 + if (IS_ERR(folio)) { 405 + index++; 406 406 continue; 407 + } 407 408 409 + index = folio_end(folio) >> PAGE_SHIFT; 408 410 /* 409 411 * Here we just clear all Ordered bits for every page in the 410 412 * range, then btrfs_mark_ordered_io_finished() will handle ··· 2015 2013 * cleared by the caller. 2016 2014 */ 2017 2015 if (ret < 0) 2018 - btrfs_cleanup_ordered_extents(inode, file_pos, end); 2016 + btrfs_cleanup_ordered_extents(inode, file_pos, len); 2019 2017 return ret; 2020 2018 2021 2019
+1 -2
fs/btrfs/qgroup.c
··· 1453 1453 struct btrfs_qgroup *src, int sign) 1454 1454 { 1455 1455 struct btrfs_qgroup *qgroup; 1456 - struct btrfs_qgroup *cur; 1457 1456 LIST_HEAD(qgroup_list); 1458 1457 u64 num_bytes = src->excl; 1459 1458 int ret = 0; ··· 1462 1463 goto out; 1463 1464 1464 1465 qgroup_iterator_add(&qgroup_list, qgroup); 1465 - list_for_each_entry(cur, &qgroup_list, iterator) { 1466 + list_for_each_entry(qgroup, &qgroup_list, iterator) { 1466 1467 struct btrfs_qgroup_list *glist; 1467 1468 1468 1469 qgroup->rfer += sign * num_bytes;
+19
fs/btrfs/relocation.c
··· 602 602 if (btrfs_root_id(root) == objectid) { 603 603 u64 commit_root_gen; 604 604 605 + /* 606 + * Relocation will wait for cleaner thread, and any half-dropped 607 + * subvolume will be fully cleaned up at mount time. 608 + * So here we shouldn't hit a subvolume with non-zero drop_progress. 609 + * 610 + * If this isn't the case, error out since it can make us attempt to 611 + * drop references for extents that were already dropped before. 612 + */ 613 + if (unlikely(btrfs_disk_key_objectid(&root->root_item.drop_progress))) { 614 + struct btrfs_key cpu_key; 615 + 616 + btrfs_disk_key_to_cpu(&cpu_key, &root->root_item.drop_progress); 617 + btrfs_err(fs_info, 618 + "cannot relocate partially dropped subvolume %llu, drop progress key (%llu %u %llu)", 619 + objectid, cpu_key.objectid, cpu_key.type, cpu_key.offset); 620 + ret = -EUCLEAN; 621 + goto fail; 622 + } 623 + 605 624 /* called by btrfs_init_reloc_root */ 606 625 ret = btrfs_copy_root(trans, root, root->commit_root, &eb, 607 626 BTRFS_TREE_RELOC_OBJECTID);
+7 -12
fs/btrfs/tree-log.c
··· 2605 2605 /* 2606 2606 * Correctly adjust the reserved bytes occupied by a log tree extent buffer 2607 2607 */ 2608 - static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start) 2608 + static int unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start) 2609 2609 { 2610 2610 struct btrfs_block_group *cache; 2611 2611 2612 2612 cache = btrfs_lookup_block_group(fs_info, start); 2613 2613 if (!cache) { 2614 2614 btrfs_err(fs_info, "unable to find block group for %llu", start); 2615 - return; 2615 + return -ENOENT; 2616 2616 } 2617 2617 2618 2618 spin_lock(&cache->space_info->lock); ··· 2623 2623 spin_unlock(&cache->space_info->lock); 2624 2624 2625 2625 btrfs_put_block_group(cache); 2626 + 2627 + return 0; 2626 2628 } 2627 2629 2628 2630 static int clean_log_buffer(struct btrfs_trans_handle *trans, 2629 2631 struct extent_buffer *eb) 2630 2632 { 2631 - int ret; 2632 - 2633 2633 btrfs_tree_lock(eb); 2634 2634 btrfs_clear_buffer_dirty(trans, eb); 2635 2635 wait_on_extent_buffer_writeback(eb); 2636 2636 btrfs_tree_unlock(eb); 2637 2637 2638 - if (trans) { 2639 - ret = btrfs_pin_reserved_extent(trans, eb); 2640 - if (ret) 2641 - return ret; 2642 - } else { 2643 - unaccount_log_buffer(eb->fs_info, eb->start); 2644 - } 2638 + if (trans) 2639 + return btrfs_pin_reserved_extent(trans, eb); 2645 2640 2646 - return 0; 2641 + return unaccount_log_buffer(eb->fs_info, eb->start); 2647 2642 } 2648 2643 2649 2644 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
+1 -1
fs/btrfs/zoned.c
··· 2650 2650 2651 2651 spin_lock(&block_group->lock); 2652 2652 if (block_group->reserved || block_group->alloc_offset == 0 || 2653 - (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) || 2653 + !(block_group->flags & BTRFS_BLOCK_GROUP_DATA) || 2654 2654 test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) { 2655 2655 spin_unlock(&block_group->lock); 2656 2656 continue;
+10 -10
fs/erofs/Kconfig
··· 3 3 config EROFS_FS 4 4 tristate "EROFS filesystem support" 5 5 depends on BLOCK 6 + select CACHEFILES if EROFS_FS_ONDEMAND 6 7 select CRC32 8 + select CRYPTO if EROFS_FS_ZIP_ACCEL 9 + select CRYPTO_DEFLATE if EROFS_FS_ZIP_ACCEL 7 10 select FS_IOMAP 11 + select LZ4_DECOMPRESS if EROFS_FS_ZIP 12 + select NETFS_SUPPORT if EROFS_FS_ONDEMAND 13 + select XXHASH if EROFS_FS_XATTR 14 + select XZ_DEC if EROFS_FS_ZIP_LZMA 15 + select XZ_DEC_MICROLZMA if EROFS_FS_ZIP_LZMA 16 + select ZLIB_INFLATE if EROFS_FS_ZIP_DEFLATE 17 + select ZSTD_DECOMPRESS if EROFS_FS_ZIP_ZSTD 8 18 help 9 19 EROFS (Enhanced Read-Only File System) is a lightweight read-only 10 20 file system with modern designs (e.g. no buffer heads, inline ··· 48 38 config EROFS_FS_XATTR 49 39 bool "EROFS extended attributes" 50 40 depends on EROFS_FS 51 - select XXHASH 52 41 default y 53 42 help 54 43 Extended attributes are name:value pairs associated with inodes by ··· 103 94 config EROFS_FS_ZIP 104 95 bool "EROFS Data Compression Support" 105 96 depends on EROFS_FS 106 - select LZ4_DECOMPRESS 107 97 default y 108 98 help 109 99 Enable transparent compression support for EROFS file systems. ··· 112 104 config EROFS_FS_ZIP_LZMA 113 105 bool "EROFS LZMA compressed data support" 114 106 depends on EROFS_FS_ZIP 115 - select XZ_DEC 116 - select XZ_DEC_MICROLZMA 117 107 help 118 108 Saying Y here includes support for reading EROFS file systems 119 109 containing LZMA compressed data, specifically called microLZMA. It ··· 123 117 config EROFS_FS_ZIP_DEFLATE 124 118 bool "EROFS DEFLATE compressed data support" 125 119 depends on EROFS_FS_ZIP 126 - select ZLIB_INFLATE 127 120 help 128 121 Saying Y here includes support for reading EROFS file systems 129 122 containing DEFLATE compressed data. It gives better compression ··· 137 132 config EROFS_FS_ZIP_ZSTD 138 133 bool "EROFS Zstandard compressed data support" 139 134 depends on EROFS_FS_ZIP 140 - select ZSTD_DECOMPRESS 141 135 help 142 136 Saying Y here includes support for reading EROFS file systems 143 137 containing Zstandard compressed data. It gives better compression ··· 151 147 config EROFS_FS_ZIP_ACCEL 152 148 bool "EROFS hardware decompression support" 153 149 depends on EROFS_FS_ZIP 154 - select CRYPTO 155 - select CRYPTO_DEFLATE 156 150 help 157 151 Saying Y here includes hardware accelerator support for reading 158 152 EROFS file systems containing compressed data. It gives better ··· 165 163 config EROFS_FS_ONDEMAND 166 164 bool "EROFS fscache-based on-demand read support (deprecated)" 167 165 depends on EROFS_FS 168 - select NETFS_SUPPORT 169 166 select FSCACHE 170 - select CACHEFILES 171 167 select CACHEFILES_ONDEMAND 172 168 help 173 169 This permits EROFS to use fscache-backed data blobs with on-demand
+16 -12
fs/erofs/super.c
··· 174 174 if (!erofs_is_fileio_mode(sbi)) { 175 175 dif->dax_dev = fs_dax_get_by_bdev(file_bdev(file), 176 176 &dif->dax_part_off, NULL, NULL); 177 + if (!dif->dax_dev && test_opt(&sbi->opt, DAX_ALWAYS)) { 178 + erofs_info(sb, "DAX unsupported by %s. Turning off DAX.", 179 + dif->path); 180 + clear_opt(&sbi->opt, DAX_ALWAYS); 181 + } 177 182 } else if (!S_ISREG(file_inode(file)->i_mode)) { 178 183 fput(file); 179 184 return -EINVAL; ··· 215 210 ondisk_extradevs, sbi->devs->extra_devices); 216 211 return -EINVAL; 217 212 } 218 - if (!ondisk_extradevs) 213 + if (!ondisk_extradevs) { 214 + if (test_opt(&sbi->opt, DAX_ALWAYS) && !sbi->dif0.dax_dev) { 215 + erofs_info(sb, "DAX unsupported by block device. Turning off DAX."); 216 + clear_opt(&sbi->opt, DAX_ALWAYS); 217 + } 219 218 return 0; 219 + } 220 220 221 221 if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb)) 222 222 sbi->devs->flatdev = true; ··· 323 313 sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact)); 324 314 if (erofs_sb_has_48bit(sbi) && dsb->rootnid_8b) { 325 315 sbi->root_nid = le64_to_cpu(dsb->rootnid_8b); 326 - sbi->dif0.blocks = (sbi->dif0.blocks << 32) | 327 - le16_to_cpu(dsb->rb.blocks_hi); 316 + sbi->dif0.blocks = sbi->dif0.blocks | 317 + ((u64)le16_to_cpu(dsb->rb.blocks_hi) << 32); 328 318 } else { 329 319 sbi->root_nid = le16_to_cpu(dsb->rb.rootnid_2b); 330 320 } ··· 348 338 if (ret < 0) 349 339 goto out; 350 340 351 - /* handle multiple devices */ 352 341 ret = erofs_scan_devices(sb, dsb); 353 342 354 343 if (erofs_sb_has_48bit(sbi)) ··· 680 671 return invalfc(fc, "cannot use fsoffset in fscache mode"); 681 672 } 682 673 683 - if (test_opt(&sbi->opt, DAX_ALWAYS)) { 684 - if (!sbi->dif0.dax_dev) { 685 - errorfc(fc, "DAX unsupported by block device. Turning off DAX."); 686 - clear_opt(&sbi->opt, DAX_ALWAYS); 687 - } else if (sbi->blkszbits != PAGE_SHIFT) { 688 - errorfc(fc, "unsupported blocksize for DAX"); 689 - clear_opt(&sbi->opt, DAX_ALWAYS); 690 - } 674 + if (test_opt(&sbi->opt, DAX_ALWAYS) && sbi->blkszbits != PAGE_SHIFT) { 675 + erofs_info(sb, "unsupported blocksize for DAX"); 676 + clear_opt(&sbi->opt, DAX_ALWAYS); 691 677 } 692 678 693 679 sb->s_time_gran = 1;
+11 -2
fs/erofs/zdata.c
··· 1432 1432 } 1433 1433 #endif 1434 1434 1435 + /* Use (kthread_)work in atomic contexts to minimize scheduling overhead */ 1436 + static inline bool z_erofs_in_atomic(void) 1437 + { 1438 + if (IS_ENABLED(CONFIG_PREEMPTION) && rcu_preempt_depth()) 1439 + return true; 1440 + if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) 1441 + return true; 1442 + return !preemptible(); 1443 + } 1444 + 1435 1445 static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, 1436 1446 int bios) 1437 1447 { ··· 1456 1446 1457 1447 if (atomic_add_return(bios, &io->pending_bios)) 1458 1448 return; 1459 - /* Use (kthread_)work and sync decompression for atomic contexts only */ 1460 - if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) { 1449 + if (z_erofs_in_atomic()) { 1461 1450 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD 1462 1451 struct kthread_worker *worker; 1463 1452
+2 -2
fs/nfs/blocklayout/blocklayout.c
··· 149 149 150 150 /* limit length to what the device mapping allows */ 151 151 end = disk_addr + *len; 152 - if (end >= map->start + map->len) 153 - *len = map->start + map->len - disk_addr; 152 + if (end >= map->disk_offset + map->len) 153 + *len = map->disk_offset + map->len - disk_addr; 154 154 155 155 retry: 156 156 if (!bio) {
+3 -2
fs/nfs/blocklayout/dev.c
··· 257 257 struct pnfs_block_dev *child; 258 258 u64 chunk; 259 259 u32 chunk_idx; 260 + u64 disk_chunk; 260 261 u64 disk_offset; 261 262 262 263 chunk = div_u64(offset, dev->chunk_size); 263 - div_u64_rem(chunk, dev->nr_children, &chunk_idx); 264 + disk_chunk = div_u64_rem(chunk, dev->nr_children, &chunk_idx); 264 265 265 266 if (chunk_idx >= dev->nr_children) { 266 267 dprintk("%s: invalid chunk idx %d (%lld/%lld)\n", ··· 274 273 offset = chunk * dev->chunk_size; 275 274 276 275 /* disk offset of the stripe */ 277 - disk_offset = div_u64(offset, dev->nr_children); 276 + disk_offset = disk_chunk * dev->chunk_size; 278 277 279 278 child = &dev->children[chunk_idx]; 280 279 child->map(child, disk_offset, map);
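The dev.c fix keeps the quotient that div_u64_rem() already computes and multiplies it by the chunk size; dividing the raw volume offset by the stripe count, as the old code did, is only correct when the offset happens to be a multiple of chunk_size * nr_children. A standalone check with invented geometry (256 KiB chunks across 3 data servers):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t chunk_size = 262144;           /* 256 KiB stripe unit */
        uint64_t nr_children = 3;               /* stripe width */
        uint64_t offset = 5 * chunk_size;       /* volume offset */

        uint64_t chunk = offset / chunk_size;           /* 5 */
        uint64_t chunk_idx = chunk % nr_children;       /* lands on disk 2 */
        uint64_t disk_chunk = chunk / nr_children;      /* 2nd chunk there */

        uint64_t fixed = disk_chunk * chunk_size;       /* 262144, aligned */
        uint64_t old = offset / nr_children;            /* 436906, bogus */

        printf("disk %" PRIu64 ": fixed=%" PRIu64 " old=%" PRIu64 "\n",
               chunk_idx, fixed, old);
        return 0;
}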
+91 -13
fs/nfs/blocklayout/extent_tree.c
··· 6 6 #include <linux/vmalloc.h> 7 7 8 8 #include "blocklayout.h" 9 + #include "../nfs4trace.h" 9 10 10 11 #define NFSDBG_FACILITY NFSDBG_PNFS_LD 11 12 ··· 521 520 return xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT); 522 521 } 523 522 524 - static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p, 523 + /** 524 + * ext_tree_try_encode_commit - try to encode all extents into the buffer 525 + * @bl: pointer to the layout 526 + * @p: pointer to the output buffer 527 + * @buffer_size: size of the output buffer 528 + * @count: output pointer to the number of encoded extents 529 + * @lastbyte: output pointer to the last written byte 530 + * 531 + * Return values: 532 + * %0: Success, all required extents encoded, outputs are valid 533 + * %-ENOSPC: Buffer too small, nothing encoded, outputs are invalid 534 + */ 535 + static int 536 + ext_tree_try_encode_commit(struct pnfs_block_layout *bl, __be32 *p, 525 537 size_t buffer_size, size_t *count, __u64 *lastbyte) 526 538 { 527 539 struct pnfs_block_extent *be; 540 + 541 + spin_lock(&bl->bl_ext_lock); 542 + for (be = ext_tree_first(&bl->bl_ext_rw); be; be = ext_tree_next(be)) { 543 + if (be->be_state != PNFS_BLOCK_INVALID_DATA || 544 + be->be_tag != EXTENT_WRITTEN) 545 + continue; 546 + 547 + (*count)++; 548 + if (ext_tree_layoutupdate_size(bl, *count) > buffer_size) { 549 + spin_unlock(&bl->bl_ext_lock); 550 + return -ENOSPC; 551 + } 552 + } 553 + for (be = ext_tree_first(&bl->bl_ext_rw); be; be = ext_tree_next(be)) { 554 + if (be->be_state != PNFS_BLOCK_INVALID_DATA || 555 + be->be_tag != EXTENT_WRITTEN) 556 + continue; 557 + 558 + if (bl->bl_scsi_layout) 559 + p = encode_scsi_range(be, p); 560 + else 561 + p = encode_block_extent(be, p); 562 + be->be_tag = EXTENT_COMMITTING; 563 + } 564 + *lastbyte = (bl->bl_lwb != 0) ? bl->bl_lwb - 1 : U64_MAX; 565 + bl->bl_lwb = 0; 566 + spin_unlock(&bl->bl_ext_lock); 567 + 568 + return 0; 569 + } 570 + 571 + /** 572 + * ext_tree_encode_commit - encode as many extents as possible into the buffer 573 + * @bl: pointer to the layout 574 + * @p: pointer to the output buffer 575 + * @buffer_size: size of the output buffer 576 + * @count: output pointer to the number of encoded extents 577 + * @lastbyte: output pointer to the last written byte 578 + * 579 + * Return values: 580 + * %0: Success, all required extents encoded, outputs are valid 581 + * %-ENOSPC: Buffer too small, some extents are encoded, outputs are valid 582 + */ 583 + static int 584 + ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p, 585 + size_t buffer_size, size_t *count, __u64 *lastbyte) 586 + { 587 + struct pnfs_block_extent *be, *be_prev; 528 588 int ret = 0; 529 589 530 590 spin_lock(&bl->bl_ext_lock); ··· 596 534 597 535 (*count)++; 598 536 if (ext_tree_layoutupdate_size(bl, *count) > buffer_size) { 599 - /* keep counting.. */ 537 + (*count)--; 600 538 ret = -ENOSPC; 601 - continue; 539 + break; 602 540 } 603 541 604 542 if (bl->bl_scsi_layout) ··· 606 544 else 607 545 p = encode_block_extent(be, p); 608 546 be->be_tag = EXTENT_COMMITTING; 547 + be_prev = be; 609 548 } 610 - *lastbyte = bl->bl_lwb - 1; 611 - bl->bl_lwb = 0; 549 + if (!ret) { 550 + *lastbyte = (bl->bl_lwb != 0) ? bl->bl_lwb - 1 : U64_MAX; 551 + bl->bl_lwb = 0; 552 + } else { 553 + *lastbyte = be_prev->be_f_offset + be_prev->be_length; 554 + *lastbyte <<= SECTOR_SHIFT; 555 + *lastbyte -= 1; 556 + } 612 557 spin_unlock(&bl->bl_ext_lock); 613 558 614 559 return ret; 615 560 } 616 561 562 + /** 563 + * ext_tree_prepare_commit - encode extents that need to be committed 564 + * @arg: layout commit data 565 + * 566 + * Return values: 567 + * %0: Success, all required extents are encoded 568 + * %-ENOSPC: Some extents are encoded, but not all, due to RPC size limit 569 + * %-ENOMEM: Out of memory, extents not encoded 570 + */ 617 571 int 618 572 ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg) 619 573 { ··· 638 560 __be32 *start_p; 639 561 int ret; 640 562 641 - dprintk("%s enter\n", __func__); 642 - 643 563 arg->layoutupdate_page = alloc_page(GFP_NOFS); 644 564 if (!arg->layoutupdate_page) 645 565 return -ENOMEM; 646 566 start_p = page_address(arg->layoutupdate_page); 647 567 arg->layoutupdate_pages = &arg->layoutupdate_page; 648 568 649 - retry: 650 - ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count, &arg->lastbytewritten); 569 + ret = ext_tree_try_encode_commit(bl, start_p + 1, buffer_size, 570 + &count, &arg->lastbytewritten); 651 571 if (unlikely(ret)) { 652 572 ext_tree_free_commitdata(arg, buffer_size); 653 573 654 - buffer_size = ext_tree_layoutupdate_size(bl, count); 574 + buffer_size = NFS_SERVER(arg->inode)->wsize; 655 575 count = 0; 656 576 657 577 arg->layoutupdate_pages = ··· 664 588 return -ENOMEM; 665 589 } 666 590 667 - goto retry; 591 + ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, 592 + &count, &arg->lastbytewritten); 668 593 } 669 594 670 595 *start_p = cpu_to_be32(count); ··· 684 607 } 685 608 } 686 609 687 - dprintk("%s found %zu ranges\n", __func__, count); 688 - return 0; 610 + trace_bl_ext_tree_prepare_commit(ret, count, 611 + arg->lastbytewritten, !!ret); 612 + return ret; 689 613 } 690 614 691 615 void
+43 -4
fs/nfs/client.c
··· 682 682 } 683 683 EXPORT_SYMBOL_GPL(nfs_init_client); 684 684 685 + static void nfs4_server_set_init_caps(struct nfs_server *server) 686 + { 687 + #if IS_ENABLED(CONFIG_NFS_V4) 688 + /* Set the basic capabilities */ 689 + server->caps = server->nfs_client->cl_mvops->init_caps; 690 + if (server->flags & NFS_MOUNT_NORDIRPLUS) 691 + server->caps &= ~NFS_CAP_READDIRPLUS; 692 + if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA) 693 + server->caps &= ~NFS_CAP_READ_PLUS; 694 + 695 + /* 696 + * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower 697 + * authentication. 698 + */ 699 + if (nfs4_disable_idmapping && 700 + server->client->cl_auth->au_flavor == RPC_AUTH_UNIX) 701 + server->caps |= NFS_CAP_UIDGID_NOMAP; 702 + #endif 703 + } 704 + 705 + void nfs_server_set_init_caps(struct nfs_server *server) 706 + { 707 + switch (server->nfs_client->rpc_ops->version) { 708 + case 2: 709 + server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS; 710 + break; 711 + case 3: 712 + server->caps = NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS; 713 + if (!(server->flags & NFS_MOUNT_NORDIRPLUS)) 714 + server->caps |= NFS_CAP_READDIRPLUS; 715 + break; 716 + default: 717 + nfs4_server_set_init_caps(server); 718 + break; 719 + } 720 + } 721 + EXPORT_SYMBOL_GPL(nfs_server_set_init_caps); 722 + 685 723 /* 686 724 * Create a version 2 or 3 client 687 725 */ ··· 764 726 /* Initialise the client representation from the mount data */ 765 727 server->flags = ctx->flags; 766 728 server->options = ctx->options; 767 - server->caps |= NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS; 768 729 769 730 switch (clp->rpc_ops->version) { 770 731 case 2: ··· 798 761 ctx->selected_flavor); 799 762 if (error < 0) 800 763 goto error; 764 + 765 + nfs_server_set_init_caps(server); 801 766 802 767 /* Preserve the values of mount_server-related mount options */ 803 768 if (ctx->mount_server.addrlen) { ··· 853 814 server->wsize = max_rpc_payload; 854 815 if (server->wsize > NFS_MAX_FILE_IO_SIZE) 855 816 server->wsize = NFS_MAX_FILE_IO_SIZE; 856 - server->wpages = (server->wsize + PAGE_SIZE - 1) >> PAGE_SHIFT; 857 817 858 818 server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL); 859 819 ··· 869 831 870 832 server->maxfilesize = fsinfo->maxfilesize; 871 833 872 - server->time_delta = fsinfo->time_delta; 873 834 server->change_attr_type = fsinfo->change_attr_type; 874 835 875 836 server->clone_blksize = fsinfo->clone_blksize; ··· 973 936 target->acregmax = source->acregmax; 974 937 target->acdirmin = source->acdirmin; 975 938 target->acdirmax = source->acdirmax; 976 - target->caps = source->caps; 977 939 target->options = source->options; 978 940 target->auth_info = source->auth_info; 979 941 target->port = source->port; ··· 1043 1007 INIT_LIST_HEAD(&server->ss_src_copies); 1044 1008 1045 1009 atomic_set(&server->active, 0); 1010 + atomic_long_set(&server->nr_active_delegations, 0); 1046 1011 1047 1012 server->io_stats = nfs_alloc_iostats(); 1048 1013 if (!server->io_stats) { ··· 1206 1169 flavor); 1207 1170 if (error < 0) 1208 1171 goto out_free_server; 1172 + 1173 + nfs_server_set_init_caps(server); 1209 1174 1210 1175 /* probe the filesystem info for this server filesystem */ 1211 1176 error = nfs_probe_server(server, fh);
+73 -41
fs/nfs/delegation.c
··· 27 27 28 28 #define NFS_DEFAULT_DELEGATION_WATERMARK (5000U) 29 29 30 - static atomic_long_t nfs_active_delegations; 31 30 static unsigned nfs_delegation_watermark = NFS_DEFAULT_DELEGATION_WATERMARK; 31 + module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644); 32 + 33 + static struct hlist_head *nfs_delegation_hash(struct nfs_server *server, 34 + const struct nfs_fh *fhandle) 35 + { 36 + return server->delegation_hash_table + 37 + (nfs_fhandle_hash(fhandle) & server->delegation_hash_mask); 38 + } 32 39 33 40 static void __nfs_free_delegation(struct nfs_delegation *delegation) 34 41 { ··· 44 37 kfree_rcu(delegation, rcu); 45 38 } 46 39 47 - static void nfs_mark_delegation_revoked(struct nfs_delegation *delegation) 40 + static void nfs_mark_delegation_revoked(struct nfs_server *server, 41 + struct nfs_delegation *delegation) 48 42 { 49 43 if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { 50 44 delegation->stateid.type = NFS4_INVALID_STATEID_TYPE; 51 - atomic_long_dec(&nfs_active_delegations); 45 + atomic_long_dec(&server->nr_active_delegations); 52 46 if (!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) 53 47 nfs_clear_verifier_delegated(delegation->inode); 54 48 } ··· 67 59 __nfs_free_delegation(delegation); 68 60 } 69 61 70 - static void nfs_free_delegation(struct nfs_delegation *delegation) 62 + static void nfs_free_delegation(struct nfs_server *server, 63 + struct nfs_delegation *delegation) 71 64 { 72 - nfs_mark_delegation_revoked(delegation); 65 + nfs_mark_delegation_revoked(server, delegation); 73 66 nfs_put_delegation(delegation); 74 67 } 75 68 ··· 246 237 247 238 rcu_read_lock(); 248 239 delegation = rcu_dereference(NFS_I(inode)->delegation); 249 - if (delegation != NULL) { 250 - spin_lock(&delegation->lock); 251 - nfs4_stateid_copy(&delegation->stateid, stateid); 252 - delegation->type = type; 253 - delegation->pagemod_limit = pagemod_limit; 254 - oldcred = delegation->cred; 255 - delegation->cred = get_cred(cred); 256 - switch (deleg_type) { 257 - case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG: 258 - case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG: 259 - set_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags); 260 - break; 261 - default: 262 - clear_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags); 263 - } 264 - clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); 265 - if (test_and_clear_bit(NFS_DELEGATION_REVOKED, 266 - &delegation->flags)) 267 - atomic_long_inc(&nfs_active_delegations); 268 - spin_unlock(&delegation->lock); 269 - rcu_read_unlock(); 270 - put_cred(oldcred); 271 - trace_nfs4_reclaim_delegation(inode, type); 272 - } else { 240 + if (!delegation) { 273 241 rcu_read_unlock(); 274 242 nfs_inode_set_delegation(inode, cred, type, stateid, 275 243 pagemod_limit, deleg_type); 244 + return; 276 245 } 246 + 247 + spin_lock(&delegation->lock); 248 + nfs4_stateid_copy(&delegation->stateid, stateid); 249 + delegation->type = type; 250 + delegation->pagemod_limit = pagemod_limit; 251 + oldcred = delegation->cred; 252 + delegation->cred = get_cred(cred); 253 + switch (deleg_type) { 254 + case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG: 255 + case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG: 256 + set_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags); 257 + break; 258 + default: 259 + clear_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags); 260 + } 261 + clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); 262 + if (test_and_clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) 263 + atomic_long_inc(&NFS_SERVER(inode)->nr_active_delegations); 264 + spin_unlock(&delegation->lock); 265 + rcu_read_unlock(); 266 + put_cred(oldcred); 267 + trace_nfs4_reclaim_delegation(inode, type); 277 268 } 278 269 279 270 static int nfs_do_return_delegation(struct inode *inode, ··· 364 355 rcu_dereference_protected(nfsi->delegation, 365 356 lockdep_is_held(&clp->cl_lock)); 366 357 358 + trace_nfs4_detach_delegation(&nfsi->vfs_inode, delegation->type); 359 + 367 360 if (deleg_cur == NULL || delegation != deleg_cur) 368 361 return NULL; 369 362 ··· 374 363 spin_unlock(&delegation->lock); 375 364 return NULL; 376 365 } 366 + hlist_del_init_rcu(&delegation->hash); 377 367 list_del_rcu(&delegation->super_list); 378 368 delegation->inode = NULL; 379 369 rcu_assign_pointer(nfsi->delegation, NULL); ··· 422 410 } 423 411 424 412 static void 425 - nfs_update_inplace_delegation(struct nfs_delegation *delegation, 413 + nfs_update_inplace_delegation(struct nfs_server *server, 414 + struct nfs_delegation *delegation, 426 415 const struct nfs_delegation *update) 427 416 { 428 417 if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) { ··· 436 423 nfs_update_delegation_cred(delegation, update->cred); 437 424 /* smp_mb__before_atomic() is implicit due to xchg() */ 438 425 clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags); 439 - atomic_long_inc(&nfs_active_delegations); 426 + atomic_long_inc(&server->nr_active_delegations); 440 427 } 441 428 } ··· 491 478 if (nfs4_stateid_match_other(&old_delegation->stateid, 492 479 &delegation->stateid)) { 493 480 spin_lock(&old_delegation->lock); 494 - nfs_update_inplace_delegation(old_delegation, 481 + nfs_update_inplace_delegation(server, old_delegation, 495 482 delegation); 496 483 spin_unlock(&old_delegation->lock); 497 484 goto out; ··· 537 524 spin_unlock(&inode->i_lock); 538 525 539 526 list_add_tail_rcu(&delegation->super_list, &server->delegations); 527 + hlist_add_head_rcu(&delegation->hash, 528 + nfs_delegation_hash(server, &NFS_I(inode)->fh)); 540 529 rcu_assign_pointer(nfsi->delegation, delegation); 541 530 delegation = NULL; 542 531 543 - atomic_long_inc(&nfs_active_delegations); 532 + atomic_long_inc(&server->nr_active_delegations); 544 533 545 534 trace_nfs4_set_delegation(inode, type); 546 535 ··· 556 541 __nfs_free_delegation(delegation); 557 542 if (freeme != NULL) { 558 543 nfs_do_return_delegation(inode, freeme, 0); 559 - nfs_free_delegation(freeme); 544 + nfs_free_delegation(server, freeme); 560 545 } 561 546 return status; 562 547 } ··· 606 591 static bool nfs_delegation_need_return(struct nfs_delegation *delegation) 607 592 { 608 593 bool ret = false; 594 + 595 + trace_nfs_delegation_need_return(delegation); 609 596 610 597 if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) 611 598 ret = true; ··· 768 751 set_bit(NFS_DELEGATION_RETURNING, &delegation->flags); 769 752 set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags); 770 753 nfs_do_return_delegation(inode, delegation, 1); 771 - nfs_free_delegation(delegation); 754 + nfs_free_delegation(NFS_SERVER(inode), delegation); 772 755 } 773 756 } ··· 854 837 if (!delegation) 855 838 goto out; 856 839 if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) || 857 - atomic_long_read(&nfs_active_delegations) >= nfs_delegation_watermark) { 840 + atomic_long_read(&NFS_SERVER(inode)->nr_active_delegations) >= 841 + nfs_delegation_watermark) { 858 842 spin_lock(&delegation->lock); 859 843 if (delegation->inode && 860 844 list_empty(&NFS_I(inode)->open_files) && ··· 1031 1013 } 1032 1014 spin_unlock(&delegation->lock); 1033 1015 } 1034 - nfs_mark_delegation_revoked(delegation); 1016 + nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation); 1035 1017 ret = true; 1036 1018 out: 1037 1019 rcu_read_unlock(); ··· 1063 1045 delegation->stateid.seqid = stateid->seqid; 1064 1046 } 1065 1047 1066 - nfs_mark_delegation_revoked(delegation); 1048 + nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation); 1067 1049 clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags); 1068 1050 spin_unlock(&delegation->lock); 1069 1051 if (nfs_detach_delegation(NFS_I(inode), delegation, NFS_SERVER(inode))) ··· 1176 1158 nfs_delegation_find_inode_server(struct nfs_server *server, 1177 1159 const struct nfs_fh *fhandle) 1178 1160 { 1161 + struct hlist_head *head = nfs_delegation_hash(server, fhandle); 1179 1162 struct nfs_delegation *delegation; 1180 1163 struct super_block *freeme = NULL; 1181 1164 struct inode *res = NULL; 1182 1165 1183 - list_for_each_entry_rcu(delegation, &server->delegations, super_list) { 1166 + hlist_for_each_entry_rcu(delegation, head, hash) { 1184 1167 spin_lock(&delegation->lock); 1185 1168 if (delegation->inode != NULL && 1186 1169 !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) && ··· 1284 1265 if (delegation != NULL) { 1285 1266 if (nfs_detach_delegation(NFS_I(inode), delegation, 1286 1267 server) != NULL) 1287 - nfs_free_delegation(delegation); 1268 + nfs_free_delegation(server, delegation); 1288 1269 /* Match nfs_start_delegation_return_locked */ 1289 1270 nfs_put_delegation(delegation); 1290 1271 } ··· 1589 1570 return ret; 1590 1571 } 1591 1572 1592 - module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644); 1573 + int nfs4_delegation_hash_alloc(struct nfs_server *server) 1574 + { 1575 + int delegation_buckets, i; 1576 + 1577 + delegation_buckets = roundup_pow_of_two(nfs_delegation_watermark / 16); 1578 + server->delegation_hash_mask = delegation_buckets - 1; 1579 + server->delegation_hash_table = kmalloc_array(delegation_buckets, 1580 + sizeof(*server->delegation_hash_table), GFP_KERNEL); 1581 + if (!server->delegation_hash_table) 1582 + return -ENOMEM; 1583 + for (i = 0; i < delegation_buckets; i++) 1584 + INIT_HLIST_HEAD(&server->delegation_hash_table[i]); 1585 + return 0; 1586 + }
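The new per-server hash table is sized from the same watermark that caps active delegations: one bucket per 16 expected entries, rounded up to a power of two so lookups can mask instead of taking a modulo. With the default watermark of 5000 that works out to 512 buckets; a plain-C stand-in for the kernel helper shows the arithmetic:

#include <stdio.h>

/* Simplified stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int watermark = 5000;  /* NFS_DEFAULT_DELEGATION_WATERMARK */
        unsigned long buckets = roundup_pow_of_two(watermark / 16);

        /* 5000 / 16 = 312, rounded up to 512; mask is 0x1ff */
        printf("buckets=%lu mask=%#lx\n", buckets, buckets - 1);
        return 0;
}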
+3
fs/nfs/delegation.h
··· 14 14 * NFSv4 delegation 15 15 */ 16 16 struct nfs_delegation { 17 + struct hlist_node hash; 17 18 struct list_head super_list; 18 19 const struct cred *cred; 19 20 struct inode *inode; ··· 123 122 return NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE, 124 123 NFS_DELEGATION_FLAG_TIME); 125 124 } 125 + 126 + int nfs4_delegation_hash_alloc(struct nfs_server *server); 126 127 127 128 #endif
+1 -3
fs/nfs/dir.c
··· 1828 1828 1829 1829 static void unblock_revalidate(struct dentry *dentry) 1830 1830 { 1831 - /* store_release ensures wait_var_event() sees the update */ 1832 - smp_store_release(&dentry->d_fsdata, NULL); 1833 - wake_up_var(&dentry->d_fsdata); 1831 + store_release_wake_up(&dentry->d_fsdata, NULL); 1834 1832 } 1835 1833 1836 1834 /*
+9 -2
fs/nfs/export.c
··· 66 66 { 67 67 struct nfs_fattr *fattr = NULL; 68 68 struct nfs_fh *server_fh = nfs_exp_embedfh(fid->raw); 69 - size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size; 69 + size_t fh_size = offsetof(struct nfs_fh, data); 70 70 const struct nfs_rpc_ops *rpc_ops; 71 71 struct dentry *dentry; 72 72 struct inode *inode; 73 - int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size); 73 + int len = EMBED_FH_OFF; 74 74 u32 *p = fid->raw; 75 75 int ret; 76 76 77 + /* Initial check of bounds */ 78 + if (fh_len < len + XDR_QUADLEN(fh_size) || 79 + fh_len > XDR_QUADLEN(NFS_MAXFHSIZE)) 80 + return NULL; 81 + /* Calculate embedded filehandle size */ 82 + fh_size += server_fh->size; 83 + len += XDR_QUADLEN(fh_size); 77 84 /* NULL translates to ESTALE */ 78 85 if (fh_len < len || fh_type != len) 79 86 return NULL;
+16 -10
fs/nfs/flexfilelayout/flexfilelayout.c
··· 762 762 { 763 763 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); 764 764 struct nfs4_ff_layout_mirror *mirror; 765 - struct nfs4_pnfs_ds *ds; 765 + struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN); 766 766 u32 idx; 767 767 768 768 /* mirrors are initially sorted by efficiency */ 769 769 for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) { 770 770 mirror = FF_LAYOUT_COMP(lseg, idx); 771 771 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false); 772 - if (!ds) 772 + if (IS_ERR(ds)) 773 773 continue; 774 774 775 775 if (check_device && ··· 777 777 continue; 778 778 779 779 *best_idx = idx; 780 - return ds; 780 + break; 781 781 } 782 782 783 - return NULL; 783 + return ds; 784 784 } 785 785 786 786 static struct nfs4_pnfs_ds * ··· 942 942 for (i = 0; i < pgio->pg_mirror_count; i++) { 943 943 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i); 944 944 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true); 945 - if (!ds) { 945 + if (IS_ERR(ds)) { 946 946 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg)) 947 947 goto out_mds; 948 948 pnfs_generic_pg_cleanup(pgio); ··· 1867 1867 u32 idx = hdr->pgio_mirror_idx; 1868 1868 int vers; 1869 1869 struct nfs_fh *fh; 1870 + bool ds_fatal_error = false; 1870 1871 1871 1872 dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n", 1872 1873 __func__, hdr->inode->i_ino, ··· 1875 1874 1876 1875 mirror = FF_LAYOUT_COMP(lseg, idx); 1877 1876 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false); 1878 - if (!ds) 1877 + if (IS_ERR(ds)) { 1878 + ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds)); 1879 1879 goto out_failed; 1880 + } 1880 1881 1881 1882 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp, 1882 1883 hdr->inode); ··· 1926 1923 return PNFS_ATTEMPTED; 1927 1924 1928 1925 out_failed: 1929 - if (ff_layout_avoid_mds_available_ds(lseg)) 1926 + if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error) 1930 1927 return PNFS_TRY_AGAIN; 1931 1928 trace_pnfs_mds_fallback_read_pagelist(hdr->inode, 1932 1929 hdr->args.offset, hdr->args.count, ··· 1948 1945 int vers; 1949 1946 struct nfs_fh *fh; 1950 1947 u32 idx = hdr->pgio_mirror_idx; 1948 + bool ds_fatal_error = false; 1951 1949 1952 1950 mirror = FF_LAYOUT_COMP(lseg, idx); 1953 1951 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true); 1954 - if (!ds) 1952 + if (IS_ERR(ds)) { 1953 + ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds)); 1955 1954 goto out_failed; 1955 + } 1956 1956 1957 1957 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp, 1958 1958 hdr->inode); ··· 2006 2000 return PNFS_ATTEMPTED; 2007 2001 2008 2002 out_failed: 2009 - if (ff_layout_avoid_mds_available_ds(lseg)) 2003 + if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error) 2010 2004 return PNFS_TRY_AGAIN; 2011 2005 trace_pnfs_mds_fallback_write_pagelist(hdr->inode, 2012 2006 hdr->args.offset, hdr->args.count, ··· 2049 2043 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index); 2050 2044 mirror = FF_LAYOUT_COMP(lseg, idx); 2051 2045 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true); 2052 - if (!ds) 2046 + if (IS_ERR(ds)) 2053 2047 goto out_err; 2054 2048 2055 2049 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
+3 -3
fs/nfs/flexfilelayout/flexfilelayoutdev.c
··· 370 370 struct nfs4_ff_layout_mirror *mirror, 371 371 bool fail_return) 372 372 { 373 - struct nfs4_pnfs_ds *ds = NULL; 373 + struct nfs4_pnfs_ds *ds; 374 374 struct inode *ino = lseg->pls_layout->plh_inode; 375 375 struct nfs_server *s = NFS_SERVER(ino); 376 376 unsigned int max_payload; 377 - int status; 377 + int status = -EAGAIN; 378 378 379 379 if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror)) 380 380 goto noconnect; ··· 418 418 ff_layout_send_layouterror(lseg); 419 419 if (fail_return || !ff_layout_has_available_ds(lseg)) 420 420 pnfs_error_mark_layout_for_return(ino, lseg); 421 - ds = NULL; 421 + ds = ERR_PTR(status); 422 422 out: 423 423 return ds; 424 424 }
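These flexfiles hunks convert nfs4_ff_layout_prepare_ds() from returning NULL on failure to returning ERR_PTR()-encoded errnos, so callers can tell a fatal error (tested with nfs_error_is_fatal() in the hunks above) from a retryable one. The general kernel idiom, sketched with a placeholder type:

#include <linux/err.h>

struct demo_ds { int id; };     /* stand-in for the data-server struct */

/* Failure now carries an errno instead of a bare NULL. */
static struct demo_ds *demo_prepare_ds(void)
{
        struct demo_ds *ds = ERR_PTR(-EAGAIN); /* default: retryable */

        /* ... on success, point ds at a device; on a hard failure,
         * return ERR_PTR() with a fatal errno instead ... */
        return ds;
}

static int demo_caller(void)
{
        struct demo_ds *ds = demo_prepare_ds();

        if (IS_ERR(ds))
                return PTR_ERR(ds);     /* caller can test which errno */
        /* ... use ds ... */
        return 0;
}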
+42
fs/nfs/fs_context.c
··· 96 96 Opt_wsize, 97 97 Opt_write, 98 98 Opt_xprtsec, 99 + Opt_cert_serial, 100 + Opt_privkey_serial, 99 101 }; 100 102 101 103 enum { ··· 223 221 fsparam_enum ("write", Opt_write, nfs_param_enums_write), 224 222 fsparam_u32 ("wsize", Opt_wsize), 225 223 fsparam_string("xprtsec", Opt_xprtsec), 224 + fsparam_s32("cert_serial", Opt_cert_serial), 225 + fsparam_s32("privkey_serial", Opt_privkey_serial), 226 226 {} 227 227 }; 228 228 ··· 555 551 return 0; 556 552 } 557 553 554 + #ifdef CONFIG_KEYS 555 + static int nfs_tls_key_verify(key_serial_t key_id) 556 + { 557 + struct key *key = key_lookup(key_id); 558 + int error = 0; 559 + 560 + if (IS_ERR(key)) { 561 + pr_err("key id %08x not found\n", key_id); 562 + return PTR_ERR(key); 563 + } 564 + if (test_bit(KEY_FLAG_REVOKED, &key->flags) || 565 + test_bit(KEY_FLAG_INVALIDATED, &key->flags)) { 566 + pr_err("key id %08x revoked\n", key_id); 567 + error = -EKEYREVOKED; 568 + } 569 + 570 + key_put(key); 571 + return error; 572 + } 573 + #else 574 + static inline int nfs_tls_key_verify(key_serial_t key_id) 575 + { 576 + return -ENOENT; 577 + } 578 + #endif /* CONFIG_KEYS */ 579 + 558 580 /* 559 581 * Parse a single mount parameter. 560 582 */ ··· 836 806 ret = nfs_parse_xprtsec_policy(fc, param); 837 807 if (ret < 0) 838 808 return ret; 809 + break; 810 + case Opt_cert_serial: 811 + ret = nfs_tls_key_verify(result.int_32); 812 + if (ret < 0) 813 + return ret; 814 + ctx->xprtsec.cert_serial = result.int_32; 815 + break; 816 + case Opt_privkey_serial: 817 + ret = nfs_tls_key_verify(result.int_32); 818 + if (ret < 0) 819 + return ret; 820 + ctx->xprtsec.privkey_serial = result.int_32; 839 821 break; 840 822 841 823 case Opt_proto:
+64 -5
fs/nfs/inode.c
··· 197 197 if (!(flags & NFS_INO_REVAL_FORCED)) 198 198 flags &= ~(NFS_INO_INVALID_MODE | 199 199 NFS_INO_INVALID_OTHER | 200 + NFS_INO_INVALID_BTIME | 200 201 NFS_INO_INVALID_XATTR); 201 202 flags &= ~(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE); 202 203 } ··· 523 522 inode_set_atime(inode, 0, 0); 524 523 inode_set_mtime(inode, 0, 0); 525 524 inode_set_ctime(inode, 0, 0); 525 + memset(&nfsi->btime, 0, sizeof(nfsi->btime)); 526 526 inode_set_iversion_raw(inode, 0); 527 527 inode->i_size = 0; 528 528 clear_nlink(inode); ··· 547 545 inode_set_ctime_to_ts(inode, fattr->ctime); 548 546 else if (fattr_supported & NFS_ATTR_FATTR_CTIME) 549 547 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CTIME); 548 + if (fattr->valid & NFS_ATTR_FATTR_BTIME) 549 + nfsi->btime = fattr->btime; 550 + else if (fattr_supported & NFS_ATTR_FATTR_BTIME) 551 + nfs_set_cache_invalid(inode, NFS_INO_INVALID_BTIME); 550 552 if (fattr->valid & NFS_ATTR_FATTR_CHANGE) 551 553 inode_set_iversion_raw(inode, fattr->change_attr); 552 554 else ··· 937 931 938 932 static u32 nfs_get_valid_attrmask(struct inode *inode) 939 933 { 934 + u64 fattr_valid = NFS_SERVER(inode)->fattr_valid; 940 935 unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity); 941 936 u32 reply_mask = STATX_INO | STATX_TYPE; 942 937 ··· 957 950 reply_mask |= STATX_UID | STATX_GID; 958 951 if (!(cache_validity & NFS_INO_INVALID_BLOCKS)) 959 952 reply_mask |= STATX_BLOCKS; 953 + if (!(cache_validity & NFS_INO_INVALID_BTIME) && 954 + (fattr_valid & NFS_ATTR_FATTR_BTIME)) 955 + reply_mask |= STATX_BTIME; 960 956 if (!(cache_validity & NFS_INO_INVALID_CHANGE)) 961 957 reply_mask |= STATX_CHANGE_COOKIE; 962 958 return reply_mask; ··· 970 960 { 971 961 struct inode *inode = d_inode(path->dentry); 972 962 struct nfs_server *server = NFS_SERVER(inode); 963 + u64 fattr_valid = server->fattr_valid; 973 964 unsigned long cache_validity; 974 965 int err = 0; 975 966 bool force_sync = query_flags & AT_STATX_FORCE_SYNC; ··· 981 970 982 971 request_mask &= STATX_TYPE | STATX_MODE | STATX_NLINK | STATX_UID | 983 972 STATX_GID | STATX_ATIME | STATX_MTIME | STATX_CTIME | 984 - STATX_INO | STATX_SIZE | STATX_BLOCKS | 973 + STATX_INO | STATX_SIZE | STATX_BLOCKS | STATX_BTIME | 985 974 STATX_CHANGE_COOKIE; 975 + 976 + if (!(fattr_valid & NFS_ATTR_FATTR_BTIME)) 977 + request_mask &= ~STATX_BTIME; 986 978 987 979 if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync) { 988 980 if (readdirplus_enabled) ··· 1018 1004 /* Is the user requesting attributes that might need revalidation? */ 1019 1005 if (!(request_mask & (STATX_MODE|STATX_NLINK|STATX_ATIME|STATX_CTIME| 1020 1006 STATX_MTIME|STATX_UID|STATX_GID| 1021 - STATX_SIZE|STATX_BLOCKS| 1007 + STATX_SIZE|STATX_BLOCKS|STATX_BTIME| 1022 1008 STATX_CHANGE_COOKIE))) 1023 1009 goto out_no_revalidate; ··· 1042 1028 do_update |= cache_validity & NFS_INO_INVALID_OTHER; 1043 1029 if (request_mask & STATX_BLOCKS) 1044 1030 do_update |= cache_validity & NFS_INO_INVALID_BLOCKS; 1031 + if (request_mask & STATX_BTIME) 1032 + do_update |= cache_validity & NFS_INO_INVALID_BTIME; 1045 1033 1046 1034 if (do_update) { 1047 1035 if (readdirplus_enabled) ··· 1065 1049 stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC; 1066 1050 if (S_ISDIR(inode->i_mode)) 1067 1051 stat->blksize = NFS_SERVER(inode)->dtsize; 1052 + stat->btime = NFS_I(inode)->btime; 1068 1053 out: 1069 1054 trace_nfs_getattr_exit(inode, err); 1070 1055 return err; ··· 1960 1943 NFS_INO_INVALID_ATIME | NFS_INO_INVALID_CTIME | 1961 1944 NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE | 1962 1945 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_OTHER | 1963 - NFS_INO_INVALID_NLINK; 1946 + NFS_INO_INVALID_NLINK | NFS_INO_INVALID_BTIME; 1964 1947 unsigned long cache_validity = NFS_I(inode)->cache_validity; 1965 1948 enum nfs4_change_attr_type ctype = NFS_SERVER(inode)->change_attr_type; 1966 1949 ··· 2226 2209 bool attr_changed = false; 2227 2210 bool have_delegation; 2228 2211 2229 - dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n", 2212 + dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%llx)\n", 2230 2213 __func__, inode->i_sb->s_id, inode->i_ino, 2231 2214 nfs_display_fhandle_hash(NFS_FH(inode)), 2232 2215 atomic_read(&inode->i_count), fattr->valid); ··· 2321 2304 | NFS_INO_INVALID_BLOCKS 2322 2305 | NFS_INO_INVALID_NLINK 2323 2306 | NFS_INO_INVALID_MODE 2324 - | NFS_INO_INVALID_OTHER; 2307 + | NFS_INO_INVALID_OTHER 2308 + | NFS_INO_INVALID_BTIME; 2325 2309 if (S_ISDIR(inode->i_mode)) 2326 2310 nfs_force_lookup_revalidate(inode); 2327 2311 attr_changed = true; ··· 2355 2337 else if (fattr_supported & NFS_ATTR_FATTR_CTIME) 2356 2338 nfsi->cache_validity |= 2357 2339 save_cache_validity & NFS_INO_INVALID_CTIME; 2340 + 2341 + if (fattr->valid & NFS_ATTR_FATTR_BTIME) 2342 + nfsi->btime = fattr->btime; 2343 + else if (fattr_supported & NFS_ATTR_FATTR_BTIME) 2344 + nfsi->cache_validity |= 2345 + save_cache_validity & NFS_INO_INVALID_BTIME; 2358 2346 2359 2347 /* Check if our cached file size is stale */ 2360 2348 if (fattr->valid & NFS_ATTR_FATTR_SIZE) { ··· 2649 2625 .size = sizeof(struct nfs_net), 2650 2626 }; 2651 2627 2628 + #ifdef CONFIG_KEYS 2629 + static struct key *nfs_keyring; 2630 + 2631 + static int __init nfs_init_keyring(void) 2632 + { 2633 + nfs_keyring = keyring_alloc(".nfs", 2634 + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 2635 + current_cred(), 2636 + (KEY_POS_ALL & ~KEY_POS_SETATTR) | 2637 + (KEY_USR_ALL & ~KEY_USR_SETATTR), 2638 + KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); 2639 + return PTR_ERR_OR_ZERO(nfs_keyring); 2640 + } 2641 + 2642 + static void nfs_exit_keyring(void) 2643 + { 2644 + key_put(nfs_keyring); 2645 + } 2646 + #else 2647 + static inline int nfs_init_keyring(void) 2648 + { 2649 + return 0; 2650 + } 2651 + 2652 + static inline void nfs_exit_keyring(void) 2653 + { 2654 + } 2655 + #endif /* CONFIG_KEYS */ 2656 + 2652 2657 /* 2653 2658 * Initialize NFS 2654 2659 */ 2655 2660 static int __init init_nfs_fs(void) 2656 2661 { 2657 2662 int err; 2663 + 2664 + err = nfs_init_keyring(); 2665 + if (err) 2666 + return err; 2658 2667 2659 2668 err = nfs_sysfs_init(); 2660 2669 if (err < 0) ··· 2749 2692 out9: 2750 2693 nfs_sysfs_exit(); 2751 2694 out10: 2695 + nfs_exit_keyring(); 2752 2696 return err; 2753 2697 } ··· 2765 2707 nfs_fs_proc_exit(); 2766 2708 nfsiod_stop(); 2767 2709 nfs_sysfs_exit(); 2710 + nfs_exit_keyring(); 2768 2711 } 2769 2712 2770 2713 /* Not quite true; I just maintain it */
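The inode.c changes cache the server-reported birth time in nfsi->btime, track its validity with the new NFS_INO_INVALID_BTIME flag, and report it through the standard statx(2) interface when the server supports the attribute. A quick userspace check; it works against any filesystem, and NFS simply starts filling the field in when the server provides it:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : ".";
        struct statx stx;

        if (statx(AT_FDCWD, path, 0, STATX_BTIME, &stx) != 0) {
                perror("statx");
                return 1;
        }
        if (stx.stx_mask & STATX_BTIME)         /* only set if supported */
                printf("%s btime: %lld.%09u\n", path,
                       (long long)stx.stx_btime.tv_sec,
                       stx.stx_btime.tv_nsec);
        else
                printf("%s: btime not reported\n", path);
        return 0;
}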
+7 -5
fs/nfs/internal.h
··· 207 207 }; 208 208 209 209 extern int nfs_mount(struct nfs_mount_request *info, int timeo, int retrans); 210 - extern void nfs_umount(const struct nfs_mount_request *info); 211 210 212 211 /* client.c */ 213 212 extern const struct rpc_program nfs_program; ··· 231 232 nfs4_find_client_sessionid(struct net *, const struct sockaddr *, 232 233 struct nfs4_sessionid *, u32); 233 234 extern struct nfs_server *nfs_create_server(struct fs_context *); 234 - extern void nfs4_server_set_init_caps(struct nfs_server *); 235 + extern void nfs_server_set_init_caps(struct nfs_server *); 235 236 extern struct nfs_server *nfs4_create_server(struct fs_context *); 236 237 extern struct nfs_server *nfs4_create_referral_server(struct fs_context *); 237 238 extern int nfs4_update_server(struct nfs_server *server, const char *hostname, ··· 670 671 671 672 static inline gfp_t nfs_io_gfp_mask(void) 672 673 { 673 - if (current->flags & PF_WQ_WORKER) 674 - return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; 675 - return GFP_KERNEL; 674 + gfp_t ret = current_gfp_context(GFP_KERNEL); 675 + 676 + /* For workers __GFP_NORETRY only with __GFP_IO or __GFP_FS */ 677 + if ((current->flags & PF_WQ_WORKER) && ret == GFP_KERNEL) 678 + ret |= __GFP_NORETRY | __GFP_NOWARN; 679 + return ret; 676 680 } 677 681 678 682 /*
+3 -4
fs/nfs/localio.c
··· 500 500 { 501 501 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 502 502 u32 *verf = (u32 *)verifier->data; 503 - int seq = 0; 503 + unsigned int seq; 504 504 505 505 do { 506 - read_seqbegin_or_lock(&clp->cl_boot_lock, &seq); 506 + seq = read_seqbegin(&clp->cl_boot_lock); 507 507 verf[0] = (u32)clp->cl_nfssvc_boot.tv_sec; 508 508 verf[1] = (u32)clp->cl_nfssvc_boot.tv_nsec; 509 - } while (need_seqretry(&clp->cl_boot_lock, seq)); 510 - done_seqretry(&clp->cl_boot_lock, seq); 509 + } while (read_seqretry(&clp->cl_boot_lock, seq)); 511 510 } 512 511 513 512 static void
-68
fs/nfs/mount_clnt.c
··· 223 223 goto out; 224 224 } 225 225 226 - /** 227 - * nfs_umount - Notify a server that we have unmounted this export 228 - * @info: pointer to umount request arguments 229 - * 230 - * MOUNTPROC_UMNT is advisory, so we set a short timeout, and always 231 - * use UDP. 232 - */ 233 - void nfs_umount(const struct nfs_mount_request *info) 234 - { 235 - static const struct rpc_timeout nfs_umnt_timeout = { 236 - .to_initval = 1 * HZ, 237 - .to_maxval = 3 * HZ, 238 - .to_retries = 2, 239 - }; 240 - struct rpc_create_args args = { 241 - .net = info->net, 242 - .protocol = IPPROTO_UDP, 243 - .address = (struct sockaddr *)info->sap, 244 - .addrsize = info->salen, 245 - .timeout = &nfs_umnt_timeout, 246 - .servername = info->hostname, 247 - .program = &mnt_program, 248 - .version = info->version, 249 - .authflavor = RPC_AUTH_UNIX, 250 - .flags = RPC_CLNT_CREATE_NOPING, 251 - .cred = current_cred(), 252 - }; 253 - struct rpc_message msg = { 254 - .rpc_argp = info->dirpath, 255 - }; 256 - struct rpc_clnt *clnt; 257 - int status; 258 - 259 - if (strlen(info->dirpath) > MNTPATHLEN) 260 - return; 261 - 262 - if (info->noresvport) 263 - args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; 264 - 265 - clnt = rpc_create(&args); 266 - if (IS_ERR(clnt)) 267 - goto out_clnt_err; 268 - 269 - dprintk("NFS: sending UMNT request for %s:%s\n", 270 - (info->hostname ? info->hostname : "server"), info->dirpath); 271 - 272 - if (info->version == NFS_MNT3_VERSION) 273 - msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC3_UMNT]; 274 - else 275 - msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC_UMNT]; 276 - 277 - status = rpc_call_sync(clnt, &msg, 0); 278 - rpc_shutdown_client(clnt); 279 - 280 - if (unlikely(status < 0)) 281 - goto out_call_err; 282 - 283 - return; 284 - 285 - out_clnt_err: 286 - dprintk("NFS: failed to create UMNT RPC client, status=%ld\n", 287 - PTR_ERR(clnt)); 288 - return; 289 - 290 - out_call_err: 291 - dprintk("NFS: UMNT request failed, status=%d\n", status); 292 - } 293 - 294 226 /* 295 227 * XDR encode/decode functions for MOUNT 296 228 */
+3 -2
fs/nfs/nfs4_fs.h
··· 63 63 bool (*match_stateid)(const nfs4_stateid *, 64 64 const nfs4_stateid *); 65 65 int (*find_root_sec)(struct nfs_server *, struct nfs_fh *, 66 - struct nfs_fsinfo *); 66 + struct nfs_fattr *); 67 67 void (*free_lock_state)(struct nfs_server *, 68 68 struct nfs4_lock_state *); 69 69 int (*test_and_free_expired)(struct nfs_server *, ··· 296 296 extern void nfs4_init_sequence(struct nfs4_sequence_args *, struct nfs4_sequence_res *, int, int); 297 297 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, const struct cred *, struct nfs4_setclientid_res *); 298 298 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, const struct cred *); 299 - extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *, bool); 299 + extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, 300 + struct nfs_fattr *, bool); 300 301 extern int nfs4_proc_bind_conn_to_session(struct nfs_client *, const struct cred *cred); 301 302 extern int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred); 302 303 extern int nfs4_destroy_clientid(struct nfs_client *clp);
+78 -107
fs/nfs/nfs4client.c
··· 802 802 unset_pnfs_layoutdriver(server); 803 803 nfs4_purge_state_owners(server, &freeme); 804 804 nfs4_free_state_owners(&freeme); 805 + kfree(server->delegation_hash_table); 805 806 } 806 807 807 808 /* ··· 896 895 * Set up an NFS4 client 897 896 */ 898 897 static int nfs4_set_client(struct nfs_server *server, 899 - const char *hostname, 900 - const struct sockaddr_storage *addr, 901 - const size_t addrlen, 902 - const char *ip_addr, 903 - int proto, const struct rpc_timeout *timeparms, 904 - u32 minorversion, unsigned int nconnect, 905 - unsigned int max_connect, 906 - struct net *net, 907 - struct xprtsec_parms *xprtsec) 898 + struct nfs_client_initdata *cl_init) 908 899 { 909 - struct nfs_client_initdata cl_init = { 910 - .hostname = hostname, 911 - .addr = addr, 912 - .addrlen = addrlen, 913 - .ip_addr = ip_addr, 914 - .nfs_mod = &nfs_v4, 915 - .proto = proto, 916 - .minorversion = minorversion, 917 - .net = net, 918 - .timeparms = timeparms, 919 - .cred = server->cred, 920 - .xprtsec = *xprtsec, 921 - }; 922 900 struct nfs_client *clp; 923 901 924 - if (minorversion == 0) 925 - __set_bit(NFS_CS_REUSEPORT, &cl_init.init_flags); 926 - else 927 - cl_init.max_connect = max_connect; 928 - switch (proto) { 902 + cl_init->nfs_mod = &nfs_v4; 903 + cl_init->cred = server->cred; 904 + 905 + if (cl_init->minorversion == 0) { 906 + __set_bit(NFS_CS_REUSEPORT, &cl_init->init_flags); 907 + cl_init->max_connect = 0; 908 + } 909 + 910 + switch (cl_init->proto) { 929 911 case XPRT_TRANSPORT_RDMA: 930 912 case XPRT_TRANSPORT_TCP: 931 913 case XPRT_TRANSPORT_TCP_TLS: 932 - cl_init.nconnect = nconnect; 914 + break; 915 + default: 916 + cl_init->nconnect = 0; 933 917 } 934 918 935 919 if (server->flags & NFS_MOUNT_NORESVPORT) 936 - __set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags); 920 + __set_bit(NFS_CS_NORESVPORT, &cl_init->init_flags); 937 921 if (server->options & NFS_OPTION_MIGRATION) 938 - __set_bit(NFS_CS_MIGRATION, &cl_init.init_flags); 922 + __set_bit(NFS_CS_MIGRATION, &cl_init->init_flags); 939 923 if (test_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status)) 940 - __set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags); 924 + __set_bit(NFS_CS_TSM_POSSIBLE, &cl_init->init_flags); 941 925 server->port = rpc_get_port((struct sockaddr *)cl_init->addr); 942 926 943 927 if (server->flags & NFS_MOUNT_NETUNREACH_FATAL) 944 - __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags); 928 + __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init->init_flags); 945 929 946 930 /* Allocate or find a client reference we can use */ 947 - clp = nfs_get_client(&cl_init); 931 + clp = nfs_get_client(cl_init); 948 932 if (IS_ERR(clp)) 949 933 return PTR_ERR(clp); 950 934 ··· 1074 1088 #endif 1075 1089 } 1076 1090 1077 - void nfs4_server_set_init_caps(struct nfs_server *server) 1078 - { 1079 - /* Set the basic capabilities */ 1080 - server->caps |= server->nfs_client->cl_mvops->init_caps; 1081 - if (server->flags & NFS_MOUNT_NORDIRPLUS) 1082 - server->caps &= ~NFS_CAP_READDIRPLUS; 1083 - if (server->nfs_client->cl_proto == XPRT_TRANSPORT_RDMA) 1084 - server->caps &= ~NFS_CAP_READ_PLUS; 1085 - 1086 - /* 1087 - * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower 1088 - * authentication. 1089 - */ 1090 - if (nfs4_disable_idmapping && 1091 - server->client->cl_auth->au_flavor == RPC_AUTH_UNIX) 1092 - server->caps |= NFS_CAP_UIDGID_NOMAP; 1093 - } 1094 - 1095 1091 static int nfs4_server_common_setup(struct nfs_server *server, 1096 1092 struct nfs_fh *mntfh, bool auth_probe) 1097 1093 { 1098 1094 int error; 1095 + 1096 + error = nfs4_delegation_hash_alloc(server); 1097 + if (error) 1098 + return error; 1099 1099 1100 1100 /* data servers support only a subset of NFSv4.1 */ 1101 1101 if (is_ds_only_client(server->nfs_client)) ··· 1090 1118 /* We must ensure the session is initialised first */ 1091 1119 error = nfs4_init_session(server->nfs_client); 1092 1120 if (error < 0) 1093 - goto out; 1121 + return error; 1094 1122 1095 - nfs4_server_set_init_caps(server); 1123 + nfs_server_set_init_caps(server); 1096 1124 1097 1125 /* Probe the root fh to retrieve its FSID and filehandle */ 1098 1126 error = nfs4_get_rootfh(server, mntfh, auth_probe); 1099 1127 if (error < 0) 1100 - goto out; 1128 + return error; 1101 1129 1102 1130 dprintk("Server FSID: %llx:%llx\n", 1103 1131 (unsigned long long) server->fsid.major, ··· 1106 1134 1107 1135 error = nfs_probe_server(server, mntfh); 1108 1136 if (error < 0) 1109 - goto out; 1137 + return error; 1110 1138 1111 1139 nfs4_session_limit_rwsize(server); 1112 1140 nfs4_session_limit_xasize(server); ··· 1117 1145 nfs_server_insert_lists(server); 1118 1146 server->mount_time = jiffies; 1119 1147 server->destroy = nfs4_destroy_server; 1120 - out: 1121 - return error; 1148 + return 0; 1122 1149 } 1123 1150 1124 1151 /* ··· 1127 1156 { 1128 1157 struct nfs_fs_context *ctx = nfs_fc2context(fc); 1129 1158 struct rpc_timeout timeparms; 1159 + struct nfs_client_initdata cl_init = { 1160 + .hostname = ctx->nfs_server.hostname, 1161 + .addr = &ctx->nfs_server._address, 1162 + .addrlen = ctx->nfs_server.addrlen, 1163 + .ip_addr = ctx->client_address, 1164 + .proto = ctx->nfs_server.protocol, 1165 + .minorversion = ctx->minorversion, 1166 + .net = fc->net_ns, 1167 + .timeparms = &timeparms, 1168 + .xprtsec = ctx->xprtsec, 1169 + .nconnect = ctx->nfs_server.nconnect, 1170 + .max_connect = ctx->nfs_server.max_connect, 1171 + }; 1130 1172 int error; 1131 1173 1132 1174 nfs_init_timeout_values(&timeparms, ctx->nfs_server.protocol, ··· 1159 1175 ctx->selected_flavor = RPC_AUTH_UNIX; 1160 1176 1161 1177 /* Get a client record */ 1162 - error = nfs4_set_client(server, 1163 - ctx->nfs_server.hostname, 1164 - &ctx->nfs_server._address, 1165 - ctx->nfs_server.addrlen, 1166 - ctx->client_address, 1167 - ctx->nfs_server.protocol, 1168 - &timeparms, 1169 - ctx->minorversion, 1170 - ctx->nfs_server.nconnect, 1171 - ctx->nfs_server.max_connect, 1172 - fc->net_ns, 1173 - &ctx->xprtsec); 1178 + error = nfs4_set_client(server, &cl_init); 1174 1179 if (error < 0) 1175 1180 return error; 1176 1181 ··· 1219 1246 struct nfs_server *nfs4_create_referral_server(struct fs_context *fc) 1220 1247 { 1221 1248 struct nfs_fs_context *ctx = nfs_fc2context(fc); 1222 - struct nfs_client *parent_client; 1223 - struct nfs_server *server, *parent_server; 1224 - int proto, error; 1249 + struct nfs_server *parent_server = NFS_SB(ctx->clone_data.sb); 1250 + struct nfs_client *parent_client = parent_server->nfs_client; 1251 + struct nfs_client_initdata cl_init = { 1252 + .hostname = ctx->nfs_server.hostname, 1253 + .addr = &ctx->nfs_server._address, 1254 + .addrlen = ctx->nfs_server.addrlen, 1255 + .ip_addr = parent_client->cl_ipaddr, 1256 + .minorversion = parent_client->cl_mvops->minor_version,
1257 + .net = parent_client->cl_net, 1258 + .timeparms = parent_server->client->cl_timeout, 1259 + .xprtsec = parent_client->cl_xprtsec, 1260 + .nconnect = parent_client->cl_nconnect, 1261 + .max_connect = parent_client->cl_max_connect, 1262 + }; 1263 + struct nfs_server *server; 1225 1264 bool auth_probe; 1265 + int error; 1226 1266 1227 1267 server = nfs_alloc_server(); 1228 1268 if (!server) 1229 1269 return ERR_PTR(-ENOMEM); 1230 - 1231 - parent_server = NFS_SB(ctx->clone_data.sb); 1232 - parent_client = parent_server->nfs_client; 1233 1270 1234 1271 server->cred = get_cred(parent_server->cred); 1235 1272 ··· 1249 1266 /* Get a client representation */ 1250 1267 #if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) 1251 1268 rpc_set_port(&ctx->nfs_server.address, NFS_RDMA_PORT); 1252 - error = nfs4_set_client(server, 1253 - ctx->nfs_server.hostname, 1254 - &ctx->nfs_server._address, 1255 - ctx->nfs_server.addrlen, 1256 - parent_client->cl_ipaddr, 1257 - XPRT_TRANSPORT_RDMA, 1258 - parent_server->client->cl_timeout, 1259 - parent_client->cl_mvops->minor_version, 1260 - parent_client->cl_nconnect, 1261 - parent_client->cl_max_connect, 1262 - parent_client->cl_net, 1263 - &parent_client->cl_xprtsec); 1269 + cl_init.proto = XPRT_TRANSPORT_RDMA; 1270 + error = nfs4_set_client(server, &cl_init); 1264 1271 if (!error) 1265 1272 goto init_server; 1266 1273 #endif /* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */ 1267 1274 1268 - proto = XPRT_TRANSPORT_TCP; 1275 + cl_init.proto = XPRT_TRANSPORT_TCP; 1269 1276 if (parent_client->cl_xprtsec.policy != RPC_XPRTSEC_NONE) 1270 - proto = XPRT_TRANSPORT_TCP_TLS; 1277 + cl_init.proto = XPRT_TRANSPORT_TCP_TLS; 1271 1278 rpc_set_port(&ctx->nfs_server.address, NFS_PORT); 1272 - error = nfs4_set_client(server, 1273 - ctx->nfs_server.hostname, 1274 - &ctx->nfs_server._address, 1275 - ctx->nfs_server.addrlen, 1276 - parent_client->cl_ipaddr, 1277 - proto, 1278 - parent_server->client->cl_timeout, 1279 - parent_client->cl_mvops->minor_version, 1280 - parent_client->cl_nconnect, 1281 - parent_client->cl_max_connect, 1282 - parent_client->cl_net, 1283 - &parent_client->cl_xprtsec); 1279 + error = nfs4_set_client(server, &cl_init); 1284 1280 if (error < 0) 1285 1281 goto error; 1286 1282 ··· 1315 1353 char buf[INET6_ADDRSTRLEN + 1]; 1316 1354 struct sockaddr_storage address; 1317 1355 struct sockaddr *localaddr = (struct sockaddr *)&address; 1356 + struct nfs_client_initdata cl_init = { 1357 + .hostname = hostname, 1358 + .addr = sap, 1359 + .addrlen = salen, 1360 + .ip_addr = buf, 1361 + .proto = clp->cl_proto, 1362 + .minorversion = clp->cl_minorversion, 1363 + .net = net, 1364 + .timeparms = clnt->cl_timeout, 1365 + .xprtsec = clp->cl_xprtsec, 1366 + .nconnect = clp->cl_nconnect, 1367 + .max_connect = clp->cl_max_connect, 1368 + }; 1318 1369 int error; 1319 1370 1320 1371 error = rpc_switch_client_transport(clnt, &xargs, clnt->cl_timeout); ··· 1343 1368 1344 1369 nfs_server_remove_lists(server); 1345 1370 set_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status); 1346 - error = nfs4_set_client(server, hostname, sap, salen, buf, 1347 - clp->cl_proto, clnt->cl_timeout, 1348 - clp->cl_minorversion, 1349 - clp->cl_nconnect, clp->cl_max_connect, 1350 - net, &clp->cl_xprtsec); 1371 + error = nfs4_set_client(server, &cl_init); 1351 1372 clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status); 1352 1373 if (error != 0) { 1353 1374 nfs_server_insert_lists(server);
+2 -23
fs/nfs/nfs4file.c
··· 253 253 struct nfs_server *server = NFS_SERVER(dst_inode); 254 254 struct inode *src_inode = file_inode(src_file); 255 255 unsigned int bs = server->clone_blksize; 256 - bool same_inode = false; 257 256 int ret; 258 257 259 258 /* NFS does not support deduplication. */ ··· 274 275 goto out; 275 276 } 276 277 277 - if (src_inode == dst_inode) 278 - same_inode = true; 279 - 280 278 /* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */ 281 - if (same_inode) { 282 - inode_lock(src_inode); 283 - } else if (dst_inode < src_inode) { 284 - inode_lock_nested(dst_inode, I_MUTEX_PARENT); 285 - inode_lock_nested(src_inode, I_MUTEX_CHILD); 286 - } else { 287 - inode_lock_nested(src_inode, I_MUTEX_PARENT); 288 - inode_lock_nested(dst_inode, I_MUTEX_CHILD); 289 - } 290 - 279 + lock_two_nondirectories(src_inode, dst_inode); 291 280 /* flush all pending writes on both src and dst so that server 292 281 * has the latest data */ 293 282 ret = nfs_sync_inode(src_inode); ··· 293 306 truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1); 294 307 295 308 out_unlock: 296 - if (same_inode) { 297 - inode_unlock(src_inode); 298 - } else if (dst_inode < src_inode) { 299 - inode_unlock(src_inode); 300 - inode_unlock(dst_inode); 301 - } else { 302 - inode_unlock(dst_inode); 303 - inode_unlock(src_inode); 304 - } 309 + unlock_two_nondirectories(src_inode, dst_inode); 305 310 out: 306 311 return ret < 0 ? ret : count; 307 312 }
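The hunk above swaps the hand-rolled, address-ordered inode locking for the VFS helper pair, which handles both the src == dst case and the lock ordering of two distinct inodes internally. A minimal sketch of the resulting pattern; do_transfer() is a hypothetical stand-in for the clone work, not an NFS function:

#include <linux/fs.h>

/* Hypothetical worker standing in for the actual copy/clone logic. */
static int do_transfer(struct inode *src, struct inode *dst)
{
	return 0;
}

/* lock_two_nondirectories() locks in a stable order and only takes the
 * lock once when src == dst, which is exactly the case analysis the
 * removed code performed by hand. */
static int transfer_locked(struct inode *src, struct inode *dst)
{
	int ret;

	lock_two_nondirectories(src, dst);
	ret = do_transfer(src, dst);
	unlock_two_nondirectories(src, dst);
	return ret;
}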
+6 -8
fs/nfs/nfs4getroot.c
··· 12 12 13 13 int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh, bool auth_probe) 14 14 { 15 - struct nfs_fsinfo fsinfo; 15 + struct nfs_fattr *fattr = nfs_alloc_fattr(); 16 16 int ret = -ENOMEM; 17 17 18 - fsinfo.fattr = nfs_alloc_fattr(); 19 - if (fsinfo.fattr == NULL) 18 + if (fattr == NULL) 20 19 goto out; 21 20 22 21 /* Start by getting the root filehandle from the server */ 23 - ret = nfs4_proc_get_rootfh(server, mntfh, &fsinfo, auth_probe); 22 + ret = nfs4_proc_get_rootfh(server, mntfh, fattr, auth_probe); 24 23 if (ret < 0) { 25 24 dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret); 26 25 goto out; 27 26 } 28 27 29 - if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_TYPE) 30 - || !S_ISDIR(fsinfo.fattr->mode)) { 28 + if (!(fattr->valid & NFS_ATTR_FATTR_TYPE) || !S_ISDIR(fattr->mode)) { 31 29 printk(KERN_ERR "nfs4_get_rootfh:" 32 30 " getroot encountered non-directory\n"); 33 31 ret = -ENOTDIR; 34 32 goto out; 35 33 } 36 34 37 - memcpy(&server->fsid, &fsinfo.fattr->fsid, sizeof(server->fsid)); 35 + memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 38 36 out: 39 - nfs_free_fattr(fsinfo.fattr); 37 + nfs_free_fattr(fattr); 40 38 return ret; 41 39 }
+84 -55
fs/nfs/nfs4proc.c
··· 222 222 | FATTR4_WORD1_RAWDEV 223 223 | FATTR4_WORD1_SPACE_USED 224 224 | FATTR4_WORD1_TIME_ACCESS 225 + | FATTR4_WORD1_TIME_CREATE 225 226 | FATTR4_WORD1_TIME_METADATA 226 227 | FATTR4_WORD1_TIME_MODIFY 227 228 | FATTR4_WORD1_MOUNTED_ON_FILEID, ··· 244 243 | FATTR4_WORD1_RAWDEV 245 244 | FATTR4_WORD1_SPACE_USED 246 245 | FATTR4_WORD1_TIME_ACCESS 246 + | FATTR4_WORD1_TIME_CREATE 247 247 | FATTR4_WORD1_TIME_METADATA 248 248 | FATTR4_WORD1_TIME_MODIFY, 249 249 FATTR4_WORD2_MDSTHRESHOLD ··· 324 322 dst[1] &= ~FATTR4_WORD1_MODE; 325 323 if (!(cache_validity & NFS_INO_INVALID_OTHER)) 326 324 dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP); 325 + 326 + if (!(cache_validity & NFS_INO_INVALID_BTIME)) 327 + dst[1] &= ~FATTR4_WORD1_TIME_CREATE; 327 328 328 329 if (nfs_have_delegated_mtime(inode)) { 329 330 if (!(cache_validity & NFS_INO_INVALID_ATIME)) ··· 1312 1307 NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | 1313 1308 NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER | 1314 1309 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK | 1315 - NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR; 1310 + NFS_INO_INVALID_MODE | NFS_INO_INVALID_BTIME | 1311 + NFS_INO_INVALID_XATTR; 1316 1312 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); 1317 1313 } 1318 1314 nfsi->attrtimeo_timestamp = jiffies; ··· 4053 4047 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME; 4054 4048 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)) 4055 4049 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME; 4050 + if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)) 4051 + server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME; 4052 + if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_CREATE)) 4053 + server->fattr_valid &= ~NFS_ATTR_FATTR_BTIME; 4056 4054 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 4057 4055 sizeof(server->attr_bitmask)); 4058 4056 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; ··· 4092 4082 }; 4093 4083 int err; 4094 4084 4095 - nfs4_server_set_init_caps(server); 4085 + nfs_server_set_init_caps(server); 4096 4086 do { 4097 4087 err = nfs4_handle_exception(server, 4098 4088 _nfs4_server_capabilities(server, fhandle), ··· 4240 4230 } 4241 4231 4242 4232 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4243 - struct nfs_fsinfo *info) 4233 + struct nfs_fattr *fattr) 4244 4234 { 4245 - u32 bitmask[3]; 4235 + u32 bitmask[3] = { 4236 + [0] = FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | 4237 + FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID, 4238 + }; 4246 4239 struct nfs4_lookup_root_arg args = { 4247 4240 .bitmask = bitmask, 4248 4241 }; 4249 4242 struct nfs4_lookup_res res = { 4250 4243 .server = server, 4251 - .fattr = info->fattr, 4244 + .fattr = fattr, 4252 4245 .fh = fhandle, 4253 4246 }; 4254 4247 struct rpc_message msg = { ··· 4260 4247 .rpc_resp = &res, 4261 4248 }; 4262 4249 4263 - bitmask[0] = nfs4_fattr_bitmap[0]; 4264 - bitmask[1] = nfs4_fattr_bitmap[1]; 4265 - /* 4266 - * Process the label in the upcoming getfattr 4267 - */ 4268 - bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 4269 - 4270 - nfs_fattr_init(info->fattr); 4250 + nfs_fattr_init(fattr); 4271 4251 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4272 4252 } 4273 4253 4274 4254 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4275 - struct nfs_fsinfo *info) 4255 + struct nfs_fattr *fattr) 4276 4256 { 4277 4257 struct nfs4_exception exception = { 4278 4258 .interruptible = true, 4279 4259 }; 4280 4260 int err; 4281 4261 do { 4282 - err = 
_nfs4_lookup_root(server, fhandle, info); 4283 - trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 4262 + err = _nfs4_lookup_root(server, fhandle, fattr); 4263 + trace_nfs4_lookup_root(server, fhandle, fattr, err); 4284 4264 switch (err) { 4285 4265 case 0: 4286 4266 case -NFS4ERR_WRONGSEC: ··· 4286 4280 return err; 4287 4281 } 4288 4282 4289 - static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4290 - struct nfs_fsinfo *info, rpc_authflavor_t flavor) 4283 + static int nfs4_lookup_root_sec(struct nfs_server *server, 4284 + struct nfs_fh *fhandle, struct nfs_fattr *fattr, 4285 + rpc_authflavor_t flavor) 4291 4286 { 4292 4287 struct rpc_auth_create_args auth_args = { 4293 4288 .pseudoflavor = flavor, ··· 4298 4291 auth = rpcauth_create(&auth_args, server->client); 4299 4292 if (IS_ERR(auth)) 4300 4293 return -EACCES; 4301 - return nfs4_lookup_root(server, fhandle, info); 4294 + return nfs4_lookup_root(server, fhandle, fattr); 4302 4295 } 4303 4296 4304 4297 /* ··· 4311 4304 * negative errno value. 4312 4305 */ 4313 4306 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4314 - struct nfs_fsinfo *info) 4307 + struct nfs_fattr *fattr) 4315 4308 { 4316 4309 /* Per 3530bis 15.33.5 */ 4317 4310 static const rpc_authflavor_t flav_array[] = { ··· 4327 4320 if (server->auth_info.flavor_len > 0) { 4328 4321 /* try each flavor specified by user */ 4329 4322 for (i = 0; i < server->auth_info.flavor_len; i++) { 4330 - status = nfs4_lookup_root_sec(server, fhandle, info, 4331 - server->auth_info.flavors[i]); 4323 + status = nfs4_lookup_root_sec( 4324 + server, fhandle, fattr, 4325 + server->auth_info.flavors[i]); 4332 4326 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4333 4327 continue; 4334 4328 break; ··· 4337 4329 } else { 4338 4330 /* no flavors specified by user, try default list */ 4339 4331 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 4340 - status = nfs4_lookup_root_sec(server, fhandle, info, 4332 + status = nfs4_lookup_root_sec(server, fhandle, fattr, 4341 4333 flav_array[i]); 4342 4334 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4343 4335 continue; ··· 4361 4353 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 4362 4354 * @server: initialized nfs_server handle 4363 4355 * @fhandle: we fill in the pseudo-fs root file handle 4364 - * @info: we fill in an FSINFO struct 4356 + * @fattr: we fill in a bare bones struct fattr 4365 4357 * @auth_probe: probe the auth flavours 4366 4358 * 4367 4359 * Returns zero on success, or a negative errno. 
4368 4360 */ 4369 4361 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 4370 - struct nfs_fsinfo *info, 4371 - bool auth_probe) 4362 + struct nfs_fattr *fattr, bool auth_probe) 4372 4363 { 4373 4364 int status = 0; 4374 4365 4375 4366 if (!auth_probe) 4376 - status = nfs4_lookup_root(server, fhandle, info); 4367 + status = nfs4_lookup_root(server, fhandle, fattr); 4377 4368 4378 4369 if (auth_probe || status == NFS4ERR_WRONGSEC) 4379 - status = server->nfs_client->cl_mvops->find_root_sec(server, 4380 - fhandle, info); 4381 - 4382 - if (status == 0) 4383 - status = nfs4_server_capabilities(server, fhandle); 4384 - if (status == 0) 4385 - status = nfs4_do_fsinfo(server, fhandle, info); 4370 + status = server->nfs_client->cl_mvops->find_root_sec( 4371 + server, fhandle, fattr); 4386 4372 4387 4373 return nfs4_map_errors(status); 4388 4374 } ··· 5783 5781 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; 5784 5782 if (cache_validity & NFS_INO_INVALID_BLOCKS) 5785 5783 bitmask[1] |= FATTR4_WORD1_SPACE_USED; 5784 + if (cache_validity & NFS_INO_INVALID_BTIME) 5785 + bitmask[1] |= FATTR4_WORD1_TIME_CREATE; 5786 5786 5787 5787 if (cache_validity & NFS_INO_INVALID_SIZE) 5788 5788 bitmask[0] |= FATTR4_WORD0_SIZE; ··· 10343 10339 * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if 10344 10340 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 10345 10341 */ 10346 - static int 10347 - _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 10348 - struct nfs_fsinfo *info, 10349 - struct nfs4_secinfo_flavors *flavors, bool use_integrity) 10342 + static int _nfs41_proc_secinfo_no_name(struct nfs_server *server, 10343 + struct nfs_fh *fhandle, 10344 + struct nfs4_secinfo_flavors *flavors, 10345 + bool use_integrity) 10350 10346 { 10351 10347 struct nfs41_secinfo_no_name_args args = { 10352 10348 .style = SECINFO_STYLE_CURRENT_FH, ··· 10390 10386 return status; 10391 10387 } 10392 10388 10393 - static int 10394 - nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 10395 - struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 10389 + static int nfs41_proc_secinfo_no_name(struct nfs_server *server, 10390 + struct nfs_fh *fhandle, 10391 + struct nfs4_secinfo_flavors *flavors) 10396 10392 { 10397 10393 struct nfs4_exception exception = { 10398 10394 .interruptible = true, ··· 10404 10400 10405 10401 /* try to use integrity protection with machine cred */ 10406 10402 if (_nfs4_is_integrity_protected(server->nfs_client)) 10407 - err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 10403 + err = _nfs41_proc_secinfo_no_name(server, fhandle, 10408 10404 flavors, true); 10409 10405 10410 10406 /* ··· 10414 10410 * the current filesystem's rpc_client and the user cred. 
10415 10411 */ 10416 10412 if (err == -NFS4ERR_WRONGSEC) 10417 - err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 10413 + err = _nfs41_proc_secinfo_no_name(server, fhandle, 10418 10414 flavors, false); 10419 10415 10420 10416 switch (err) { ··· 10430 10426 return err; 10431 10427 } 10432 10428 10433 - static int 10434 - nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 10435 - struct nfs_fsinfo *info) 10429 + static int nfs41_find_root_sec(struct nfs_server *server, 10430 + struct nfs_fh *fhandle, struct nfs_fattr *fattr) 10436 10431 { 10437 10432 int err; 10438 10433 struct page *page; ··· 10447 10444 } 10448 10445 10449 10446 flavors = page_address(page); 10450 - err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 10447 + err = nfs41_proc_secinfo_no_name(server, fhandle, flavors); 10451 10448 10452 10449 /* 10453 10450 * Fall back on "guess and check" method if 10454 10451 * the server doesn't support SECINFO_NO_NAME 10455 10452 */ 10456 10453 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 10457 - err = nfs4_find_root_sec(server, fhandle, info); 10454 + err = nfs4_find_root_sec(server, fhandle, fattr); 10458 10455 goto out_freepage; 10459 10456 } 10460 10457 if (err) ··· 10479 10476 flavor = RPC_AUTH_MAXFLAVOR; 10480 10477 10481 10478 if (flavor != RPC_AUTH_MAXFLAVOR) { 10482 - err = nfs4_lookup_root_sec(server, fhandle, 10483 - info, flavor); 10479 + err = nfs4_lookup_root_sec(server, fhandle, fattr, 10480 + flavor); 10484 10481 if (!err) 10485 10482 break; 10486 10483 } ··· 10683 10680 static bool nfs41_match_stateid(const nfs4_stateid *s1, 10684 10681 const nfs4_stateid *s2) 10685 10682 { 10683 + trace_nfs41_match_stateid(s1, s2); 10684 + 10686 10685 if (s1->type != s2->type) 10687 10686 return false; 10688 10687 ··· 10702 10697 static bool nfs4_match_stateid(const nfs4_stateid *s1, 10703 10698 const nfs4_stateid *s2) 10704 10699 { 10700 + trace_nfs4_match_stateid(s1, s2); 10701 + 10705 10702 return nfs4_stateid_match(s1, s2); 10706 10703 } 10707 10704 ··· 10874 10867 10875 10868 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10876 10869 { 10877 - ssize_t error, error2, error3, error4; 10870 + ssize_t error, error2, error3, error4 = 0; 10878 10871 size_t left = size; 10879 10872 10880 10873 error = generic_listxattr(dentry, list, left); ··· 10902 10895 left -= error3; 10903 10896 } 10904 10897 10905 - error4 = security_inode_listsecurity(d_inode(dentry), list, left); 10906 - if (error4 < 0) 10907 - return error4; 10898 + if (!nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) { 10899 + error4 = security_inode_listsecurity(d_inode(dentry), list, left); 10900 + if (error4 < 0) 10901 + return error4; 10902 + } 10908 10903 10909 10904 error += error2 + error3 + error4; 10910 10905 if (size && error > size) ··· 10960 10951 .listxattr = nfs4_listxattr, 10961 10952 }; 10962 10953 10954 + static struct nfs_server *nfs4_clone_server(struct nfs_server *source, 10955 + struct nfs_fh *fh, struct nfs_fattr *fattr, 10956 + rpc_authflavor_t flavor) 10957 + { 10958 + struct nfs_server *server; 10959 + int error; 10960 + 10961 + server = nfs_clone_server(source, fh, fattr, flavor); 10962 + if (IS_ERR(server)) 10963 + return server; 10964 + 10965 + error = nfs4_delegation_hash_alloc(server); 10966 + if (error) { 10967 + nfs_free_server(server); 10968 + return ERR_PTR(error); 10969 + } 10970 + 10971 + return server; 10972 + } 10973 + 10963 10974 const struct nfs_rpc_ops nfs_v4_clientops = { 10964 10975 .version = 4, 
/* protocol version */ 10965 10976 .dentry_ops = &nfs4_dentry_operations, ··· 11032 11003 .init_client = nfs4_init_client, 11033 11004 .free_client = nfs4_free_client, 11034 11005 .create_server = nfs4_create_server, 11035 - .clone_server = nfs_clone_server, 11006 + .clone_server = nfs4_clone_server, 11036 11007 .discover_trunking = nfs4_discover_trunking, 11037 11008 .enable_swap = nfs4_enable_swap, 11038 11009 .disable_swap = nfs4_disable_swap,
+2
fs/nfs/nfs4trace.c
··· 26 26 EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_done); 27 27 EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_read_pagelist); 28 28 EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_mds_fallback_write_pagelist); 29 + EXPORT_TRACEPOINT_SYMBOL_GPL(pnfs_ds_connect); 29 30 30 31 EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_read_error); 31 32 EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_write_error); 32 33 EXPORT_TRACEPOINT_SYMBOL_GPL(ff_layout_commit_error); 33 34 35 + EXPORT_TRACEPOINT_SYMBOL_GPL(bl_ext_tree_prepare_commit); 34 36 EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_reg); 35 37 EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_reg_err); 36 38 EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_unreg);
+167 -1
fs/nfs/nfs4trace.h
··· 14 14 #include <trace/misc/fs.h> 15 15 #include <trace/misc/nfs.h> 16 16 17 + #include "delegation.h" 18 + 17 19 #define show_nfs_fattr_flags(valid) \ 18 20 __print_flags((unsigned long)valid, "|", \ 19 21 { NFS_ATTR_FATTR_TYPE, "TYPE" }, \ ··· 32 30 { NFS_ATTR_FATTR_CTIME, "CTIME" }, \ 33 31 { NFS_ATTR_FATTR_CHANGE, "CHANGE" }, \ 34 32 { NFS_ATTR_FATTR_OWNER_NAME, "OWNER_NAME" }, \ 35 - { NFS_ATTR_FATTR_GROUP_NAME, "GROUP_NAME" }) 33 + { NFS_ATTR_FATTR_GROUP_NAME, "GROUP_NAME" }, \ 34 + { NFS_ATTR_FATTR_BTIME, "BTIME" }) 36 35 37 36 DECLARE_EVENT_CLASS(nfs4_clientid_event, 38 37 TP_PROTO( ··· 276 273 show_nfs_stable_how(__entry->cb_how) 277 274 ) 278 275 ); 276 + 277 + TRACE_EVENT(pnfs_ds_connect, 278 + TP_PROTO( 279 + char *ds_remotestr, 280 + int status 281 + ), 282 + 283 + TP_ARGS(ds_remotestr, status), 284 + 285 + TP_STRUCT__entry( 286 + __string(ds_ips, ds_remotestr) 287 + __field(int, status) 288 + ), 289 + 290 + TP_fast_assign( 291 + __assign_str(ds_ips); 292 + __entry->status = status; 293 + ), 294 + 295 + TP_printk( 296 + "ds_ips=%s, status=%d", 297 + __get_str(ds_ips), 298 + __entry->status 299 + ) 300 + ); 301 + 279 302 #endif /* CONFIG_NFS_V4_1 */ 280 303 281 304 TRACE_EVENT(nfs4_setup_sequence, ··· 985 956 TP_ARGS(inode, fmode)) 986 957 DEFINE_NFS4_SET_DELEGATION_EVENT(nfs4_set_delegation); 987 958 DEFINE_NFS4_SET_DELEGATION_EVENT(nfs4_reclaim_delegation); 959 + DEFINE_NFS4_SET_DELEGATION_EVENT(nfs4_detach_delegation); 960 + 961 + #define show_delegation_flags(flags) \ 962 + __print_flags(flags, "|", \ 963 + { BIT(NFS_DELEGATION_NEED_RECLAIM), "NEED_RECLAIM" }, \ 964 + { BIT(NFS_DELEGATION_RETURN), "RETURN" }, \ 965 + { BIT(NFS_DELEGATION_RETURN_IF_CLOSED), "RETURN_IF_CLOSED" }, \ 966 + { BIT(NFS_DELEGATION_REFERENCED), "REFERENCED" }, \ 967 + { BIT(NFS_DELEGATION_RETURNING), "RETURNING" }, \ 968 + { BIT(NFS_DELEGATION_REVOKED), "REVOKED" }, \ 969 + { BIT(NFS_DELEGATION_TEST_EXPIRED), "TEST_EXPIRED" }, \ 970 + { BIT(NFS_DELEGATION_INODE_FREEING), "INODE_FREEING" }, \ 971 + { BIT(NFS_DELEGATION_RETURN_DELAYED), "RETURN_DELAYED" }) 972 + 973 + DECLARE_EVENT_CLASS(nfs4_delegation_event, 974 + TP_PROTO( 975 + const struct nfs_delegation *delegation 976 + ), 977 + 978 + TP_ARGS(delegation), 979 + 980 + TP_STRUCT__entry( 981 + __field(u32, fhandle) 982 + __field(unsigned int, fmode) 983 + __field(unsigned long, flags) 984 + ), 985 + 986 + TP_fast_assign( 987 + __entry->fhandle = nfs_fhandle_hash(NFS_FH(delegation->inode)); 988 + __entry->fmode = delegation->type; 989 + __entry->flags = delegation->flags; 990 + ), 991 + 992 + TP_printk( 993 + "fhandle=0x%08x fmode=%s flags=%s", 994 + __entry->fhandle, show_fs_fmode_flags(__entry->fmode), 995 + show_delegation_flags(__entry->flags) 996 + ) 997 + ); 998 + #define DEFINE_NFS4_DELEGATION_EVENT(name) \ 999 + DEFINE_EVENT(nfs4_delegation_event, name, \ 1000 + TP_PROTO( \ 1001 + const struct nfs_delegation *delegation \ 1002 + ), \ 1003 + TP_ARGS(delegation)) 1004 + DEFINE_NFS4_DELEGATION_EVENT(nfs_delegation_need_return); 988 1005 989 1006 TRACE_EVENT(nfs4_delegreturn_exit, 990 1007 TP_PROTO( ··· 1523 1448 TP_ARGS(clp, fhandle, inode, stateid, error)) 1524 1449 DEFINE_NFS4_INODE_STATEID_CALLBACK_EVENT(nfs4_cb_recall); 1525 1450 DEFINE_NFS4_INODE_STATEID_CALLBACK_EVENT(nfs4_cb_layoutrecall_file); 1451 + 1452 + #define show_stateid_type(type) \ 1453 + __print_symbolic(type, \ 1454 + { NFS4_INVALID_STATEID_TYPE, "INVALID" }, \ 1455 + { NFS4_SPECIAL_STATEID_TYPE, "SPECIAL" }, \ 1456 + { NFS4_OPEN_STATEID_TYPE, "OPEN" }, \ 1457 + { 
NFS4_LOCK_STATEID_TYPE, "LOCK" }, \ 1458 + { NFS4_DELEGATION_STATEID_TYPE, "DELEGATION" }, \ 1459 + { NFS4_LAYOUT_STATEID_TYPE, "LAYOUT" }, \ 1460 + { NFS4_PNFS_DS_STATEID_TYPE, "PNFS_DS" }, \ 1461 + { NFS4_REVOKED_STATEID_TYPE, "REVOKED" }, \ 1462 + { NFS4_FREED_STATEID_TYPE, "FREED" }) 1463 + 1464 + DECLARE_EVENT_CLASS(nfs4_match_stateid_event, 1465 + TP_PROTO( 1466 + const nfs4_stateid *s1, 1467 + const nfs4_stateid *s2 1468 + ), 1469 + 1470 + TP_ARGS(s1, s2), 1471 + 1472 + TP_STRUCT__entry( 1473 + __field(int, s1_seq) 1474 + __field(int, s2_seq) 1475 + __field(u32, s1_hash) 1476 + __field(u32, s2_hash) 1477 + __field(int, s1_type) 1478 + __field(int, s2_type) 1479 + ), 1480 + 1481 + TP_fast_assign( 1482 + __entry->s1_seq = s1->seqid; 1483 + __entry->s1_hash = nfs_stateid_hash(s1); 1484 + __entry->s1_type = s1->type; 1485 + __entry->s2_seq = s2->seqid; 1486 + __entry->s2_hash = nfs_stateid_hash(s2); 1487 + __entry->s2_type = s2->type; 1488 + ), 1489 + 1490 + TP_printk( 1491 + "s1=%s:%x:%u s2=%s:%x:%u", 1492 + show_stateid_type(__entry->s1_type), 1493 + __entry->s1_hash, __entry->s1_seq, 1494 + show_stateid_type(__entry->s2_type), 1495 + __entry->s2_hash, __entry->s2_seq 1496 + ) 1497 + ); 1498 + 1499 + #define DEFINE_NFS4_MATCH_STATEID_EVENT(name) \ 1500 + DEFINE_EVENT(nfs4_match_stateid_event, name, \ 1501 + TP_PROTO( \ 1502 + const nfs4_stateid *s1, \ 1503 + const nfs4_stateid *s2 \ 1504 + ), \ 1505 + TP_ARGS(s1, s2)) 1506 + DEFINE_NFS4_MATCH_STATEID_EVENT(nfs41_match_stateid); 1507 + DEFINE_NFS4_MATCH_STATEID_EVENT(nfs4_match_stateid); 1526 1508 1527 1509 DECLARE_EVENT_CLASS(nfs4_idmap_event, 1528 1510 TP_PROTO( ··· 2292 2160 __entry->offset, __entry->count, 2293 2161 __get_str(dstaddr), __entry->nfs_error, 2294 2162 show_nfs4_status(__entry->nfs_error) 2163 + ) 2164 + ); 2165 + 2166 + TRACE_EVENT(bl_ext_tree_prepare_commit, 2167 + TP_PROTO( 2168 + int ret, 2169 + size_t count, 2170 + u64 lwb, 2171 + bool not_all_ranges 2172 + ), 2173 + 2174 + TP_ARGS(ret, count, lwb, not_all_ranges), 2175 + 2176 + TP_STRUCT__entry( 2177 + __field(int, ret) 2178 + __field(size_t, count) 2179 + __field(u64, lwb) 2180 + __field(bool, not_all_ranges) 2181 + ), 2182 + 2183 + TP_fast_assign( 2184 + __entry->ret = ret; 2185 + __entry->count = count; 2186 + __entry->lwb = lwb; 2187 + __entry->not_all_ranges = not_all_ranges; 2188 + ), 2189 + 2190 + TP_printk( 2191 + "ret=%d, found %zu ranges, lwb=%llu%s", 2192 + __entry->ret, 2193 + __entry->count, 2194 + __entry->lwb, 2195 + __entry->not_all_ranges ? ", not all ranges encoded" : 2196 + "" 2295 2197 ) 2296 2198 ); 2297 2199
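The stateid-comparison and delegation events added above are ordinary tracepoints, so they can be switched on from userspace like any other. A hedged sketch; the tracefs mount point /sys/kernel/tracing and the "nfs4" group name (following TRACE_SYSTEM nfs4 in this header) are assumptions about the running system:

#include <stdio.h>

/* Hypothetical helper: enable one tracepoint in the nfs4 group via
 * tracefs. Returns 0 on success, -1 if the event file is missing or
 * not writable. */
static int enable_nfs4_event(const char *event)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/tracing/events/nfs4/%s/enable", event);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs("1", f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* e.g. one of the events defined by this hunk */
	return enable_nfs4_event("nfs4_match_stateid");
}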
+24
fs/nfs/nfs4xdr.c
··· 1623 1623 | FATTR4_WORD1_RAWDEV 1624 1624 | FATTR4_WORD1_SPACE_USED 1625 1625 | FATTR4_WORD1_TIME_ACCESS 1626 + | FATTR4_WORD1_TIME_CREATE 1626 1627 | FATTR4_WORD1_TIME_METADATA 1627 1628 | FATTR4_WORD1_TIME_MODIFY; 1628 1629 attrs[2] |= FATTR4_WORD2_SECURITY_LABEL; ··· 4208 4207 return status; 4209 4208 } 4210 4209 4210 + static int decode_attr_time_create(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec64 *time) 4211 + { 4212 + int status = 0; 4213 + 4214 + time->tv_sec = 0; 4215 + time->tv_nsec = 0; 4216 + if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_CREATE - 1U))) 4217 + return -EIO; 4218 + if (likely(bitmap[1] & FATTR4_WORD1_TIME_CREATE)) { 4219 + status = decode_attr_time(xdr, time); 4220 + if (status == 0) 4221 + status = NFS_ATTR_FATTR_BTIME; 4222 + bitmap[1] &= ~FATTR4_WORD1_TIME_CREATE; 4223 + } 4224 + dprintk("%s: btime=%lld\n", __func__, time->tv_sec); 4225 + return status; 4226 + } 4227 + 4211 4228 static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec64 *time) 4212 4229 { 4213 4230 int status = 0; ··· 4796 4777 fattr->valid |= status; 4797 4778 4798 4779 status = decode_attr_time_access(xdr, bitmap, &fattr->atime); 4780 + if (status < 0) 4781 + goto xdr_error; 4782 + fattr->valid |= status; 4783 + 4784 + status = decode_attr_time_create(xdr, bitmap, &fattr->btime); 4799 4785 if (status < 0) 4800 4786 goto xdr_error; 4801 4787 fattr->valid |= status;
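With FATTR4_WORD1_TIME_CREATE now decoded into fattr->btime, a file's birth time can reach userspace through statx(), assuming the rest of the series maps the decoded value through to the inode attributes. A small check program against a path on an NFSv4 mount:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path-on-nfs-mount>\n", argv[0]);
		return 1;
	}
	if (statx(AT_FDCWD, argv[1], 0, STATX_BTIME, &stx) != 0) {
		perror("statx");
		return 1;
	}
	/* Servers that do not advertise time_create leave the bit clear. */
	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec,
		       stx.stx_btime.tv_nsec);
	else
		printf("no birth time reported for this file\n");
	return 0;
}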
+8 -3
fs/nfs/nfstrace.h
··· 32 32 { NFS_INO_INVALID_BLOCKS, "INVALID_BLOCKS" }, \ 33 33 { NFS_INO_INVALID_XATTR, "INVALID_XATTR" }, \ 34 34 { NFS_INO_INVALID_NLINK, "INVALID_NLINK" }, \ 35 - { NFS_INO_INVALID_MODE, "INVALID_MODE" }) 35 + { NFS_INO_INVALID_MODE, "INVALID_MODE" }, \ 36 + { NFS_INO_INVALID_BTIME, "INVALID_BTIME" }) 36 37 37 38 #define nfs_show_nfsi_flags(v) \ 38 39 __print_flags(v, "|", \ ··· 57 56 __field(u32, fhandle) 58 57 __field(u64, fileid) 59 58 __field(u64, version) 59 + __field(unsigned long, cache_validity) 60 60 ), 61 61 62 62 TP_fast_assign( ··· 66 64 __entry->fileid = nfsi->fileid; 67 65 __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); 68 66 __entry->version = inode_peek_iversion_raw(inode); 67 + __entry->cache_validity = nfsi->cache_validity; 69 68 ), 70 69 71 70 TP_printk( 72 - "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu ", 71 + "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu cache_validity=0x%lx (%s)", 73 72 MAJOR(__entry->dev), MINOR(__entry->dev), 74 73 (unsigned long long)__entry->fileid, 75 74 __entry->fhandle, 76 - (unsigned long long)__entry->version 75 + (unsigned long long)__entry->version, 76 + __entry->cache_validity, 77 + nfs_show_cache_validity(__entry->cache_validity) 77 78 ) 78 79 ); 79 80
+18 -21
fs/nfs/pnfs.c
··· 306 306 pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) 307 307 { 308 308 struct inode *inode; 309 - unsigned long i_state; 310 309 311 310 if (!lo) 312 311 return; ··· 316 317 if (!list_empty(&lo->plh_segs)) 317 318 WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n"); 318 319 pnfs_detach_layout_hdr(lo); 319 - i_state = inode->i_state; 320 + /* Notify pnfs_destroy_layout_final() that we're done */ 321 + if (inode->i_state & (I_FREEING | I_CLEAR)) 322 + wake_up_var_locked(lo, &inode->i_lock); 320 323 spin_unlock(&inode->i_lock); 321 324 pnfs_free_layout_hdr(lo); 322 - /* Notify pnfs_destroy_layout_final() that we're done */ 323 - if (i_state & (I_FREEING | I_CLEAR)) 324 - wake_up_var(lo); 325 325 } 326 326 } 327 327 ··· 807 809 } 808 810 EXPORT_SYMBOL_GPL(pnfs_destroy_layout); 809 811 810 - static bool pnfs_layout_removed(struct nfs_inode *nfsi, 811 - struct pnfs_layout_hdr *lo) 812 - { 813 - bool ret; 814 - 815 - spin_lock(&nfsi->vfs_inode.i_lock); 816 - ret = nfsi->layout != lo; 817 - spin_unlock(&nfsi->vfs_inode.i_lock); 818 - return ret; 819 - } 820 - 821 812 void pnfs_destroy_layout_final(struct nfs_inode *nfsi) 822 813 { 823 814 struct pnfs_layout_hdr *lo = __pnfs_destroy_layout(nfsi); 815 + struct inode *inode = &nfsi->vfs_inode; 824 816 825 - if (lo) 826 - wait_var_event(lo, pnfs_layout_removed(nfsi, lo)); 817 + if (lo) { 818 + spin_lock(&inode->i_lock); 819 + wait_var_event_spinlock(lo, nfsi->layout != lo, 820 + &inode->i_lock); 821 + spin_unlock(&inode->i_lock); 822 + } 827 823 } 828 824 829 825 static bool ··· 3332 3340 struct nfs_inode *nfsi = NFS_I(inode); 3333 3341 loff_t end_pos; 3334 3342 int status; 3343 + bool mark_as_dirty = false; 3335 3344 3336 3345 if (!pnfs_layoutcommit_outstanding(inode)) 3337 3346 return 0; ··· 3384 3391 if (ld->prepare_layoutcommit) { 3385 3392 status = ld->prepare_layoutcommit(&data->args); 3386 3393 if (status) { 3387 - put_cred(data->cred); 3394 + if (status != -ENOSPC) 3395 + put_cred(data->cred); 3388 3396 spin_lock(&inode->i_lock); 3389 3397 set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); 3390 3398 if (end_pos > nfsi->layout->plh_lwb) 3391 3399 nfsi->layout->plh_lwb = end_pos; 3392 - goto out_unlock; 3400 + if (status != -ENOSPC) 3401 + goto out_unlock; 3402 + spin_unlock(&inode->i_lock); 3403 + mark_as_dirty = true; 3393 3404 } 3394 3405 } 3395 3406 3396 3407 3397 3408 status = nfs4_proc_layoutcommit(data, sync); 3398 3409 out: 3399 - if (status) 3410 + if (status || mark_as_dirty) 3400 3411 mark_inode_dirty_sync(inode); 3401 3412 dprintk("<-- %s status %d\n", __func__, status); 3402 3413 return status;
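The teardown above replaces the unlocked wake_up_var()/wait_var_event() pair with the _spinlock variants, so the wakeup and the condition check are serialized by the same lock and no wakeup can slip in between the check and the sleep. A generic sketch of the pairing; struct pin and both functions are made up for illustration:

#include <linux/spinlock.h>
#include <linux/wait_bit.h>

struct pin {
	spinlock_t lock;
	void *obj;
};

/* Waiter: called like the pnfs code above, with the lock taken around
 * the wait; the condition is re-evaluated with the lock held. */
static void wait_for_release(struct pin *p, void *obj)
{
	spin_lock(&p->lock);
	wait_var_event_spinlock(obj, p->obj != obj, &p->lock);
	spin_unlock(&p->lock);
}

/* Waker: clear the pointer and wake under the same lock, using the
 * object's address as the wait-variable key. */
static void release(struct pin *p, void *obj)
{
	spin_lock(&p->lock);
	p->obj = NULL;
	wake_up_var_locked(obj, &p->lock);
	spin_unlock(&p->lock);
}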
+9 -5
fs/nfs/pnfs_nfs.c
··· 17 17 #include "internal.h" 18 18 #include "pnfs.h" 19 19 #include "netns.h" 20 + #include "nfs4trace.h" 20 21 21 22 #define NFSDBG_FACILITY NFSDBG_PNFS 22 23 ··· 1008 1007 err = nfs4_wait_ds_connect(ds); 1009 1008 if (err || ds->ds_clp) 1010 1009 goto out; 1011 - if (nfs4_test_deviceid_unavailable(devid)) 1012 - return -ENODEV; 1010 + if (nfs4_test_deviceid_unavailable(devid)) { 1011 + err = -ENODEV; 1012 + goto out; 1013 + } 1013 1014 } while (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) != 0); 1014 1015 1015 1016 if (ds->ds_clp) ··· 1041 1038 if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) { 1042 1039 WARN_ON_ONCE(ds->ds_clp || 1043 1040 !nfs4_test_deviceid_unavailable(devid)); 1044 - return -EINVAL; 1045 - } 1046 - err = nfs_client_init_status(ds->ds_clp); 1041 + err = -EINVAL; 1042 + } else 1043 + err = nfs_client_init_status(ds->ds_clp); 1047 1044 } 1048 1045 1046 + trace_pnfs_ds_connect(ds->ds_remotestr, err); 1049 1047 return err; 1050 1048 } 1051 1049 EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
+6 -2
fs/nfs/write.c
··· 2113 2113 * that we can safely release the inode reference while holding 2114 2114 * the folio lock. 2115 2115 */ 2116 - if (folio_test_private(src)) 2117 - return -EBUSY; 2116 + if (folio_test_private(src)) { 2117 + if (mode == MIGRATE_SYNC) 2118 + nfs_wb_folio(src->mapping->host, src); 2119 + if (folio_test_private(src)) 2120 + return -EBUSY; 2121 + } 2118 2122 2119 2123 if (folio_test_private_2(src)) { /* [DEPRECATED] */ 2120 2124 if (mode == MIGRATE_ASYNC)
+17 -11
fs/nfs_common/nfslocalio.c
··· 177 177 /* nfs_close_local_fh() is doing the 178 178 * close and we must wait. until it unlinks 179 179 */ 180 - wait_var_event_spinlock(nfl, 180 + wait_var_event_spinlock(nfs_uuid, 181 181 list_first_entry_or_null( 182 182 &nfs_uuid->files, 183 183 struct nfs_file_localio, ··· 198 198 /* Now we can allow racing nfs_close_local_fh() to 199 199 * skip the locking. 200 200 */ 201 - RCU_INIT_POINTER(nfl->nfs_uuid, NULL); 202 - wake_up_var_locked(&nfl->nfs_uuid, &nfs_uuid->lock); 201 + store_release_wake_up(&nfl->nfs_uuid, RCU_INITIALIZER(NULL)); 203 202 } 204 203 205 204 /* Remove client from nn->local_clients */ ··· 242 243 } 243 244 EXPORT_SYMBOL_GPL(nfs_localio_invalidate_clients); 244 245 245 - static void nfs_uuid_add_file(nfs_uuid_t *nfs_uuid, struct nfs_file_localio *nfl) 246 + static int nfs_uuid_add_file(nfs_uuid_t *nfs_uuid, struct nfs_file_localio *nfl) 246 247 { 248 + int ret = 0; 249 + 247 250 /* Add nfl to nfs_uuid->files if it isn't already */ 248 251 spin_lock(&nfs_uuid->lock); 249 - if (list_empty(&nfl->list)) { 252 + if (rcu_access_pointer(nfs_uuid->net) == NULL) { 253 + ret = -ENXIO; 254 + } else if (list_empty(&nfl->list)) { 250 255 rcu_assign_pointer(nfl->nfs_uuid, nfs_uuid); 251 256 list_add_tail(&nfl->list, &nfs_uuid->files); 252 257 } 253 258 spin_unlock(&nfs_uuid->lock); 259 + return ret; 254 260 } 255 261 256 262 /* ··· 289 285 } 290 286 rcu_read_unlock(); 291 287 /* We have an implied reference to net thanks to nfsd_net_try_get */ 292 - localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt, 293 - cred, nfs_fh, pnf, fmode); 288 + localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt, cred, 289 + nfs_fh, pnf, fmode); 290 + if (!IS_ERR(localio) && nfs_uuid_add_file(uuid, nfl) < 0) { 291 + /* Delete the cached file when racing with nfs_uuid_put() */ 292 + nfs_to_nfsd_file_put_local(pnf); 293 + } 294 294 nfs_to_nfsd_net_put(net); 295 - if (!IS_ERR(localio)) 296 - nfs_uuid_add_file(uuid, nfl); 297 295 298 296 return localio; 299 297 } ··· 320 314 rcu_read_unlock(); 321 315 return; 322 316 } 323 - if (list_empty(&nfs_uuid->files)) { 317 + if (list_empty(&nfl->list)) { 324 318 /* nfs_uuid_put() has started closing files, wait for it 325 319 * to finished 326 320 */ ··· 344 338 */ 345 339 spin_lock(&nfs_uuid->lock); 346 340 list_del_init(&nfl->list); 347 - wake_up_var_locked(&nfl->nfs_uuid, &nfs_uuid->lock); 341 + wake_up_var_locked(nfs_uuid, &nfs_uuid->lock); 348 342 spin_unlock(&nfs_uuid->lock); 349 343 } 350 344 EXPORT_SYMBOL_GPL(nfs_close_local_fh);
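The nfs_uuid_add_file() change above closes a race with nfs_uuid_put(): registration now re-checks, under the owner's lock, that teardown has not begun before linking itself in, and the caller unwinds on -ENXIO. A stripped-down sketch of that guard under hypothetical names (struct owner's "live" flag stands in for nfs_uuid->net != NULL):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct owner {
	spinlock_t lock;
	bool live;
	struct list_head entries;
};

/* Link item into the owner's list unless the owner is already tearing
 * down; list_empty() on the item itself detects "already linked", as
 * with nfl->list above. */
static int register_entry(struct owner *o, struct list_head *item)
{
	int ret = 0;

	spin_lock(&o->lock);
	if (!o->live)
		ret = -ENXIO;	/* caller drops what it set up */
	else if (list_empty(item))
		list_add_tail(item, &o->entries);
	spin_unlock(&o->lock);
	return ret;
}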
+3 -2
fs/nfsd/localio.c
··· 103 103 if (nfsd_file_get(new) == NULL) 104 104 goto again; 105 105 /* 106 - * Drop the ref we were going to install and the 107 - * one we were going to return. 106 + * Drop the ref we were going to install (both file and 107 + * net) and the one we were going to return (only file). 108 108 */ 109 109 nfsd_file_put(localio); 110 + nfsd_net_put(net); 110 111 nfsd_file_put(localio); 111 112 localio = new; 112 113 }
+9 -1
fs/nfsd/vfs.c
··· 470 470 if (!iap->ia_valid) 471 471 return 0; 472 472 473 - iap->ia_valid |= ATTR_CTIME; 473 + /* 474 + * If ATTR_DELEG is set, then this is an update from a client that 475 + * holds a delegation. If this is an update for only the atime, the 476 + * ctime should not be changed. If the update contains the mtime 477 + * too, then ATTR_CTIME should already be set. 478 + */ 479 + if (!(iap->ia_valid & ATTR_DELEG)) 480 + iap->ia_valid |= ATTR_CTIME; 481 + 474 482 return notify_change(&nop_mnt_idmap, dentry, iap, NULL); 475 483 } 476 484
+18 -6
fs/proc/task_mmu.c
··· 340 340 341 341 priv->inode = inode; 342 342 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); 343 - if (IS_ERR_OR_NULL(priv->mm)) { 344 - int err = priv->mm ? PTR_ERR(priv->mm) : -ESRCH; 343 + if (IS_ERR(priv->mm)) { 344 + int err = PTR_ERR(priv->mm); 345 345 346 346 seq_release_private(inode, file); 347 347 return err; ··· 1148 1148 { 1149 1149 struct mem_size_stats *mss = walk->private; 1150 1150 struct vm_area_struct *vma = walk->vma; 1151 - pte_t ptent = huge_ptep_get(walk->mm, addr, pte); 1152 1151 struct folio *folio = NULL; 1153 1152 bool present = false; 1153 + spinlock_t *ptl; 1154 + pte_t ptent; 1154 1155 1156 + ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte); 1157 + ptent = huge_ptep_get(walk->mm, addr, pte); 1155 1158 if (pte_present(ptent)) { 1156 1159 folio = page_folio(pte_page(ptent)); 1157 1160 present = true; ··· 1173 1170 else 1174 1171 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); 1175 1172 } 1173 + spin_unlock(ptl); 1176 1174 return 0; 1177 1175 } 1178 1176 #else ··· 2021 2017 struct pagemapread *pm = walk->private; 2022 2018 struct vm_area_struct *vma = walk->vma; 2023 2019 u64 flags = 0, frame = 0; 2020 + spinlock_t *ptl; 2024 2021 int err = 0; 2025 2022 pte_t pte; 2026 2023 2027 2024 if (vma->vm_flags & VM_SOFTDIRTY) 2028 2025 flags |= PM_SOFT_DIRTY; 2029 2026 2027 + ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep); 2030 2028 pte = huge_ptep_get(walk->mm, addr, ptep); 2031 2029 if (pte_present(pte)) { 2032 2030 struct folio *folio = page_folio(pte_page(pte)); ··· 2056 2050 2057 2051 err = add_to_pagemap(&pme, pm); 2058 2052 if (err) 2059 - return err; 2053 + break; 2060 2054 if (pm->show_pfn && (flags & PM_PRESENT)) 2061 2055 frame++; 2062 2056 } 2063 2057 2058 + spin_unlock(ptl); 2064 2059 cond_resched(); 2065 2060 2066 2061 return err; ··· 3135 3128 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, 3136 3129 unsigned long addr, unsigned long end, struct mm_walk *walk) 3137 3130 { 3138 - pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte); 3131 + pte_t huge_pte; 3139 3132 struct numa_maps *md; 3140 3133 struct page *page; 3134 + spinlock_t *ptl; 3141 3135 3136 + ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); 3137 + huge_pte = huge_ptep_get(walk->mm, addr, pte); 3142 3138 if (!pte_present(huge_pte)) 3143 - return 0; 3139 + goto out; 3144 3140 3145 3141 page = pte_page(huge_pte); 3146 3142 3147 3143 md = walk->private; 3148 3144 gather_stats(page, md, pte_dirty(huge_pte), 1); 3145 + out: 3146 + spin_unlock(ptl); 3149 3147 return 0; 3150 3148 } 3151 3149
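All three hugetlb walkers above now take the huge-PTE lock before huge_ptep_get(), so the entry cannot change under them (for example via concurrent migration) while it is inspected. The shared pattern, sketched as a standalone ->hugetlb_entry callback rather than any of the functions in this file:

#include <linux/hugetlb.h>
#include <linux/pagewalk.h>

static int example_hugetlb_entry(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	spinlock_t *ptl;
	pte_t pte;

	/* Lock first, then read: the PTE is stable until the unlock. */
	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, ptep);
	pte = huge_ptep_get(walk->mm, addr, ptep);
	if (pte_present(pte)) {
		/* inspect pte_page(pte) while the entry cannot change */
	}
	spin_unlock(ptl);
	return 0;
}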
+1 -1
fs/smb/client/Makefile
··· 32 32 33 33 cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o 34 34 35 - cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o 35 + cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o cifstransport.o 36 36 37 37 cifs-$(CONFIG_CIFS_COMPRESSION) += compress.o compress/lz77.o
+11 -13
fs/smb/client/cifs_debug.c
··· 60 60 return; 61 61 62 62 cifs_dbg(VFS, "Dump pending requests:\n"); 63 - spin_lock(&server->mid_lock); 63 + spin_lock(&server->mid_queue_lock); 64 64 list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { 65 65 cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n", 66 66 mid_entry->mid_state, ··· 83 83 mid_entry->resp_buf, 62); 84 84 } 85 85 } 86 - spin_unlock(&server->mid_lock); 86 + spin_unlock(&server->mid_queue_lock); 87 87 #endif /* CONFIG_CIFS_DEBUG2 */ 88 88 } 89 89 ··· 412 412 spin_lock(&cifs_tcp_ses_lock); 413 413 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { 414 414 #ifdef CONFIG_CIFS_SMB_DIRECT 415 + struct smbdirect_socket *sc; 415 416 struct smbdirect_socket_parameters *sp; 416 417 #endif 417 418 ··· 437 436 seq_printf(m, "\nSMBDirect transport not available"); 438 437 goto skip_rdma; 439 438 } 440 - sp = &server->smbd_conn->socket.parameters; 439 + sc = &server->smbd_conn->socket; 440 + sp = &sc->parameters; 441 441 442 442 seq_printf(m, "\nSMBDirect (in hex) protocol version: %x " 443 443 "transport status: %x", ··· 467 465 seq_printf(m, "\nRead Queue count_reassembly_queue: %x " 468 466 "count_enqueue_reassembly_queue: %x " 469 467 "count_dequeue_reassembly_queue: %x " 470 - "fragment_reassembly_remaining: %x " 471 468 "reassembly_data_length: %x " 472 469 "reassembly_queue_length: %x", 473 470 server->smbd_conn->count_reassembly_queue, 474 471 server->smbd_conn->count_enqueue_reassembly_queue, 475 472 server->smbd_conn->count_dequeue_reassembly_queue, 476 - server->smbd_conn->fragment_reassembly_remaining, 477 - server->smbd_conn->reassembly_data_length, 478 - server->smbd_conn->reassembly_queue_length); 473 + sc->recv_io.reassembly.data_length, 474 + sc->recv_io.reassembly.queue_length); 479 475 seq_printf(m, "\nCurrent Credits send_credits: %x " 480 476 "receive_credits: %x receive_credit_target: %x", 481 477 atomic_read(&server->smbd_conn->send_credits), ··· 481 481 server->smbd_conn->receive_credit_target); 482 482 seq_printf(m, "\nPending send_pending: %x ", 483 483 atomic_read(&server->smbd_conn->send_pending)); 484 - seq_printf(m, "\nReceive buffers count_receive_queue: %x " 485 - "count_empty_packet_queue: %x", 486 - server->smbd_conn->count_receive_queue, 487 - server->smbd_conn->count_empty_packet_queue); 484 + seq_printf(m, "\nReceive buffers count_receive_queue: %x ", 485 + server->smbd_conn->count_receive_queue); 488 486 seq_printf(m, "\nMR responder_resources: %x " 489 487 "max_frmr_depth: %x mr_type: %x", 490 488 server->smbd_conn->responder_resources, ··· 670 672 671 673 seq_printf(m, "\n\tServer ConnectionId: 0x%llx", 672 674 chan_server->conn_id); 673 - spin_lock(&chan_server->mid_lock); 675 + spin_lock(&chan_server->mid_queue_lock); 674 676 list_for_each_entry(mid_entry, &chan_server->pending_mid_q, qhead) { 675 677 seq_printf(m, "\n\t\tState: %d com: %d pid: %d cbdata: %p mid %llu", 676 678 mid_entry->mid_state, ··· 679 681 mid_entry->callback_data, 680 682 mid_entry->mid); 681 683 } 682 - spin_unlock(&chan_server->mid_lock); 684 + spin_unlock(&chan_server->mid_queue_lock); 683 685 } 684 686 spin_unlock(&ses->chan_lock); 685 687 seq_puts(m, "\n--\n");
+3 -5
fs/smb/client/cifsfs.c
··· 77 77 unsigned int GlobalCurrentXid; /* protected by GlobalMid_Lock */ 78 78 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */ 79 79 unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Lock */ 80 - spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ 80 + DEFINE_SPINLOCK(GlobalMid_Lock); /* protects above & list operations on midQ entries */ 81 81 82 82 /* 83 83 * Global counters, updated atomically ··· 97 97 atomic_t total_small_buf_alloc_count; 98 98 #endif/* STATS2 */ 99 99 struct list_head cifs_tcp_ses_list; 100 - spinlock_t cifs_tcp_ses_lock; 100 + DEFINE_SPINLOCK(cifs_tcp_ses_lock); 101 101 static const struct super_operations cifs_super_ops; 102 102 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; 103 103 module_param(CIFSMaxBufSize, uint, 0444); ··· 723 723 else 724 724 seq_puts(s, ",nativesocket"); 725 725 seq_show_option(s, "symlink", 726 - cifs_symlink_type_str(get_cifs_symlink_type(cifs_sb))); 726 + cifs_symlink_type_str(cifs_symlink_type(cifs_sb))); 727 727 728 728 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize); 729 729 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize); ··· 1863 1863 GlobalCurrentXid = 0; 1864 1864 GlobalTotalActiveXid = 0; 1865 1865 GlobalMaxActiveXid = 0; 1866 - spin_lock_init(&cifs_tcp_ses_lock); 1867 - spin_lock_init(&GlobalMid_Lock); 1868 1866 1869 1867 cifs_lock_secret = get_random_u32(); 1870 1868
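The init_cifs()-time spin_lock_init() calls removed above become compile-time initializers: DEFINE_SPINLOCK() makes the lock valid before any code runs, so it can never be used uninitialized. The equivalent minimal pattern:

#include <linux/spinlock.h>

/* Statically initialized; no runtime spin_lock_init() needed. */
static DEFINE_SPINLOCK(example_lock);

static int example_counter;

static void bump_counter(void)
{
	spin_lock(&example_lock);
	example_counter++;
	spin_unlock(&example_lock);
}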
+13 -10
fs/smb/client/cifsglob.h
··· 732 732 #endif 733 733 wait_queue_head_t response_q; 734 734 wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/ 735 - spinlock_t mid_lock; /* protect mid queue and it's entries */ 735 + spinlock_t mid_queue_lock; /* protect mid queue */ 736 + spinlock_t mid_counter_lock; 736 737 struct list_head pending_mid_q; 737 738 bool noblocksnd; /* use blocking sendmsg */ 738 739 bool noautotune; /* do not autotune send buf sizes */ ··· 771 770 /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */ 772 771 unsigned int capabilities; /* selective disabling of caps by smb sess */ 773 772 int timeAdj; /* Adjust for difference in server time zone in sec */ 774 - __u64 CurrentMid; /* multiplex id - rotating counter, protected by GlobalMid_Lock */ 773 + __u64 current_mid; /* multiplex id - rotating counter, protected by mid_counter_lock */ 775 774 char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */ 776 775 /* 16th byte of RFC1001 workstation name is always null */ 777 776 char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; ··· 1730 1729 unsigned int resp_buf_size; 1731 1730 int mid_state; /* wish this were enum but can not pass to wait_event */ 1732 1731 int mid_rc; /* rc for MID_RC */ 1733 - unsigned int mid_flags; 1734 1732 __le16 command; /* smb command code */ 1735 1733 unsigned int optype; /* operation type */ 1734 + bool wait_cancelled:1; /* Cancelled while waiting for response */ 1735 + bool deleted_from_q:1; /* Whether Mid has been dequeued frem pending_mid_q */ 1736 1736 bool large_buf:1; /* if valid response, is pointer to large buf */ 1737 1737 bool multiRsp:1; /* multiple trans2 responses for one request */ 1738 1738 bool multiEnd:1; /* both received */ ··· 1895 1893 #define MID_RESPONSE_READY 0x40 /* ready for other process handle the rsp */ 1896 1894 #define MID_RC 0x80 /* mid_rc contains custom rc */ 1897 1895 1898 - /* Flags */ 1899 - #define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */ 1900 - #define MID_DELETED 2 /* Mid has been dequeued/deleted */ 1901 - 1902 1896 /* Types of response buffer returned from SendReceive2 */ 1903 1897 #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ 1904 1898 #define CIFS_SMALL_BUFFER 1 ··· 2005 2007 * GlobalCurrentXid 2006 2008 * GlobalTotalActiveXid 2007 2009 * TCP_Server_Info->srv_lock (anything in struct not protected by another lock and can change) 2008 - * TCP_Server_Info->mid_lock TCP_Server_Info->pending_mid_q cifs_get_tcp_session 2009 - * ->CurrentMid 2010 - * (any changes in mid_q_entry fields) 2010 + * TCP_Server_Info->mid_queue_lock TCP_Server_Info->pending_mid_q cifs_get_tcp_session 2011 + * mid_q_entry->deleted_from_q 2012 + * TCP_Server_Info->mid_counter_lock TCP_Server_Info->current_mid cifs_get_tcp_session 2011 2013 * TCP_Server_Info->req_lock TCP_Server_Info->in_flight cifs_get_tcp_session 2012 2014 * ->credits 2013 2015 * ->echo_credits ··· 2374 2376 } 2375 2377 return ret; 2376 2378 } 2379 + 2380 + #define CIFS_REPARSE_SUPPORT(tcon) \ 2381 + ((tcon)->posix_extensions || \ 2382 + (le32_to_cpu((tcon)->fsAttrInfo.Attributes) & \ 2383 + FILE_SUPPORTS_REPARSE_POINTS)) 2377 2384 2378 2385 #endif /* _CIFS_GLOB_H */
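mid_lock is split in two here: mid_queue_lock guards pending_mid_q and each entry's deleted_from_q bit, while mid_counter_lock guards only the rotating current_mid counter, as the updated locking table at the bottom of the hunk records. A hypothetical sketch of the resulting discipline; field names follow this header, but the helpers are illustrative, not the cifs.ko functions (which allocate mids through ops->get_next_mid):

#include "cifsglob.h"	/* assumed: TCP_Server_Info, mid_q_entry */

/* Id allocation takes only the counter lock, so it never serializes
 * against walks of the pending queue. */
static __u64 next_mid(struct TCP_Server_Info *server)
{
	__u64 mid;

	spin_lock(&server->mid_counter_lock);
	mid = server->current_mid++;
	spin_unlock(&server->mid_counter_lock);
	return mid;
}

/* Queue manipulation takes only the queue lock. */
static void queue_mid(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	spin_lock(&server->mid_queue_lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&server->mid_queue_lock);
}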
+15
fs/smb/client/cifsproto.h
··· 116 116 int * /* bytes returned */ , const int); 117 117 extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses, 118 118 char *in_buf, int flags); 119 + int cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server); 119 120 extern struct mid_q_entry *cifs_setup_request(struct cifs_ses *, 120 121 struct TCP_Server_Info *, 121 122 struct smb_rqst *); 122 123 extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *, 123 124 struct smb_rqst *); 125 + int __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, 126 + struct smb_rqst *rqst); 124 127 extern int cifs_check_receive(struct mid_q_entry *mid, 125 128 struct TCP_Server_Info *server, bool log_error); 129 + int wait_for_free_request(struct TCP_Server_Info *server, const int flags, 130 + unsigned int *instance); 126 131 extern int cifs_wait_mtu_credits(struct TCP_Server_Info *server, 127 132 size_t size, size_t *num, 128 133 struct cifs_credits *credits); 134 + 135 + static inline int 136 + send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst, 137 + struct mid_q_entry *mid) 138 + { 139 + return server->ops->send_cancel ? 140 + server->ops->send_cancel(server, rqst, mid) : 0; 141 + } 142 + 143 + int wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ); 129 144 extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *, 130 145 struct kvec *, int /* nvec to send */, 131 146 int * /* type of buf returned */, const int flags,
+2 -2
fs/smb/client/cifssmb.c
··· 2751 2751 if (cap_unix(tcon->ses)) 2752 2752 return -EOPNOTSUPP; 2753 2753 2754 - if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS)) 2754 + if (!CIFS_REPARSE_SUPPORT(tcon)) 2755 2755 return -EOPNOTSUPP; 2756 2756 2757 2757 oparms = (struct cifs_open_parms) { ··· 2879 2879 * attempt to create reparse point. This will prevent creating unusable 2880 2880 * empty object on the server. 2881 2881 */ 2882 - if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS)) 2882 + if (!CIFS_REPARSE_SUPPORT(tcon)) 2883 2883 return ERR_PTR(-EOPNOTSUPP); 2884 2884 2885 2885 #ifndef CONFIG_CIFS_XATTR
+566
fs/smb/client/cifstransport.c
··· 1 + // SPDX-License-Identifier: LGPL-2.1 2 + /* 3 + * 4 + * Copyright (C) International Business Machines Corp., 2002,2008 5 + * Author(s): Steve French (sfrench@us.ibm.com) 6 + * Jeremy Allison (jra@samba.org) 2006. 7 + * 8 + */ 9 + 10 + #include <linux/fs.h> 11 + #include <linux/list.h> 12 + #include <linux/gfp.h> 13 + #include <linux/wait.h> 14 + #include <linux/net.h> 15 + #include <linux/delay.h> 16 + #include <linux/freezer.h> 17 + #include <linux/tcp.h> 18 + #include <linux/bvec.h> 19 + #include <linux/highmem.h> 20 + #include <linux/uaccess.h> 21 + #include <linux/processor.h> 22 + #include <linux/mempool.h> 23 + #include <linux/sched/signal.h> 24 + #include <linux/task_io_accounting_ops.h> 25 + #include "cifspdu.h" 26 + #include "cifsglob.h" 27 + #include "cifsproto.h" 28 + #include "cifs_debug.h" 29 + #include "smb2proto.h" 30 + #include "smbdirect.h" 31 + #include "compress.h" 32 + 33 + /* Max number of iovectors we can use off the stack when sending requests. */ 34 + #define CIFS_MAX_IOV_SIZE 8 35 + 36 + static struct mid_q_entry * 37 + alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) 38 + { 39 + struct mid_q_entry *temp; 40 + 41 + if (server == NULL) { 42 + cifs_dbg(VFS, "%s: null TCP session\n", __func__); 43 + return NULL; 44 + } 45 + 46 + temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); 47 + memset(temp, 0, sizeof(struct mid_q_entry)); 48 + kref_init(&temp->refcount); 49 + temp->mid = get_mid(smb_buffer); 50 + temp->pid = current->pid; 51 + temp->command = cpu_to_le16(smb_buffer->Command); 52 + cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command); 53 + /* easier to use jiffies */ 54 + /* when mid allocated can be before when sent */ 55 + temp->when_alloc = jiffies; 56 + temp->server = server; 57 + 58 + /* 59 + * The default is for the mid to be synchronous, so the 60 + * default callback just wakes up the current task. 
61 + */ 62 + get_task_struct(current); 63 + temp->creator = current; 64 + temp->callback = cifs_wake_up_task; 65 + temp->callback_data = current; 66 + 67 + atomic_inc(&mid_count); 68 + temp->mid_state = MID_REQUEST_ALLOCATED; 69 + return temp; 70 + } 71 + 72 + int 73 + smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, 74 + unsigned int smb_buf_length) 75 + { 76 + struct kvec iov[2]; 77 + struct smb_rqst rqst = { .rq_iov = iov, 78 + .rq_nvec = 2 }; 79 + 80 + iov[0].iov_base = smb_buffer; 81 + iov[0].iov_len = 4; 82 + iov[1].iov_base = (char *)smb_buffer + 4; 83 + iov[1].iov_len = smb_buf_length; 84 + 85 + return __smb_send_rqst(server, 1, &rqst); 86 + } 87 + 88 + static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, 89 + struct mid_q_entry **ppmidQ) 90 + { 91 + spin_lock(&ses->ses_lock); 92 + if (ses->ses_status == SES_NEW) { 93 + if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && 94 + (in_buf->Command != SMB_COM_NEGOTIATE)) { 95 + spin_unlock(&ses->ses_lock); 96 + return -EAGAIN; 97 + } 98 + /* else ok - we are setting up session */ 99 + } 100 + 101 + if (ses->ses_status == SES_EXITING) { 102 + /* check if SMB session is bad because we are setting it up */ 103 + if (in_buf->Command != SMB_COM_LOGOFF_ANDX) { 104 + spin_unlock(&ses->ses_lock); 105 + return -EAGAIN; 106 + } 107 + /* else ok - we are shutting down session */ 108 + } 109 + spin_unlock(&ses->ses_lock); 110 + 111 + *ppmidQ = alloc_mid(in_buf, ses->server); 112 + if (*ppmidQ == NULL) 113 + return -ENOMEM; 114 + spin_lock(&ses->server->mid_queue_lock); 115 + list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q); 116 + spin_unlock(&ses->server->mid_queue_lock); 117 + return 0; 118 + } 119 + 120 + struct mid_q_entry * 121 + cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) 122 + { 123 + int rc; 124 + struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; 125 + struct mid_q_entry *mid; 126 + 127 + if (rqst->rq_iov[0].iov_len != 4 || 128 + rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) 129 + return ERR_PTR(-EIO); 130 + 131 + /* enable signing if server requires it */ 132 + if (server->sign) 133 + hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 134 + 135 + mid = alloc_mid(hdr, server); 136 + if (mid == NULL) 137 + return ERR_PTR(-ENOMEM); 138 + 139 + rc = cifs_sign_rqst(rqst, server, &mid->sequence_number); 140 + if (rc) { 141 + release_mid(mid); 142 + return ERR_PTR(rc); 143 + } 144 + 145 + return mid; 146 + } 147 + 148 + /* 149 + * 150 + * Send an SMB Request. No response info (other than return code) 151 + * needs to be parsed. 
152 + * 153 + * flags indicate the type of request buffer and how long to wait 154 + * and whether to log NT STATUS code (error) before mapping it to POSIX error 155 + * 156 + */ 157 + int 158 + SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses, 159 + char *in_buf, int flags) 160 + { 161 + int rc; 162 + struct kvec iov[1]; 163 + struct kvec rsp_iov; 164 + int resp_buf_type; 165 + 166 + iov[0].iov_base = in_buf; 167 + iov[0].iov_len = get_rfc1002_length(in_buf) + 4; 168 + flags |= CIFS_NO_RSP_BUF; 169 + rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov); 170 + cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc); 171 + 172 + return rc; 173 + } 174 + 175 + int 176 + cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, 177 + bool log_error) 178 + { 179 + unsigned int len = get_rfc1002_length(mid->resp_buf) + 4; 180 + 181 + dump_smb(mid->resp_buf, min_t(u32, 92, len)); 182 + 183 + /* convert the length into a more usable form */ 184 + if (server->sign) { 185 + struct kvec iov[2]; 186 + int rc = 0; 187 + struct smb_rqst rqst = { .rq_iov = iov, 188 + .rq_nvec = 2 }; 189 + 190 + iov[0].iov_base = mid->resp_buf; 191 + iov[0].iov_len = 4; 192 + iov[1].iov_base = (char *)mid->resp_buf + 4; 193 + iov[1].iov_len = len - 4; 194 + /* FIXME: add code to kill session */ 195 + rc = cifs_verify_signature(&rqst, server, 196 + mid->sequence_number); 197 + if (rc) 198 + cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n", 199 + rc); 200 + } 201 + 202 + /* BB special case reconnect tid and uid here? */ 203 + return map_and_check_smb_error(mid, log_error); 204 + } 205 + 206 + struct mid_q_entry * 207 + cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored, 208 + struct smb_rqst *rqst) 209 + { 210 + int rc; 211 + struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; 212 + struct mid_q_entry *mid; 213 + 214 + if (rqst->rq_iov[0].iov_len != 4 || 215 + rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) 216 + return ERR_PTR(-EIO); 217 + 218 + rc = allocate_mid(ses, hdr, &mid); 219 + if (rc) 220 + return ERR_PTR(rc); 221 + rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number); 222 + if (rc) { 223 + delete_mid(mid); 224 + return ERR_PTR(rc); 225 + } 226 + return mid; 227 + } 228 + 229 + int 230 + SendReceive2(const unsigned int xid, struct cifs_ses *ses, 231 + struct kvec *iov, int n_vec, int *resp_buf_type /* ret */, 232 + const int flags, struct kvec *resp_iov) 233 + { 234 + struct smb_rqst rqst; 235 + struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov; 236 + int rc; 237 + 238 + if (n_vec + 1 > CIFS_MAX_IOV_SIZE) { 239 + new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec), 240 + GFP_KERNEL); 241 + if (!new_iov) { 242 + /* otherwise cifs_send_recv below sets resp_buf_type */ 243 + *resp_buf_type = CIFS_NO_BUFFER; 244 + return -ENOMEM; 245 + } 246 + } else 247 + new_iov = s_iov; 248 + 249 + /* 1st iov is a RFC1001 length followed by the rest of the packet */ 250 + memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec)); 251 + 252 + new_iov[0].iov_base = new_iov[1].iov_base; 253 + new_iov[0].iov_len = 4; 254 + new_iov[1].iov_base += 4; 255 + new_iov[1].iov_len -= 4; 256 + 257 + memset(&rqst, 0, sizeof(struct smb_rqst)); 258 + rqst.rq_iov = new_iov; 259 + rqst.rq_nvec = n_vec + 1; 260 + 261 + rc = cifs_send_recv(xid, ses, ses->server, 262 + &rqst, resp_buf_type, flags, resp_iov); 263 + if (n_vec + 1 > CIFS_MAX_IOV_SIZE) 264 + kfree(new_iov); 265 + return rc; 266 + } 267 + 268 + int 269 + 
SendReceive(const unsigned int xid, struct cifs_ses *ses, 270 + struct smb_hdr *in_buf, struct smb_hdr *out_buf, 271 + int *pbytes_returned, const int flags) 272 + { 273 + int rc = 0; 274 + struct mid_q_entry *midQ; 275 + unsigned int len = be32_to_cpu(in_buf->smb_buf_length); 276 + struct kvec iov = { .iov_base = in_buf, .iov_len = len }; 277 + struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 }; 278 + struct cifs_credits credits = { .value = 1, .instance = 0 }; 279 + struct TCP_Server_Info *server; 280 + 281 + if (ses == NULL) { 282 + cifs_dbg(VFS, "Null smb session\n"); 283 + return -EIO; 284 + } 285 + server = ses->server; 286 + if (server == NULL) { 287 + cifs_dbg(VFS, "Null tcp session\n"); 288 + return -EIO; 289 + } 290 + 291 + spin_lock(&server->srv_lock); 292 + if (server->tcpStatus == CifsExiting) { 293 + spin_unlock(&server->srv_lock); 294 + return -ENOENT; 295 + } 296 + spin_unlock(&server->srv_lock); 297 + 298 + /* Ensure that we do not send more than 50 overlapping requests 299 + to the same server. We may make this configurable later or 300 + use ses->maxReq */ 301 + 302 + if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { 303 + cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n", 304 + len); 305 + return -EIO; 306 + } 307 + 308 + rc = wait_for_free_request(server, flags, &credits.instance); 309 + if (rc) 310 + return rc; 311 + 312 + /* make sure that we sign in the same order that we send on this socket 313 + and avoid races inside tcp sendmsg code that could cause corruption 314 + of smb data */ 315 + 316 + cifs_server_lock(server); 317 + 318 + rc = allocate_mid(ses, in_buf, &midQ); 319 + if (rc) { 320 + cifs_server_unlock(server); 321 + /* Update # of requests on wire to server */ 322 + add_credits(server, &credits, 0); 323 + return rc; 324 + } 325 + 326 + rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number); 327 + if (rc) { 328 + cifs_server_unlock(server); 329 + goto out; 330 + } 331 + 332 + midQ->mid_state = MID_REQUEST_SUBMITTED; 333 + 334 + rc = smb_send(server, in_buf, len); 335 + cifs_save_when_sent(midQ); 336 + 337 + if (rc < 0) 338 + server->sequence_number -= 2; 339 + 340 + cifs_server_unlock(server); 341 + 342 + if (rc < 0) 343 + goto out; 344 + 345 + rc = wait_for_response(server, midQ); 346 + if (rc != 0) { 347 + send_cancel(server, &rqst, midQ); 348 + spin_lock(&server->mid_queue_lock); 349 + if (midQ->mid_state == MID_REQUEST_SUBMITTED || 350 + midQ->mid_state == MID_RESPONSE_RECEIVED) { 351 + /* no longer considered to be "in-flight" */ 352 + midQ->callback = release_mid; 353 + spin_unlock(&server->mid_queue_lock); 354 + add_credits(server, &credits, 0); 355 + return rc; 356 + } 357 + spin_unlock(&server->mid_queue_lock); 358 + } 359 + 360 + rc = cifs_sync_mid_result(midQ, server); 361 + if (rc != 0) { 362 + add_credits(server, &credits, 0); 363 + return rc; 364 + } 365 + 366 + if (!midQ->resp_buf || !out_buf || 367 + midQ->mid_state != MID_RESPONSE_READY) { 368 + rc = -EIO; 369 + cifs_server_dbg(VFS, "Bad MID state?\n"); 370 + goto out; 371 + } 372 + 373 + *pbytes_returned = get_rfc1002_length(midQ->resp_buf); 374 + memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); 375 + rc = cifs_check_receive(midQ, server, 0); 376 + out: 377 + delete_mid(midQ); 378 + add_credits(server, &credits, 0); 379 + 380 + return rc; 381 + } 382 + 383 + /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows 384 + blocking lock to return. 
*/ 385 + 386 + static int 387 + send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon, 388 + struct smb_hdr *in_buf, 389 + struct smb_hdr *out_buf) 390 + { 391 + int bytes_returned; 392 + struct cifs_ses *ses = tcon->ses; 393 + LOCK_REQ *pSMB = (LOCK_REQ *)in_buf; 394 + 395 + /* We just modify the current in_buf to change 396 + the type of lock from LOCKING_ANDX_SHARED_LOCK 397 + or LOCKING_ANDX_EXCLUSIVE_LOCK to 398 + LOCKING_ANDX_CANCEL_LOCK. */ 399 + 400 + pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; 401 + pSMB->Timeout = 0; 402 + pSMB->hdr.Mid = get_next_mid(ses->server); 403 + 404 + return SendReceive(xid, ses, in_buf, out_buf, 405 + &bytes_returned, 0); 406 + } 407 + 408 + int 409 + SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, 410 + struct smb_hdr *in_buf, struct smb_hdr *out_buf, 411 + int *pbytes_returned) 412 + { 413 + int rc = 0; 414 + int rstart = 0; 415 + struct mid_q_entry *midQ; 416 + struct cifs_ses *ses; 417 + unsigned int len = be32_to_cpu(in_buf->smb_buf_length); 418 + struct kvec iov = { .iov_base = in_buf, .iov_len = len }; 419 + struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 }; 420 + unsigned int instance; 421 + struct TCP_Server_Info *server; 422 + 423 + if (tcon == NULL || tcon->ses == NULL) { 424 + cifs_dbg(VFS, "Null smb session\n"); 425 + return -EIO; 426 + } 427 + ses = tcon->ses; 428 + server = ses->server; 429 + 430 + if (server == NULL) { 431 + cifs_dbg(VFS, "Null tcp session\n"); 432 + return -EIO; 433 + } 434 + 435 + spin_lock(&server->srv_lock); 436 + if (server->tcpStatus == CifsExiting) { 437 + spin_unlock(&server->srv_lock); 438 + return -ENOENT; 439 + } 440 + spin_unlock(&server->srv_lock); 441 + 442 + /* Ensure that we do not send more than 50 overlapping requests 443 + to the same server. We may make this configurable later or 444 + use ses->maxReq */ 445 + 446 + if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { 447 + cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n", 448 + len); 449 + return -EIO; 450 + } 451 + 452 + rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance); 453 + if (rc) 454 + return rc; 455 + 456 + /* make sure that we sign in the same order that we send on this socket 457 + and avoid races inside tcp sendmsg code that could cause corruption 458 + of smb data */ 459 + 460 + cifs_server_lock(server); 461 + 462 + rc = allocate_mid(ses, in_buf, &midQ); 463 + if (rc) { 464 + cifs_server_unlock(server); 465 + return rc; 466 + } 467 + 468 + rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number); 469 + if (rc) { 470 + delete_mid(midQ); 471 + cifs_server_unlock(server); 472 + return rc; 473 + } 474 + 475 + midQ->mid_state = MID_REQUEST_SUBMITTED; 476 + rc = smb_send(server, in_buf, len); 477 + cifs_save_when_sent(midQ); 478 + 479 + if (rc < 0) 480 + server->sequence_number -= 2; 481 + 482 + cifs_server_unlock(server); 483 + 484 + if (rc < 0) { 485 + delete_mid(midQ); 486 + return rc; 487 + } 488 + 489 + /* Wait for a reply - allow signals to interrupt. */ 490 + rc = wait_event_interruptible(server->response_q, 491 + (!(midQ->mid_state == MID_REQUEST_SUBMITTED || 492 + midQ->mid_state == MID_RESPONSE_RECEIVED)) || 493 + ((server->tcpStatus != CifsGood) && 494 + (server->tcpStatus != CifsNew))); 495 + 496 + /* Were we interrupted by a signal ? 
*/ 497 + spin_lock(&server->srv_lock); 498 + if ((rc == -ERESTARTSYS) && 499 + (midQ->mid_state == MID_REQUEST_SUBMITTED || 500 + midQ->mid_state == MID_RESPONSE_RECEIVED) && 501 + ((server->tcpStatus == CifsGood) || 502 + (server->tcpStatus == CifsNew))) { 503 + spin_unlock(&server->srv_lock); 504 + 505 + if (in_buf->Command == SMB_COM_TRANSACTION2) { 506 + /* POSIX lock. We send a NT_CANCEL SMB to cause the 507 + blocking lock to return. */ 508 + rc = send_cancel(server, &rqst, midQ); 509 + if (rc) { 510 + delete_mid(midQ); 511 + return rc; 512 + } 513 + } else { 514 + /* Windows lock. We send a LOCKINGX_CANCEL_LOCK 515 + to cause the blocking lock to return. */ 516 + 517 + rc = send_lock_cancel(xid, tcon, in_buf, out_buf); 518 + 519 + /* If we get -ENOLCK back the lock may have 520 + already been removed. Don't exit in this case. */ 521 + if (rc && rc != -ENOLCK) { 522 + delete_mid(midQ); 523 + return rc; 524 + } 525 + } 526 + 527 + rc = wait_for_response(server, midQ); 528 + if (rc) { 529 + send_cancel(server, &rqst, midQ); 530 + spin_lock(&server->mid_queue_lock); 531 + if (midQ->mid_state == MID_REQUEST_SUBMITTED || 532 + midQ->mid_state == MID_RESPONSE_RECEIVED) { 533 + /* no longer considered to be "in-flight" */ 534 + midQ->callback = release_mid; 535 + spin_unlock(&server->mid_queue_lock); 536 + return rc; 537 + } 538 + spin_unlock(&server->mid_queue_lock); 539 + } 540 + 541 + /* We got the response - restart system call. */ 542 + rstart = 1; 543 + spin_lock(&server->srv_lock); 544 + } 545 + spin_unlock(&server->srv_lock); 546 + 547 + rc = cifs_sync_mid_result(midQ, server); 548 + if (rc != 0) 549 + return rc; 550 + 551 + /* rcvd frame is ok */ 552 + if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) { 553 + rc = -EIO; 554 + cifs_tcon_dbg(VFS, "Bad MID state?\n"); 555 + goto out; 556 + } 557 + 558 + *pbytes_returned = get_rfc1002_length(midQ->resp_buf); 559 + memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); 560 + rc = cifs_check_receive(midQ, server, 0); 561 + out: 562 + delete_mid(midQ); 563 + if (rstart && rc == -EACCES) 564 + return -ERESTARTSYS; 565 + return rc; 566 + }
+18 -17
fs/smb/client/connect.c
··· 321 321 /* mark submitted MIDs for retry and issue callback */ 322 322 INIT_LIST_HEAD(&retry_list); 323 323 cifs_dbg(FYI, "%s: moving mids to private list\n", __func__); 324 - spin_lock(&server->mid_lock); 324 + spin_lock(&server->mid_queue_lock); 325 325 list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) { 326 326 kref_get(&mid->refcount); 327 327 if (mid->mid_state == MID_REQUEST_SUBMITTED) 328 328 mid->mid_state = MID_RETRY_NEEDED; 329 329 list_move(&mid->qhead, &retry_list); 330 - mid->mid_flags |= MID_DELETED; 330 + mid->deleted_from_q = true; 331 331 } 332 - spin_unlock(&server->mid_lock); 332 + spin_unlock(&server->mid_queue_lock); 333 333 cifs_server_unlock(server); 334 334 335 335 cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__); ··· 358 358 } 359 359 360 360 cifs_dbg(FYI, "Mark tcp session as need reconnect\n"); 361 - trace_smb3_reconnect(server->CurrentMid, server->conn_id, 361 + trace_smb3_reconnect(server->current_mid, server->conn_id, 362 362 server->hostname); 363 363 server->tcpStatus = CifsNeedReconnect; 364 364 ··· 884 884 * server there should be exactly one pending mid 885 885 * corresponding to SMB1/SMB2 Negotiate packet. 886 886 */ 887 - spin_lock(&server->mid_lock); 887 + spin_lock(&server->mid_queue_lock); 888 888 list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) { 889 889 kref_get(&mid->refcount); 890 890 list_move(&mid->qhead, &dispose_list); 891 - mid->mid_flags |= MID_DELETED; 891 + mid->deleted_from_q = true; 892 892 } 893 - spin_unlock(&server->mid_lock); 893 + spin_unlock(&server->mid_queue_lock); 894 894 895 895 /* Now try to reconnect once with NetBIOS session. */ 896 896 server->with_rfc1001 = true; ··· 957 957 #ifdef CONFIG_CIFS_STATS2 958 958 mid->when_received = jiffies; 959 959 #endif 960 - spin_lock(&mid->server->mid_lock); 960 + spin_lock(&mid->server->mid_queue_lock); 961 961 if (!malformed) 962 962 mid->mid_state = MID_RESPONSE_RECEIVED; 963 963 else ··· 966 966 * Trying to handle/dequeue a mid after the send_recv() 967 967 * function has finished processing it is a bug. 
968 968 */ 969 - if (mid->mid_flags & MID_DELETED) { 970 - spin_unlock(&mid->server->mid_lock); 969 + if (mid->deleted_from_q == true) { 970 + spin_unlock(&mid->server->mid_queue_lock); 971 971 pr_warn_once("trying to dequeue a deleted mid\n"); 972 972 } else { 973 973 list_del_init(&mid->qhead); 974 - mid->mid_flags |= MID_DELETED; 975 - spin_unlock(&mid->server->mid_lock); 974 + mid->deleted_from_q = true; 975 + spin_unlock(&mid->server->mid_queue_lock); 976 976 } 977 977 } 978 978 ··· 1101 1101 struct list_head *tmp, *tmp2; 1102 1102 LIST_HEAD(dispose_list); 1103 1103 1104 - spin_lock(&server->mid_lock); 1104 + spin_lock(&server->mid_queue_lock); 1105 1105 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { 1106 1106 mid_entry = list_entry(tmp, struct mid_q_entry, qhead); 1107 1107 cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid); 1108 1108 kref_get(&mid_entry->refcount); 1109 1109 mid_entry->mid_state = MID_SHUTDOWN; 1110 1110 list_move(&mid_entry->qhead, &dispose_list); 1111 - mid_entry->mid_flags |= MID_DELETED; 1111 + mid_entry->deleted_from_q = true; 1112 1112 } 1113 - spin_unlock(&server->mid_lock); 1113 + spin_unlock(&server->mid_queue_lock); 1114 1114 1115 1115 /* now walk dispose list and issue callbacks */ 1116 1116 list_for_each_safe(tmp, tmp2, &dispose_list) { ··· 1242 1242 spin_unlock(&server->req_lock); 1243 1243 wake_up(&server->request_q); 1244 1244 1245 - trace_smb3_hdr_credits(server->CurrentMid, 1245 + trace_smb3_hdr_credits(server->current_mid, 1246 1246 server->conn_id, server->hostname, scredits, 1247 1247 le16_to_cpu(shdr->CreditRequest), in_flight); 1248 1248 cifs_server_dbg(FYI, "%s: added %u credits total=%d\n", ··· 1822 1822 tcp_ses->compression.requested = ctx->compress; 1823 1823 spin_lock_init(&tcp_ses->req_lock); 1824 1824 spin_lock_init(&tcp_ses->srv_lock); 1825 - spin_lock_init(&tcp_ses->mid_lock); 1825 + spin_lock_init(&tcp_ses->mid_queue_lock); 1826 + spin_lock_init(&tcp_ses->mid_counter_lock); 1826 1827 INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); 1827 1828 INIT_LIST_HEAD(&tcp_ses->smb_ses_list); 1828 1829 INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
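The connect.c hunks are mechanical, but the invariant behind them is worth spelling out: pending_mid_q and the new deleted_from_q flag change only together under mid_queue_lock, while the mid counter is guarded by the separate mid_counter_lock initialized above. The dequeue convention, condensed from the dequeue path in these hunks (refcounting omitted):

spin_lock(&server->mid_queue_lock);
if (mid->deleted_from_q) {
	/* already unlinked elsewhere; dequeuing twice is a bug */
	spin_unlock(&server->mid_queue_lock);
	pr_warn_once("trying to dequeue a deleted mid\n");
} else {
	list_del_init(&mid->qhead);
	mid->deleted_from_q = true;
	spin_unlock(&server->mid_queue_lock);
}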
+1 -18
fs/smb/client/fs_context.c
··· 1652 1652 pr_warn_once("conflicting posix mount options specified\n"); 1653 1653 ctx->linux_ext = 1; 1654 1654 ctx->no_linux_ext = 0; 1655 + ctx->nonativesocket = 1; /* POSIX mounts use NFS style reparse points */ 1655 1656 } 1656 1657 break; 1657 1658 case Opt_nocase: ··· 1828 1827 kfree_sensitive(ctx->password2); 1829 1828 ctx->password2 = NULL; 1830 1829 return -EINVAL; 1831 - } 1832 - 1833 - enum cifs_symlink_type get_cifs_symlink_type(struct cifs_sb_info *cifs_sb) 1834 - { 1835 - if (cifs_sb->ctx->symlink_type == CIFS_SYMLINK_TYPE_DEFAULT) { 1836 - if (cifs_sb->ctx->mfsymlinks) 1837 - return CIFS_SYMLINK_TYPE_MFSYMLINKS; 1838 - else if (cifs_sb->ctx->sfu_emul) 1839 - return CIFS_SYMLINK_TYPE_SFU; 1840 - else if (cifs_sb->ctx->linux_ext && !cifs_sb->ctx->no_linux_ext) 1841 - return CIFS_SYMLINK_TYPE_UNIX; 1842 - else if (cifs_sb->ctx->reparse_type != CIFS_REPARSE_TYPE_NONE) 1843 - return CIFS_SYMLINK_TYPE_NATIVE; 1844 - else 1845 - return CIFS_SYMLINK_TYPE_NONE; 1846 - } else { 1847 - return cifs_sb->ctx->symlink_type; 1848 - } 1849 1830 } 1850 1831 1851 1832 int smb3_init_fs_context(struct fs_context *fc)
+17 -1
fs/smb/client/fs_context.h
··· 341 341 342 342 extern const struct fs_parameter_spec smb3_fs_parameters[]; 343 343 344 - extern enum cifs_symlink_type get_cifs_symlink_type(struct cifs_sb_info *cifs_sb); 344 + static inline enum cifs_symlink_type cifs_symlink_type(struct cifs_sb_info *cifs_sb) 345 + { 346 + bool posix = cifs_sb_master_tcon(cifs_sb)->posix_extensions; 347 + 348 + if (cifs_sb->ctx->symlink_type != CIFS_SYMLINK_TYPE_DEFAULT) 349 + return cifs_sb->ctx->symlink_type; 350 + 351 + if (cifs_sb->ctx->mfsymlinks) 352 + return CIFS_SYMLINK_TYPE_MFSYMLINKS; 353 + else if (cifs_sb->ctx->sfu_emul) 354 + return CIFS_SYMLINK_TYPE_SFU; 355 + else if (cifs_sb->ctx->linux_ext && !cifs_sb->ctx->no_linux_ext) 356 + return posix ? CIFS_SYMLINK_TYPE_NATIVE : CIFS_SYMLINK_TYPE_UNIX; 357 + else if (cifs_sb->ctx->reparse_type != CIFS_REPARSE_TYPE_NONE) 358 + return CIFS_SYMLINK_TYPE_NATIVE; 359 + return CIFS_SYMLINK_TYPE_NONE; 360 + } 345 361 346 362 extern int smb3_init_fs_context(struct fs_context *fc); 347 363 extern void smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx);
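The inline replacing the extern get_cifs_symlink_type() resolves the default with a fixed priority order: an explicit symlink= mount option wins, then mfsymlinks, then sfu, then the Linux/POSIX extensions, then reparse-point support. The one behavioral change against the removed fs_context.c copy is the linux_ext branch, which now resolves to NATIVE rather than UNIX on POSIX mounts. A small userspace model of that chain; the enum values and field names are stand-ins mirroring the inline above, not kernel API:

#include <stdio.h>
#include <stdbool.h>

enum sym_type { SYM_DEFAULT, SYM_MFSYMLINKS, SYM_SFU, SYM_UNIX,
		SYM_NATIVE, SYM_NONE };

struct model_ctx {
	enum sym_type symlink_type;	/* symlink= option, SYM_DEFAULT if unset */
	bool mfsymlinks, sfu_emul, linux_ext, no_linux_ext;
	bool have_reparse;		/* reparse_type != CIFS_REPARSE_TYPE_NONE */
	bool posix;			/* tcon->posix_extensions */
};

static enum sym_type resolve(const struct model_ctx *c)
{
	if (c->symlink_type != SYM_DEFAULT)
		return c->symlink_type;		/* explicit option wins */
	if (c->mfsymlinks)
		return SYM_MFSYMLINKS;
	if (c->sfu_emul)
		return SYM_SFU;
	if (c->linux_ext && !c->no_linux_ext)
		return c->posix ? SYM_NATIVE : SYM_UNIX;
	if (c->have_reparse)
		return SYM_NATIVE;
	return SYM_NONE;
}

int main(void)
{
	/* a plain mount against a reparse-capable server falls through to NATIVE */
	struct model_ctx c = { .have_reparse = true };
	printf("resolved type = %d (4 == SYM_NATIVE)\n", resolve(&c));
	return 0;
}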
+4 -9
fs/smb/client/link.c
··· 605 605 606 606 /* BB what if DFS and this volume is on different share? BB */ 607 607 rc = -EOPNOTSUPP; 608 - switch (get_cifs_symlink_type(cifs_sb)) { 609 - case CIFS_SYMLINK_TYPE_DEFAULT: 610 - /* should not happen, get_cifs_symlink_type() resolves the default */ 611 - break; 612 - 613 - case CIFS_SYMLINK_TYPE_NONE: 614 - break; 615 - 608 + switch (cifs_symlink_type(cifs_sb)) { 616 609 case CIFS_SYMLINK_TYPE_UNIX: 617 610 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 618 611 if (pTcon->unix_ext) { ··· 635 642 case CIFS_SYMLINK_TYPE_NATIVE: 636 643 case CIFS_SYMLINK_TYPE_NFS: 637 644 case CIFS_SYMLINK_TYPE_WSL: 638 - if (le32_to_cpu(pTcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS) { 645 + if (CIFS_REPARSE_SUPPORT(pTcon)) { 639 646 rc = create_reparse_symlink(xid, inode, direntry, pTcon, 640 647 full_path, symname); 641 648 goto symlink_exit; 642 649 } 650 + break; 651 + default: 643 652 break; 644 653 } 645 654
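CIFS_REPARSE_SUPPORT(tcon) is introduced by this series but defined outside these hunks. Judging from the open-coded tests it replaces here and in the smb1ops.c, smb2inode.c and smb2ops.c hunks below, it presumably folds the FILE_SUPPORTS_REPARSE_POINTS attribute check together with the POSIX-extensions case, roughly:

/* assumed shape only; the real definition is not part of these hunks */
#define CIFS_REPARSE_SUPPORT(tcon) \
	((tcon)->posix_extensions || \
	 (le32_to_cpu((tcon)->fsAttrInfo.Attributes) & \
	  FILE_SUPPORTS_REPARSE_POINTS))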
+1 -1
fs/smb/client/reparse.c
··· 38 38 struct dentry *dentry, struct cifs_tcon *tcon, 39 39 const char *full_path, const char *symname) 40 40 { 41 - switch (get_cifs_symlink_type(CIFS_SB(inode->i_sb))) { 41 + switch (cifs_symlink_type(CIFS_SB(inode->i_sb))) { 42 42 case CIFS_SYMLINK_TYPE_NATIVE: 43 43 return create_native_symlink(xid, inode, dentry, tcon, full_path, symname); 44 44 case CIFS_SYMLINK_TYPE_NFS:
+10 -9
fs/smb/client/smb1ops.c
··· 95 95 struct smb_hdr *buf = (struct smb_hdr *)buffer; 96 96 struct mid_q_entry *mid; 97 97 98 - spin_lock(&server->mid_lock); 98 + spin_lock(&server->mid_queue_lock); 99 99 list_for_each_entry(mid, &server->pending_mid_q, qhead) { 100 100 if (compare_mid(mid->mid, buf) && 101 101 mid->mid_state == MID_REQUEST_SUBMITTED && 102 102 le16_to_cpu(mid->command) == buf->Command) { 103 103 kref_get(&mid->refcount); 104 - spin_unlock(&server->mid_lock); 104 + spin_unlock(&server->mid_queue_lock); 105 105 return mid; 106 106 } 107 107 } 108 - spin_unlock(&server->mid_lock); 108 + spin_unlock(&server->mid_queue_lock); 109 109 return NULL; 110 110 } 111 111 ··· 169 169 __u16 last_mid, cur_mid; 170 170 bool collision, reconnect = false; 171 171 172 - spin_lock(&server->mid_lock); 173 - 172 + spin_lock(&server->mid_counter_lock); 174 173 /* mid is 16 bit only for CIFS/SMB */ 175 - cur_mid = (__u16)((server->CurrentMid) & 0xffff); 174 + cur_mid = (__u16)((server->current_mid) & 0xffff); 176 175 /* we do not want to loop forever */ 177 176 last_mid = cur_mid; 178 177 cur_mid++; ··· 197 198 cur_mid++; 198 199 199 200 num_mids = 0; 201 + spin_lock(&server->mid_queue_lock); 200 202 list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { 201 203 ++num_mids; 202 204 if (mid_entry->mid == cur_mid && ··· 207 207 break; 208 208 } 209 209 } 210 + spin_unlock(&server->mid_queue_lock); 210 211 211 212 /* 212 213 * if we have more than 32k mids in the list, then something ··· 224 223 225 224 if (!collision) { 226 225 mid = (__u64)cur_mid; 227 - server->CurrentMid = mid; 226 + server->current_mid = mid; 228 227 break; 229 228 } 230 229 cur_mid++; 231 230 } 232 - spin_unlock(&server->mid_lock); 231 + spin_unlock(&server->mid_counter_lock); 233 232 234 233 if (reconnect) { 235 234 cifs_signal_cifsd_for_reconnect(server, false); ··· 1273 1272 */ 1274 1273 return cifs_sfu_make_node(xid, inode, dentry, tcon, 1275 1274 full_path, mode, dev); 1276 - } else if (le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS) { 1275 + } else if (CIFS_REPARSE_SUPPORT(tcon)) { 1277 1276 /* 1278 1277 * mknod via reparse points requires server support for 1279 1278 * storing reparse points, which is available since
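The SMB1 allocator above shows the lock split at work: mid_counter_lock is held across the whole search, and mid_queue_lock is taken only around the pending_mid_q scan. Stripped of locking, credits and the reconnect path, the 16-bit collision-avoidance loop behaves like this userspace model; mid_pending() stands in for the queue walk, and the reserved-mid handling hidden behind the ··· folds is not reproduced:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t current_mid = 41;	/* models server->current_mid */

static bool mid_pending(uint16_t mid)	/* stand-in for the pending_mid_q scan */
{
	return mid == 42;		/* pretend mid 42 is still in flight */
}

/* Returns 0 and stores the new mid, or -1 once the 16-bit space wraps. */
static int get_next_mid16(uint64_t *out)
{
	uint16_t last_mid = (uint16_t)current_mid;
	uint16_t cur_mid = last_mid + 1;

	while (cur_mid != last_mid) {	/* we do not want to loop forever */
		if (!mid_pending(cur_mid)) {
			current_mid = cur_mid;
			*out = cur_mid;
			return 0;
		}
		cur_mid++;		/* uint16_t wraps around naturally */
	}
	return -1;			/* exhausted; the kernel reconnects here */
}

int main(void)
{
	uint64_t mid;

	if (get_next_mid16(&mid) == 0)
		printf("allocated mid %llu\n", (unsigned long long)mid);
	return 0;
}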
+2 -3
fs/smb/client/smb2inode.c
··· 1346 1346 * attempt to create reparse point. This will prevent creating unusable 1347 1347 * empty object on the server. 1348 1348 */ 1349 - if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS)) 1350 - if (!tcon->posix_extensions) 1351 - return ERR_PTR(-EOPNOTSUPP); 1349 + if (!CIFS_REPARSE_SUPPORT(tcon)) 1350 + return ERR_PTR(-EOPNOTSUPP); 1352 1351 1353 1352 oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, 1354 1353 SYNCHRONIZE | DELETE |
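Collapsing the nested guard into !CIFS_REPARSE_SUPPORT(tcon) is behavior-preserving by De Morgan: the old code bailed out exactly when neither the attribute bit nor posix_extensions held, and !A && !B is !(A || B). A throwaway check of the equivalence:

#include <assert.h>

int main(void)
{
	for (int a = 0; a <= 1; a++)
		for (int b = 0; b <= 1; b++)
			assert((!a && !b) == !(a || b));	/* De Morgan */
	return 0;
}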
+31 -32
fs/smb/client/smb2ops.c
··· 91 91 if (*val > 65000) { 92 92 *val = 65000; /* Don't get near 64K credits, avoid srv bugs */ 93 93 pr_warn_once("server overflowed SMB3 credits\n"); 94 - trace_smb3_overflow_credits(server->CurrentMid, 94 + trace_smb3_overflow_credits(server->current_mid, 95 95 server->conn_id, server->hostname, *val, 96 96 add, server->in_flight); 97 97 } ··· 136 136 wake_up(&server->request_q); 137 137 138 138 if (reconnect_detected) { 139 - trace_smb3_reconnect_detected(server->CurrentMid, 139 + trace_smb3_reconnect_detected(server->current_mid, 140 140 server->conn_id, server->hostname, scredits, add, in_flight); 141 141 142 142 cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n", ··· 144 144 } 145 145 146 146 if (reconnect_with_invalid_credits) { 147 - trace_smb3_reconnect_with_invalid_credits(server->CurrentMid, 147 + trace_smb3_reconnect_with_invalid_credits(server->current_mid, 148 148 server->conn_id, server->hostname, scredits, add, in_flight); 149 149 cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n", 150 150 optype, scredits, add); ··· 176 176 break; 177 177 } 178 178 179 - trace_smb3_add_credits(server->CurrentMid, 179 + trace_smb3_add_credits(server->current_mid, 180 180 server->conn_id, server->hostname, scredits, add, in_flight); 181 181 cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits); 182 182 } ··· 203 203 in_flight = server->in_flight; 204 204 spin_unlock(&server->req_lock); 205 205 206 - trace_smb3_set_credits(server->CurrentMid, 206 + trace_smb3_set_credits(server->current_mid, 207 207 server->conn_id, server->hostname, scredits, val, in_flight); 208 208 cifs_dbg(FYI, "%s: set %u credits\n", __func__, val); 209 209 ··· 288 288 in_flight = server->in_flight; 289 289 spin_unlock(&server->req_lock); 290 290 291 - trace_smb3_wait_credits(server->CurrentMid, 291 + trace_smb3_wait_credits(server->current_mid, 292 292 server->conn_id, server->hostname, scredits, -(credits->value), in_flight); 293 293 cifs_dbg(FYI, "%s: removed %u credits total=%d\n", 294 294 __func__, credits->value, scredits); ··· 316 316 server->credits, server->in_flight, 317 317 new_val - credits->value, 318 318 cifs_trace_rw_credits_no_adjust_up); 319 - trace_smb3_too_many_credits(server->CurrentMid, 319 + trace_smb3_too_many_credits(server->current_mid, 320 320 server->conn_id, server->hostname, 0, credits->value - new_val, 0); 321 321 cifs_server_dbg(VFS, "R=%x[%x] request has less credits (%d) than required (%d)", 322 322 subreq->rreq->debug_id, subreq->subreq.debug_index, ··· 338 338 server->credits, server->in_flight, 339 339 new_val - credits->value, 340 340 cifs_trace_rw_credits_old_session); 341 - trace_smb3_reconnect_detected(server->CurrentMid, 341 + trace_smb3_reconnect_detected(server->current_mid, 342 342 server->conn_id, server->hostname, scredits, 343 343 credits->value - new_val, in_flight); 344 344 cifs_server_dbg(VFS, "R=%x[%x] trying to return %d credits to old session\n", ··· 358 358 spin_unlock(&server->req_lock); 359 359 wake_up(&server->request_q); 360 360 361 - trace_smb3_adj_credits(server->CurrentMid, 361 + trace_smb3_adj_credits(server->current_mid, 362 362 server->conn_id, server->hostname, scredits, 363 363 credits->value - new_val, in_flight); 364 364 cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n", ··· 374 374 { 375 375 __u64 mid; 376 376 /* for SMB2 we need the current value */ 377 - spin_lock(&server->mid_lock); 378 - mid = server->CurrentMid++; 379 - 
spin_unlock(&server->mid_lock); 377 + spin_lock(&server->mid_counter_lock); 378 + mid = server->current_mid++; 379 + spin_unlock(&server->mid_counter_lock); 380 380 return mid; 381 381 } 382 382 383 383 static void 384 384 smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val) 385 385 { 386 - spin_lock(&server->mid_lock); 387 - if (server->CurrentMid >= val) 388 - server->CurrentMid -= val; 389 - spin_unlock(&server->mid_lock); 386 + spin_lock(&server->mid_counter_lock); 387 + if (server->current_mid >= val) 388 + server->current_mid -= val; 389 + spin_unlock(&server->mid_counter_lock); 390 390 } 391 391 392 392 static struct mid_q_entry * ··· 401 401 return NULL; 402 402 } 403 403 404 - spin_lock(&server->mid_lock); 404 + spin_lock(&server->mid_queue_lock); 405 405 list_for_each_entry(mid, &server->pending_mid_q, qhead) { 406 406 if ((mid->mid == wire_mid) && 407 407 (mid->mid_state == MID_REQUEST_SUBMITTED) && ··· 409 409 kref_get(&mid->refcount); 410 410 if (dequeue) { 411 411 list_del_init(&mid->qhead); 412 - mid->mid_flags |= MID_DELETED; 412 + mid->deleted_from_q = true; 413 413 } 414 - spin_unlock(&server->mid_lock); 414 + spin_unlock(&server->mid_queue_lock); 415 415 return mid; 416 416 } 417 417 } 418 - spin_unlock(&server->mid_lock); 418 + spin_unlock(&server->mid_queue_lock); 419 419 return NULL; 420 420 } 421 421 ··· 460 460 { 461 461 int rc; 462 462 463 - spin_lock(&server->mid_lock); 464 - server->CurrentMid = 0; 465 - spin_unlock(&server->mid_lock); 463 + spin_lock(&server->mid_counter_lock); 464 + server->current_mid = 0; 465 + spin_unlock(&server->mid_counter_lock); 466 466 rc = SMB2_negotiate(xid, ses, server); 467 467 return rc; 468 468 } ··· 2498 2498 spin_unlock(&server->req_lock); 2499 2499 wake_up(&server->request_q); 2500 2500 2501 - trace_smb3_pend_credits(server->CurrentMid, 2501 + trace_smb3_pend_credits(server->current_mid, 2502 2502 server->conn_id, server->hostname, scredits, 2503 2503 le16_to_cpu(shdr->CreditRequest), in_flight); 2504 2504 cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n", ··· 4809 4809 } else { 4810 4810 spin_lock(&dw->server->srv_lock); 4811 4811 if (dw->server->tcpStatus == CifsNeedReconnect) { 4812 - spin_lock(&dw->server->mid_lock); 4812 + spin_lock(&dw->server->mid_queue_lock); 4813 4813 mid->mid_state = MID_RETRY_NEEDED; 4814 - spin_unlock(&dw->server->mid_lock); 4814 + spin_unlock(&dw->server->mid_queue_lock); 4815 4815 spin_unlock(&dw->server->srv_lock); 4816 4816 mid->callback(mid); 4817 4817 } else { 4818 - spin_lock(&dw->server->mid_lock); 4818 + spin_lock(&dw->server->mid_queue_lock); 4819 4819 mid->mid_state = MID_REQUEST_SUBMITTED; 4820 - mid->mid_flags &= ~(MID_DELETED); 4820 + mid->deleted_from_q = false; 4821 4821 list_add_tail(&mid->qhead, 4822 4822 &dw->server->pending_mid_q); 4823 - spin_unlock(&dw->server->mid_lock); 4823 + spin_unlock(&dw->server->mid_queue_lock); 4824 4824 spin_unlock(&dw->server->srv_lock); 4825 4825 } 4826 4826 } ··· 5260 5260 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { 5261 5261 rc = cifs_sfu_make_node(xid, inode, dentry, tcon, 5262 5262 full_path, mode, dev); 5263 - } else if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS) 5264 - || (tcon->posix_extensions)) { 5263 + } else if (CIFS_REPARSE_SUPPORT(tcon)) { 5265 5264 rc = mknod_reparse(xid, inode, dentry, tcon, 5266 - full_path, mode, dev); 5265 + full_path, mode, dev); 5267 5266 } 5268 5267 return rc; 5269 5268 }
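smb2_get_next_mid() and smb2_revert_current_mid() now serialize on mid_counter_lock alone. The revert path matters for compound requests, which draw several consecutive mids before anything reaches the wire; if assembling the chain fails partway, the unused mids are handed back. A hedged sketch of that pairing: build_one() is an invented failure hook, and the two ops calls are assumed to be wired to the static functions above via the server's ops table, as elsewhere in this client:

static int example_compound_setup(struct TCP_Server_Info *server,
				  unsigned int num_rqst)
{
	unsigned int i;

	for (i = 0; i < num_rqst; i++) {
		__u64 mid = server->ops->get_next_mid(server);

		if (build_one(server, mid)) {	/* invented failure hook */
			/* give back this mid plus the i drawn before it */
			server->ops->revert_current_mid(server, i + 1);
			return -EAGAIN;
		}
	}
	return 0;
}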
+2 -2
fs/smb/client/smb2transport.c
··· 840 840 *mid = smb2_mid_entry_alloc(shdr, server); 841 841 if (*mid == NULL) 842 842 return -ENOMEM; 843 - spin_lock(&server->mid_lock); 843 + spin_lock(&server->mid_queue_lock); 844 844 list_add_tail(&(*mid)->qhead, &server->pending_mid_q); 845 - spin_unlock(&server->mid_lock); 845 + spin_unlock(&server->mid_queue_lock); 846 846 847 847 return 0; 848 848 }
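Taken together with the smb2ops.c hunks, request setup now touches the two new locks strictly in sequence and never nested: mid_counter_lock inside get_next_mid() while the mid value is drawn, then mid_queue_lock while the entry joins pending_mid_q, as above. Condensed, with make_mid() invented for brevity:

/* condensed locking order, not the literal call sequence */
__u64 wire_mid = server->ops->get_next_mid(server); /* mid_counter_lock inside */
struct mid_q_entry *mid = make_mid(wire_mid);	/* invented helper */

spin_lock(&server->mid_queue_lock);		/* then, separately, the queue */
list_add_tail(&mid->qhead, &server->pending_mid_q);
spin_unlock(&server->mid_queue_lock);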
+222 -243
fs/smb/client/smbdirect.c
··· 13 13 #include "cifsproto.h" 14 14 #include "smb2proto.h" 15 15 16 - static struct smbd_response *get_empty_queue_buffer( 17 - struct smbd_connection *info); 18 - static struct smbd_response *get_receive_buffer( 16 + static struct smbdirect_recv_io *get_receive_buffer( 19 17 struct smbd_connection *info); 20 18 static void put_receive_buffer( 21 19 struct smbd_connection *info, 22 - struct smbd_response *response); 20 + struct smbdirect_recv_io *response); 23 21 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf); 24 22 static void destroy_receive_buffers(struct smbd_connection *info); 25 23 26 - static void put_empty_packet( 27 - struct smbd_connection *info, struct smbd_response *response); 28 24 static void enqueue_reassembly( 29 25 struct smbd_connection *info, 30 - struct smbd_response *response, int data_length); 31 - static struct smbd_response *_get_first_reassembly( 26 + struct smbdirect_recv_io *response, int data_length); 27 + static struct smbdirect_recv_io *_get_first_reassembly( 32 28 struct smbd_connection *info); 33 29 34 30 static int smbd_post_recv( 35 31 struct smbd_connection *info, 36 - struct smbd_response *response); 32 + struct smbdirect_recv_io *response); 37 33 38 34 static int smbd_post_send_empty(struct smbd_connection *info); 39 35 ··· 178 182 { 179 183 struct smbd_connection *info = id->context; 180 184 struct smbdirect_socket *sc = &info->socket; 185 + const char *event_name = rdma_event_msg(event->event); 181 186 182 - log_rdma_event(INFO, "event=%d status=%d\n", 183 - event->event, event->status); 187 + log_rdma_event(INFO, "event=%s status=%d\n", 188 + event_name, event->status); 184 189 185 190 switch (event->event) { 186 191 case RDMA_CM_EVENT_ADDR_RESOLVED: ··· 191 194 break; 192 195 193 196 case RDMA_CM_EVENT_ADDR_ERROR: 197 + log_rdma_event(ERR, "connecting failed event=%s\n", event_name); 194 198 info->ri_rc = -EHOSTUNREACH; 195 199 complete(&info->ri_done); 196 200 break; 197 201 198 202 case RDMA_CM_EVENT_ROUTE_ERROR: 203 + log_rdma_event(ERR, "connecting failed event=%s\n", event_name); 199 204 info->ri_rc = -ENETUNREACH; 200 205 complete(&info->ri_done); 201 206 break; 202 207 203 208 case RDMA_CM_EVENT_ESTABLISHED: 204 - log_rdma_event(INFO, "connected event=%d\n", event->event); 209 + log_rdma_event(INFO, "connected event=%s\n", event_name); 205 210 sc->status = SMBDIRECT_SOCKET_CONNECTED; 206 - wake_up_interruptible(&info->conn_wait); 211 + wake_up_interruptible(&info->status_wait); 207 212 break; 208 213 209 214 case RDMA_CM_EVENT_CONNECT_ERROR: 210 215 case RDMA_CM_EVENT_UNREACHABLE: 211 216 case RDMA_CM_EVENT_REJECTED: 212 - log_rdma_event(INFO, "connecting failed event=%d\n", event->event); 217 + log_rdma_event(ERR, "connecting failed event=%s\n", event_name); 213 218 sc->status = SMBDIRECT_SOCKET_DISCONNECTED; 214 - wake_up_interruptible(&info->conn_wait); 219 + wake_up_interruptible(&info->status_wait); 215 220 break; 216 221 217 222 case RDMA_CM_EVENT_DEVICE_REMOVAL: 218 223 case RDMA_CM_EVENT_DISCONNECTED: 219 224 /* This happens when we fail the negotiation */ 220 225 if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) { 226 + log_rdma_event(ERR, "event=%s during negotiation\n", event_name); 221 227 sc->status = SMBDIRECT_SOCKET_DISCONNECTED; 222 - wake_up(&info->conn_wait); 228 + wake_up(&info->status_wait); 223 229 break; 224 230 } 225 231 226 232 sc->status = SMBDIRECT_SOCKET_DISCONNECTED; 227 - wake_up_interruptible(&info->disconn_wait); 228 - wake_up_interruptible(&info->wait_reassembly_queue); 233 
+ wake_up_interruptible(&info->status_wait); 234 + wake_up_interruptible(&sc->recv_io.reassembly.wait_queue); 229 235 wake_up_interruptible_all(&info->wait_send_queue); 230 236 break; 231 237 232 238 default: 239 + log_rdma_event(ERR, "unexpected event=%s status=%d\n", 240 + event_name, event->status); 233 241 break; 234 242 } 235 243 ··· 261 259 } 262 260 } 263 261 264 - static inline void *smbd_request_payload(struct smbd_request *request) 262 + static inline void *smbdirect_send_io_payload(struct smbdirect_send_io *request) 265 263 { 266 264 return (void *)request->packet; 267 265 } 268 266 269 - static inline void *smbd_response_payload(struct smbd_response *response) 267 + static inline void *smbdirect_recv_io_payload(struct smbdirect_recv_io *response) 270 268 { 271 269 return (void *)response->packet; 272 270 } ··· 275 273 static void send_done(struct ib_cq *cq, struct ib_wc *wc) 276 274 { 277 275 int i; 278 - struct smbd_request *request = 279 - container_of(wc->wr_cqe, struct smbd_request, cqe); 280 - struct smbd_connection *info = request->info; 281 - struct smbdirect_socket *sc = &info->socket; 276 + struct smbdirect_send_io *request = 277 + container_of(wc->wr_cqe, struct smbdirect_send_io, cqe); 278 + struct smbdirect_socket *sc = request->socket; 279 + struct smbd_connection *info = 280 + container_of(sc, struct smbd_connection, socket); 282 281 283 - log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n", 282 + log_rdma_send(INFO, "smbdirect_send_io 0x%p completed wc->status=%d\n", 284 283 request, wc->status); 285 - 286 - if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { 287 - log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", 288 - wc->status, wc->opcode); 289 - smbd_disconnect_rdma_connection(request->info); 290 - } 291 284 292 285 for (i = 0; i < request->num_sge; i++) 293 286 ib_dma_unmap_single(sc->ib.dev, ··· 290 293 request->sge[i].length, 291 294 DMA_TO_DEVICE); 292 295 293 - if (atomic_dec_and_test(&request->info->send_pending)) 294 - wake_up(&request->info->wait_send_pending); 296 + if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { 297 + log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", 298 + wc->status, wc->opcode); 299 + mempool_free(request, sc->send_io.mem.pool); 300 + smbd_disconnect_rdma_connection(info); 301 + return; 302 + } 295 303 296 - wake_up(&request->info->wait_post_send); 304 + if (atomic_dec_and_test(&info->send_pending)) 305 + wake_up(&info->wait_send_pending); 297 306 298 - mempool_free(request, request->info->request_mempool); 307 + wake_up(&info->wait_post_send); 308 + 309 + mempool_free(request, sc->send_io.mem.pool); 299 310 } 300 311 301 312 static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp) ··· 322 317 * return value: true if negotiation is a success, false if failed 323 318 */ 324 319 static bool process_negotiation_response( 325 - struct smbd_response *response, int packet_length) 320 + struct smbdirect_recv_io *response, int packet_length) 326 321 { 327 - struct smbd_connection *info = response->info; 328 - struct smbdirect_socket *sc = &info->socket; 322 + struct smbdirect_socket *sc = response->socket; 323 + struct smbd_connection *info = 324 + container_of(sc, struct smbd_connection, socket); 329 325 struct smbdirect_socket_parameters *sp = &sc->parameters; 330 - struct smbdirect_negotiate_resp *packet = smbd_response_payload(response); 326 + struct smbdirect_negotiate_resp *packet = smbdirect_recv_io_payload(response); 331 327 332 328 if (packet_length < 
sizeof(struct smbdirect_negotiate_resp)) { 333 329 log_rdma_event(ERR, ··· 391 385 info->max_frmr_depth * PAGE_SIZE); 392 386 info->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE; 393 387 388 + sc->recv_io.expected = SMBDIRECT_EXPECT_DATA_TRANSFER; 394 389 return true; 395 390 } 396 391 397 392 static void smbd_post_send_credits(struct work_struct *work) 398 393 { 399 394 int ret = 0; 400 - int use_receive_queue = 1; 401 395 int rc; 402 - struct smbd_response *response; 396 + struct smbdirect_recv_io *response; 403 397 struct smbd_connection *info = 404 398 container_of(work, struct smbd_connection, 405 399 post_send_credits_work); ··· 413 407 if (info->receive_credit_target > 414 408 atomic_read(&info->receive_credits)) { 415 409 while (true) { 416 - if (use_receive_queue) 417 - response = get_receive_buffer(info); 418 - else 419 - response = get_empty_queue_buffer(info); 420 - if (!response) { 421 - /* now switch to empty packet queue */ 422 - if (use_receive_queue) { 423 - use_receive_queue = 0; 424 - continue; 425 - } else 426 - break; 427 - } 410 + response = get_receive_buffer(info); 411 + if (!response) 412 + break; 428 413 429 - response->type = SMBD_TRANSFER_DATA; 430 414 response->first_segment = false; 431 415 rc = smbd_post_recv(info, response); 432 416 if (rc) { ··· 450 454 static void recv_done(struct ib_cq *cq, struct ib_wc *wc) 451 455 { 452 456 struct smbdirect_data_transfer *data_transfer; 453 - struct smbd_response *response = 454 - container_of(wc->wr_cqe, struct smbd_response, cqe); 455 - struct smbd_connection *info = response->info; 457 + struct smbdirect_recv_io *response = 458 + container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe); 459 + struct smbdirect_socket *sc = response->socket; 460 + struct smbd_connection *info = 461 + container_of(sc, struct smbd_connection, socket); 456 462 int data_length = 0; 457 463 458 464 log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n", 459 - response, response->type, wc->status, wc->opcode, 465 + response, sc->recv_io.expected, wc->status, wc->opcode, 460 466 wc->byte_len, wc->pkey_index); 461 467 462 468 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) { 463 469 log_rdma_recv(INFO, "wc->status=%d opcode=%d\n", 464 470 wc->status, wc->opcode); 465 - smbd_disconnect_rdma_connection(info); 466 471 goto error; 467 472 } 468 473 ··· 473 476 response->sge.length, 474 477 DMA_FROM_DEVICE); 475 478 476 - switch (response->type) { 479 + switch (sc->recv_io.expected) { 477 480 /* SMBD negotiation response */ 478 - case SMBD_NEGOTIATE_RESP: 479 - dump_smbdirect_negotiate_resp(smbd_response_payload(response)); 480 - info->full_packet_received = true; 481 + case SMBDIRECT_EXPECT_NEGOTIATE_REP: 482 + dump_smbdirect_negotiate_resp(smbdirect_recv_io_payload(response)); 483 + sc->recv_io.reassembly.full_packet_received = true; 481 484 info->negotiate_done = 482 485 process_negotiation_response(response, wc->byte_len); 486 + put_receive_buffer(info, response); 483 487 complete(&info->negotiate_completion); 484 - break; 488 + return; 485 489 486 490 /* SMBD data transfer packet */ 487 - case SMBD_TRANSFER_DATA: 488 - data_transfer = smbd_response_payload(response); 491 + case SMBDIRECT_EXPECT_DATA_TRANSFER: 492 + data_transfer = smbdirect_recv_io_payload(response); 489 493 data_length = le32_to_cpu(data_transfer->data_length); 490 494 491 - /* 492 - * If this is a packet with data playload place the data in 493 - * reassembly queue and wake up the reading thread 494 - */ 495 495 
if (data_length) { 496 - if (info->full_packet_received) 496 + if (sc->recv_io.reassembly.full_packet_received) 497 497 response->first_segment = true; 498 498 499 499 if (le32_to_cpu(data_transfer->remaining_data_length)) 500 - info->full_packet_received = false; 500 + sc->recv_io.reassembly.full_packet_received = false; 501 501 else 502 - info->full_packet_received = true; 503 - 504 - enqueue_reassembly( 505 - info, 506 - response, 507 - data_length); 508 - } else 509 - put_empty_packet(info, response); 510 - 511 - if (data_length) 512 - wake_up_interruptible(&info->wait_reassembly_queue); 502 + sc->recv_io.reassembly.full_packet_received = true; 503 + } 513 504 514 505 atomic_dec(&info->receive_credits); 515 506 info->receive_credit_target = ··· 525 540 info->keep_alive_requested = KEEP_ALIVE_PENDING; 526 541 } 527 542 543 + /* 544 + * If this is a packet with data payload place the data in 545 + * reassembly queue and wake up the reading thread 546 + */ 547 + if (data_length) { 548 + enqueue_reassembly(info, response, data_length); 549 + wake_up_interruptible(&sc->recv_io.reassembly.wait_queue); 550 + } else 551 + put_receive_buffer(info, response); 552 + 528 553 return; 529 554 530 - default: 531 - log_rdma_recv(ERR, 532 - "unexpected response type=%d\n", response->type); 555 + case SMBDIRECT_EXPECT_NEGOTIATE_REQ: 556 + /* Only server... */ 557 + break; 533 558 } 534 559 560 + /* 561 + * This is an internal error! 562 + */ 563 + log_rdma_recv(ERR, "unexpected response type=%d\n", sc->recv_io.expected); 564 + WARN_ON_ONCE(sc->recv_io.expected != SMBDIRECT_EXPECT_DATA_TRANSFER); 535 565 error: 536 566 put_receive_buffer(info, response); 567 + smbd_disconnect_rdma_connection(info); 537 568 } 538 569 539 570 static struct rdma_cm_id *smbd_create_id( ··· 695 694 struct smbdirect_socket_parameters *sp = &sc->parameters; 696 695 struct ib_send_wr send_wr; 697 696 int rc = -ENOMEM; 698 - struct smbd_request *request; 697 + struct smbdirect_send_io *request; 699 698 struct smbdirect_negotiate_req *packet; 700 699 701 - request = mempool_alloc(info->request_mempool, GFP_KERNEL); 700 + request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL); 702 701 if (!request) 703 702 return rc; 704 703 705 - request->info = info; 704 + request->socket = sc; 706 705 707 - packet = smbd_request_payload(request); 706 + packet = smbdirect_send_io_payload(request); 708 707 packet->min_version = cpu_to_le16(SMBDIRECT_V1); 709 708 packet->max_version = cpu_to_le16(SMBDIRECT_V1); 710 709 packet->reserved = 0; ··· 757 756 smbd_disconnect_rdma_connection(info); 758 757 759 758 dma_mapping_failed: 760 - mempool_free(request, info->request_mempool); 759 + mempool_free(request, sc->send_io.mem.pool); 761 760 return rc; 762 761 } 763 762 ··· 801 800 802 801 /* Post the send request */ 803 802 static int smbd_post_send(struct smbd_connection *info, 804 - struct smbd_request *request) 803 + struct smbdirect_send_io *request) 805 804 { 806 805 struct smbdirect_socket *sc = &info->socket; 807 806 struct smbdirect_socket_parameters *sp = &sc->parameters; ··· 850 849 int i, rc; 851 850 int header_length; 852 851 int data_length; 853 - struct smbd_request *request; 852 + struct smbdirect_send_io *request; 854 853 struct smbdirect_data_transfer *packet; 855 854 int new_credits = 0; 856 855 ··· 889 888 goto wait_send_queue; 890 889 } 891 890 892 - request = mempool_alloc(info->request_mempool, GFP_KERNEL); 891 + request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL); 893 892 if (!request) { 894 893 rc = -ENOMEM; 895 894 goto 
err_alloc; 896 895 } 897 896 898 - request->info = info; 897 + request->socket = sc; 899 898 memset(request->sge, 0, sizeof(request->sge)); 900 899 901 900 /* Fill in the data payload to find out how much data we can add */ 902 901 if (iter) { 903 902 struct smb_extract_to_rdma extract = { 904 903 .nr_sge = 1, 905 - .max_sge = SMBDIRECT_MAX_SEND_SGE, 904 + .max_sge = SMBDIRECT_SEND_IO_MAX_SGE, 906 905 .sge = request->sge, 907 906 .device = sc->ib.dev, 908 907 .local_dma_lkey = sc->ib.pd->local_dma_lkey, ··· 924 923 } 925 924 926 925 /* Fill in the packet header */ 927 - packet = smbd_request_payload(request); 926 + packet = smbdirect_send_io_payload(request); 928 927 packet->credits_requested = cpu_to_le16(sp->send_credit_target); 929 928 930 929 new_credits = manage_credits_prior_sending(info); ··· 983 982 request->sge[i].addr, 984 983 request->sge[i].length, 985 984 DMA_TO_DEVICE); 986 - mempool_free(request, info->request_mempool); 985 + mempool_free(request, sc->send_io.mem.pool); 987 986 988 987 /* roll back receive credits and credits to be offered */ 989 988 spin_lock(&info->lock_new_credits_offered); ··· 1043 1042 * The interaction is controlled by send/receive credit system 1044 1043 */ 1045 1044 static int smbd_post_recv( 1046 - struct smbd_connection *info, struct smbd_response *response) 1045 + struct smbd_connection *info, struct smbdirect_recv_io *response) 1047 1046 { 1048 1047 struct smbdirect_socket *sc = &info->socket; 1049 1048 struct smbdirect_socket_parameters *sp = &sc->parameters; ··· 1070 1069 if (rc) { 1071 1070 ib_dma_unmap_single(sc->ib.dev, response->sge.addr, 1072 1071 response->sge.length, DMA_FROM_DEVICE); 1072 + response->sge.length = 0; 1073 1073 smbd_disconnect_rdma_connection(info); 1074 1074 log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc); 1075 1075 } ··· 1081 1079 /* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */ 1082 1080 static int smbd_negotiate(struct smbd_connection *info) 1083 1081 { 1082 + struct smbdirect_socket *sc = &info->socket; 1084 1083 int rc; 1085 - struct smbd_response *response = get_receive_buffer(info); 1084 + struct smbdirect_recv_io *response = get_receive_buffer(info); 1086 1085 1087 - response->type = SMBD_NEGOTIATE_RESP; 1086 + sc->recv_io.expected = SMBDIRECT_EXPECT_NEGOTIATE_REP; 1088 1087 rc = smbd_post_recv(info, response); 1089 1088 log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n", 1090 1089 rc, response->sge.addr, ··· 1116 1113 return rc; 1117 1114 } 1118 1115 1119 - static void put_empty_packet( 1120 - struct smbd_connection *info, struct smbd_response *response) 1121 - { 1122 - spin_lock(&info->empty_packet_queue_lock); 1123 - list_add_tail(&response->list, &info->empty_packet_queue); 1124 - info->count_empty_packet_queue++; 1125 - spin_unlock(&info->empty_packet_queue_lock); 1126 - 1127 - queue_work(info->workqueue, &info->post_send_credits_work); 1128 - } 1129 - 1130 1116 /* 1131 1117 * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1 1132 1118 * This is a queue for reassembling upper layer payload and present to upper ··· 1128 1136 */ 1129 1137 static void enqueue_reassembly( 1130 1138 struct smbd_connection *info, 1131 - struct smbd_response *response, 1139 + struct smbdirect_recv_io *response, 1132 1140 int data_length) 1133 1141 { 1134 - spin_lock(&info->reassembly_queue_lock); 1135 - list_add_tail(&response->list, &info->reassembly_queue); 1136 - info->reassembly_queue_length++; 1142 + struct smbdirect_socket *sc = &info->socket; 
1143 + 1144 + spin_lock(&sc->recv_io.reassembly.lock); 1145 + list_add_tail(&response->list, &sc->recv_io.reassembly.list); 1146 + sc->recv_io.reassembly.queue_length++; 1137 1147 /* 1138 1148 * Make sure reassembly_data_length is updated after list and 1139 1149 * reassembly_queue_length are updated. On the dequeue side ··· 1143 1149 * if reassembly_queue_length and list is up to date 1144 1150 */ 1145 1151 virt_wmb(); 1146 - info->reassembly_data_length += data_length; 1147 - spin_unlock(&info->reassembly_queue_lock); 1152 + sc->recv_io.reassembly.data_length += data_length; 1153 + spin_unlock(&sc->recv_io.reassembly.lock); 1148 1154 info->count_reassembly_queue++; 1149 1155 info->count_enqueue_reassembly_queue++; 1150 1156 } ··· 1154 1160 * Caller is responsible for locking 1155 1161 * return value: the first entry if any, NULL if queue is empty 1156 1162 */ 1157 - static struct smbd_response *_get_first_reassembly(struct smbd_connection *info) 1163 + static struct smbdirect_recv_io *_get_first_reassembly(struct smbd_connection *info) 1158 1164 { 1159 - struct smbd_response *ret = NULL; 1165 + struct smbdirect_socket *sc = &info->socket; 1166 + struct smbdirect_recv_io *ret = NULL; 1160 1167 1161 - if (!list_empty(&info->reassembly_queue)) { 1168 + if (!list_empty(&sc->recv_io.reassembly.list)) { 1162 1169 ret = list_first_entry( 1163 - &info->reassembly_queue, 1164 - struct smbd_response, list); 1170 + &sc->recv_io.reassembly.list, 1171 + struct smbdirect_recv_io, list); 1165 1172 } 1166 - return ret; 1167 - } 1168 - 1169 - static struct smbd_response *get_empty_queue_buffer( 1170 - struct smbd_connection *info) 1171 - { 1172 - struct smbd_response *ret = NULL; 1173 - unsigned long flags; 1174 - 1175 - spin_lock_irqsave(&info->empty_packet_queue_lock, flags); 1176 - if (!list_empty(&info->empty_packet_queue)) { 1177 - ret = list_first_entry( 1178 - &info->empty_packet_queue, 1179 - struct smbd_response, list); 1180 - list_del(&ret->list); 1181 - info->count_empty_packet_queue--; 1182 - } 1183 - spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags); 1184 - 1185 1173 return ret; 1186 1174 } 1187 1175 ··· 1173 1197 * pre-allocated in advance. 1174 1198 * return value: the receive buffer, NULL if none is available 1175 1199 */ 1176 - static struct smbd_response *get_receive_buffer(struct smbd_connection *info) 1200 + static struct smbdirect_recv_io *get_receive_buffer(struct smbd_connection *info) 1177 1201 { 1178 - struct smbd_response *ret = NULL; 1202 + struct smbdirect_socket *sc = &info->socket; 1203 + struct smbdirect_recv_io *ret = NULL; 1179 1204 unsigned long flags; 1180 1205 1181 - spin_lock_irqsave(&info->receive_queue_lock, flags); 1182 - if (!list_empty(&info->receive_queue)) { 1206 + spin_lock_irqsave(&sc->recv_io.free.lock, flags); 1207 + if (!list_empty(&sc->recv_io.free.list)) { 1183 1208 ret = list_first_entry( 1184 - &info->receive_queue, 1185 - struct smbd_response, list); 1209 + &sc->recv_io.free.list, 1210 + struct smbdirect_recv_io, list); 1186 1211 list_del(&ret->list); 1187 1212 info->count_receive_queue--; 1188 1213 info->count_get_receive_buffer++; 1189 1214 } 1190 - spin_unlock_irqrestore(&info->receive_queue_lock, flags); 1215 + spin_unlock_irqrestore(&sc->recv_io.free.lock, flags); 1191 1216 1192 1217 return ret; 1193 1218 } ··· 1200 1223 * receive buffer is returned. 
1201 1224 */ 1202 1225 static void put_receive_buffer( 1203 - struct smbd_connection *info, struct smbd_response *response) 1226 + struct smbd_connection *info, struct smbdirect_recv_io *response) 1204 1227 { 1205 1228 struct smbdirect_socket *sc = &info->socket; 1206 1229 unsigned long flags; 1207 1230 1208 - ib_dma_unmap_single(sc->ib.dev, response->sge.addr, 1209 - response->sge.length, DMA_FROM_DEVICE); 1231 + if (likely(response->sge.length != 0)) { 1232 + ib_dma_unmap_single(sc->ib.dev, 1233 + response->sge.addr, 1234 + response->sge.length, 1235 + DMA_FROM_DEVICE); 1236 + response->sge.length = 0; 1237 + } 1210 1238 1211 - spin_lock_irqsave(&info->receive_queue_lock, flags); 1212 - list_add_tail(&response->list, &info->receive_queue); 1239 + spin_lock_irqsave(&sc->recv_io.free.lock, flags); 1240 + list_add_tail(&response->list, &sc->recv_io.free.list); 1213 1241 info->count_receive_queue++; 1214 1242 info->count_put_receive_buffer++; 1215 - spin_unlock_irqrestore(&info->receive_queue_lock, flags); 1243 + spin_unlock_irqrestore(&sc->recv_io.free.lock, flags); 1216 1244 1217 1245 queue_work(info->workqueue, &info->post_send_credits_work); 1218 1246 } ··· 1225 1243 /* Preallocate all receive buffer on transport establishment */ 1226 1244 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf) 1227 1245 { 1246 + struct smbdirect_socket *sc = &info->socket; 1247 + struct smbdirect_recv_io *response; 1228 1248 int i; 1229 - struct smbd_response *response; 1230 1249 1231 - INIT_LIST_HEAD(&info->reassembly_queue); 1232 - spin_lock_init(&info->reassembly_queue_lock); 1233 - info->reassembly_data_length = 0; 1234 - info->reassembly_queue_length = 0; 1250 + INIT_LIST_HEAD(&sc->recv_io.reassembly.list); 1251 + spin_lock_init(&sc->recv_io.reassembly.lock); 1252 + sc->recv_io.reassembly.data_length = 0; 1253 + sc->recv_io.reassembly.queue_length = 0; 1235 1254 1236 - INIT_LIST_HEAD(&info->receive_queue); 1237 - spin_lock_init(&info->receive_queue_lock); 1255 + INIT_LIST_HEAD(&sc->recv_io.free.list); 1256 + spin_lock_init(&sc->recv_io.free.lock); 1238 1257 info->count_receive_queue = 0; 1239 - 1240 - INIT_LIST_HEAD(&info->empty_packet_queue); 1241 - spin_lock_init(&info->empty_packet_queue_lock); 1242 - info->count_empty_packet_queue = 0; 1243 1258 1244 1259 init_waitqueue_head(&info->wait_receive_queues); 1245 1260 1246 1261 for (i = 0; i < num_buf; i++) { 1247 - response = mempool_alloc(info->response_mempool, GFP_KERNEL); 1262 + response = mempool_alloc(sc->recv_io.mem.pool, GFP_KERNEL); 1248 1263 if (!response) 1249 1264 goto allocate_failed; 1250 1265 1251 - response->info = info; 1252 - list_add_tail(&response->list, &info->receive_queue); 1266 + response->socket = sc; 1267 + response->sge.length = 0; 1268 + list_add_tail(&response->list, &sc->recv_io.free.list); 1253 1269 info->count_receive_queue++; 1254 1270 } 1255 1271 1256 1272 return 0; 1257 1273 1258 1274 allocate_failed: 1259 - while (!list_empty(&info->receive_queue)) { 1275 + while (!list_empty(&sc->recv_io.free.list)) { 1260 1276 response = list_first_entry( 1261 - &info->receive_queue, 1262 - struct smbd_response, list); 1277 + &sc->recv_io.free.list, 1278 + struct smbdirect_recv_io, list); 1263 1279 list_del(&response->list); 1264 1280 info->count_receive_queue--; 1265 1281 1266 - mempool_free(response, info->response_mempool); 1282 + mempool_free(response, sc->recv_io.mem.pool); 1267 1283 } 1268 1284 return -ENOMEM; 1269 1285 } 1270 1286 1271 1287 static void destroy_receive_buffers(struct smbd_connection 
*info) 1272 1288 { 1273 - struct smbd_response *response; 1289 + struct smbdirect_socket *sc = &info->socket; 1290 + struct smbdirect_recv_io *response; 1274 1291 1275 1292 while ((response = get_receive_buffer(info))) 1276 - mempool_free(response, info->response_mempool); 1277 - 1278 - while ((response = get_empty_queue_buffer(info))) 1279 - mempool_free(response, info->response_mempool); 1293 + mempool_free(response, sc->recv_io.mem.pool); 1280 1294 } 1281 1295 1282 1296 /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */ ··· 1310 1332 struct smbd_connection *info = server->smbd_conn; 1311 1333 struct smbdirect_socket *sc; 1312 1334 struct smbdirect_socket_parameters *sp; 1313 - struct smbd_response *response; 1335 + struct smbdirect_recv_io *response; 1314 1336 unsigned long flags; 1315 1337 1316 1338 if (!info) { ··· 1325 1347 rdma_disconnect(sc->rdma.cm_id); 1326 1348 log_rdma_event(INFO, "wait for transport being disconnected\n"); 1327 1349 wait_event_interruptible( 1328 - info->disconn_wait, 1350 + info->status_wait, 1329 1351 sc->status == SMBDIRECT_SOCKET_DISCONNECTED); 1330 1352 } 1331 1353 ··· 1344 1366 /* It's not possible for upper layer to get to reassembly */ 1345 1367 log_rdma_event(INFO, "drain the reassembly queue\n"); 1346 1368 do { 1347 - spin_lock_irqsave(&info->reassembly_queue_lock, flags); 1369 + spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags); 1348 1370 response = _get_first_reassembly(info); 1349 1371 if (response) { 1350 1372 list_del(&response->list); 1351 1373 spin_unlock_irqrestore( 1352 - &info->reassembly_queue_lock, flags); 1374 + &sc->recv_io.reassembly.lock, flags); 1353 1375 put_receive_buffer(info, response); 1354 1376 } else 1355 1377 spin_unlock_irqrestore( 1356 - &info->reassembly_queue_lock, flags); 1378 + &sc->recv_io.reassembly.lock, flags); 1357 1379 } while (response); 1358 - info->reassembly_data_length = 0; 1380 + sc->recv_io.reassembly.data_length = 0; 1359 1381 1360 1382 log_rdma_event(INFO, "free receive buffers\n"); 1361 1383 wait_event(info->wait_receive_queues, 1362 - info->count_receive_queue + info->count_empty_packet_queue 1363 - == sp->recv_credit_max); 1384 + info->count_receive_queue == sp->recv_credit_max); 1364 1385 destroy_receive_buffers(info); 1365 1386 1366 1387 /* ··· 1384 1407 rdma_destroy_id(sc->rdma.cm_id); 1385 1408 1386 1409 /* free mempools */ 1387 - mempool_destroy(info->request_mempool); 1388 - kmem_cache_destroy(info->request_cache); 1410 + mempool_destroy(sc->send_io.mem.pool); 1411 + kmem_cache_destroy(sc->send_io.mem.cache); 1389 1412 1390 - mempool_destroy(info->response_mempool); 1391 - kmem_cache_destroy(info->response_cache); 1413 + mempool_destroy(sc->recv_io.mem.pool); 1414 + kmem_cache_destroy(sc->recv_io.mem.cache); 1392 1415 1393 1416 sc->status = SMBDIRECT_SOCKET_DESTROYED; 1394 1417 ··· 1436 1459 1437 1460 static void destroy_caches_and_workqueue(struct smbd_connection *info) 1438 1461 { 1462 + struct smbdirect_socket *sc = &info->socket; 1463 + 1439 1464 destroy_receive_buffers(info); 1440 1465 destroy_workqueue(info->workqueue); 1441 - mempool_destroy(info->response_mempool); 1442 - kmem_cache_destroy(info->response_cache); 1443 - mempool_destroy(info->request_mempool); 1444 - kmem_cache_destroy(info->request_cache); 1466 + mempool_destroy(sc->recv_io.mem.pool); 1467 + kmem_cache_destroy(sc->recv_io.mem.cache); 1468 + mempool_destroy(sc->send_io.mem.pool); 1469 + kmem_cache_destroy(sc->send_io.mem.cache); 1445 1470 } 1446 1471 1447 1472 #define MAX_NAME_LEN 80 ··· 1457 1478 if 
(WARN_ON_ONCE(sp->max_recv_size < sizeof(struct smbdirect_data_transfer))) 1458 1479 return -ENOMEM; 1459 1480 1460 - scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info); 1461 - info->request_cache = 1481 + scnprintf(name, MAX_NAME_LEN, "smbdirect_send_io_%p", info); 1482 + sc->send_io.mem.cache = 1462 1483 kmem_cache_create( 1463 1484 name, 1464 - sizeof(struct smbd_request) + 1485 + sizeof(struct smbdirect_send_io) + 1465 1486 sizeof(struct smbdirect_data_transfer), 1466 1487 0, SLAB_HWCACHE_ALIGN, NULL); 1467 - if (!info->request_cache) 1488 + if (!sc->send_io.mem.cache) 1468 1489 return -ENOMEM; 1469 1490 1470 - info->request_mempool = 1491 + sc->send_io.mem.pool = 1471 1492 mempool_create(sp->send_credit_target, mempool_alloc_slab, 1472 - mempool_free_slab, info->request_cache); 1473 - if (!info->request_mempool) 1493 + mempool_free_slab, sc->send_io.mem.cache); 1494 + if (!sc->send_io.mem.pool) 1474 1495 goto out1; 1475 1496 1476 - scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info); 1497 + scnprintf(name, MAX_NAME_LEN, "smbdirect_recv_io_%p", info); 1477 1498 1478 1499 struct kmem_cache_args response_args = { 1479 - .align = __alignof__(struct smbd_response), 1480 - .useroffset = (offsetof(struct smbd_response, packet) + 1500 + .align = __alignof__(struct smbdirect_recv_io), 1501 + .useroffset = (offsetof(struct smbdirect_recv_io, packet) + 1481 1502 sizeof(struct smbdirect_data_transfer)), 1482 1503 .usersize = sp->max_recv_size - sizeof(struct smbdirect_data_transfer), 1483 1504 }; 1484 - info->response_cache = 1505 + sc->recv_io.mem.cache = 1485 1506 kmem_cache_create(name, 1486 - sizeof(struct smbd_response) + sp->max_recv_size, 1507 + sizeof(struct smbdirect_recv_io) + sp->max_recv_size, 1487 1508 &response_args, SLAB_HWCACHE_ALIGN); 1488 - if (!info->response_cache) 1509 + if (!sc->recv_io.mem.cache) 1489 1510 goto out2; 1490 1511 1491 - info->response_mempool = 1512 + sc->recv_io.mem.pool = 1492 1513 mempool_create(sp->recv_credit_max, mempool_alloc_slab, 1493 - mempool_free_slab, info->response_cache); 1494 - if (!info->response_mempool) 1514 + mempool_free_slab, sc->recv_io.mem.cache); 1515 + if (!sc->recv_io.mem.pool) 1495 1516 goto out3; 1496 1517 1497 1518 scnprintf(name, MAX_NAME_LEN, "smbd_%p", info); ··· 1510 1531 out5: 1511 1532 destroy_workqueue(info->workqueue); 1512 1533 out4: 1513 - mempool_destroy(info->response_mempool); 1534 + mempool_destroy(sc->recv_io.mem.pool); 1514 1535 out3: 1515 - kmem_cache_destroy(info->response_cache); 1536 + kmem_cache_destroy(sc->recv_io.mem.cache); 1516 1537 out2: 1517 - mempool_destroy(info->request_mempool); 1538 + mempool_destroy(sc->send_io.mem.pool); 1518 1539 out1: 1519 - kmem_cache_destroy(info->request_cache); 1540 + kmem_cache_destroy(sc->send_io.mem.cache); 1520 1541 return -ENOMEM; 1521 1542 } 1522 1543 ··· 1572 1593 sp->max_recv_size = smbd_max_receive_size; 1573 1594 sp->keepalive_interval_msec = smbd_keep_alive_interval * 1000; 1574 1595 1575 - if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE || 1576 - sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) { 1596 + if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_SEND_IO_MAX_SGE || 1597 + sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_RECV_IO_MAX_SGE) { 1577 1598 log_rdma_event(ERR, 1578 1599 "device %.*s max_send_sge/max_recv_sge = %d/%d too small\n", 1579 1600 IB_DEVICE_NAME_MAX, ··· 1604 1625 qp_attr.qp_context = info; 1605 1626 qp_attr.cap.max_send_wr = sp->send_credit_target; 1606 1627 qp_attr.cap.max_recv_wr = sp->recv_credit_max; 1607 - 
qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE; 1608 - qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE; 1628 + qp_attr.cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE; 1629 + qp_attr.cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE; 1609 1630 qp_attr.cap.max_inline_data = 0; 1610 1631 qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 1611 1632 qp_attr.qp_type = IB_QPT_RC; ··· 1650 1671 log_rdma_event(INFO, "connecting to IP %pI4 port %d\n", 1651 1672 &addr_in->sin_addr, port); 1652 1673 1653 - init_waitqueue_head(&info->conn_wait); 1654 - init_waitqueue_head(&info->disconn_wait); 1655 - init_waitqueue_head(&info->wait_reassembly_queue); 1674 + init_waitqueue_head(&info->status_wait); 1675 + init_waitqueue_head(&sc->recv_io.reassembly.wait_queue); 1656 1676 rc = rdma_connect(sc->rdma.cm_id, &conn_param); 1657 1677 if (rc) { 1658 1678 log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc); 1659 1679 goto rdma_connect_failed; 1660 1680 } 1661 1681 1662 - wait_event_interruptible( 1663 - info->conn_wait, sc->status != SMBDIRECT_SOCKET_CONNECTING); 1682 + wait_event_interruptible_timeout( 1683 + info->status_wait, 1684 + sc->status != SMBDIRECT_SOCKET_CONNECTING, 1685 + msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); 1664 1686 1665 1687 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { 1666 1688 log_rdma_event(ERR, "rdma_connect failed port=%d\n", port); ··· 1715 1735 cancel_delayed_work_sync(&info->idle_timer_work); 1716 1736 destroy_caches_and_workqueue(info); 1717 1737 sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED; 1718 - init_waitqueue_head(&info->conn_wait); 1719 1738 rdma_disconnect(sc->rdma.cm_id); 1720 - wait_event(info->conn_wait, 1739 + wait_event(info->status_wait, 1721 1740 sc->status == SMBDIRECT_SOCKET_DISCONNECTED); 1722 1741 1723 1742 allocate_cache_failed: ··· 1773 1794 int smbd_recv(struct smbd_connection *info, struct msghdr *msg) 1774 1795 { 1775 1796 struct smbdirect_socket *sc = &info->socket; 1776 - struct smbd_response *response; 1797 + struct smbdirect_recv_io *response; 1777 1798 struct smbdirect_data_transfer *data_transfer; 1778 1799 size_t size = iov_iter_count(&msg->msg_iter); 1779 1800 int to_copy, to_read, data_read, offset; ··· 1789 1810 * the only one reading from the front of the queue. 
The transport 1790 1811 * may add more entries to the back of the queue at the same time 1791 1812 */ 1792 - log_read(INFO, "size=%zd info->reassembly_data_length=%d\n", size, 1793 - info->reassembly_data_length); 1794 - if (info->reassembly_data_length >= size) { 1813 + log_read(INFO, "size=%zd sc->recv_io.reassembly.data_length=%d\n", size, 1814 + sc->recv_io.reassembly.data_length); 1815 + if (sc->recv_io.reassembly.data_length >= size) { 1795 1816 int queue_length; 1796 1817 int queue_removed = 0; 1797 1818 ··· 1803 1824 * updated in SOFTIRQ as more data is received 1804 1825 */ 1805 1826 virt_rmb(); 1806 - queue_length = info->reassembly_queue_length; 1827 + queue_length = sc->recv_io.reassembly.queue_length; 1807 1828 data_read = 0; 1808 1829 to_read = size; 1809 - offset = info->first_entry_offset; 1830 + offset = sc->recv_io.reassembly.first_entry_offset; 1810 1831 while (data_read < size) { 1811 1832 response = _get_first_reassembly(info); 1812 - data_transfer = smbd_response_payload(response); 1833 + data_transfer = smbdirect_recv_io_payload(response); 1813 1834 data_length = le32_to_cpu(data_transfer->data_length); 1814 1835 remaining_data_length = 1815 1836 le32_to_cpu( ··· 1854 1875 list_del(&response->list); 1855 1876 else { 1856 1877 spin_lock_irq( 1857 - &info->reassembly_queue_lock); 1878 + &sc->recv_io.reassembly.lock); 1858 1879 list_del(&response->list); 1859 1880 spin_unlock_irq( 1860 - &info->reassembly_queue_lock); 1881 + &sc->recv_io.reassembly.lock); 1861 1882 } 1862 1883 queue_removed++; 1863 1884 info->count_reassembly_queue--; ··· 1876 1897 to_read, data_read, offset); 1877 1898 } 1878 1899 1879 - spin_lock_irq(&info->reassembly_queue_lock); 1880 - info->reassembly_data_length -= data_read; 1881 - info->reassembly_queue_length -= queue_removed; 1882 - spin_unlock_irq(&info->reassembly_queue_lock); 1900 + spin_lock_irq(&sc->recv_io.reassembly.lock); 1901 + sc->recv_io.reassembly.data_length -= data_read; 1902 + sc->recv_io.reassembly.queue_length -= queue_removed; 1903 + spin_unlock_irq(&sc->recv_io.reassembly.lock); 1883 1904 1884 - info->first_entry_offset = offset; 1905 + sc->recv_io.reassembly.first_entry_offset = offset; 1885 1906 log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n", 1886 - data_read, info->reassembly_data_length, 1887 - info->first_entry_offset); 1907 + data_read, sc->recv_io.reassembly.data_length, 1908 + sc->recv_io.reassembly.first_entry_offset); 1888 1909 read_rfc1002_done: 1889 1910 return data_read; 1890 1911 } 1891 1912 1892 1913 log_read(INFO, "wait_event on more data\n"); 1893 1914 rc = wait_event_interruptible( 1894 - info->wait_reassembly_queue, 1895 - info->reassembly_data_length >= size || 1915 + sc->recv_io.reassembly.wait_queue, 1916 + sc->recv_io.reassembly.data_length >= size || 1896 1917 sc->status != SMBDIRECT_SOCKET_CONNECTED); 1897 1918 /* Don't return any data if interrupted */ 1898 1919 if (rc)
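The client put path above now unmaps a receive buffer at most once, using a zeroed sge.length as the "already unmapped" marker, and all free buffers live on the socket's recv_io.free list. A minimal sketch of that idempotent-unmap idiom (my_recv_put() is a hypothetical helper, not part of the patch):

static void my_recv_put(struct smbdirect_socket *sc,
                        struct smbdirect_recv_io *io)
{
        unsigned long flags;

        /* sge.length == 0 means "no DMA mapping held" */
        if (io->sge.length != 0) {
                ib_dma_unmap_single(sc->ib.dev, io->sge.addr,
                                    io->sge.length, DMA_FROM_DEVICE);
                io->sge.length = 0;
        }

        spin_lock_irqsave(&sc->recv_io.free.lock, flags);
        list_add_tail(&io->list, &sc->recv_io.free.list);
        spin_unlock_irqrestore(&sc->recv_io.free.lock, flags);
}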
+1 -91
fs/smb/client/smbdirect.h
··· 33 33 KEEP_ALIVE_SENT, 34 34 }; 35 35 36 - enum smbd_connection_status { 37 - SMBD_CREATED, 38 - SMBD_CONNECTING, 39 - SMBD_CONNECTED, 40 - SMBD_NEGOTIATE_FAILED, 41 - SMBD_DISCONNECTING, 42 - SMBD_DISCONNECTED, 43 - SMBD_DESTROYED 44 - }; 45 - 46 36 /* 47 37 * The context for the SMBDirect transport 48 38 * Everything related to the transport is here. It has several logical parts ··· 47 57 48 58 int ri_rc; 49 59 struct completion ri_done; 50 - wait_queue_head_t conn_wait; 51 - wait_queue_head_t disconn_wait; 60 + wait_queue_head_t status_wait; 52 61 53 62 struct completion negotiate_completion; 54 63 bool negotiate_done; ··· 64 75 atomic_t send_credits; 65 76 atomic_t receive_credits; 66 77 int receive_credit_target; 67 - int fragment_reassembly_remaining; 68 78 69 79 /* Memory registrations */ 70 80 /* Maximum number of RDMA read/write outstanding on this connection */ ··· 94 106 wait_queue_head_t wait_post_send; 95 107 96 108 /* Receive queue */ 97 - struct list_head receive_queue; 98 109 int count_receive_queue; 99 - spinlock_t receive_queue_lock; 100 - 101 - struct list_head empty_packet_queue; 102 - int count_empty_packet_queue; 103 - spinlock_t empty_packet_queue_lock; 104 - 105 110 wait_queue_head_t wait_receive_queues; 106 - 107 - /* Reassembly queue */ 108 - struct list_head reassembly_queue; 109 - spinlock_t reassembly_queue_lock; 110 - wait_queue_head_t wait_reassembly_queue; 111 - 112 - /* total data length of reassembly queue */ 113 - int reassembly_data_length; 114 - int reassembly_queue_length; 115 - /* the offset to first buffer in reassembly queue */ 116 - int first_entry_offset; 117 111 118 112 bool send_immediate; 119 113 120 114 wait_queue_head_t wait_send_queue; 121 115 122 - /* 123 - * Indicate if we have received a full packet on the connection 124 - * This is used to identify the first SMBD packet of a assembled 125 - * payload (SMB packet) in reassembly queue so we can return a 126 - * RFC1002 length to upper layer to indicate the length of the SMB 127 - * packet received 128 - */ 129 - bool full_packet_received; 130 - 131 116 struct workqueue_struct *workqueue; 132 117 struct delayed_work idle_timer_work; 133 - 134 - /* Memory pool for preallocating buffers */ 135 - /* request pool for RDMA send */ 136 - struct kmem_cache *request_cache; 137 - mempool_t *request_mempool; 138 - 139 - /* response pool for RDMA receive */ 140 - struct kmem_cache *response_cache; 141 - mempool_t *response_mempool; 142 118 143 119 /* for debug purposes */ 144 120 unsigned int count_get_receive_buffer; ··· 111 159 unsigned int count_enqueue_reassembly_queue; 112 160 unsigned int count_dequeue_reassembly_queue; 113 161 unsigned int count_send_empty; 114 - }; 115 - 116 - enum smbd_message_type { 117 - SMBD_NEGOTIATE_RESP, 118 - SMBD_TRANSFER_DATA, 119 - }; 120 - 121 - /* Maximum number of SGEs used by smbdirect.c in any send work request */ 122 - #define SMBDIRECT_MAX_SEND_SGE 6 123 - 124 - /* The context for a SMBD request */ 125 - struct smbd_request { 126 - struct smbd_connection *info; 127 - struct ib_cqe cqe; 128 - 129 - /* the SGE entries for this work request */ 130 - struct ib_sge sge[SMBDIRECT_MAX_SEND_SGE]; 131 - int num_sge; 132 - 133 - /* SMBD packet header follows this structure */ 134 - u8 packet[]; 135 - }; 136 - 137 - /* Maximum number of SGEs used by smbdirect.c in any receive work request */ 138 - #define SMBDIRECT_MAX_RECV_SGE 1 139 - 140 - /* The context for a SMBD response */ 141 - struct smbd_response { 142 - struct smbd_connection *info; 143 - struct ib_cqe 
cqe; 144 - struct ib_sge sge; 145 - 146 - enum smbd_message_type type; 147 - 148 - /* Link to receive queue or reassembly queue */ 149 - struct list_head list; 150 - 151 - /* Indicate if this is the 1st packet of a payload */ 152 - bool first_segment; 153 - 154 - /* SMBD packet header and payload follows this structure */ 155 - u8 packet[]; 156 162 }; 157 163 158 164 /* Create a SMBDirect session */
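With the queues, counters and mempools removed from smbd_connection, readers now reach that state through the embedded smbdirect_socket declared in smbdirect_socket.h further down. A hedged sketch of the resulting access pattern (my_reassembly_has() is illustrative only; the real readers pair such a check with virt_rmb() or the reassembly lock):

static bool my_reassembly_has(struct smbd_connection *info, size_t size)
{
        struct smbdirect_socket *sc = &info->socket;

        /* unlocked snapshot, good enough for a wait condition */
        return sc->recv_io.reassembly.data_length >= (int)size;
}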
+28 -574
fs/smb/client/transport.c
··· 30 30 #include "smbdirect.h" 31 31 #include "compress.h" 32 32 33 - /* Max number of iovectors we can use off the stack when sending requests. */ 34 - #define CIFS_MAX_IOV_SIZE 8 35 - 36 33 void 37 34 cifs_wake_up_task(struct mid_q_entry *mid) 38 35 { 39 36 if (mid->mid_state == MID_RESPONSE_RECEIVED) 40 37 mid->mid_state = MID_RESPONSE_READY; 41 38 wake_up_process(mid->callback_data); 42 - } 43 - 44 - static struct mid_q_entry * 45 - alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) 46 - { 47 - struct mid_q_entry *temp; 48 - 49 - if (server == NULL) { 50 - cifs_dbg(VFS, "%s: null TCP session\n", __func__); 51 - return NULL; 52 - } 53 - 54 - temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); 55 - memset(temp, 0, sizeof(struct mid_q_entry)); 56 - kref_init(&temp->refcount); 57 - temp->mid = get_mid(smb_buffer); 58 - temp->pid = current->pid; 59 - temp->command = cpu_to_le16(smb_buffer->Command); 60 - cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command); 61 - /* easier to use jiffies */ 62 - /* when mid allocated can be before when sent */ 63 - temp->when_alloc = jiffies; 64 - temp->server = server; 65 - 66 - /* 67 - * The default is for the mid to be synchronous, so the 68 - * default callback just wakes up the current task. 69 - */ 70 - get_task_struct(current); 71 - temp->creator = current; 72 - temp->callback = cifs_wake_up_task; 73 - temp->callback_data = current; 74 - 75 - atomic_inc(&mid_count); 76 - temp->mid_state = MID_REQUEST_ALLOCATED; 77 - return temp; 78 39 } 79 40 80 41 void __release_mid(struct kref *refcount) ··· 50 89 #endif 51 90 struct TCP_Server_Info *server = midEntry->server; 52 91 53 - if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) && 92 + if (midEntry->resp_buf && (midEntry->wait_cancelled) && 54 93 (midEntry->mid_state == MID_RESPONSE_RECEIVED || 55 94 midEntry->mid_state == MID_RESPONSE_READY) && 56 95 server->ops->handle_cancelled_mid) ··· 121 160 void 122 161 delete_mid(struct mid_q_entry *mid) 123 162 { 124 - spin_lock(&mid->server->mid_lock); 125 - if (!(mid->mid_flags & MID_DELETED)) { 163 + spin_lock(&mid->server->mid_queue_lock); 164 + if (mid->deleted_from_q == false) { 126 165 list_del_init(&mid->qhead); 127 - mid->mid_flags |= MID_DELETED; 166 + mid->deleted_from_q = true; 128 167 } 129 - spin_unlock(&mid->server->mid_lock); 168 + spin_unlock(&mid->server->mid_queue_lock); 130 169 131 170 release_mid(mid); 132 171 } ··· 230 269 return buflen; 231 270 } 232 271 233 - static int 234 - __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, 235 - struct smb_rqst *rqst) 272 + int __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, 273 + struct smb_rqst *rqst) 236 274 { 237 275 int rc; 238 276 struct kvec *iov; ··· 357 397 * socket so the server throws away the partial SMB 358 398 */ 359 399 cifs_signal_cifsd_for_reconnect(server, false); 360 - trace_smb3_partial_send_reconnect(server->CurrentMid, 400 + trace_smb3_partial_send_reconnect(server->current_mid, 361 401 server->conn_id, server->hostname); 362 402 } 363 403 smbd_done: ··· 416 456 return rc; 417 457 } 418 458 419 - int 420 - smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, 421 - unsigned int smb_buf_length) 422 - { 423 - struct kvec iov[2]; 424 - struct smb_rqst rqst = { .rq_iov = iov, 425 - .rq_nvec = 2 }; 426 - 427 - iov[0].iov_base = smb_buffer; 428 - iov[0].iov_len = 4; 429 - iov[1].iov_base = (char *)smb_buffer + 4; 430 - iov[1].iov_len = smb_buf_length; 431 - 432 - return __smb_send_rqst(server, 1, 
&rqst); 433 - } 434 - 435 459 static int 436 460 wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits, 437 461 const int timeout, const int flags, ··· 453 509 in_flight = server->in_flight; 454 510 spin_unlock(&server->req_lock); 455 511 456 - trace_smb3_nblk_credits(server->CurrentMid, 512 + trace_smb3_nblk_credits(server->current_mid, 457 513 server->conn_id, server->hostname, scredits, -1, in_flight); 458 514 cifs_dbg(FYI, "%s: remove %u credits total=%d\n", 459 515 __func__, 1, scredits); ··· 486 542 in_flight = server->in_flight; 487 543 spin_unlock(&server->req_lock); 488 544 489 - trace_smb3_credit_timeout(server->CurrentMid, 545 + trace_smb3_credit_timeout(server->current_mid, 490 546 server->conn_id, server->hostname, scredits, 491 547 num_credits, in_flight); 492 548 cifs_server_dbg(VFS, "wait timed out after %d ms\n", ··· 529 585 spin_unlock(&server->req_lock); 530 586 531 587 trace_smb3_credit_timeout( 532 - server->CurrentMid, 588 + server->current_mid, 533 589 server->conn_id, server->hostname, 534 590 scredits, num_credits, in_flight); 535 591 cifs_server_dbg(VFS, "wait timed out after %d ms\n", ··· 559 615 in_flight = server->in_flight; 560 616 spin_unlock(&server->req_lock); 561 617 562 - trace_smb3_waitff_credits(server->CurrentMid, 618 + trace_smb3_waitff_credits(server->current_mid, 563 619 server->conn_id, server->hostname, scredits, 564 620 -(num_credits), in_flight); 565 621 cifs_dbg(FYI, "%s: remove %u credits total=%d\n", ··· 570 626 return 0; 571 627 } 572 628 573 - static int 574 - wait_for_free_request(struct TCP_Server_Info *server, const int flags, 575 - unsigned int *instance) 629 + int wait_for_free_request(struct TCP_Server_Info *server, const int flags, 630 + unsigned int *instance) 576 631 { 577 632 return wait_for_free_credits(server, 1, -1, flags, 578 633 instance); ··· 609 666 */ 610 667 if (server->in_flight == 0) { 611 668 spin_unlock(&server->req_lock); 612 - trace_smb3_insufficient_credits(server->CurrentMid, 669 + trace_smb3_insufficient_credits(server->current_mid, 613 670 server->conn_id, server->hostname, scredits, 614 671 num, in_flight); 615 672 cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n", ··· 633 690 return 0; 634 691 } 635 692 636 - static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, 637 - struct mid_q_entry **ppmidQ) 638 - { 639 - spin_lock(&ses->ses_lock); 640 - if (ses->ses_status == SES_NEW) { 641 - if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && 642 - (in_buf->Command != SMB_COM_NEGOTIATE)) { 643 - spin_unlock(&ses->ses_lock); 644 - return -EAGAIN; 645 - } 646 - /* else ok - we are setting up session */ 647 - } 648 - 649 - if (ses->ses_status == SES_EXITING) { 650 - /* check if SMB session is bad because we are setting it up */ 651 - if (in_buf->Command != SMB_COM_LOGOFF_ANDX) { 652 - spin_unlock(&ses->ses_lock); 653 - return -EAGAIN; 654 - } 655 - /* else ok - we are shutting down session */ 656 - } 657 - spin_unlock(&ses->ses_lock); 658 - 659 - *ppmidQ = alloc_mid(in_buf, ses->server); 660 - if (*ppmidQ == NULL) 661 - return -ENOMEM; 662 - spin_lock(&ses->server->mid_lock); 663 - list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q); 664 - spin_unlock(&ses->server->mid_lock); 665 - return 0; 666 - } 667 - 668 - static int 669 - wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ) 693 + int wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ) 670 694 { 671 695 int error; 672 696 ··· 645 735 return -ERESTARTSYS; 646 
736 647 737 return 0; 648 - } 649 - 650 - struct mid_q_entry * 651 - cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) 652 - { 653 - int rc; 654 - struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; 655 - struct mid_q_entry *mid; 656 - 657 - if (rqst->rq_iov[0].iov_len != 4 || 658 - rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) 659 - return ERR_PTR(-EIO); 660 - 661 - /* enable signing if server requires it */ 662 - if (server->sign) 663 - hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 664 - 665 - mid = alloc_mid(hdr, server); 666 - if (mid == NULL) 667 - return ERR_PTR(-ENOMEM); 668 - 669 - rc = cifs_sign_rqst(rqst, server, &mid->sequence_number); 670 - if (rc) { 671 - release_mid(mid); 672 - return ERR_PTR(rc); 673 - } 674 - 675 - return mid; 676 738 } 677 739 678 740 /* ··· 701 819 mid->mid_state = MID_REQUEST_SUBMITTED; 702 820 703 821 /* put it on the pending_mid_q */ 704 - spin_lock(&server->mid_lock); 822 + spin_lock(&server->mid_queue_lock); 705 823 list_add_tail(&mid->qhead, &server->pending_mid_q); 706 - spin_unlock(&server->mid_lock); 824 + spin_unlock(&server->mid_queue_lock); 707 825 708 826 /* 709 827 * Need to store the time in mid before calling I/O. For call_async, ··· 727 845 return rc; 728 846 } 729 847 730 - /* 731 - * 732 - * Send an SMB Request. No response info (other than return code) 733 - * needs to be parsed. 734 - * 735 - * flags indicate the type of request buffer and how long to wait 736 - * and whether to log NT STATUS code (error) before mapping it to POSIX error 737 - * 738 - */ 739 - int 740 - SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses, 741 - char *in_buf, int flags) 742 - { 743 - int rc; 744 - struct kvec iov[1]; 745 - struct kvec rsp_iov; 746 - int resp_buf_type; 747 - 748 - iov[0].iov_base = in_buf; 749 - iov[0].iov_len = get_rfc1002_length(in_buf) + 4; 750 - flags |= CIFS_NO_RSP_BUF; 751 - rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov); 752 - cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc); 753 - 754 - return rc; 755 - } 756 - 757 - static int 758 - cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) 848 + int cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) 759 849 { 760 850 int rc = 0; 761 851 762 852 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n", 763 853 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state); 764 854 765 - spin_lock(&server->mid_lock); 855 + spin_lock(&server->mid_queue_lock); 766 856 switch (mid->mid_state) { 767 857 case MID_RESPONSE_READY: 768 - spin_unlock(&server->mid_lock); 858 + spin_unlock(&server->mid_queue_lock); 769 859 return rc; 770 860 case MID_RETRY_NEEDED: 771 861 rc = -EAGAIN; ··· 752 898 rc = mid->mid_rc; 753 899 break; 754 900 default: 755 - if (!(mid->mid_flags & MID_DELETED)) { 901 + if (mid->deleted_from_q == false) { 756 902 list_del_init(&mid->qhead); 757 - mid->mid_flags |= MID_DELETED; 903 + mid->deleted_from_q = true; 758 904 } 759 - spin_unlock(&server->mid_lock); 905 + spin_unlock(&server->mid_queue_lock); 760 906 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n", 761 907 __func__, mid->mid, mid->mid_state); 762 908 rc = -EIO; 763 909 goto sync_mid_done; 764 910 } 765 - spin_unlock(&server->mid_lock); 911 + spin_unlock(&server->mid_queue_lock); 766 912 767 913 sync_mid_done: 768 914 release_mid(mid); 769 915 return rc; 770 - } 771 - 772 - static inline int 773 - send_cancel(struct TCP_Server_Info *server, struct smb_rqst 
*rqst, 774 - struct mid_q_entry *mid) 775 - { 776 - return server->ops->send_cancel ? 777 - server->ops->send_cancel(server, rqst, mid) : 0; 778 - } 779 - 780 - int 781 - cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, 782 - bool log_error) 783 - { 784 - unsigned int len = get_rfc1002_length(mid->resp_buf) + 4; 785 - 786 - dump_smb(mid->resp_buf, min_t(u32, 92, len)); 787 - 788 - /* convert the length into a more usable form */ 789 - if (server->sign) { 790 - struct kvec iov[2]; 791 - int rc = 0; 792 - struct smb_rqst rqst = { .rq_iov = iov, 793 - .rq_nvec = 2 }; 794 - 795 - iov[0].iov_base = mid->resp_buf; 796 - iov[0].iov_len = 4; 797 - iov[1].iov_base = (char *)mid->resp_buf + 4; 798 - iov[1].iov_len = len - 4; 799 - /* FIXME: add code to kill session */ 800 - rc = cifs_verify_signature(&rqst, server, 801 - mid->sequence_number); 802 - if (rc) 803 - cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n", 804 - rc); 805 - } 806 - 807 - /* BB special case reconnect tid and uid here? */ 808 - return map_and_check_smb_error(mid, log_error); 809 - } 810 - 811 - struct mid_q_entry * 812 - cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored, 813 - struct smb_rqst *rqst) 814 - { 815 - int rc; 816 - struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base; 817 - struct mid_q_entry *mid; 818 - 819 - if (rqst->rq_iov[0].iov_len != 4 || 820 - rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base) 821 - return ERR_PTR(-EIO); 822 - 823 - rc = allocate_mid(ses, hdr, &mid); 824 - if (rc) 825 - return ERR_PTR(rc); 826 - rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number); 827 - if (rc) { 828 - delete_mid(mid); 829 - return ERR_PTR(rc); 830 - } 831 - return mid; 832 916 } 833 917 834 918 static void ··· 1005 1213 cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n", 1006 1214 midQ[i]->mid, le16_to_cpu(midQ[i]->command)); 1007 1215 send_cancel(server, &rqst[i], midQ[i]); 1008 - spin_lock(&server->mid_lock); 1009 - midQ[i]->mid_flags |= MID_WAIT_CANCELLED; 1216 + spin_lock(&server->mid_queue_lock); 1217 + midQ[i]->wait_cancelled = true; 1010 1218 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED || 1011 1219 midQ[i]->mid_state == MID_RESPONSE_RECEIVED) { 1012 1220 midQ[i]->callback = cifs_cancelled_callback; 1013 1221 cancelled_mid[i] = true; 1014 1222 credits[i].value = 0; 1015 1223 } 1016 - spin_unlock(&server->mid_lock); 1224 + spin_unlock(&server->mid_queue_lock); 1017 1225 } 1018 1226 } 1019 1227 ··· 1096 1304 rqst, resp_buf_type, resp_iov); 1097 1305 } 1098 1306 1099 - int 1100 - SendReceive2(const unsigned int xid, struct cifs_ses *ses, 1101 - struct kvec *iov, int n_vec, int *resp_buf_type /* ret */, 1102 - const int flags, struct kvec *resp_iov) 1103 - { 1104 - struct smb_rqst rqst; 1105 - struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov; 1106 - int rc; 1107 - 1108 - if (n_vec + 1 > CIFS_MAX_IOV_SIZE) { 1109 - new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec), 1110 - GFP_KERNEL); 1111 - if (!new_iov) { 1112 - /* otherwise cifs_send_recv below sets resp_buf_type */ 1113 - *resp_buf_type = CIFS_NO_BUFFER; 1114 - return -ENOMEM; 1115 - } 1116 - } else 1117 - new_iov = s_iov; 1118 - 1119 - /* 1st iov is a RFC1001 length followed by the rest of the packet */ 1120 - memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec)); 1121 - 1122 - new_iov[0].iov_base = new_iov[1].iov_base; 1123 - new_iov[0].iov_len = 4; 1124 - new_iov[1].iov_base += 4; 1125 - new_iov[1].iov_len -= 4; 1126 - 1127 - memset(&rqst, 0, 
sizeof(struct smb_rqst)); 1128 - rqst.rq_iov = new_iov; 1129 - rqst.rq_nvec = n_vec + 1; 1130 - 1131 - rc = cifs_send_recv(xid, ses, ses->server, 1132 - &rqst, resp_buf_type, flags, resp_iov); 1133 - if (n_vec + 1 > CIFS_MAX_IOV_SIZE) 1134 - kfree(new_iov); 1135 - return rc; 1136 - } 1137 - 1138 - int 1139 - SendReceive(const unsigned int xid, struct cifs_ses *ses, 1140 - struct smb_hdr *in_buf, struct smb_hdr *out_buf, 1141 - int *pbytes_returned, const int flags) 1142 - { 1143 - int rc = 0; 1144 - struct mid_q_entry *midQ; 1145 - unsigned int len = be32_to_cpu(in_buf->smb_buf_length); 1146 - struct kvec iov = { .iov_base = in_buf, .iov_len = len }; 1147 - struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 }; 1148 - struct cifs_credits credits = { .value = 1, .instance = 0 }; 1149 - struct TCP_Server_Info *server; 1150 - 1151 - if (ses == NULL) { 1152 - cifs_dbg(VFS, "Null smb session\n"); 1153 - return -EIO; 1154 - } 1155 - server = ses->server; 1156 - if (server == NULL) { 1157 - cifs_dbg(VFS, "Null tcp session\n"); 1158 - return -EIO; 1159 - } 1160 - 1161 - spin_lock(&server->srv_lock); 1162 - if (server->tcpStatus == CifsExiting) { 1163 - spin_unlock(&server->srv_lock); 1164 - return -ENOENT; 1165 - } 1166 - spin_unlock(&server->srv_lock); 1167 - 1168 - /* Ensure that we do not send more than 50 overlapping requests 1169 - to the same server. We may make this configurable later or 1170 - use ses->maxReq */ 1171 - 1172 - if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { 1173 - cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n", 1174 - len); 1175 - return -EIO; 1176 - } 1177 - 1178 - rc = wait_for_free_request(server, flags, &credits.instance); 1179 - if (rc) 1180 - return rc; 1181 - 1182 - /* make sure that we sign in the same order that we send on this socket 1183 - and avoid races inside tcp sendmsg code that could cause corruption 1184 - of smb data */ 1185 - 1186 - cifs_server_lock(server); 1187 - 1188 - rc = allocate_mid(ses, in_buf, &midQ); 1189 - if (rc) { 1190 - cifs_server_unlock(server); 1191 - /* Update # of requests on wire to server */ 1192 - add_credits(server, &credits, 0); 1193 - return rc; 1194 - } 1195 - 1196 - rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number); 1197 - if (rc) { 1198 - cifs_server_unlock(server); 1199 - goto out; 1200 - } 1201 - 1202 - midQ->mid_state = MID_REQUEST_SUBMITTED; 1203 - 1204 - rc = smb_send(server, in_buf, len); 1205 - cifs_save_when_sent(midQ); 1206 - 1207 - if (rc < 0) 1208 - server->sequence_number -= 2; 1209 - 1210 - cifs_server_unlock(server); 1211 - 1212 - if (rc < 0) 1213 - goto out; 1214 - 1215 - rc = wait_for_response(server, midQ); 1216 - if (rc != 0) { 1217 - send_cancel(server, &rqst, midQ); 1218 - spin_lock(&server->mid_lock); 1219 - if (midQ->mid_state == MID_REQUEST_SUBMITTED || 1220 - midQ->mid_state == MID_RESPONSE_RECEIVED) { 1221 - /* no longer considered to be "in-flight" */ 1222 - midQ->callback = release_mid; 1223 - spin_unlock(&server->mid_lock); 1224 - add_credits(server, &credits, 0); 1225 - return rc; 1226 - } 1227 - spin_unlock(&server->mid_lock); 1228 - } 1229 - 1230 - rc = cifs_sync_mid_result(midQ, server); 1231 - if (rc != 0) { 1232 - add_credits(server, &credits, 0); 1233 - return rc; 1234 - } 1235 - 1236 - if (!midQ->resp_buf || !out_buf || 1237 - midQ->mid_state != MID_RESPONSE_READY) { 1238 - rc = -EIO; 1239 - cifs_server_dbg(VFS, "Bad MID state?\n"); 1240 - goto out; 1241 - } 1242 - 1243 - *pbytes_returned = get_rfc1002_length(midQ->resp_buf); 1244 - memcpy(out_buf, 
midQ->resp_buf, *pbytes_returned + 4); 1245 - rc = cifs_check_receive(midQ, server, 0); 1246 - out: 1247 - delete_mid(midQ); 1248 - add_credits(server, &credits, 0); 1249 - 1250 - return rc; 1251 - } 1252 - 1253 - /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows 1254 - blocking lock to return. */ 1255 - 1256 - static int 1257 - send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon, 1258 - struct smb_hdr *in_buf, 1259 - struct smb_hdr *out_buf) 1260 - { 1261 - int bytes_returned; 1262 - struct cifs_ses *ses = tcon->ses; 1263 - LOCK_REQ *pSMB = (LOCK_REQ *)in_buf; 1264 - 1265 - /* We just modify the current in_buf to change 1266 - the type of lock from LOCKING_ANDX_SHARED_LOCK 1267 - or LOCKING_ANDX_EXCLUSIVE_LOCK to 1268 - LOCKING_ANDX_CANCEL_LOCK. */ 1269 - 1270 - pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; 1271 - pSMB->Timeout = 0; 1272 - pSMB->hdr.Mid = get_next_mid(ses->server); 1273 - 1274 - return SendReceive(xid, ses, in_buf, out_buf, 1275 - &bytes_returned, 0); 1276 - } 1277 - 1278 - int 1279 - SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, 1280 - struct smb_hdr *in_buf, struct smb_hdr *out_buf, 1281 - int *pbytes_returned) 1282 - { 1283 - int rc = 0; 1284 - int rstart = 0; 1285 - struct mid_q_entry *midQ; 1286 - struct cifs_ses *ses; 1287 - unsigned int len = be32_to_cpu(in_buf->smb_buf_length); 1288 - struct kvec iov = { .iov_base = in_buf, .iov_len = len }; 1289 - struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 }; 1290 - unsigned int instance; 1291 - struct TCP_Server_Info *server; 1292 - 1293 - if (tcon == NULL || tcon->ses == NULL) { 1294 - cifs_dbg(VFS, "Null smb session\n"); 1295 - return -EIO; 1296 - } 1297 - ses = tcon->ses; 1298 - server = ses->server; 1299 - 1300 - if (server == NULL) { 1301 - cifs_dbg(VFS, "Null tcp session\n"); 1302 - return -EIO; 1303 - } 1304 - 1305 - spin_lock(&server->srv_lock); 1306 - if (server->tcpStatus == CifsExiting) { 1307 - spin_unlock(&server->srv_lock); 1308 - return -ENOENT; 1309 - } 1310 - spin_unlock(&server->srv_lock); 1311 - 1312 - /* Ensure that we do not send more than 50 overlapping requests 1313 - to the same server. We may make this configurable later or 1314 - use ses->maxReq */ 1315 - 1316 - if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { 1317 - cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n", 1318 - len); 1319 - return -EIO; 1320 - } 1321 - 1322 - rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance); 1323 - if (rc) 1324 - return rc; 1325 - 1326 - /* make sure that we sign in the same order that we send on this socket 1327 - and avoid races inside tcp sendmsg code that could cause corruption 1328 - of smb data */ 1329 - 1330 - cifs_server_lock(server); 1331 - 1332 - rc = allocate_mid(ses, in_buf, &midQ); 1333 - if (rc) { 1334 - cifs_server_unlock(server); 1335 - return rc; 1336 - } 1337 - 1338 - rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number); 1339 - if (rc) { 1340 - delete_mid(midQ); 1341 - cifs_server_unlock(server); 1342 - return rc; 1343 - } 1344 - 1345 - midQ->mid_state = MID_REQUEST_SUBMITTED; 1346 - rc = smb_send(server, in_buf, len); 1347 - cifs_save_when_sent(midQ); 1348 - 1349 - if (rc < 0) 1350 - server->sequence_number -= 2; 1351 - 1352 - cifs_server_unlock(server); 1353 - 1354 - if (rc < 0) { 1355 - delete_mid(midQ); 1356 - return rc; 1357 - } 1358 - 1359 - /* Wait for a reply - allow signals to interrupt. 
*/ 1360 - rc = wait_event_interruptible(server->response_q, 1361 - (!(midQ->mid_state == MID_REQUEST_SUBMITTED || 1362 - midQ->mid_state == MID_RESPONSE_RECEIVED)) || 1363 - ((server->tcpStatus != CifsGood) && 1364 - (server->tcpStatus != CifsNew))); 1365 - 1366 - /* Were we interrupted by a signal ? */ 1367 - spin_lock(&server->srv_lock); 1368 - if ((rc == -ERESTARTSYS) && 1369 - (midQ->mid_state == MID_REQUEST_SUBMITTED || 1370 - midQ->mid_state == MID_RESPONSE_RECEIVED) && 1371 - ((server->tcpStatus == CifsGood) || 1372 - (server->tcpStatus == CifsNew))) { 1373 - spin_unlock(&server->srv_lock); 1374 - 1375 - if (in_buf->Command == SMB_COM_TRANSACTION2) { 1376 - /* POSIX lock. We send a NT_CANCEL SMB to cause the 1377 - blocking lock to return. */ 1378 - rc = send_cancel(server, &rqst, midQ); 1379 - if (rc) { 1380 - delete_mid(midQ); 1381 - return rc; 1382 - } 1383 - } else { 1384 - /* Windows lock. We send a LOCKINGX_CANCEL_LOCK 1385 - to cause the blocking lock to return. */ 1386 - 1387 - rc = send_lock_cancel(xid, tcon, in_buf, out_buf); 1388 - 1389 - /* If we get -ENOLCK back the lock may have 1390 - already been removed. Don't exit in this case. */ 1391 - if (rc && rc != -ENOLCK) { 1392 - delete_mid(midQ); 1393 - return rc; 1394 - } 1395 - } 1396 - 1397 - rc = wait_for_response(server, midQ); 1398 - if (rc) { 1399 - send_cancel(server, &rqst, midQ); 1400 - spin_lock(&server->mid_lock); 1401 - if (midQ->mid_state == MID_REQUEST_SUBMITTED || 1402 - midQ->mid_state == MID_RESPONSE_RECEIVED) { 1403 - /* no longer considered to be "in-flight" */ 1404 - midQ->callback = release_mid; 1405 - spin_unlock(&server->mid_lock); 1406 - return rc; 1407 - } 1408 - spin_unlock(&server->mid_lock); 1409 - } 1410 - 1411 - /* We got the response - restart system call. */ 1412 - rstart = 1; 1413 - spin_lock(&server->srv_lock); 1414 - } 1415 - spin_unlock(&server->srv_lock); 1416 - 1417 - rc = cifs_sync_mid_result(midQ, server); 1418 - if (rc != 0) 1419 - return rc; 1420 - 1421 - /* rcvd frame is ok */ 1422 - if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) { 1423 - rc = -EIO; 1424 - cifs_tcon_dbg(VFS, "Bad MID state?\n"); 1425 - goto out; 1426 - } 1427 - 1428 - *pbytes_returned = get_rfc1002_length(midQ->resp_buf); 1429 - memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); 1430 - rc = cifs_check_receive(midQ, server, 0); 1431 - out: 1432 - delete_mid(midQ); 1433 - if (rstart && rc == -EACCES) 1434 - return -ERESTARTSYS; 1435 - return rc; 1436 - } 1437 1307 1438 1308 /* 1439 1309 * Discard any remaining data in the current SMB. To do this, we borrow the
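The mid bookkeeping above moves from a mid_flags bit mask to discrete booleans (deleted_from_q, wait_cancelled) guarded by the renamed mid_queue_lock, while the legacy SMB1 SendReceive paths drop out entirely. A minimal sketch of a cancel under the new field names (my_cancel_mid() is hypothetical):

static void my_cancel_mid(struct TCP_Server_Info *server,
                          struct mid_q_entry *mid)
{
        spin_lock(&server->mid_queue_lock);
        mid->wait_cancelled = true;     /* was: mid_flags |= MID_WAIT_CANCELLED */
        if (!mid->deleted_from_q) {     /* was: !(mid_flags & MID_DELETED) */
                list_del_init(&mid->qhead);
                mid->deleted_from_q = true;
        }
        spin_unlock(&server->mid_queue_lock);
}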
+118
fs/smb/common/smbdirect/smbdirect_socket.h
··· 38 38 } ib; 39 39 40 40 struct smbdirect_socket_parameters parameters; 41 + 42 + /* 43 + * The state for posted send buffers 44 + */ 45 + struct { 46 + /* 47 + * Memory pools for preallocating 48 + * smbdirect_send_io buffers 49 + */ 50 + struct { 51 + struct kmem_cache *cache; 52 + mempool_t *pool; 53 + } mem; 54 + } send_io; 55 + 56 + /* 57 + * The state for posted receive buffers 58 + */ 59 + struct { 60 + /* 61 + * The type of PDU we are expecting 62 + */ 63 + enum { 64 + SMBDIRECT_EXPECT_NEGOTIATE_REQ = 1, 65 + SMBDIRECT_EXPECT_NEGOTIATE_REP = 2, 66 + SMBDIRECT_EXPECT_DATA_TRANSFER = 3, 67 + } expected; 68 + 69 + /* 70 + * Memory pools for preallocating 71 + * smbdirect_recv_io buffers 72 + */ 73 + struct { 74 + struct kmem_cache *cache; 75 + mempool_t *pool; 76 + } mem; 77 + 78 + /* 79 + * The list of free smbdirect_recv_io 80 + * structures 81 + */ 82 + struct { 83 + struct list_head list; 84 + spinlock_t lock; 85 + } free; 86 + 87 + /* 88 + * The list of arrived non-empty smbdirect_recv_io 89 + * structures 90 + * 91 + * This represents the reassembly queue. 92 + */ 93 + struct { 94 + struct list_head list; 95 + spinlock_t lock; 96 + wait_queue_head_t wait_queue; 97 + /* total data length of reassembly queue */ 98 + int data_length; 99 + int queue_length; 100 + /* the offset to first buffer in reassembly queue */ 101 + int first_entry_offset; 102 + /* 103 + * Indicate if we have received a full packet on the 104 + * connection. This is used to identify the first SMBD 105 + * packet of an assembled payload (SMB packet) in the 106 + * reassembly queue, so we can return an RFC1002 length to 107 + * the upper layer to indicate the length of the SMB packet 108 + * received 109 + */ 110 + bool full_packet_received; 111 + } reassembly; 112 + } recv_io; 113 + }; 114 + 115 + struct smbdirect_send_io { 116 + struct smbdirect_socket *socket; 117 + struct ib_cqe cqe; 118 + 119 + /* 120 + * The SGE entries for this work request 121 + * 122 + * The first points to the packet header 123 + */ 124 + #define SMBDIRECT_SEND_IO_MAX_SGE 6 125 + size_t num_sge; 126 + struct ib_sge sge[SMBDIRECT_SEND_IO_MAX_SGE]; 127 + 128 + /* 129 + * Link to the list of sibling smbdirect_send_io 130 + * messages. 131 + */ 132 + struct list_head sibling_list; 133 + struct ib_send_wr wr; 134 + 135 + /* SMBD packet header follows this structure */ 136 + u8 packet[]; 137 + }; 138 + 139 + struct smbdirect_recv_io { 140 + struct smbdirect_socket *socket; 141 + struct ib_cqe cqe; 142 + 143 + /* 144 + * For now we only use a single SGE 145 + * as we have just one large buffer 146 + * per posted recv. 147 + */ 148 + #define SMBDIRECT_RECV_IO_MAX_SGE 1 149 + struct ib_sge sge; 150 + 151 + /* Link to free or reassembly list */ 152 + struct list_head list; 153 + 154 + /* Indicate if this is the 1st packet of a payload */ 155 + bool first_segment; 156 + 157 + /* SMBD packet header and payload follow this structure */ 158 + u8 packet[]; 41 159 }; 42 160 43 161 #endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */
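A hedged sketch of pulling a fresh receive buffer from the new per-socket mempool (GFP flags mirror the client code earlier in this diff; my_alloc_recv_io() is illustrative only):

static struct smbdirect_recv_io *my_alloc_recv_io(struct smbdirect_socket *sc)
{
        struct smbdirect_recv_io *io;

        io = mempool_alloc(sc->recv_io.mem.pool, GFP_KERNEL);
        if (!io)
                return NULL;

        io->socket = sc;
        io->sge.length = 0;     /* nothing DMA-mapped yet */
        return io;
}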
+1
fs/smb/server/connection.h
··· 46 46 struct mutex srv_mutex; 47 47 int status; 48 48 unsigned int cli_cap; 49 + __be32 inet_addr; 49 50 char *request_buf; 50 51 struct ksmbd_transport *transport; 51 52 struct nls_table *local_nls;
+1 -1
fs/smb/server/smb_common.c
··· 515 515 516 516 p = strrchr(longname, '.'); 517 517 if (p == longname) { /*name starts with a dot*/ 518 - strscpy(extension, "___", strlen("___")); 518 + strscpy(extension, "___", sizeof(extension)); 519 519 } else { 520 520 if (p) { 521 521 p++;
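The one-liner above fixes a classic strscpy() misuse: the size argument is the destination capacity, not the source length. With strlen("___") == 3, at most two characters plus the terminator are copied, so the extension came out as "__". A sketch of both forms, assuming a small fixed-size destination such as char extension[4]:

char extension[4];

strscpy(extension, "___", strlen("___"));     /* buggy: stores "__" */
strscpy(extension, "___", sizeof(extension)); /* fixed: stores "___" */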
+35 -62
fs/smb/server/transport_rdma.c
··· 129 129 spinlock_t recvmsg_queue_lock; 130 130 struct list_head recvmsg_queue; 131 131 132 - spinlock_t empty_recvmsg_queue_lock; 133 - struct list_head empty_recvmsg_queue; 134 - 135 132 int send_credit_target; 136 133 atomic_t send_credits; 137 134 spinlock_t lock_new_recv_credits; ··· 265 268 static void put_recvmsg(struct smb_direct_transport *t, 266 269 struct smb_direct_recvmsg *recvmsg) 267 270 { 268 - ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, 269 - recvmsg->sge.length, DMA_FROM_DEVICE); 271 + if (likely(recvmsg->sge.length != 0)) { 272 + ib_dma_unmap_single(t->cm_id->device, 273 + recvmsg->sge.addr, 274 + recvmsg->sge.length, 275 + DMA_FROM_DEVICE); 276 + recvmsg->sge.length = 0; 277 + } 270 278 271 279 spin_lock(&t->recvmsg_queue_lock); 272 280 list_add(&recvmsg->list, &t->recvmsg_queue); 273 281 spin_unlock(&t->recvmsg_queue_lock); 274 - } 275 - 276 - static struct 277 - smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t) 278 - { 279 - struct smb_direct_recvmsg *recvmsg = NULL; 280 - 281 - spin_lock(&t->empty_recvmsg_queue_lock); 282 - if (!list_empty(&t->empty_recvmsg_queue)) { 283 - recvmsg = list_first_entry(&t->empty_recvmsg_queue, 284 - struct smb_direct_recvmsg, list); 285 - list_del(&recvmsg->list); 286 - } 287 - spin_unlock(&t->empty_recvmsg_queue_lock); 288 - return recvmsg; 289 - } 290 - 291 - static void put_empty_recvmsg(struct smb_direct_transport *t, 292 - struct smb_direct_recvmsg *recvmsg) 293 - { 294 - ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, 295 - recvmsg->sge.length, DMA_FROM_DEVICE); 296 - 297 - spin_lock(&t->empty_recvmsg_queue_lock); 298 - list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue); 299 - spin_unlock(&t->empty_recvmsg_queue_lock); 300 282 } 301 283 302 284 static void enqueue_reassembly(struct smb_direct_transport *t, ··· 361 385 spin_lock_init(&t->receive_credit_lock); 362 386 spin_lock_init(&t->recvmsg_queue_lock); 363 387 INIT_LIST_HEAD(&t->recvmsg_queue); 364 - 365 - spin_lock_init(&t->empty_recvmsg_queue_lock); 366 - INIT_LIST_HEAD(&t->empty_recvmsg_queue); 367 388 368 389 init_waitqueue_head(&t->wait_send_pending); 369 390 atomic_set(&t->send_pending, 0); ··· 521 548 t = recvmsg->transport; 522 549 523 550 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) { 551 + put_recvmsg(t, recvmsg); 524 552 if (wc->status != IB_WC_WR_FLUSH_ERR) { 525 553 pr_err("Recv error. 
status='%s (%d)' opcode=%d\n", 526 554 ib_wc_status_msg(wc->status), wc->status, 527 555 wc->opcode); 528 556 smb_direct_disconnect_rdma_connection(t); 529 557 } 530 - put_empty_recvmsg(t, recvmsg); 531 558 return; 532 559 } 533 560 ··· 541 568 switch (recvmsg->type) { 542 569 case SMB_DIRECT_MSG_NEGOTIATE_REQ: 543 570 if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) { 544 - put_empty_recvmsg(t, recvmsg); 571 + put_recvmsg(t, recvmsg); 572 + smb_direct_disconnect_rdma_connection(t); 545 573 return; 546 574 } 547 575 t->negotiation_requested = true; ··· 550 576 t->status = SMB_DIRECT_CS_CONNECTED; 551 577 enqueue_reassembly(t, recvmsg, 0); 552 578 wake_up_interruptible(&t->wait_status); 553 - break; 579 + return; 554 580 case SMB_DIRECT_MSG_DATA_TRANSFER: { 555 581 struct smb_direct_data_transfer *data_transfer = 556 582 (struct smb_direct_data_transfer *)recvmsg->packet; ··· 559 585 560 586 if (wc->byte_len < 561 587 offsetof(struct smb_direct_data_transfer, padding)) { 562 - put_empty_recvmsg(t, recvmsg); 588 + put_recvmsg(t, recvmsg); 589 + smb_direct_disconnect_rdma_connection(t); 563 590 return; 564 591 } 565 592 ··· 568 593 if (data_length) { 569 594 if (wc->byte_len < sizeof(struct smb_direct_data_transfer) + 570 595 (u64)data_length) { 571 - put_empty_recvmsg(t, recvmsg); 596 + put_recvmsg(t, recvmsg); 597 + smb_direct_disconnect_rdma_connection(t); 572 598 return; 573 599 } 574 600 ··· 581 605 else 582 606 t->full_packet_received = true; 583 607 584 - enqueue_reassembly(t, recvmsg, (int)data_length); 585 - wake_up_interruptible(&t->wait_reassembly_queue); 586 - 587 608 spin_lock(&t->receive_credit_lock); 588 609 receive_credits = --(t->recv_credits); 589 610 avail_recvmsg_count = t->count_avail_recvmsg; 590 611 spin_unlock(&t->receive_credit_lock); 591 612 } else { 592 - put_empty_recvmsg(t, recvmsg); 593 - 594 613 spin_lock(&t->receive_credit_lock); 595 614 receive_credits = --(t->recv_credits); 596 615 avail_recvmsg_count = ++(t->count_avail_recvmsg); ··· 607 636 if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count)) 608 637 mod_delayed_work(smb_direct_wq, 609 638 &t->post_recv_credits_work, 0); 610 - break; 639 + 640 + if (data_length) { 641 + enqueue_reassembly(t, recvmsg, (int)data_length); 642 + wake_up_interruptible(&t->wait_reassembly_queue); 643 + } else 644 + put_recvmsg(t, recvmsg); 645 + 646 + return; 611 647 } 612 - default: 613 - break; 614 648 } 649 + 650 + /* 651 + * This is an internal error! 
652 + */ 653 + WARN_ON_ONCE(recvmsg->type != SMB_DIRECT_MSG_DATA_TRANSFER); 654 + put_recvmsg(t, recvmsg); 655 + smb_direct_disconnect_rdma_connection(t); 615 656 } 616 657 617 658 static int smb_direct_post_recv(struct smb_direct_transport *t, ··· 653 670 ib_dma_unmap_single(t->cm_id->device, 654 671 recvmsg->sge.addr, recvmsg->sge.length, 655 672 DMA_FROM_DEVICE); 673 + recvmsg->sge.length = 0; 656 674 smb_direct_disconnect_rdma_connection(t); 657 675 return ret; 658 676 } ··· 795 811 struct smb_direct_recvmsg *recvmsg; 796 812 int receive_credits, credits = 0; 797 813 int ret; 798 - int use_free = 1; 799 814 800 815 spin_lock(&t->receive_credit_lock); 801 816 receive_credits = t->recv_credits; ··· 802 819 803 820 if (receive_credits < t->recv_credit_target) { 804 821 while (true) { 805 - if (use_free) 806 - recvmsg = get_free_recvmsg(t); 807 - else 808 - recvmsg = get_empty_recvmsg(t); 809 - if (!recvmsg) { 810 - if (use_free) { 811 - use_free = 0; 812 - continue; 813 - } else { 814 - break; 815 - } 816 - } 822 + recvmsg = get_free_recvmsg(t); 823 + if (!recvmsg) 824 + break; 817 825 818 826 recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER; 819 827 recvmsg->first_segment = false; ··· 1780 1806 1781 1807 while ((recvmsg = get_free_recvmsg(t))) 1782 1808 mempool_free(recvmsg, t->recvmsg_mempool); 1783 - while ((recvmsg = get_empty_recvmsg(t))) 1784 - mempool_free(recvmsg, t->recvmsg_mempool); 1785 1809 1786 1810 mempool_destroy(t->recvmsg_mempool); 1787 1811 t->recvmsg_mempool = NULL; ··· 1835 1863 if (!recvmsg) 1836 1864 goto err; 1837 1865 recvmsg->transport = t; 1866 + recvmsg->sge.length = 0; 1838 1867 list_add(&recvmsg->list, &t->recvmsg_queue); 1839 1868 } 1840 1869 t->count_avail_recvmsg = t->recv_credit_max;
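Every malformed or failed receive in the hunk above now takes the same exit: return the buffer to the single free queue (which also drops the DMA mapping, at most once) and tear the connection down. A hypothetical helper capturing that shared error path (the name is illustrative, not from the patch):

static void my_reject_recvmsg(struct smb_direct_transport *t,
                              struct smb_direct_recvmsg *recvmsg)
{
        put_recvmsg(t, recvmsg);        /* unmap once, back to free queue */
        smb_direct_disconnect_rdma_connection(t);  /* fatal protocol error */
}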
+17
fs/smb/server/transport_tcp.c
··· 85 85 return NULL; 86 86 } 87 87 88 + conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr; 88 89 conn->transport = KSMBD_TRANS(t); 89 90 KSMBD_TRANS(t)->conn = conn; 90 91 KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops; ··· 229 228 { 230 229 struct socket *client_sk = NULL; 231 230 struct interface *iface = (struct interface *)p; 231 + struct inet_sock *csk_inet; 232 + struct ksmbd_conn *conn; 232 233 int ret; 233 234 234 235 while (!kthread_should_stop()) { ··· 248 245 schedule_timeout_interruptible(HZ / 10); 249 246 continue; 250 247 } 248 + 249 + /* 250 + * Limits repeated connections from clients with the same IP. 251 + */ 252 + csk_inet = inet_sk(client_sk->sk); 253 + down_read(&conn_list_lock); 254 + list_for_each_entry(conn, &conn_list, conns_list) 255 + if (csk_inet->inet_daddr == conn->inet_addr) { 256 + ret = -EAGAIN; 257 + break; 258 + } 259 + up_read(&conn_list_lock); 260 + if (ret == -EAGAIN) 261 + continue; 251 262 252 263 if (server_conf.max_connections && 253 264 atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
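The accept loop now refuses a client whose address already owns a live connection, via the inet_addr field added to ksmbd_conn above. A hedged sketch of the same check factored into a helper (my_peer_already_connected() is a hypothetical name):

static bool my_peer_already_connected(__be32 daddr)
{
        struct ksmbd_conn *conn;
        bool found = false;

        down_read(&conn_list_lock);
        list_for_each_entry(conn, &conn_list, conns_list) {
                if (conn->inet_addr == daddr) {
                        found = true;
                        break;
                }
        }
        up_read(&conn_list_lock);

        return found;
}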
+29
include/acpi/pcc.h
··· 17 17 u32 latency; 18 18 u32 max_access_rate; 19 19 u16 min_turnaround_time; 20 + 21 + /* Set to true to indicate that the mailbox should manage 22 + * writing the data to the shared buffer. This differs from 23 + * the case where the drivers are writing to the buffer and 24 + * using send_data only to ring the doorbell. If this flag 25 + * is set, then the void * data parameter of send_data must 26 + * point to a kernel-memory buffer formatted in accordance with 27 + * the PCC specification. 28 + * 29 + * The active buffer management will include reading the 30 + * notify_on_completion flag, and will then 31 + * call mbox_chan_txdone when the acknowledgment interrupt is 32 + * received. 33 + */ 34 + bool manage_writes; 35 + 36 + /* Optional callback that allows the driver 37 + * to allocate the memory used for receiving 38 + * messages. The return value is the location 39 + * inside the buffer where the mailbox should write the data. 40 + */ 41 + void *(*rx_alloc)(struct mbox_client *cl, int size); 42 + }; 43 + 44 + struct pcc_header { 45 + u32 signature; 46 + u32 flags; 47 + u32 length; 48 + u32 command; 20 49 }; 21 50 22 51 /* Generic Communications Channel Shared Memory Region */
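When manage_writes is set, the void * handed to mbox_send_message() must start with the PCC command header so the mailbox framework can fill the shared buffer itself. A hedged sketch of such a buffer (my_pcc_msg and the field values are illustrative; the authoritative layout is the PCC specification):

struct my_pcc_msg {
        struct pcc_header hdr;  /* must lead the buffer */
        u8 payload[32];
};

static void my_fill_msg(struct my_pcc_msg *msg, u32 cmd)
{
        msg->hdr.signature = 0;         /* platform-defined value */
        msg->hdr.flags = 0;
        msg->hdr.length = sizeof(*msg); /* assumed: whole-buffer length */
        msg->hdr.command = cmd;
}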
+1 -1
include/crypto/hash.h
··· 184 184 * Worst case is hmac(sha3-224-s390). Its context is a nested 'shash_desc' 185 185 * containing a 'struct s390_sha_ctx'. 186 186 */ 187 - #define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360) 187 + #define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 361) 188 188 #define MAX_SYNC_HASH_REQSIZE (sizeof(struct ahash_request) + \ 189 189 HASH_MAX_DESCSIZE) 190 190
+4
include/linux/efi.h
··· 439 439 440 440 /* OVMF protocol GUIDs */ 441 441 #define OVMF_SEV_MEMORY_ACCEPTANCE_PROTOCOL_GUID EFI_GUID(0xc5a010fe, 0x38a7, 0x4531, 0x8a, 0x4a, 0x05, 0x00, 0xd2, 0xfd, 0x16, 0x49) 442 + #define OVMF_MEMORY_LOG_TABLE_GUID EFI_GUID(0x95305139, 0xb20f, 0x4723, 0x84, 0x25, 0x62, 0x7c, 0x88, 0x8f, 0xf1, 0x21) 442 443 443 444 typedef struct { 444 445 efi_guid_t guid; ··· 643 642 unsigned long esrt; /* ESRT table */ 644 643 unsigned long tpm_log; /* TPM2 Event Log table */ 645 644 unsigned long tpm_final_log; /* TPM2 Final Events Log table */ 645 + unsigned long ovmf_debug_log; 646 646 unsigned long mokvar_table; /* MOK variable config table */ 647 647 unsigned long coco_secret; /* Confidential computing secret table */ 648 648 unsigned long unaccepted; /* Unaccepted memory table */ ··· 1345 1343 } 1346 1344 1347 1345 umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n); 1346 + 1347 + int ovmf_log_probe(unsigned long ovmf_debug_log_table); 1348 1348 1349 1349 /* 1350 1350 * efivar ops event type
+7 -15
include/linux/gpio/driver.h
··· 347 347 * @get: returns value for signal "offset", 0=low, 1=high, or negative error 348 348 * @get_multiple: reads values for multiple signals defined by "mask" and 349 349 * stores them in "bits", returns 0 on success or negative error 350 - * @set: **DEPRECATED** - please use set_rv() instead 351 - * @set_multiple: **DEPRECATED** - please use set_multiple_rv() instead 352 - * @set_rv: assigns output value for signal "offset", returns 0 on success or 353 - * negative error value 354 - * @set_multiple_rv: assigns output values for multiple signals defined by 355 - * "mask", returns 0 on success or negative error value 350 + * @set: assigns output value for signal "offset", returns 0 on success or 351 + * negative error value 352 + * @set_multiple: assigns output values for multiple signals defined by 353 + * "mask", returns 0 on success or negative error value 356 354 * @set_config: optional hook for all kinds of settings. Uses the same 357 355 * packed config format as generic pinconf. Must return 0 on success and 358 356 * a negative error number on failure. ··· 443 445 int (*get_multiple)(struct gpio_chip *gc, 444 446 unsigned long *mask, 445 447 unsigned long *bits); 446 - void (*set)(struct gpio_chip *gc, 447 - unsigned int offset, int value); 448 - void (*set_multiple)(struct gpio_chip *gc, 448 + int (*set)(struct gpio_chip *gc, 449 + unsigned int offset, int value); 450 + int (*set_multiple)(struct gpio_chip *gc, 449 451 unsigned long *mask, 450 452 unsigned long *bits); 451 - int (*set_rv)(struct gpio_chip *gc, 452 - unsigned int offset, 453 - int value); 454 - int (*set_multiple_rv)(struct gpio_chip *gc, 455 - unsigned long *mask, 456 - unsigned long *bits); 457 453 int (*set_config)(struct gpio_chip *gc, 458 454 unsigned int offset, 459 455 unsigned long config);
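With the migration finished, drivers implement the int-returning prototypes under the plain names again. A minimal sketch of a callback that propagates bus errors instead of swallowing them, as the old void set() had to (my_chip and my_chip_write_bit() are hypothetical):

static int my_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
        struct my_chip *chip = gpiochip_get_data(gc);

        /* failures now reach the caller instead of being dropped */
        return my_chip_write_bit(chip, offset, value);
}

/* at probe time: */
gc->set = my_gpio_set;          /* formerly gc->set_rv */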
+2 -2
include/linux/gpio/generic.h
··· 88 88 gpio_generic_chip_set(struct gpio_generic_chip *chip, unsigned int offset, 89 89 int value) 90 90 { 91 - if (WARN_ON(!chip->gc.set_rv)) 91 + if (WARN_ON(!chip->gc.set)) 92 92 return -EOPNOTSUPP; 93 93 94 - return chip->gc.set_rv(&chip->gc, offset, value); 94 + return chip->gc.set(&chip->gc, offset, value); 95 95 } 96 96 97 97 #define gpio_generic_chip_lock(gen_gc) \
+2 -1
include/linux/ioprio.h
··· 60 60 int prio; 61 61 62 62 if (!ioc) 63 - return IOPRIO_DEFAULT; 63 + return IOPRIO_PRIO_VALUE(task_nice_ioclass(p), 64 + task_nice_ioprio(p)); 64 65 65 66 if (p != current) 66 67 lockdep_assert_held(&p->alloc_lock);
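A task without an io_context now reports an effective priority derived from its scheduling class and nice value rather than the flat IOPRIO_DEFAULT. For example (sketch, not from the patch):

int prio = get_current_ioprio();

/* a nice-10 CFS task with no io_context now yields IOPRIO_CLASS_BE
 * with a level derived from nice, not the fixed default */
pr_debug("ioclass=%d level=%d\n",
         IOPRIO_PRIO_CLASS(prio), IOPRIO_PRIO_DATA(prio));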
+4 -1
include/linux/netdevice.h
··· 2071 2071 * @max_pacing_offload_horizon: max EDT offload horizon in nsec. 2072 2072 * @napi_config: An array of napi_config structures containing per-NAPI 2073 2073 * settings. 2074 + * @num_napi_configs: number of allocated NAPI config structs, 2075 + * always >= max(num_rx_queues, num_tx_queues). 2074 2076 * @gro_flush_timeout: timeout for GRO layer in NAPI 2075 2077 * @napi_defer_hard_irqs: If not zero, provides a counter that would 2076 2078 * allow to avoid NIC hard IRQ, on busy queues. ··· 2484 2482 2485 2483 u64 max_pacing_offload_horizon; 2486 2484 struct napi_config *napi_config; 2487 - unsigned long gro_flush_timeout; 2485 + u32 num_napi_configs; 2488 2486 u32 napi_defer_hard_irqs; 2487 + unsigned long gro_flush_timeout; 2489 2488 2490 2489 /** 2491 2490 * @up: copy of @state's IFF_UP, but safe to read with just @lock.
+8
include/linux/nfs_fs.h
··· 161 161 unsigned long cache_validity; /* bit mask */ 162 162 163 163 /* 164 + * NFS Attributes not included in struct inode 165 + */ 166 + 167 + struct timespec64 btime; 168 + 169 + /* 164 170 * read_cache_jiffies is when we started read-caching this inode. 165 171 * attrtimeo is for how long the cached information is assumed 166 172 * to be valid. A successful attribute revalidation doubles ··· 322 316 #define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */ 323 317 #define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */ 324 318 #define NFS_INO_INVALID_MODE BIT(17) /* cached mode is invalid */ 319 + #define NFS_INO_INVALID_BTIME BIT(18) /* cached btime is invalid */ 325 320 326 321 #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ 327 322 | NFS_INO_INVALID_CTIME \ 328 323 | NFS_INO_INVALID_MTIME \ 324 + | NFS_INO_INVALID_BTIME \ 329 325 | NFS_INO_INVALID_SIZE \ 330 326 | NFS_INO_INVALID_NLINK \ 331 327 | NFS_INO_INVALID_MODE \
+4 -4
include/linux/nfs_fs_sb.h
··· 172 172 #define NFS_MOUNT_FORCE_RDIRPLUS 0x20000000 173 173 #define NFS_MOUNT_NETUNREACH_FATAL 0x40000000 174 174 175 - unsigned int fattr_valid; /* Valid attributes */ 176 175 unsigned int caps; /* server capabilities */ 176 + __u64 fattr_valid; /* Valid attributes */ 177 177 unsigned int rsize; /* read size */ 178 178 unsigned int rpages; /* read size (in pages) */ 179 179 unsigned int wsize; /* write size */ 180 - unsigned int wpages; /* write size (in pages) */ 181 180 unsigned int wtmult; /* server disk block size */ 182 181 unsigned int dtsize; /* readdir size */ 183 182 unsigned short port; /* "port=" setting */ ··· 202 203 struct nfs_fsid fsid; 203 204 int s_sysfs_id; /* sysfs dentry index */ 204 205 __u64 maxfilesize; /* maximum file size */ 205 - struct timespec64 time_delta; /* smallest time granularity */ 206 206 unsigned long mount_time; /* when this fs was mounted */ 207 207 struct super_block *super; /* VFS super block */ 208 208 dev_t s_dev; /* superblock dev numbers */ ··· 246 248 filesystem */ 247 249 struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */ 248 250 struct rpc_wait_queue roc_rpcwaitq; 249 - void *pnfs_ld_data; /* per mount point data */ 250 251 251 252 /* the following fields are protected by nfs_client->cl_lock */ 252 253 struct rb_root state_owners; ··· 254 257 struct list_head state_owners_lru; 255 258 struct list_head layouts; 256 259 struct list_head delegations; 260 + atomic_long_t nr_active_delegations; 261 + unsigned int delegation_hash_mask; 262 + struct hlist_head *delegation_hash_table; 257 263 struct list_head ss_copies; 258 264 struct list_head ss_src_copies; 259 265
+30 -27
include/linux/nfs_xdr.h
··· 45 45 }; 46 46 47 47 struct nfs_fattr { 48 - unsigned int valid; /* which fields are valid */ 48 + __u64 valid; /* which fields are valid */ 49 49 umode_t mode; 50 50 __u32 nlink; 51 51 kuid_t uid; ··· 67 67 struct timespec64 atime; 68 68 struct timespec64 mtime; 69 69 struct timespec64 ctime; 70 + struct timespec64 btime; 70 71 __u64 change_attr; /* NFSv4 change attribute */ 71 72 __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ 72 73 __u64 pre_size; /* pre_op_attr.size */ ··· 81 80 struct nfs4_label *label; 82 81 }; 83 82 84 - #define NFS_ATTR_FATTR_TYPE (1U << 0) 85 - #define NFS_ATTR_FATTR_MODE (1U << 1) 86 - #define NFS_ATTR_FATTR_NLINK (1U << 2) 87 - #define NFS_ATTR_FATTR_OWNER (1U << 3) 88 - #define NFS_ATTR_FATTR_GROUP (1U << 4) 89 - #define NFS_ATTR_FATTR_RDEV (1U << 5) 90 - #define NFS_ATTR_FATTR_SIZE (1U << 6) 91 - #define NFS_ATTR_FATTR_PRESIZE (1U << 7) 92 - #define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8) 93 - #define NFS_ATTR_FATTR_SPACE_USED (1U << 9) 94 - #define NFS_ATTR_FATTR_FSID (1U << 10) 95 - #define NFS_ATTR_FATTR_FILEID (1U << 11) 96 - #define NFS_ATTR_FATTR_ATIME (1U << 12) 97 - #define NFS_ATTR_FATTR_MTIME (1U << 13) 98 - #define NFS_ATTR_FATTR_CTIME (1U << 14) 99 - #define NFS_ATTR_FATTR_PREMTIME (1U << 15) 100 - #define NFS_ATTR_FATTR_PRECTIME (1U << 16) 101 - #define NFS_ATTR_FATTR_CHANGE (1U << 17) 102 - #define NFS_ATTR_FATTR_PRECHANGE (1U << 18) 103 - #define NFS_ATTR_FATTR_V4_LOCATIONS (1U << 19) 104 - #define NFS_ATTR_FATTR_V4_REFERRAL (1U << 20) 105 - #define NFS_ATTR_FATTR_MOUNTPOINT (1U << 21) 106 - #define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 22) 107 - #define NFS_ATTR_FATTR_OWNER_NAME (1U << 23) 108 - #define NFS_ATTR_FATTR_GROUP_NAME (1U << 24) 109 - #define NFS_ATTR_FATTR_V4_SECURITY_LABEL (1U << 25) 83 + #define NFS_ATTR_FATTR_TYPE BIT_ULL(0) 84 + #define NFS_ATTR_FATTR_MODE BIT_ULL(1) 85 + #define NFS_ATTR_FATTR_NLINK BIT_ULL(2) 86 + #define NFS_ATTR_FATTR_OWNER BIT_ULL(3) 87 + #define NFS_ATTR_FATTR_GROUP BIT_ULL(4) 88 + #define NFS_ATTR_FATTR_RDEV BIT_ULL(5) 89 + #define NFS_ATTR_FATTR_SIZE BIT_ULL(6) 90 + #define NFS_ATTR_FATTR_PRESIZE BIT_ULL(7) 91 + #define NFS_ATTR_FATTR_BLOCKS_USED BIT_ULL(8) 92 + #define NFS_ATTR_FATTR_SPACE_USED BIT_ULL(9) 93 + #define NFS_ATTR_FATTR_FSID BIT_ULL(10) 94 + #define NFS_ATTR_FATTR_FILEID BIT_ULL(11) 95 + #define NFS_ATTR_FATTR_ATIME BIT_ULL(12) 96 + #define NFS_ATTR_FATTR_MTIME BIT_ULL(13) 97 + #define NFS_ATTR_FATTR_CTIME BIT_ULL(14) 98 + #define NFS_ATTR_FATTR_PREMTIME BIT_ULL(15) 99 + #define NFS_ATTR_FATTR_PRECTIME BIT_ULL(16) 100 + #define NFS_ATTR_FATTR_CHANGE BIT_ULL(17) 101 + #define NFS_ATTR_FATTR_PRECHANGE BIT_ULL(18) 102 + #define NFS_ATTR_FATTR_V4_LOCATIONS BIT_ULL(19) 103 + #define NFS_ATTR_FATTR_V4_REFERRAL BIT_ULL(20) 104 + #define NFS_ATTR_FATTR_MOUNTPOINT BIT_ULL(21) 105 + #define NFS_ATTR_FATTR_MOUNTED_ON_FILEID BIT_ULL(22) 106 + #define NFS_ATTR_FATTR_OWNER_NAME BIT_ULL(23) 107 + #define NFS_ATTR_FATTR_GROUP_NAME BIT_ULL(24) 108 + #define NFS_ATTR_FATTR_V4_SECURITY_LABEL BIT_ULL(25) 109 + #define NFS_ATTR_FATTR_BTIME BIT_ULL(26) 110 110 111 111 #define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \ 112 112 | NFS_ATTR_FATTR_MODE \ ··· 128 126 | NFS_ATTR_FATTR_SPACE_USED) 129 127 #define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \ 130 128 | NFS_ATTR_FATTR_SPACE_USED \ 129 + | NFS_ATTR_FATTR_BTIME \ 131 130 | NFS_ATTR_FATTR_V4_SECURITY_LABEL) 132 131 133 132 /*
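Widening the mask to BIT_ULL makes room for bit 26, NFS_ATTR_FATTR_BTIME, which is why fattr_valid and nfs_fattr.valid become __u64 in the hunks above. A hedged sketch of a consumer testing the new bit (the getattr plumbing lives elsewhere in the series):

if (fattr->valid & NFS_ATTR_FATTR_BTIME) {
        /* e.g. satisfy a STATX_BTIME request */
        stat->btime = fattr->btime;
        stat->result_mask |= STATX_BTIME;
}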
+1 -18
include/linux/sbitmap.h
··· 210 210 int sbitmap_get(struct sbitmap *sb); 211 211 212 212 /** 213 - * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap, 214 - * limiting the depth used from each word. 215 - * @sb: Bitmap to allocate from. 216 - * @shallow_depth: The maximum number of bits to allocate from a single word. 217 - * 218 - * This rather specific operation allows for having multiple users with 219 - * different allocation limits. E.g., there can be a high-priority class that 220 - * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow() 221 - * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority 222 - * class can only allocate half of the total bits in the bitmap, preventing it 223 - * from starving out the high-priority class. 224 - * 225 - * Return: Non-negative allocated bit number if successful, -1 otherwise. 226 - */ 227 - int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth); 228 - 229 - /** 230 213 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap. 231 214 * @sb: Bitmap to check. 232 215 * ··· 461 478 * sbitmap_queue, limiting the depth used from each word, with preemption 462 479 * already disabled. 463 480 * @sbq: Bitmap queue to allocate from. 464 - * @shallow_depth: The maximum number of bits to allocate from a single word. 481 + * @shallow_depth: The maximum number of bits to allocate from the queue. 465 482 * See sbitmap_get_shallow(). 466 483 * 467 484 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
-9
include/linux/sunrpc/xdr.h
··· 130 130 __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int len); 131 131 __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int len); 132 132 __be32 *xdr_encode_string(__be32 *p, const char *s); 133 - __be32 *xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp, 134 - unsigned int maxlen); 135 133 __be32 *xdr_encode_netobj(__be32 *p, const struct xdr_netobj *); 136 - __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *); 137 134 138 135 void xdr_inline_pages(struct xdr_buf *, unsigned int, 139 136 struct page **, unsigned int, unsigned int); ··· 339 342 return xdr->nwords << 2; 340 343 } 341 344 342 - ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, 343 - size_t size); 344 - ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr, 345 - size_t maxlen, gfp_t gfp_flags); 346 - ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, 347 - size_t size); 348 345 ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, 349 346 size_t maxlen, gfp_t gfp_flags); 350 347 ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor,
+5 -1
include/net/devlink.h
··· 78 78 * @flavour: flavour of the port 79 79 * @split: indicates if this is split port 80 80 * @splittable: indicates if the port can be split. 81 + * @no_phys_port_name: skip automatic phys_port_name generation; for 82 + * compatibility only, newly added driver/port instance 83 + * should never set this. 81 84 * @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink. 82 85 * @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL 83 86 * @phys: physical port attributes ··· 90 87 */ 91 88 struct devlink_port_attrs { 92 89 u8 split:1, 93 - splittable:1; 90 + splittable:1, 91 + no_phys_port_name:1; 94 92 u32 lanes; 95 93 enum devlink_port_flavour flavour; 96 94 struct netdev_phys_item_id switch_id;
+13
include/net/ip_vs.h
··· 1163 1163 return housekeeping_cpumask(HK_TYPE_KTHREAD); 1164 1164 } 1165 1165 1166 + static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs) 1167 + { 1168 + if (ipvs->est_cpulist_valid) 1169 + return ipvs->sysctl_est_cpulist; 1170 + else 1171 + return NULL; 1172 + } 1173 + 1166 1174 static inline int sysctl_est_nice(struct netns_ipvs *ipvs) 1167 1175 { 1168 1176 return ipvs->sysctl_est_nice; ··· 1276 1268 static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs) 1277 1269 { 1278 1270 return housekeeping_cpumask(HK_TYPE_KTHREAD); 1271 + } 1272 + 1273 + static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs) 1274 + { 1275 + return NULL; 1279 1276 } 1280 1277 1281 1278 static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
-1
include/net/kcm.h
··· 71 71 struct list_head wait_psock_list; 72 72 struct sk_buff *seq_skb; 73 73 struct mutex tx_mutex; 74 - u32 tx_stopped : 1; 75 74 76 75 /* Don't use bit fields here, these are set under different locks */ 77 76 bool tx_wait;
+2
include/net/page_pool/types.h
··· 265 265 struct xdp_mem_info; 266 266 267 267 #ifdef CONFIG_PAGE_POOL 268 + void page_pool_enable_direct_recycling(struct page_pool *pool, 269 + struct napi_struct *napi); 268 270 void page_pool_disable_direct_recycling(struct page_pool *pool); 269 271 void page_pool_destroy(struct page_pool *pool); 270 272 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
+1 -1
include/sound/tas2781-tlv.h
··· 15 15 #ifndef __TAS2781_TLV_H__ 16 16 #define __TAS2781_TLV_H__ 17 17 18 - static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0); 18 + static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 50, 0); 19 19 static const __maybe_unused DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0); 20 20 21 21 #endif
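The DECLARE_TLV_DB_SCALE() arguments are in 0.01 dB units, so the one-word change above halves the step size of the dvc_tlv volume control. Read under the usual include/sound/tlv.h semantics, DECLARE_TLV_DB_SCALE(name, min, step, mute_at_min):

/* old: dvc_tlv = -10000, 100, 0  ->  -100 dB floor, 1.00 dB per control step
 * new: dvc_tlv = -10000,  50, 0  ->  -100 dB floor, 0.50 dB per control step
 * mute_at_min = 0, so the lowest step reads as -100 dB rather than a hard mute. */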
+4
include/uapi/linux/io_uring.h
··· 392 392 * the starting buffer ID in cqe->flags as per 393 393 * usual for provided buffer usage. The buffers 394 394 * will be contiguous from the starting buffer ID. 395 + * 396 + * IORING_SEND_VECTORIZED If set, SEND[_ZC] will take a pointer to an io_vec 397 + * to allow vectorized send operations. 395 398 */ 396 399 #define IORING_RECVSEND_POLL_FIRST (1U << 0) 397 400 #define IORING_RECV_MULTISHOT (1U << 1) 398 401 #define IORING_RECVSEND_FIXED_BUF (1U << 2) 399 402 #define IORING_SEND_ZC_REPORT_USAGE (1U << 3) 400 403 #define IORING_RECVSEND_BUNDLE (1U << 4) 401 404 #define IORING_SEND_VECTORIZED (1U << 5) 402 405 403 406 /* 404 407 * cqe.res for IORING_CQE_F_NOTIF if
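A hedged userspace sketch of how the new flag is meant to be used, assuming liburing's stock helpers: with IORING_SEND_VECTORIZED, sqe->addr points at an iovec array and sqe->len carries the vector count, matching the io_net_import_vec() call added to io_uring/net.c below; the send/recv flag bits travel in sqe->ioprio. queue_vectored_send() is a hypothetical helper, not part of any API.

#include <liburing.h>
#include <sys/uio.h>

static int queue_vectored_send(struct io_uring *ring, int sockfd,
                               struct iovec *iov, unsigned int nr_vecs)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
                return -1;                      /* submission queue full */
        io_uring_prep_send(sqe, sockfd, iov, nr_vecs, 0);
        sqe->ioprio |= IORING_SEND_VECTORIZED;  /* addr/len now describe iovecs */
        return io_uring_submit(ring);
}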
+1 -1
include/uapi/linux/raid/md_p.h
··· 173 173 #else 174 174 #error unspecified endianness 175 175 #endif 176 - __u32 recovery_cp; /* 11 recovery checkpoint sector count */ 176 + __u32 resync_offset; /* 11 resync checkpoint sector count */ 177 177 /* There are only valid for minor_version > 90 */ 178 178 __u64 reshape_position; /* 12,13 next address in array-space for reshape */ 179 179 __u32 new_level; /* 14 new level we are reshaping to */
+17 -17
include/uapi/linux/vt.h
··· 14 14 /* Note: the ioctl VT_GETSTATE does not work for 15 15 consoles 16 and higher (since it returns a short) */ 16 16 17 - /* 'V' to avoid collision with termios and kd */ 17 + /* 0x56 is 'V', to avoid collision with termios and kd */ 18 18 19 - #define VT_OPENQRY _IO('V', 0x00) /* find available vt */ 19 + #define VT_OPENQRY 0x5600 /* find available vt */ 20 20 21 21 struct vt_mode { 22 22 __u8 mode; /* vt mode */ ··· 25 25 __s16 acqsig; /* signal to raise on acquisition */ 26 26 __s16 frsig; /* unused (set to 0) */ 27 27 }; 28 - #define VT_GETMODE _IO('V', 0x01) /* get mode of active vt */ 29 - #define VT_SETMODE _IO('V', 0x02) /* set mode of active vt */ 28 + #define VT_GETMODE 0x5601 /* get mode of active vt */ 29 + #define VT_SETMODE 0x5602 /* set mode of active vt */ 30 30 #define VT_AUTO 0x00 /* auto vt switching */ 31 31 #define VT_PROCESS 0x01 /* process controls switching */ 32 32 #define VT_ACKACQ 0x02 /* acknowledge switch */ ··· 36 36 __u16 v_signal; /* signal to send */ 37 37 __u16 v_state; /* vt bitmask */ 38 38 }; 39 - #define VT_GETSTATE _IO('V', 0x03) /* get global vt state info */ 40 - #define VT_SENDSIG _IO('V', 0x04) /* signal to send to bitmask of vts */ 39 + #define VT_GETSTATE 0x5603 /* get global vt state info */ 40 + #define VT_SENDSIG 0x5604 /* signal to send to bitmask of vts */ 41 41 42 - #define VT_RELDISP _IO('V', 0x05) /* release display */ 42 + #define VT_RELDISP 0x5605 /* release display */ 43 43 44 - #define VT_ACTIVATE _IO('V', 0x06) /* make vt active */ 45 - #define VT_WAITACTIVE _IO('V', 0x07) /* wait for vt active */ 46 - #define VT_DISALLOCATE _IO('V', 0x08) /* free memory associated to vt */ 44 + #define VT_ACTIVATE 0x5606 /* make vt active */ 45 + #define VT_WAITACTIVE 0x5607 /* wait for vt active */ 46 + #define VT_DISALLOCATE 0x5608 /* free memory associated to vt */ 47 47 48 48 struct vt_sizes { 49 49 __u16 v_rows; /* number of rows */ 50 50 __u16 v_cols; /* number of columns */ 51 51 __u16 v_scrollsize; /* number of lines of scrollback */ 52 52 }; 53 - #define VT_RESIZE _IO('V', 0x09) /* set kernel's idea of screensize */ 53 + #define VT_RESIZE 0x5609 /* set kernel's idea of screensize */ 54 54 55 55 struct vt_consize { 56 56 __u16 v_rows; /* number of rows */ ··· 60 60 __u16 v_vcol; /* number of pixel columns on screen */ 61 61 __u16 v_ccol; /* number of pixel columns per character */ 62 62 }; 63 - #define VT_RESIZEX _IO('V', 0x0A) /* set kernel's idea of screensize + more */ 64 - #define VT_LOCKSWITCH _IO('V', 0x0B) /* disallow vt switching */ 65 - #define VT_UNLOCKSWITCH _IO('V', 0x0C) /* allow vt switching */ 66 - #define VT_GETHIFONTMASK _IO('V', 0x0D) /* return hi font mask */ 63 + #define VT_RESIZEX 0x560A /* set kernel's idea of screensize + more */ 64 + #define VT_LOCKSWITCH 0x560B /* disallow vt switching */ 65 + #define VT_UNLOCKSWITCH 0x560C /* allow vt switching */ 66 + #define VT_GETHIFONTMASK 0x560D /* return hi font mask */ 67 67 68 68 struct vt_event { 69 69 __u32 event; ··· 77 77 __u32 pad[4]; /* Padding for expansion */ 78 78 }; 79 79 80 - #define VT_WAITEVENT _IO('V', 0x0E) /* Wait for an event */ 80 + #define VT_WAITEVENT 0x560E /* Wait for an event */ 81 81 82 82 struct vt_setactivate { 83 83 __u32 console; 84 84 struct vt_mode mode; 85 85 }; 86 86 87 - #define VT_SETACTIVATE _IO('V', 0x0F) /* Activate and set the mode of a console */ 87 + #define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */ 88 88 89 89 /* get console size and cursor position */ 90 90 struct vt_consizecsrpos {
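The restored literal values are numerically identical to the _IO('V', nr) spellings they replace: 'V' is 0x56 and _IO() encodes no direction or size bits, so the revert changes spelling only, not the ioctl ABI. A compile-time check of the equivalence (userspace C, assuming the asm-generic ioctl encoding):

#include <assert.h>
#include <linux/ioctl.h>

/* _IO(type, nr) == _IOC(_IOC_NONE, type, nr, 0) == (type << 8) | nr */
static_assert(_IO('V', 0x00) == 0x5600, "VT_OPENQRY value unchanged");
static_assert(_IO('V', 0x0F) == 0x560F, "VT_SETACTIVATE value unchanged");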
+1 -1
io_uring/memmap.c
··· 156 156 unsigned long mmap_offset) 157 157 { 158 158 gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN; 159 - unsigned long size = mr->nr_pages << PAGE_SHIFT; 159 + size_t size = (size_t) mr->nr_pages << PAGE_SHIFT; 160 160 unsigned long nr_allocated; 161 161 struct page **pages; 162 162 void *p;
+7 -2
io_uring/net.c
··· 382 382 } 383 383 if (req->flags & REQ_F_BUFFER_SELECT) 384 384 return 0; 385 + 386 + if (sr->flags & IORING_SEND_VECTORIZED) 387 + return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE); 388 + 385 389 return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter); 386 390 } 387 391 ··· 413 409 return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE); 414 410 } 415 411 416 - #define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE) 412 + #define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE | IORING_SEND_VECTORIZED) 417 413 418 414 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) 419 415 { ··· 1322 1318 } 1323 1319 1324 1320 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF) 1325 - #define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE) 1321 + #define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE | \ 1322 + IORING_SEND_VECTORIZED) 1326 1323 1327 1324 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) 1328 1325 {
+3
kernel/bpf/verifier.c
··· 23114 23114 23115 23115 for (i = 0; i < env->scc_cnt; ++i) { 23116 23116 info = env->scc_info[i]; 23117 + if (!info) 23118 + continue; 23117 23119 for (j = 0; j < info->num_visits; j++) 23118 23120 free_backedges(&info->visits[j]); 23119 23121 kvfree(info); ··· 24556 24554 err = -ENOMEM; 24557 24555 goto exit; 24558 24556 } 24557 + env->scc_cnt = next_scc_id; 24559 24558 exit: 24560 24559 kvfree(stack); 24561 24560 kvfree(pre);
-3
kernel/cpu.c
··· 1309 1309 */ 1310 1310 irq_lock_sparse(); 1311 1311 1312 - /* 1313 - * So now all preempt/rcu users must observe !cpu_active(). 1314 - */ 1315 1312 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); 1316 1313 if (err) { 1317 1314 /* CPU refused to die */
+1 -1
kernel/fork.c
··· 689 689 mm_pasid_drop(mm); 690 690 mm_destroy_cid(mm); 691 691 percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS); 692 + futex_hash_free(mm); 692 693 693 694 free_mm(mm); 694 695 } ··· 1138 1137 if (mm->binfmt) 1139 1138 module_put(mm->binfmt->module); 1140 1139 lru_gen_del_mm(mm); 1141 - futex_hash_free(mm); 1142 1140 mmdrop(mm); 1143 1141 } 1144 1142
+3 -1
kernel/irq/irq_test.c
··· 1 1 // SPDX-License-Identifier: LGPL-2.1+ 2 2 3 + #include <linux/cleanup.h> 3 4 #include <linux/cpu.h> 4 5 #include <linux/cpumask.h> 5 6 #include <linux/interrupt.h> ··· 135 134 disable_irq(virq); 136 135 KUNIT_EXPECT_EQ(test, desc->depth, 1); 137 136 138 - irq_shutdown_and_deactivate(desc); 137 + scoped_guard(raw_spinlock_irqsave, &desc->lock) 138 + irq_shutdown_and_deactivate(desc); 139 139 140 140 KUNIT_EXPECT_FALSE(test, irqd_is_activated(data)); 141 141 KUNIT_EXPECT_FALSE(test, irqd_is_started(data));
+1
kernel/kthread.c
··· 893 893 894 894 return ret; 895 895 } 896 + EXPORT_SYMBOL_GPL(kthread_affine_preferred); 896 897 897 898 /* 898 899 * Re-affine kthreads according to their preferences
+2
kernel/rcu/tree.c
··· 4262 4262 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1; 4263 4263 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); 4264 4264 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 4265 + 4266 + rcu_preempt_deferred_qs_init(rdp); 4265 4267 rcu_spawn_rnp_kthreads(rnp); 4266 4268 rcu_spawn_cpu_nocb_kthread(cpu); 4267 4269 ASSERT_EXCLUSIVE_WRITER(rcu_state.n_online_cpus);
+1
kernel/rcu/tree.h
··· 488 488 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); 489 489 static void rcu_flavor_sched_clock_irq(int user); 490 490 static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck); 491 + static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp); 491 492 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 492 493 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); 493 494 static bool rcu_is_callbacks_kthread(struct rcu_data *rdp);
+6 -2
kernel/rcu/tree_plugin.h
··· 763 763 cpu_online(rdp->cpu)) { 764 764 // Get scheduler to re-evaluate and call hooks. 765 765 // If !IRQ_WORK, FQS scan will eventually IPI. 766 - rdp->defer_qs_iw = 767 - IRQ_WORK_INIT_HARD(rcu_preempt_deferred_qs_handler); 768 766 rdp->defer_qs_iw_pending = DEFER_QS_PENDING; 769 767 irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu); 770 768 } ··· 902 904 } 903 905 } 904 906 907 + static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp) 908 + { 909 + rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(rcu_preempt_deferred_qs_handler); 910 + } 905 911 #else /* #ifdef CONFIG_PREEMPT_RCU */ 906 912 907 913 /* ··· 1104 1102 { 1105 1103 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); 1106 1104 } 1105 + 1106 + static void rcu_preempt_deferred_qs_init(struct rcu_data *rdp) { } 1107 1107 1108 1108 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ 1109 1109
+1 -1
kernel/smp.c
··· 1018 1018 * @cond_func: A callback function that is passed a cpu id and 1019 1019 * the info parameter. The function is called 1020 1020 * with preemption disabled. The function should 1021 - * return a blooean value indicating whether to IPI 1021 + * return a boolean value indicating whether to IPI 1022 1022 * the specified CPU. 1023 1023 * @func: The function to run on all applicable CPUs. 1024 1024 * This must be fast and non-blocking.
+1 -1
lib/ref_tracker.c
··· 434 434 if (dentry && !xa_is_err(dentry)) 435 435 return; 436 436 437 - ret = snprintf(name, sizeof(name), "%s@%px", dir->class, dir); 437 + ret = snprintf(name, sizeof(name), "%s@%p", dir->class, dir); 438 438 name[sizeof(name) - 1] = '\0'; 439 439 440 440 if (ret < sizeof(name)) {
+45 -29
lib/sbitmap.c
··· 208 208 return nr; 209 209 } 210 210 211 + static unsigned int __map_depth_with_shallow(const struct sbitmap *sb, 212 + int index, 213 + unsigned int shallow_depth) 214 + { 215 + u64 shallow_word_depth; 216 + unsigned int word_depth, reminder; 217 + 218 + word_depth = __map_depth(sb, index); 219 + if (shallow_depth >= sb->depth) 220 + return word_depth; 221 + 222 + shallow_word_depth = word_depth * shallow_depth; 223 + reminder = do_div(shallow_word_depth, sb->depth); 224 + 225 + if (reminder >= (index + 1) * word_depth) 226 + shallow_word_depth++; 227 + 228 + return (unsigned int)shallow_word_depth; 229 + } 230 + 211 231 static int sbitmap_find_bit(struct sbitmap *sb, 212 - unsigned int depth, 232 + unsigned int shallow_depth, 213 233 unsigned int index, 214 234 unsigned int alloc_hint, 215 235 bool wrap) ··· 238 218 int nr = -1; 239 219 240 220 for (i = 0; i < sb->map_nr; i++) { 241 - nr = sbitmap_find_bit_in_word(&sb->map[index], 242 - min_t(unsigned int, 243 - __map_depth(sb, index), 244 - depth), 245 - alloc_hint, wrap); 221 + unsigned int depth = __map_depth_with_shallow(sb, index, 222 + shallow_depth); 246 + 223 224 + if (depth) 225 + nr = sbitmap_find_bit_in_word(&sb->map[index], depth, 226 + alloc_hint, wrap); 247 227 if (nr != -1) { 248 228 nr += index << sb->shift; 249 229 break; ··· 307 287 return sbitmap_find_bit(sb, shallow_depth, index, alloc_hint, true); 308 288 } 309 289 310 - int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth) 290 + /** 291 + * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap, 292 + * limiting the depth used from each word. 293 + * @sb: Bitmap to allocate from. 294 + * @shallow_depth: The maximum number of bits to allocate from the bitmap. 295 + * 296 + * This rather specific operation allows for having multiple users with 297 + * different allocation limits. E.g., there can be a high-priority class that 298 + * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow() 299 + * with a @shallow_depth of (sb->depth >> 1). Then, the low-priority 300 + * class can only allocate half of the total bits in the bitmap, preventing it 301 + * from starving out the high-priority class. 302 + * 303 + * Return: Non-negative allocated bit number if successful, -1 otherwise. 304 + */ 305 + static int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth) 311 306 { 312 307 int nr; 313 308 unsigned int hint, depth; ··· 337 302 338 303 return nr; 339 304 } 340 - EXPORT_SYMBOL_GPL(sbitmap_get_shallow); 341 305 342 306 bool sbitmap_any_bit_set(const struct sbitmap *sb) 343 307 { ··· 440 406 static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq, 441 407 unsigned int depth) 442 408 { 443 - unsigned int wake_batch; 444 - unsigned int shallow_depth; 445 - 446 - /* 447 - * Each full word of the bitmap has bits_per_word bits, and there might 448 - * be a partial word. There are depth / bits_per_word full words and 449 - * depth % bits_per_word bits left over. In bitwise arithmetic: 450 - * 451 - * bits_per_word = 1 << shift 452 - * depth / bits_per_word = depth >> shift 453 - * depth % bits_per_word = depth & ((1 << shift) - 1) 454 - * 455 - * Each word can be limited to sbq->min_shallow_depth bits. 456 - */ 457 - shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth); 458 - depth = ((depth >> sbq->sb.shift) * shallow_depth + 459 - min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth)); 460 - wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1, 461 - SBQ_WAKE_BATCH); 462 - 463 - return wake_batch; 409 + return clamp_t(unsigned int, 410 + min(depth, sbq->min_shallow_depth) / SBQ_WAIT_QUEUES, 411 + 1, SBQ_WAKE_BATCH); 464 412 } 465 413 466 414 int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
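The new __map_depth_with_shallow() spreads the shallow_depth budget across words in proportion to each word's size, so the per-word limits now sum exactly to shallow_depth instead of the limit being applied to every word independently. A small userspace model of the arithmetic (a sketch only: do_div() becomes plain 64-bit division and word sizes are passed in directly):

#include <stdio.h>

static unsigned int map_depth_with_shallow(unsigned int word_depth,
                                           unsigned int index,
                                           unsigned int total_depth,
                                           unsigned int shallow_depth)
{
        unsigned long long scaled = (unsigned long long)word_depth * shallow_depth;
        unsigned int rem = scaled % total_depth;

        scaled /= total_depth;
        /* Same rounding rule as the kernel helper: give the truncated
         * fractions back to words whose end the remainder has reached. */
        if (rem >= (index + 1ULL) * word_depth)
                scaled++;
        return (unsigned int)scaled;
}

int main(void)
{
        unsigned int words[] = { 32, 16 };      /* depth 48, shift 5 */
        unsigned int total = 0;

        for (unsigned int i = 0; i < 2; i++) {
                unsigned int d = map_depth_with_shallow(words[i], i, 48, 20);

                printf("word %u: %u usable bits\n", i, d);  /* 13, then 7 */
                total += d;
        }
        printf("total: %u\n", total);           /* 20, equal to shallow_depth */
        return 0;
}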
+1 -1
mm/kasan/kasan_test_c.c
··· 47 47 * Some tests use these global variables to store return values from function 48 48 * calls that could otherwise be eliminated by the compiler as dead code. 49 49 */ 50 - static volatile void *kasan_ptr_result; 50 + static void *volatile kasan_ptr_result; 51 51 static volatile int kasan_int_result; 52 52 53 53 /* Probe for console output: obtains test_status lines of interest. */
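The fix here is qualifier placement, not the qualifier itself: volatile void * declares a pointer to volatile data, while void *volatile makes the pointer object volatile, so stores into it cannot be eliminated as dead code, which is exactly what the tests rely on. A minimal plain-C illustration:

static volatile int *ptr_to_volatile;   /* pointee is volatile, pointer is not */
static int *volatile volatile_pointer;  /* the pointer object itself is volatile */

static void keep_result_alive(int *result)
{
        /* The compiler must emit this store: the target is a volatile object */
        volatile_pointer = result;
}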
+1 -1
mm/khugepaged.c
··· 1172 1172 if (result != SCAN_SUCCEED) 1173 1173 goto out_up_write; 1174 1174 /* check if the pmd is still valid */ 1175 + vma_start_write(vma); 1175 1176 result = check_pmd_still_valid(mm, address, pmd); 1176 1177 if (result != SCAN_SUCCEED) 1177 1178 goto out_up_write; 1178 1179 1179 - vma_start_write(vma); 1180 1180 anon_vma_lock_write(vma->anon_vma); 1181 1181 1182 1182 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
+9 -1
mm/kmemleak.c
··· 470 470 { 471 471 unsigned long flags; 472 472 struct kmemleak_object *object; 473 + bool warn = false; 473 474 474 475 /* try the slab allocator first */ 475 476 if (object_cache) { ··· 489 488 else if (mem_pool_free_count) 490 489 object = &mem_pool[--mem_pool_free_count]; 491 490 else 492 - pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n"); 491 + warn = true; 493 492 raw_spin_unlock_irqrestore(&kmemleak_lock, flags); 493 + if (warn) 494 + pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n"); 494 495 495 496 return object; 496 497 } ··· 2184 2181 static void __kmemleak_do_cleanup(void) 2185 2182 { 2186 2183 struct kmemleak_object *object, *tmp; 2184 + unsigned int cnt = 0; 2187 2185 2188 2186 /* 2189 2187 * Kmemleak has already been disabled, no need for RCU list traversal ··· 2193 2189 list_for_each_entry_safe(object, tmp, &object_list, object_list) { 2194 2190 __remove_object(object); 2195 2191 __delete_object(object); 2192 + 2193 + /* Call cond_resched() once per 64 iterations to avoid soft lockup */ 2194 + if (!(++cnt & 0x3f)) 2195 + cond_resched(); 2196 2196 } 2197 2197 } 2198 2198
+10 -13
mm/mprotect.c
··· 120 120 121 121 static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr, 122 122 pte_t oldpte, pte_t *pte, int target_node, 123 - struct folio **foliop) 123 + struct folio *folio) 124 124 { 125 - struct folio *folio = NULL; 126 125 bool ret = true; 127 126 bool toptier; 128 127 int nid; ··· 130 131 if (pte_protnone(oldpte)) 131 132 goto skip; 132 133 133 - folio = vm_normal_folio(vma, addr, oldpte); 134 134 if (!folio) 135 135 goto skip; 136 136 ··· 171 173 folio_xchg_access_time(folio, jiffies_to_msecs(jiffies)); 172 174 173 175 skip: 174 - *foliop = folio; 175 176 return ret; 176 177 } 177 178 ··· 228 231 * retrieve sub-batches. 229 232 */ 230 233 static void commit_anon_folio_batch(struct vm_area_struct *vma, 231 - struct folio *folio, unsigned long addr, pte_t *ptep, 234 + struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep, 232 235 pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb) 233 236 { 234 - struct page *first_page = folio_page(folio, 0); 235 237 bool expected_anon_exclusive; 236 238 int sub_batch_idx = 0; 237 239 int len; ··· 247 251 } 248 252 249 253 static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma, 250 - struct folio *folio, unsigned long addr, pte_t *ptep, 254 + struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep, 251 255 pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb) 252 256 { 253 257 bool set_write; ··· 266 270 /* idx = */ 0, set_write, tlb); 267 271 return; 268 272 } 269 - commit_anon_folio_batch(vma, folio, addr, ptep, oldpte, ptent, nr_ptes, tlb); 273 + commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb); 270 274 } 271 275 272 276 static long change_pte_range(struct mmu_gather *tlb, ··· 301 305 const fpb_t flags = FPB_RESPECT_SOFT_DIRTY | FPB_RESPECT_WRITE; 302 306 int max_nr_ptes = (end - addr) >> PAGE_SHIFT; 303 307 struct folio *folio = NULL; 308 + struct page *page; 304 309 pte_t ptent; 305 310 311 + page = vm_normal_page(vma, addr, oldpte); 312 + if (page) 313 + folio = page_folio(page); 306 314 /* 307 315 * Avoid trapping faults against the zero or KSM 308 316 * pages. See similar comment in change_huge_pmd. 309 317 */ 310 318 if (prot_numa) { 311 319 int ret = prot_numa_skip(vma, addr, oldpte, pte, 312 - target_node, &folio); 320 + target_node, folio); 313 321 if (ret) { 314 322 315 323 /* determine batch to skip */ ··· 322 322 continue; 323 323 } 324 324 } 325 - 326 - if (!folio) 327 - folio = vm_normal_folio(vma, addr, oldpte); 328 325 329 326 nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags); 330 327 ··· 348 351 */ 349 352 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && 350 353 !pte_write(ptent)) 351 - set_write_prot_commit_flush_ptes(vma, folio, 354 + set_write_prot_commit_flush_ptes(vma, folio, page, 352 355 addr, pte, oldpte, ptent, nr_ptes, tlb); 353 356 else 354 357 prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent,
+4
mm/mremap.c
··· 179 179 if (max_nr == 1) 180 180 return 1; 181 181 182 + /* Avoid expensive folio lookup if we stand no chance of benefit. */ 183 + if (pte_batch_hint(ptep, pte) == 1) 184 + return 1; 185 + 182 186 folio = vm_normal_folio(vma, addr, pte); 183 187 if (!folio || !folio_test_large(folio)) 184 188 return 1;
+9 -6
mm/userfaultfd.c
··· 1821 1821 /* Check if we can move the pmd without splitting it. */ 1822 1822 if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) || 1823 1823 !pmd_none(dst_pmdval)) { 1824 - struct folio *folio = pmd_folio(*src_pmd); 1824 + /* Can be a migration entry */ 1825 + if (pmd_present(*src_pmd)) { 1826 + struct folio *folio = pmd_folio(*src_pmd); 1825 1827 1826 - if (!folio || (!is_huge_zero_folio(folio) && 1827 - !PageAnonExclusive(&folio->page))) { 1828 - spin_unlock(ptl); 1829 - err = -EBUSY; 1830 - break; 1828 + if (!is_huge_zero_folio(folio) && 1829 + !PageAnonExclusive(&folio->page)) { 1830 + spin_unlock(ptl); 1831 + err = -EBUSY; 1832 + break; 1833 + } 1831 1834 } 1832 1835 1833 1836 spin_unlock(ptl);
+1
net/bridge/netfilter/Kconfig
··· 43 43 config BRIDGE_NF_EBTABLES_LEGACY 44 44 tristate "Legacy EBTABLES support" 45 45 depends on BRIDGE && NETFILTER_XTABLES_LEGACY 46 + depends on NETFILTER_XTABLES 46 47 default n 47 48 help 48 49 Legacy ebtables packet/frame classifier.
+9 -3
net/core/dev.c
··· 6999 6999 enum netdev_napi_threaded threaded) 7000 7000 { 7001 7001 struct napi_struct *napi; 7002 - int err = 0; 7002 + int i, err = 0; 7003 7003 7004 7004 netdev_assert_locked_or_invisible(dev); 7005 7005 ··· 7020 7020 /* The error should not occur as the kthreads are already created. */ 7021 7021 list_for_each_entry(napi, &dev->napi_list, dev_list) 7022 7022 WARN_ON_ONCE(napi_set_threaded(napi, threaded)); 7023 + 7024 + /* Override the config for all NAPIs even if currently not listed */ 7025 + for (i = 0; i < dev->num_napi_configs; i++) 7026 + dev->napi_config[i].threaded = threaded; 7023 7027 7024 7028 return err; 7025 7029 } ··· 7357 7353 * Clear dev->threaded if kthread creation failed so that 7358 7354 * threaded mode will not be enabled in napi_enable(). 7359 7355 */ 7360 - if (dev->threaded && napi_kthread_create(napi)) 7361 - dev->threaded = NETDEV_NAPI_THREADED_DISABLED; 7356 + if (napi_get_threaded_config(dev, napi)) 7357 + if (napi_kthread_create(napi)) 7358 + dev->threaded = NETDEV_NAPI_THREADED_DISABLED; 7362 7359 netif_napi_set_irq_locked(napi, -1); 7363 7360 } 7364 7361 EXPORT_SYMBOL(netif_napi_add_weight_locked); ··· 11878 11873 goto free_all; 11879 11874 dev->cfg_pending = dev->cfg; 11880 11875 11876 + dev->num_napi_configs = maxqs; 11881 11877 napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config)); 11882 11878 dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT); 11883 11879 if (!dev->napi_config)
+8
net/core/dev.h
··· 323 323 return NETDEV_NAPI_THREADED_DISABLED; 324 324 } 325 325 326 + static inline enum netdev_napi_threaded 327 + napi_get_threaded_config(struct net_device *dev, struct napi_struct *n) 328 + { 329 + if (n->config) 330 + return n->config->threaded; 331 + return dev->threaded; 332 + } 333 + 326 334 int napi_set_threaded(struct napi_struct *n, 327 335 enum netdev_napi_threaded threaded); 328 336
+29
net/core/page_pool.c
··· 1201 1201 pool->xdp_mem_id = mem->id; 1202 1202 } 1203 1203 1204 + /** 1205 + * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI 1206 + * @pool: page pool to modify 1207 + * @napi: NAPI instance to associate the page pool with 1208 + * 1209 + * Associate a page pool with a NAPI instance for lockless page recycling. 1210 + * This is useful when a new page pool has to be added to a NAPI instance 1211 + * without disabling that NAPI instance, to mark the point at which control 1212 + * path "hands over" the page pool to the NAPI instance. In most cases driver 1213 + * can simply set the @napi field in struct page_pool_params, and does not 1214 + * have to call this helper. 1215 + * 1216 + * The function is idempotent, but does not implement any refcounting. 1217 + * Single page_pool_disable_direct_recycling() will disable recycling, 1218 + * no matter how many times enable was called. 1219 + */ 1220 + void page_pool_enable_direct_recycling(struct page_pool *pool, 1221 + struct napi_struct *napi) 1222 + { 1223 + if (READ_ONCE(pool->p.napi) == napi) 1224 + return; 1225 + WARN_ON(!napi || pool->p.napi); 1226 + 1227 + mutex_lock(&page_pools_lock); 1228 + WRITE_ONCE(pool->p.napi, napi); 1229 + mutex_unlock(&page_pools_lock); 1230 + } 1231 + EXPORT_SYMBOL(page_pool_enable_direct_recycling); 1232 + 1204 1233 void page_pool_disable_direct_recycling(struct page_pool *pool) 1205 1234 { 1206 1235 /* Disable direct recycling based on pool->cpuid.
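A hedged sketch of the call site the new helper enables: a driver swapping in a freshly created pool while its NAPI stays registered. All my_-prefixed names are hypothetical driver scaffolding; page_pool_create() and page_pool_enable_direct_recycling() are the real entry points.

#include <linux/netdevice.h>
#include <net/page_pool/types.h>

struct my_ring {                        /* hypothetical driver state */
        struct device *dev;
        struct napi_struct napi;
        struct page_pool *pool;
        unsigned int size;
};

static int my_queue_restart(struct my_ring *ring)
{
        struct page_pool_params pp = {
                .order          = 0,
                .pool_size      = ring->size,
                .dev            = ring->dev,
                /* .napi left NULL: the control path owns the pool first */
        };
        struct page_pool *pool = page_pool_create(&pp);

        if (IS_ERR(pool))
                return PTR_ERR(pool);

        /* ... post buffers from the new pool to the descriptor ring ... */

        /* Hand-over point: from here on the NAPI may recycle locklessly */
        page_pool_enable_direct_recycling(pool, &ring->napi);
        ring->pool = pool;
        return 0;
}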
+1 -1
net/devlink/port.c
··· 1519 1519 struct devlink_port_attrs *attrs = &devlink_port->attrs; 1520 1520 int n = 0; 1521 1521 1522 - if (!devlink_port->attrs_set) 1522 + if (!devlink_port->attrs_set || devlink_port->attrs.no_phys_port_name) 1523 1523 return -EOPNOTSUPP; 1524 1524 1525 1525 switch (attrs->flavour) {
+3
net/ipv4/netfilter/Kconfig
··· 14 14 config IP_NF_IPTABLES_LEGACY 15 15 tristate "Legacy IP tables support" 16 16 depends on NETFILTER_XTABLES_LEGACY 17 + depends on NETFILTER_XTABLES 17 18 default m if NETFILTER_XTABLES_LEGACY 18 19 help 19 20 iptables is a legacy packet classifier. ··· 327 326 config IP_NF_ARPTABLES 328 327 tristate "Legacy ARPTABLES support" 329 328 depends on NETFILTER_XTABLES_LEGACY 329 + depends on NETFILTER_XTABLES 330 330 default n 331 331 help 332 332 arptables is a legacy packet classifier. ··· 345 343 select IP_NF_ARPTABLES 346 344 select NETFILTER_FAMILY_ARP 347 345 depends on NETFILTER_XTABLES_LEGACY 346 + depends on NETFILTER_XTABLES 348 347 help 349 348 ARP packet filtering defines a table `filter', which has a series of 350 349 rules for simple ARP packet filtering at local input and
+1 -1
net/ipv4/udp_offload.c
··· 217 217 remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); 218 218 skb->remcsum_offload = remcsum; 219 219 220 - need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb)); 220 + need_ipsec = (skb_dst(skb) && dst_xfrm(skb_dst(skb))) || skb_sec_path(skb); 221 221 /* Try to offload checksum if possible */ 222 222 offload_csum = !!(need_csum && 223 223 !need_ipsec &&
+1
net/ipv6/netfilter/Kconfig
··· 10 10 config IP6_NF_IPTABLES_LEGACY 11 11 tristate "Legacy IP6 tables support" 12 12 depends on INET && IPV6 && NETFILTER_XTABLES_LEGACY 13 + depends on NETFILTER_XTABLES 13 14 default m if NETFILTER_XTABLES_LEGACY 14 15 help 15 16 ip6tables is a legacy packet classifier.
+1 -1
net/ipv6/xfrm6_tunnel.c
··· 334 334 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); 335 335 unsigned int i; 336 336 337 - xfrm_state_flush(net, IPSEC_PROTO_ANY, false); 337 + xfrm_state_flush(net, 0, false); 338 338 xfrm_flush_gc(); 339 339 340 340 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
+2 -8
net/kcm/kcmsock.c
··· 430 430 431 431 /* Check if the socket is reserved so someone is waiting for sending. */ 432 432 kcm = psock->tx_kcm; 433 - if (kcm && !unlikely(kcm->tx_stopped)) 433 + if (kcm) 434 434 queue_work(kcm_wq, &kcm->tx_work); 435 435 436 436 spin_unlock_bh(&mux->lock); ··· 1693 1693 */ 1694 1694 __skb_queue_purge(&sk->sk_write_queue); 1695 1695 1696 - /* Set tx_stopped. This is checked when psock is bound to a kcm and we 1697 - * get a writespace callback. This prevents further work being queued 1698 - * from the callback (unbinding the psock occurs after canceling work. 1699 - */ 1700 - kcm->tx_stopped = 1; 1701 - 1702 1696 release_sock(sk); 1703 1697 1704 1698 spin_lock_bh(&mux->lock); ··· 1708 1714 /* Cancel work. After this point there should be no outside references 1709 1715 * to the kcm socket. 1710 1716 */ 1711 - cancel_work_sync(&kcm->tx_work); 1717 + disable_work_sync(&kcm->tx_work); 1712 1718 1713 1719 lock_sock(sk); 1714 1720 psock = kcm->tx_psock;
-1
net/mctp/test/route-test.c
··· 1586 1586 1587 1587 cleanup: 1588 1588 kfree_skb(skb_sock); 1589 - kfree_skb(skb_pkt); 1590 1589 1591 1590 /* Drop all binds */ 1592 1591 for (size_t i = 0; i < ARRAY_SIZE(lookup_binds); i++)
+2 -1
net/netfilter/ipvs/ip_vs_est.c
··· 265 265 } 266 266 267 267 set_user_nice(kd->task, sysctl_est_nice(ipvs)); 268 - set_cpus_allowed_ptr(kd->task, sysctl_est_cpulist(ipvs)); 268 + if (sysctl_est_preferred_cpulist(ipvs)) 269 + kthread_affine_preferred(kd->task, sysctl_est_preferred_cpulist(ipvs)); 269 270 270 271 pr_info("starting estimator thread %d...\n", kd->id); 271 272 wake_up_process(kd->task);
+30 -35
net/netfilter/nf_conntrack_netlink.c
··· 884 884 885 885 static int ctnetlink_done(struct netlink_callback *cb) 886 886 { 887 - if (cb->args[1]) 888 - nf_ct_put((struct nf_conn *)cb->args[1]); 889 887 kfree(cb->data); 890 888 return 0; 891 889 } ··· 1206 1208 return 0; 1207 1209 } 1208 1210 1211 + static unsigned long ctnetlink_get_id(const struct nf_conn *ct) 1212 + { 1213 + unsigned long id = nf_ct_get_id(ct); 1214 + 1215 + return id ? id : 1; 1216 + } 1217 + 1209 1218 static int 1210 1219 ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 1211 1220 { 1212 1221 unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0; 1213 1222 struct net *net = sock_net(skb->sk); 1214 - struct nf_conn *ct, *last; 1223 + unsigned long last_id = cb->args[1]; 1215 1224 struct nf_conntrack_tuple_hash *h; 1216 1225 struct hlist_nulls_node *n; 1217 1226 struct nf_conn *nf_ct_evict[8]; 1227 + struct nf_conn *ct; 1218 1228 int res, i; 1219 1229 spinlock_t *lockp; 1220 1230 1221 - last = (struct nf_conn *)cb->args[1]; 1222 1231 i = 0; 1223 1232 1224 1233 local_bh_disable(); ··· 1262 1257 continue; 1263 1258 1264 1259 if (cb->args[1]) { 1265 - if (ct != last) 1260 + if (ctnetlink_get_id(ct) != last_id) 1266 1261 continue; 1267 1262 cb->args[1] = 0; 1268 1263 } ··· 1275 1270 NFNL_MSG_TYPE(cb->nlh->nlmsg_type), 1276 1271 ct, true, flags); 1277 1272 if (res < 0) { 1278 - nf_conntrack_get(&ct->ct_general); 1279 - cb->args[1] = (unsigned long)ct; 1273 + cb->args[1] = ctnetlink_get_id(ct); 1280 1274 spin_unlock(lockp); 1281 1275 goto out; 1282 1276 } ··· 1288 1284 } 1289 1285 out: 1290 1286 local_bh_enable(); 1291 - if (last) { 1287 + if (last_id) { 1292 1288 /* nf ct hash resize happened, now clear the leftover. */ 1293 - if ((struct nf_conn *)cb->args[1] == last) 1289 + if (cb->args[1] == last_id) 1294 1290 cb->args[1] = 0; 1295 - 1296 - nf_ct_put(last); 1297 1291 } 1298 1292 1299 1293 while (i) { ··· 3170 3168 return 0; 3171 3169 } 3172 3170 #endif 3173 - static int ctnetlink_exp_done(struct netlink_callback *cb) 3171 + 3172 + static unsigned long ctnetlink_exp_id(const struct nf_conntrack_expect *exp) 3174 3173 { 3175 - if (cb->args[1]) 3176 - nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]); 3177 - return 0; 3174 + unsigned long id = (unsigned long)exp; 3175 + 3176 + id += nf_ct_get_id(exp->master); 3177 + id += exp->class; 3178 + 3179 + return id ? id : 1; 3178 3180 } 3179 3181 3180 3182 static int 3181 3183 ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 3182 3184 { 3183 3185 struct net *net = sock_net(skb->sk); 3184 - struct nf_conntrack_expect *exp, *last; 3185 3186 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 3186 3187 u_int8_t l3proto = nfmsg->nfgen_family; 3188 + unsigned long last_id = cb->args[1]; 3189 + struct nf_conntrack_expect *exp; 3187 3190 3188 3191 rcu_read_lock(); 3189 - last = (struct nf_conntrack_expect *)cb->args[1]; 3190 3192 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { 3191 3193 restart: 3192 3194 hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]], ··· 3202 3196 continue; 3203 3197 3204 3198 if (cb->args[1]) { 3205 - if (exp != last) 3199 + if (ctnetlink_exp_id(exp) != last_id) 3206 3200 continue; 3207 3201 cb->args[1] = 0; 3208 3202 } ··· 3211 3205 cb->nlh->nlmsg_seq, 3212 3206 IPCTNL_MSG_EXP_NEW, 3213 3207 exp) < 0) { 3214 - if (!refcount_inc_not_zero(&exp->use)) 3215 - continue; 3216 - cb->args[1] = (unsigned long)exp; 3208 + cb->args[1] = ctnetlink_exp_id(exp); 3217 3209 goto out; 3218 3210 } 3219 3211 } ··· 3222 3218 } 3223 3219 out: 3224 3220 rcu_read_unlock(); 3225 - if (last) 3226 - nf_ct_expect_put(last); 3227 - 3228 3221 return skb->len; 3229 3222 } 3230 3223 3231 3224 static int 3232 3225 ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 3233 3226 { 3234 - struct nf_conntrack_expect *exp, *last; 3235 3227 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 3236 3228 struct nf_conn *ct = cb->data; 3237 3229 struct nf_conn_help *help = nfct_help(ct); 3238 3230 u_int8_t l3proto = nfmsg->nfgen_family; 3231 + unsigned long last_id = cb->args[1]; 3232 + struct nf_conntrack_expect *exp; 3239 3233 3240 3234 if (cb->args[0]) 3241 3235 return 0; 3242 3236 3243 3237 rcu_read_lock(); 3244 - last = (struct nf_conntrack_expect *)cb->args[1]; 3238 + 3245 3239 restart: 3246 3240 hlist_for_each_entry_rcu(exp, &help->expectations, lnode) { 3247 3241 if (l3proto && exp->tuple.src.l3num != l3proto) 3248 3242 continue; 3249 3243 if (cb->args[1]) { 3250 - if (exp != last) 3244 + if (ctnetlink_exp_id(exp) != last_id) 3251 3245 continue; 3252 3246 cb->args[1] = 0; 3253 3247 } ··· 3253 3251 cb->nlh->nlmsg_seq, 3254 3252 IPCTNL_MSG_EXP_NEW, 3255 3253 exp) < 0) { 3256 - if (!refcount_inc_not_zero(&exp->use)) 3257 - continue; 3258 - cb->args[1] = (unsigned long)exp; 3254 + cb->args[1] = ctnetlink_exp_id(exp); 3259 3255 goto out; 3260 3256 } 3261 3257 } ··· 3264 3264 cb->args[0] = 1; 3265 3265 out: 3266 3266 rcu_read_unlock(); 3267 - if (last) 3268 - nf_ct_expect_put(last); 3269 - 3270 3267 return skb->len; 3271 3268 } 3272 3269 ··· 3282 3285 struct nf_conntrack_zone zone; 3283 3286 struct netlink_dump_control c = { 3284 3287 .dump = ctnetlink_exp_ct_dump_table, 3285 - .done = ctnetlink_exp_done, 3286 3288 }; 3287 3289 3288 3290 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, ··· 3331 3335 else { 3332 3336 struct netlink_dump_control c = { 3333 3337 .dump = ctnetlink_exp_dump_table, 3334 - .done = ctnetlink_exp_done, 3335 3338 }; 3336 3339 return netlink_dump_start(info->sk, skb, info->nlh, &c); 3337 3340 }
+3 -3
net/netfilter/nf_conntrack_standalone.c
··· 567 567 return ret; 568 568 569 569 if (*(u8 *)table->data == 0) 570 - return ret; 570 + return 0; 571 571 572 572 /* Load nf_log_syslog only if no logger is currently registered */ 573 573 for (i = 0; i < NFPROTO_NUMPROTO; i++) { 574 574 if (nf_log_is_registered(i)) 575 - return ret; 575 + return 0; 576 576 } 577 577 request_module("%s", "nf_log_syslog"); 578 578 579 - return ret; 579 + return 0; 580 580 } 581 581 582 582 static struct ctl_table_header *nf_ct_netfilter_header;
+30
net/netfilter/nf_tables_api.c
··· 2803 2803 struct nft_chain *chain = ctx->chain; 2804 2804 struct nft_chain_hook hook = {}; 2805 2805 struct nft_stats __percpu *stats = NULL; 2806 + struct nftables_pernet *nft_net; 2806 2807 struct nft_hook *h, *next; 2807 2808 struct nf_hook_ops *ops; 2808 2809 struct nft_trans *trans; ··· 2846 2845 if (nft_hook_list_find(&basechain->hook_list, h)) { 2847 2846 list_del(&h->list); 2848 2847 nft_netdev_hook_free(h); 2848 + continue; 2849 + } 2850 + 2851 + nft_net = nft_pernet(ctx->net); 2852 + list_for_each_entry(trans, &nft_net->commit_list, list) { 2853 + if (trans->msg_type != NFT_MSG_NEWCHAIN || 2854 + trans->table != ctx->table || 2855 + !nft_trans_chain_update(trans)) 2856 + continue; 2857 + 2858 + if (nft_hook_list_find(&nft_trans_chain_hooks(trans), h)) { 2859 + nft_chain_release_hook(&hook); 2860 + return -EEXIST; 2861 + } 2849 2862 } 2850 2863 } 2851 2864 } else { ··· 9075 9060 { 9076 9061 const struct nlattr * const *nla = ctx->nla; 9077 9062 struct nft_flowtable_hook flowtable_hook; 9063 + struct nftables_pernet *nft_net; 9078 9064 struct nft_hook *hook, *next; 9079 9065 struct nf_hook_ops *ops; 9080 9066 struct nft_trans *trans; ··· 9092 9076 if (nft_hook_list_find(&flowtable->hook_list, hook)) { 9093 9077 list_del(&hook->list); 9094 9078 nft_netdev_hook_free(hook); 9079 + continue; 9080 + } 9081 + 9082 + nft_net = nft_pernet(ctx->net); 9083 + list_for_each_entry(trans, &nft_net->commit_list, list) { 9084 + if (trans->msg_type != NFT_MSG_NEWFLOWTABLE || 9085 + trans->table != ctx->table || 9086 + !nft_trans_flowtable_update(trans)) 9087 + continue; 9088 + 9089 + if (nft_hook_list_find(&nft_trans_flowtable_hooks(trans), hook)) { 9090 + err = -EEXIST; 9091 + goto err_flowtable_update_hook; 9092 + } 9095 9093 } 9096 9094 } 9097 9095
+2 -3
net/netfilter/nft_set_pipapo.c
··· 426 426 427 427 local_bh_disable(); 428 428 429 - if (unlikely(!raw_cpu_ptr(m->scratch))) 430 - goto out; 431 - 432 429 scratch = *raw_cpu_ptr(m->scratch); 430 + if (unlikely(!scratch)) 431 + goto out; 433 432 434 433 map_index = scratch->map_index; 435 434
+7 -7
net/netfilter/nft_set_pipapo_avx2.c
··· 1150 1150 const u32 *key) 1151 1151 { 1152 1152 struct nft_pipapo *priv = nft_set_priv(set); 1153 + const struct nft_set_ext *ext = NULL; 1153 1154 struct nft_pipapo_scratch *scratch; 1154 1155 u8 genmask = nft_genmask_cur(net); 1155 1156 const struct nft_pipapo_match *m; 1156 1157 const struct nft_pipapo_field *f; 1157 1158 const u8 *rp = (const u8 *)key; 1158 - const struct nft_set_ext *ext; 1159 1159 unsigned long *res, *fill; 1160 1160 bool map_index; 1161 1161 int i; ··· 1246 1246 goto out; 1247 1247 1248 1248 if (last) { 1249 - ext = &f->mt[ret].e->ext; 1250 - if (unlikely(nft_set_elem_expired(ext) || 1251 - !nft_set_elem_active(ext, genmask))) { 1252 - ext = NULL; 1253 - goto next_match; 1254 - } 1249 + const struct nft_set_ext *e = &f->mt[ret].e->ext; 1255 1250 1251 + if (unlikely(nft_set_elem_expired(e) || 1252 + !nft_set_elem_active(e, genmask))) 1253 + goto next_match; 1254 + 1255 + ext = e; 1256 1256 goto out; 1257 1257 } 1258 1258
+1 -1
net/netfilter/nft_socket.c
··· 217 217 218 218 level += err; 219 219 /* Implies a giant cgroup tree */ 220 - if (WARN_ON_ONCE(level > 255)) 220 + if (level > 255) 221 221 return -EOPNOTSUPP; 222 222 223 223 priv->level = level;
+6 -5
net/sched/sch_ets.c
··· 651 651 652 652 sch_tree_lock(sch); 653 653 654 + for (i = nbands; i < oldbands; i++) { 655 + if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) 656 + list_del_init(&q->classes[i].alist); 657 + qdisc_purge_queue(q->classes[i].qdisc); 658 + } 659 + 654 660 WRITE_ONCE(q->nbands, nbands); 655 661 for (i = nstrict; i < q->nstrict; i++) { 656 662 if (q->classes[i].qdisc->q.qlen) { 657 663 list_add_tail(&q->classes[i].alist, &q->active); 658 664 q->classes[i].deficit = quanta[i]; 659 665 } 660 - } 661 - for (i = q->nbands; i < oldbands; i++) { 662 - if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) 663 - list_del_init(&q->classes[i].alist); 664 - qdisc_purge_queue(q->classes[i].qdisc); 665 666 } 666 667 WRITE_ONCE(q->nstrict, nstrict); 667 668 memcpy(q->prio2band, priomap, sizeof(priomap));
+1 -1
net/sctp/input.c
··· 117 117 * it's better to just linearize it otherwise crc computing 118 118 * takes longer. 119 119 */ 120 - if ((!is_gso && skb_linearize(skb)) || 120 + if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) || 121 121 !pskb_may_pull(skb, sizeof(struct sctphdr))) 122 122 goto discard_it; 123 123
+2 -2
net/sunrpc/auth_gss/gss_krb5_crypto.c
··· 875 875 * krb5_etm_decrypt - Decrypt using the RFC 8009 rules 876 876 * @kctx: Kerberos context 877 877 * @offset: starting offset of the ciphertext, in bytes 878 - * @len: 879 - * @buf: 878 + * @len: size of ciphertext to unwrap 879 + * @buf: ciphertext to unwrap 880 880 * @headskip: OUT: the enctype's confounder length, in octets 881 881 * @tailskip: OUT: the enctype's HMAC length, in octets 882 882 *
+35 -8
net/sunrpc/svcsock.c
··· 257 257 } 258 258 259 259 static int 260 - svc_tcp_sock_recv_cmsg(struct svc_sock *svsk, struct msghdr *msg) 260 + svc_tcp_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags) 261 261 { 262 262 union { 263 263 struct cmsghdr cmsg; 264 264 u8 buf[CMSG_SPACE(sizeof(u8))]; 265 265 } u; 266 - struct socket *sock = svsk->sk_sock; 266 + u8 alert[2]; 267 + struct kvec alert_kvec = { 268 + .iov_base = alert, 269 + .iov_len = sizeof(alert), 270 + }; 271 + struct msghdr msg = { 272 + .msg_flags = *msg_flags, 273 + .msg_control = &u, 274 + .msg_controllen = sizeof(u), 275 + }; 267 276 int ret; 268 277 269 - msg->msg_control = &u; 270 - msg->msg_controllen = sizeof(u); 278 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1, 279 + alert_kvec.iov_len); 280 + ret = sock_recvmsg(sock, &msg, MSG_DONTWAIT); 281 + if (ret > 0 && 282 + tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) { 283 + iov_iter_revert(&msg.msg_iter, ret); 284 + ret = svc_tcp_sock_process_cmsg(sock, &msg, &u.cmsg, -EAGAIN); 285 + } 286 + return ret; 287 + } 288 + 289 + static int 290 + svc_tcp_sock_recvmsg(struct svc_sock *svsk, struct msghdr *msg) 291 + { 292 + int ret; 293 + struct socket *sock = svsk->sk_sock; 294 + 271 295 ret = sock_recvmsg(sock, msg, MSG_DONTWAIT); 272 - if (unlikely(msg->msg_controllen != sizeof(u))) 273 - ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret); 296 + if (msg->msg_flags & MSG_CTRUNC) { 297 + msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR); 298 + if (ret == 0 || ret == -EIO) 299 + ret = svc_tcp_sock_recv_cmsg(sock, &msg->msg_flags); 300 + } 274 301 return ret; 275 302 } 276 303 ··· 348 321 iov_iter_advance(&msg.msg_iter, seek); 349 322 buflen -= seek; 350 323 } 351 - len = svc_tcp_sock_recv_cmsg(svsk, &msg); 324 + len = svc_tcp_sock_recvmsg(svsk, &msg); 352 325 if (len > 0) 353 326 svc_flush_bvec(bvec, len, seek); 354 327 ··· 1045 1018 iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen; 1046 1019 iov.iov_len = want; 1047 1020 iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want); 1048 - len = svc_tcp_sock_recv_cmsg(svsk, &msg); 1021 + len = svc_tcp_sock_recvmsg(svsk, &msg); 1049 1022 if (len < 0) 1050 1023 return len; 1051 1024 svsk->sk_tcplen += len;
-110
net/sunrpc/xdr.c
··· 37 37 } 38 38 EXPORT_SYMBOL_GPL(xdr_encode_netobj); 39 39 40 - __be32 * 41 - xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj) 42 - { 43 - unsigned int len; 44 - 45 - if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ) 46 - return NULL; 47 - obj->len = len; 48 - obj->data = (u8 *) p; 49 - return p + XDR_QUADLEN(len); 50 - } 51 - EXPORT_SYMBOL_GPL(xdr_decode_netobj); 52 - 53 40 /** 54 41 * xdr_encode_opaque_fixed - Encode fixed length opaque data 55 42 * @p: pointer to current position in XDR buffer. ··· 88 101 return xdr_encode_array(p, string, strlen(string)); 89 102 } 90 103 EXPORT_SYMBOL_GPL(xdr_encode_string); 91 - 92 - __be32 * 93 - xdr_decode_string_inplace(__be32 *p, char **sp, 94 - unsigned int *lenp, unsigned int maxlen) 95 - { 96 - u32 len; 97 - 98 - len = be32_to_cpu(*p++); 99 - if (len > maxlen) 100 - return NULL; 101 - *lenp = len; 102 - *sp = (char *) p; 103 - return p + XDR_QUADLEN(len); 104 - } 105 - EXPORT_SYMBOL_GPL(xdr_decode_string_inplace); 106 104 107 105 /** 108 106 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf ··· 2215 2243 return ret; 2216 2244 } 2217 2245 EXPORT_SYMBOL_GPL(xdr_process_buf); 2218 - 2219 - /** 2220 - * xdr_stream_decode_opaque - Decode variable length opaque 2221 - * @xdr: pointer to xdr_stream 2222 - * @ptr: location to store opaque data 2223 - * @size: size of storage buffer @ptr 2224 - * 2225 - * Return values: 2226 - * On success, returns size of object stored in *@ptr 2227 - * %-EBADMSG on XDR buffer overflow 2228 - * %-EMSGSIZE on overflow of storage buffer @ptr 2229 - */ 2230 - ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size) 2231 - { 2232 - ssize_t ret; 2233 - void *p; 2234 - 2235 - ret = xdr_stream_decode_opaque_inline(xdr, &p, size); 2236 - if (ret <= 0) 2237 - return ret; 2238 - memcpy(ptr, p, ret); 2239 - return ret; 2240 - } 2241 - EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque); 2242 - 2243 - /** 2244 - * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque 2245 - * @xdr: pointer to xdr_stream 2246 - * @ptr: location to store pointer to opaque data 2247 - * @maxlen: maximum acceptable object size 2248 - * @gfp_flags: GFP mask to use 2249 - * 2250 - * Return values: 2251 - * On success, returns size of object stored in *@ptr 2252 - * %-EBADMSG on XDR buffer overflow 2253 - * %-EMSGSIZE if the size of the object would exceed @maxlen 2254 - * %-ENOMEM on memory allocation failure 2255 - */ 2256 - ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr, 2257 - size_t maxlen, gfp_t gfp_flags) 2258 - { 2259 - ssize_t ret; 2260 - void *p; 2261 - 2262 - ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen); 2263 - if (ret > 0) { 2264 - *ptr = kmemdup(p, ret, gfp_flags); 2265 - if (*ptr != NULL) 2266 - return ret; 2267 - ret = -ENOMEM; 2268 - } 2269 - *ptr = NULL; 2270 - return ret; 2271 - } 2272 - EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup); 2273 - 2274 - /** 2275 - * xdr_stream_decode_string - Decode variable length string 2276 - * @xdr: pointer to xdr_stream 2277 - * @str: location to store string 2278 - * @size: size of storage buffer @str 2279 - * 2280 - * Return values: 2281 - * On success, returns length of NUL-terminated string stored in *@str 2282 - * %-EBADMSG on XDR buffer overflow 2283 - * %-EMSGSIZE on overflow of storage buffer @str 2284 - */ 2285 - ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size) 2286 - { 2287 - ssize_t ret; 2288 - void *p; 2289 - 2290 - ret = xdr_stream_decode_opaque_inline(xdr, &p, size); 2291 - if (ret > 0) { 2292 - memcpy(str, p, ret); 2293 - str[ret] = '\0'; 2294 - return strlen(str); 2295 - } 2296 - *str = '\0'; 2297 - return ret; 2298 - } 2299 - EXPORT_SYMBOL_GPL(xdr_stream_decode_string); 2300 2246 2301 2247 /** 2302 2248 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
+30 -10
net/sunrpc/xprtsock.c
··· 358 358 359 359 static int 360 360 xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg, 361 - struct cmsghdr *cmsg, int ret) 361 + unsigned int *msg_flags, struct cmsghdr *cmsg, int ret) 362 362 { 363 363 u8 content_type = tls_get_record_type(sock->sk, cmsg); 364 364 u8 level, description; ··· 371 371 * record, even though there might be more frames 372 372 * waiting to be decrypted. 373 373 */ 374 - msg->msg_flags &= ~MSG_EOR; 374 + *msg_flags &= ~MSG_EOR; 375 375 break; 376 376 case TLS_RECORD_TYPE_ALERT: 377 377 tls_alert_recv(sock->sk, msg, &level, &description); ··· 386 386 } 387 387 388 388 static int 389 - xs_sock_recv_cmsg(struct socket *sock, struct msghdr *msg, int flags) 389 + xs_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags, int flags) 390 390 { 391 391 union { 392 392 struct cmsghdr cmsg; 393 393 u8 buf[CMSG_SPACE(sizeof(u8))]; 394 394 } u; 395 + u8 alert[2]; 396 + struct kvec alert_kvec = { 397 + .iov_base = alert, 398 + .iov_len = sizeof(alert), 399 + }; 400 + struct msghdr msg = { 401 + .msg_flags = *msg_flags, 402 + .msg_control = &u, 403 + .msg_controllen = sizeof(u), 404 + }; 395 405 int ret; 396 406 397 - msg->msg_control = &u; 398 - msg->msg_controllen = sizeof(u); 399 - ret = sock_recvmsg(sock, msg, flags); 400 - if (msg->msg_controllen != sizeof(u)) 401 - ret = xs_sock_process_cmsg(sock, msg, &u.cmsg, ret); 407 + iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1, 408 + alert_kvec.iov_len); 409 + ret = sock_recvmsg(sock, &msg, flags); 410 + if (ret > 0 && 411 + tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) { 412 + iov_iter_revert(&msg.msg_iter, ret); 413 + ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg, 414 + -EAGAIN); 415 + } 402 416 return ret; 403 417 } 404 418 ··· 422 408 ssize_t ret; 423 409 if (seek != 0) 424 410 iov_iter_advance(&msg->msg_iter, seek); 425 - ret = xs_sock_recv_cmsg(sock, msg, flags); 411 + ret = sock_recvmsg(sock, msg, flags); 412 + /* Handle TLS inband control message lazily */ 413 + if (msg->msg_flags & MSG_CTRUNC) { 414 + msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR); 415 + if (ret == 0 || ret == -EIO) 416 + ret = xs_sock_recv_cmsg(sock, &msg->msg_flags, flags); 417 + } 426 418 return ret > 0 ? ret + seek : ret; 427 419 } 428 420 ··· 454 434 size_t count) 455 435 { 456 436 iov_iter_discard(&msg->msg_iter, ITER_DEST, count); 457 - return xs_sock_recv_cmsg(sock, msg, flags); 437 + return xs_sock_recvmsg(sock, msg, flags, 0); 458 438 } 459 439 460 440 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+1 -1
net/tls/tls.h
··· 196 196 int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb); 197 197 void tls_rx_msg_ready(struct tls_strparser *strp); 198 198 199 - void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh); 199 + bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh); 200 200 int tls_strp_msg_cow(struct tls_sw_context_rx *ctx); 201 201 struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx); 202 202 int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst);
+8 -3
net/tls/tls_strp.c
··· 475 475 strp->stm.offset = offset; 476 476 } 477 477 478 - void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) 478 + bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh) 479 479 { 480 480 struct strp_msg *rxm; 481 481 struct tls_msg *tlm; ··· 484 484 DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len); 485 485 486 486 if (!strp->copy_mode && force_refresh) { 487 - if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len)) 488 - return; 487 + if (unlikely(tcp_inq(strp->sk) < strp->stm.full_len)) { 488 + WRITE_ONCE(strp->msg_ready, 0); 489 + memset(&strp->stm, 0, sizeof(strp->stm)); 490 + return false; 491 + } 489 492 490 493 tls_strp_load_anchor_with_queue(strp, strp->stm.full_len); 491 494 } ··· 498 495 rxm->offset = strp->stm.offset; 499 496 tlm = tls_msg(strp->anchor); 500 497 tlm->control = strp->mark; 498 + 499 + return true; 501 500 } 502 501 503 502 /* Called with lock held on lower socket */
+2 -1
net/tls/tls_sw.c
··· 1384 1384 return sock_intr_errno(timeo); 1385 1385 } 1386 1386 1387 - tls_strp_msg_load(&ctx->strp, released); 1387 + if (unlikely(!tls_strp_msg_load(&ctx->strp, released))) 1388 + return tls_rx_rec_wait(sk, psock, nonblock, false); 1388 1389 1389 1390 return 1; 1390 1391 }
+2 -1
net/vmw_vsock/af_vsock.c
··· 689 689 unsigned int i; 690 690 691 691 for (i = 0; i < MAX_PORT_RETRIES; i++) { 692 - if (port <= LAST_RESERVED_PORT) 692 + if (port == VMADDR_PORT_ANY || 693 + port <= LAST_RESERVED_PORT) 693 694 port = LAST_RESERVED_PORT + 1; 694 695 695 696 new_addr.svm_port = port++;
+9 -3
net/xfrm/xfrm_device.c
··· 155 155 return skb; 156 156 } 157 157 158 - if (skb_is_gso(skb) && unlikely(xmit_xfrm_check_overflow(skb))) { 158 + if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) || 159 + unlikely(xmit_xfrm_check_overflow(skb)))) { 159 160 struct sk_buff *segs; 160 161 161 162 /* Packet got rerouted, fixup features and segment it. */ ··· 416 415 struct net_device *dev = x->xso.dev; 417 416 bool check_tunnel_size; 418 417 419 - if (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED) 418 + if (!x->type_offload || 419 + (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap)) 420 420 return false; 421 421 422 - if ((dev == xfrm_dst_path(dst)->dev) && !xdst->child->xfrm) { 422 + if ((!dev || dev == xfrm_dst_path(dst)->dev) && 423 + !xdst->child->xfrm) { 423 424 mtu = xfrm_state_mtu(x, xdst->child_mtu_cached); 424 425 if (skb->len <= mtu) 425 426 goto ok; ··· 433 430 return false; 434 431 435 432 ok: 433 + if (!dev) 434 + return true; 435 + 436 436 check_tunnel_size = x->xso.type == XFRM_DEV_OFFLOAD_PACKET && 437 437 x->props.mode == XFRM_MODE_TUNNEL; 438 438 switch (x->props.family) {
+1 -1
net/xfrm/xfrm_state.c
··· 3297 3297 unsigned int sz; 3298 3298 3299 3299 flush_work(&net->xfrm.state_hash_work); 3300 - xfrm_state_flush(net, IPSEC_PROTO_ANY, false); 3300 + xfrm_state_flush(net, 0, false); 3301 3301 flush_work(&xfrm_state_gc_work); 3302 3302 3303 3303 WARN_ON(!list_empty(&net->xfrm.state_all));
+4 -1
sound/hda/codecs/ca0132.c
··· 4802 4802 if (err < 0) 4803 4803 goto exit; 4804 4804 4805 - if (ca0132_alt_select_out_quirk_set(codec) < 0) 4805 + err = ca0132_alt_select_out_quirk_set(codec); 4806 + if (err < 0) 4806 4807 goto exit; 4807 4808 4808 4809 switch (spec->cur_out_type) { ··· 4893 4892 spec->bass_redirection_val); 4894 4893 else 4895 4894 err = ca0132_alt_surround_set_bass_redirection(codec, 0); 4895 + if (err < 0) 4896 + goto exit; 4896 4897 4897 4898 /* Unmute DSP now that we're done with output selection. */ 4898 4899 err = dspio_set_uint_param(codec, 0x96,
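The first hunk fixes a swallowed error: testing the call's result directly ("if (foo(codec) < 0) goto exit;") jumps to the shared exit label while err still holds the previous step's 0, so the function reports success. A compact sketch of the pattern, with hypothetical step functions:

struct widget;
int step_one(struct widget *w);
int step_two(struct widget *w);

static int configure(struct widget *w)
{
    int err;

    err = step_one(w);
    if (err < 0)
        goto exit;

    /* "if (step_two(w) < 0) goto exit;" would jump with err still 0 */
    err = step_two(w);
    if (err < 0)
        goto exit;

exit:
    return err;
}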
+12 -8
sound/hda/codecs/cirrus/Kconfig
··· 2 2 3 3 menuconfig SND_HDA_CODEC_CIRRUS 4 4 tristate "Cirrus Logic HD-audio codec support" 5 + help 6 + Say Y or M here to include Cirrus Logic HD-audio codec support. 7 + 8 + This will enable both CS420x and CS421x HD-audio codec drivers 9 + as default, but you can enable/disable each codec driver 10 + individually, too (only when CONFIG_EXPERT is set). 5 11 6 12 if SND_HDA_CODEC_CIRRUS 7 13 8 14 config SND_HDA_CODEC_CS420X 9 - tristate "Build Cirrus Logic CS420x codec support" 15 + tristate "Build Cirrus Logic CS420x codec support" if EXPERT 10 16 select SND_HDA_GENERIC 11 17 default y 12 18 help 13 - Say Y or M here to include Cirrus Logic CS420x codec support in 14 - snd-hda-intel driver 19 + Say Y or M here to include Cirrus Logic CS420x codec support 15 20 16 21 comment "Set to Y if you want auto-loading the codec driver" 17 22 depends on SND_HDA=y && SND_HDA_CODEC_CS420X=m 18 23 19 24 config SND_HDA_CODEC_CS421X 20 - tristate "Build Cirrus Logic CS421x codec support" 25 + tristate "Build Cirrus Logic CS421x codec support" if EXPERT 21 26 select SND_HDA_GENERIC 22 27 default y 23 28 help 24 - Say Y or M here to include Cirrus Logic CS421x codec support in 25 - snd-hda-intel driver 29 + Say Y or M here to include Cirrus Logic CS421x codec support 26 30 27 31 comment "Set to Y if you want auto-loading the codec driver" 28 32 depends on SND_HDA=y && SND_HDA_CODEC_CS421X=m ··· 35 31 tristate "Build Cirrus Logic HDA bridge support" 36 32 select SND_HDA_GENERIC 37 33 help 38 - Say Y or M here to include Cirrus Logic HDA bridge support in 39 - snd-hda-intel driver, such as CS8409. 34 + Say Y or M here to include Cirrus Logic HDA bridge support 35 + such as CS8409. 40 36 41 37 comment "Set to Y if you want auto-loading the codec driver" 42 38 depends on SND_HDA=y && SND_HDA_CODEC_CS8409=m
+13 -7
sound/hda/codecs/hdmi/Kconfig
··· 2 2 3 3 menuconfig SND_HDA_CODEC_HDMI 4 4 tristate "HD-audio HDMI codec support" 5 + help 6 + Say Y or M here to include HD-audio HDMI/DisplayPort codec support. 7 + 8 + This will enable all HDMI/DP codec drivers as default, but you can 9 + enable/disable each codec driver individually, too (only when 10 + CONFIG_EXPERT is set). 5 11 6 12 if SND_HDA_CODEC_HDMI 7 13 8 14 config SND_HDA_CODEC_HDMI_GENERIC 9 - tristate "Generic HDMI/DisplayPort HD-audio codec support" 15 + tristate "Generic HDMI/DisplayPort HD-audio codec support" if EXPERT 10 16 select SND_DYNAMIC_MINORS 11 17 select SND_PCM_ELD 12 18 default y ··· 24 18 to assure the multiple streams for DP-MST support. 25 19 26 20 config SND_HDA_CODEC_HDMI_SIMPLE 27 - tristate "Simple HDMI/DisplayPort HD-audio codec support" 21 + tristate "Simple HDMI/DisplayPort HD-audio codec support" if EXPERT 28 22 default y 29 23 help 30 24 Say Y or M here to include Simple HDMI and DisplayPort HD-audio 31 25 codec support for VIA and other codecs. 32 26 33 27 config SND_HDA_CODEC_HDMI_INTEL 34 - tristate "Intel HDMI/DisplayPort HD-audio codec support" 28 + tristate "Intel HDMI/DisplayPort HD-audio codec support" if EXPERT 35 29 select SND_HDA_CODEC_HDMI_GENERIC 36 30 default y 37 31 help ··· 54 48 are kept reserved both at transmitter and receiver. 55 49 56 50 config SND_HDA_CODEC_HDMI_ATI 57 - tristate "AMD/ATI HDMI/DisplayPort HD-audio codec support" 51 + tristate "AMD/ATI HDMI/DisplayPort HD-audio codec support" if EXPERT 58 52 select SND_HDA_CODEC_HDMI_GENERIC 59 53 default y 60 54 help ··· 62 56 HD-audio codec support. 63 57 64 58 config SND_HDA_CODEC_HDMI_NVIDIA 65 - tristate "Nvidia HDMI/DisplayPort HD-audio codec support" 59 + tristate "Nvidia HDMI/DisplayPort HD-audio codec support" if EXPERT 66 60 select SND_HDA_CODEC_HDMI_GENERIC 67 61 default y 68 62 help ··· 70 64 support for the recent Nvidia graphics cards. 71 65 72 66 config SND_HDA_CODEC_HDMI_NVIDIA_MCP 73 - tristate "Legacy Nvidia HDMI/DisplayPort HD-audio codec support" 67 + tristate "Legacy Nvidia HDMI/DisplayPort HD-audio codec support" if EXPERT 74 68 select SND_HDA_CODEC_HDMI_SIMPLE 75 69 default y 76 70 help ··· 78 72 support for the legacy Nvidia graphics like MCP73, MCP67, MCP77/78. 79 73 80 74 config SND_HDA_CODEC_HDMI_TEGRA 81 - tristate "Nvidia Tegra HDMI/DisplayPort HD-audio codec support" 75 + tristate "Nvidia Tegra HDMI/DisplayPort HD-audio codec support" if EXPERT 82 76 select SND_HDA_CODEC_HDMI_GENERIC 83 77 default y 84 78 help
+16 -12
sound/hda/codecs/realtek/Kconfig
··· 2 2 3 3 menuconfig SND_HDA_CODEC_REALTEK 4 4 tristate "Realtek HD-audio codec support" 5 + help 6 + Say Y or M here to include Realtek HD-audio codec support. 7 + 8 + This will enable all Realtek HD-audio codec drivers as default, 9 + but you can enable/disable each codec driver individually, too 10 + (only when CONFIG_EXPERT is set). 5 11 6 12 if SND_HDA_CODEC_REALTEK 7 13 ··· 18 12 select SND_HDA_SCODEC_COMPONENT 19 13 20 14 config SND_HDA_CODEC_ALC260 21 - tristate "Build Realtek ALC260 HD-audio codec support" 15 + tristate "Build Realtek ALC260 HD-audio codec support" if EXPERT 22 16 depends on INPUT 23 17 select SND_HDA_CODEC_REALTEK_LIB 24 18 default y ··· 26 20 Say Y or M here to include Realtek ALC260 HD-audio codec support 27 21 28 22 config SND_HDA_CODEC_ALC262 29 - tristate "Build Realtek ALC262 HD-audio codec support" 23 + tristate "Build Realtek ALC262 HD-audio codec support" if EXPERT 30 24 depends on INPUT 31 25 select SND_HDA_CODEC_REALTEK_LIB 32 26 default y ··· 34 28 Say Y or M here to include Realtek ALC262 HD-audio codec support 35 29 36 30 config SND_HDA_CODEC_ALC268 37 - tristate "Build Realtek ALC268 HD-audio codec support" 31 + tristate "Build Realtek ALC268 HD-audio codec support" if EXPERT 38 32 depends on INPUT 39 33 select SND_HDA_CODEC_REALTEK_LIB 40 34 default y ··· 43 37 codec support 44 38 45 39 config SND_HDA_CODEC_ALC269 46 - tristate "Build Realtek ALC269 HD-audio codecs support" 40 + tristate "Build Realtek ALC269 HD-audio codecs support" if EXPERT 47 41 depends on INPUT 48 42 select SND_HDA_CODEC_REALTEK_LIB 49 43 default y ··· 52 46 codec support 53 47 54 48 config SND_HDA_CODEC_ALC662 55 - tristate "Build Realtek ALC662 HD-audio codecs support" 49 + tristate "Build Realtek ALC662 HD-audio codecs support" if EXPERT 56 50 depends on INPUT 57 51 select SND_HDA_CODEC_REALTEK_LIB 58 52 default y ··· 61 55 codec support 62 56 63 57 config SND_HDA_CODEC_ALC680 64 - tristate "Build Realtek ALC680 HD-audio codecs support" 58 + tristate "Build Realtek ALC680 HD-audio codecs support" if EXPERT 65 59 depends on INPUT 66 60 select SND_HDA_CODEC_REALTEK_LIB 67 61 default y ··· 69 63 Say Y or M here to include Realtek ALC680 HD-audio codec support 70 64 71 65 config SND_HDA_CODEC_ALC861 72 - tristate "Build Realtek ALC861 HD-audio codecs support" 66 + tristate "Build Realtek ALC861 HD-audio codecs support" if EXPERT 73 67 depends on INPUT 74 68 select SND_HDA_CODEC_REALTEK_LIB 75 69 default y ··· 77 71 Say Y or M here to include Realtek ALC861 HD-audio codec support 78 72 79 73 config SND_HDA_CODEC_ALC861VD 80 - tristate "Build Realtek ALC861-VD HD-audio codecs support" 74 + tristate "Build Realtek ALC861-VD HD-audio codecs support" if EXPERT 81 75 depends on INPUT 82 76 select SND_HDA_CODEC_REALTEK_LIB 83 77 default y ··· 85 79 Say Y or M here to include Realtek ALC861-VD HD-audio codec support 86 80 87 81 config SND_HDA_CODEC_ALC880 88 - tristate "Build Realtek ALC880 HD-audio codecs support" 82 + tristate "Build Realtek ALC880 HD-audio codecs support" if EXPERT 89 83 depends on INPUT 90 84 select SND_HDA_CODEC_REALTEK_LIB 91 85 default y ··· 93 87 Say Y or M here to include Realtek ALC880 HD-audio codec support 94 88 95 89 config SND_HDA_CODEC_ALC882 96 - tristate "Build Realtek ALC882 HD-audio codecs support" 90 + tristate "Build Realtek ALC882 HD-audio codecs support" if EXPERT 97 91 depends on INPUT 98 92 select SND_HDA_CODEC_REALTEK_LIB 99 93 default y ··· 102 96 codec support 103 97 104 98 endif 105 - 106 -
+1
sound/hda/codecs/realtek/alc269.c
··· 7110 7110 SND_PCI_QUIRK(0x1854, 0x0440, "LG CQ6", ALC256_FIXUP_HEADPHONE_AMP_VOL), 7111 7111 SND_PCI_QUIRK(0x1854, 0x0441, "LG CQ6 AIO", ALC256_FIXUP_HEADPHONE_AMP_VOL), 7112 7112 SND_PCI_QUIRK(0x1854, 0x0488, "LG gram 16 (16Z90R)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS), 7113 + SND_PCI_QUIRK(0x1854, 0x0489, "LG gram 16 (16Z90R-A)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS), 7113 7114 SND_PCI_QUIRK(0x1854, 0x048a, "LG gram 17 (17ZD90R)", ALC298_FIXUP_SAMSUNG_AMP_V2_4_AMPS), 7114 7115 SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), 7115 7116 SND_PCI_QUIRK(0x19e5, 0x320f, "Huawei WRT-WX9 ", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+1 -1
sound/hda/codecs/side-codecs/cirrus_scodec_test.c
··· 86 86 .direction_input = cirrus_scodec_test_gpio_direction_in, 87 87 .get = cirrus_scodec_test_gpio_get, 88 88 .direction_output = cirrus_scodec_test_gpio_direction_out, 89 - .set_rv = cirrus_scodec_test_gpio_set, 89 + .set = cirrus_scodec_test_gpio_set, 90 90 .set_config = cirrus_scodec_test_gpio_set_config, 91 91 .base = -1, 92 92 .ngpio = 32,
+32 -15
sound/hda/codecs/side-codecs/tas2781_hda.c
··· 18 18 19 19 #include "tas2781_hda.h" 20 20 21 + #define CALIBRATION_DATA_AREA_NUM 2 22 + 21 23 const efi_guid_t tasdev_fct_efi_guid[] = { 22 24 /* DELL */ 23 25 EFI_GUID(0xcc92382d, 0x6337, 0x41cb, 0xa8, 0x8b, 0x8e, 0xce, 0x74, ··· 162 160 * manufactory. 163 161 */ 164 162 efi_guid_t efi_guid = tasdev_fct_efi_guid[LENOVO]; 165 - static efi_char16_t efi_name[] = TASDEVICE_CALIBRATION_DATA_NAME; 163 + /* 164 + * Some devices save the calibrated data into L"CALI_DATA", 165 + * and others into L"SmartAmpCalibrationData". 166 + */ 167 + static efi_char16_t *efi_name[CALIBRATION_DATA_AREA_NUM] = { 168 + L"CALI_DATA", 169 + L"SmartAmpCalibrationData", 170 + }; 166 171 struct tasdevice_priv *p = hda->priv; 167 172 struct calidata *cali_data = &p->cali_data; 168 173 unsigned long total_sz = 0; 169 174 unsigned int attr, size; 170 175 unsigned char *data; 171 176 efi_status_t status; 177 + int i; 172 178 173 179 if (hda->catlog_id < LENOVO) 174 180 efi_guid = tasdev_fct_efi_guid[hda->catlog_id]; 175 181 176 182 cali_data->cali_dat_sz_per_dev = 20; 177 183 size = p->ndev * (cali_data->cali_dat_sz_per_dev + 1); 178 - /* Get real size of UEFI variable */ 179 - status = efi.get_variable(efi_name, &efi_guid, &attr, &total_sz, NULL); 180 - cali_data->total_sz = total_sz > size ? total_sz : size; 181 - if (status == EFI_BUFFER_TOO_SMALL) { 182 - /* Allocate data buffer of data_size bytes */ 183 - data = p->cali_data.data = devm_kzalloc(p->dev, 184 - p->cali_data.total_sz, GFP_KERNEL); 185 - if (!data) { 186 - p->cali_data.total_sz = 0; 187 - return -ENOMEM; 184 + for (i = 0; i < CALIBRATION_DATA_AREA_NUM; i++) { 185 + /* Get real size of UEFI variable */ 186 + status = efi.get_variable(efi_name[i], &efi_guid, &attr, 187 + &total_sz, NULL); 188 + cali_data->total_sz = total_sz > size ? total_sz : size; 189 + if (status == EFI_BUFFER_TOO_SMALL) { 190 + /* Allocate data buffer of data_size bytes */ 191 + data = cali_data->data = devm_kzalloc(p->dev, 192 + cali_data->total_sz, GFP_KERNEL); 193 + if (!data) { 194 + status = -ENOMEM; 195 + continue; 196 + } 197 + /* Get variable contents into buffer */ 198 + status = efi.get_variable(efi_name[i], &efi_guid, 199 + &attr, &cali_data->total_sz, data); 188 200 } 189 - /* Get variable contents into buffer */ 190 - status = efi.get_variable(efi_name, &efi_guid, &attr, 191 - &p->cali_data.total_sz, data); 201 + /* Check whether get the calibrated data */ 202 + if (status == EFI_SUCCESS) 203 + break; 192 204 } 205 + 193 206 if (status != EFI_SUCCESS) { 194 - p->cali_data.total_sz = 0; 207 + cali_data->total_sz = 0; 195 208 return status; 196 209 } 197 210
+1 -1
sound/hda/codecs/side-codecs/tas2781_hda.h
··· 11 11 12 12 /* Flag of calibration registers address. */ 13 13 #define TASDEV_UEFI_CALI_REG_ADDR_FLG BIT(7) 14 - #define TASDEVICE_CALIBRATION_DATA_NAME L"CALI_DATA" 14 + 15 15 #define TASDEV_CALIB_N 5 16 16 17 17 /*
+3
sound/soc/amd/acp/acp-sdw-legacy-mach.c
··· 158 158 break; 159 159 case ACP70_PCI_REV: 160 160 case ACP71_PCI_REV: 161 + case ACP72_PCI_REV: 161 162 ret = get_acp70_cpu_pin_id(ffs(soc_end->link_mask - 1), 162 163 *be_id, &cpu_pin_id, dev); 163 164 if (ret) ··· 265 264 case ACP63_PCI_REV: 266 265 case ACP70_PCI_REV: 267 266 case ACP71_PCI_REV: 267 + case ACP72_PCI_REV: 268 268 sdw_platform_component->name = "amd_ps_sdw_dma.0"; 269 269 break; 270 270 default: ··· 313 311 case ACP63_PCI_REV: 314 312 case ACP70_PCI_REV: 315 313 case ACP71_PCI_REV: 314 + case ACP72_PCI_REV: 316 315 pdm_cpu->name = "acp_ps_pdm_dma.0"; 317 316 pdm_platform->name = "acp_ps_pdm_dma.0"; 318 317 break;
+1
sound/soc/amd/acp/acp-sdw-sof-mach.c
··· 130 130 break; 131 131 case ACP70_PCI_REV: 132 132 case ACP71_PCI_REV: 133 + case ACP72_PCI_REV: 133 134 ret = get_acp70_cpu_pin_id(ffs(sof_end->link_mask - 1), 134 135 *be_id, &cpu_pin_id, dev); 135 136 if (ret)
+2
sound/soc/amd/acp/soc_amd_sdw_common.h
··· 21 21 #define ACP63_PCI_REV 0x63 22 22 #define ACP70_PCI_REV 0x70 23 23 #define ACP71_PCI_REV 0x71 24 + #define ACP72_PCI_REV 0x72 25 + 24 26 #define SOC_JACK_JDSRC(quirk) ((quirk) & GENMASK(3, 0)) 25 27 #define ASOC_SDW_FOUR_SPK BIT(4) 26 28 #define ASOC_SDW_ACP_DMIC BIT(5)
+1
sound/soc/amd/ps/acp63.h
··· 14 14 #define ACP63_PCI_REV 0x63 15 15 #define ACP70_PCI_REV 0x70 16 16 #define ACP71_PCI_REV 0x71 17 + #define ACP72_PCI_REV 0x72 17 18 18 19 #define ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK 0x00010001 19 20 #define ACP63_PGFSM_CNTL_POWER_ON_MASK 1
+4
sound/soc/amd/ps/pci-ps.c
··· 117 117 break; 118 118 case ACP70_PCI_REV: 119 119 case ACP71_PCI_REV: 120 + case ACP72_PCI_REV: 120 121 adata->acp70_sdw0_dma_intr_stat[stream_id] = 1; 121 122 break; 122 123 } ··· 142 141 break; 143 142 case ACP70_PCI_REV: 144 143 case ACP71_PCI_REV: 144 + case ACP72_PCI_REV: 145 145 if (ext_intr_stat1 & ACP70_P1_SDW_DMA_IRQ_MASK) { 146 146 for (index = ACP70_P1_AUDIO2_RX_THRESHOLD; 147 147 index <= ACP70_P1_AUDIO0_TX_THRESHOLD; index++) { ··· 554 552 break; 555 553 case ACP70_PCI_REV: 556 554 case ACP71_PCI_REV: 555 + case ACP72_PCI_REV: 557 556 acp70_hw_init_ops(adata->hw_ops); 558 557 break; 559 558 default: ··· 584 581 case ACP63_PCI_REV: 585 582 case ACP70_PCI_REV: 586 583 case ACP71_PCI_REV: 584 + case ACP72_PCI_REV: 587 585 break; 588 586 default: 589 587 dev_dbg(&pci->dev, "acp63/acp70/acp71 pci device not found\n");
+5
sound/soc/amd/ps/ps-sdw-dma.c
··· 269 269 break; 270 270 case ACP70_PCI_REV: 271 271 case ACP71_PCI_REV: 272 + case ACP72_PCI_REV: 272 273 switch (manager_instance) { 273 274 case ACP_SDW0: 274 275 reg_dma_size = acp70_sdw0_dma_reg[stream_id].reg_dma_size; ··· 383 382 break; 384 383 case ACP70_PCI_REV: 385 384 case ACP71_PCI_REV: 385 + case ACP72_PCI_REV: 386 386 switch (stream->instance) { 387 387 case ACP_SDW0: 388 388 sdw_data->acp70_sdw0_dma_stream[stream_id] = substream; ··· 453 451 break; 454 452 case ACP70_PCI_REV: 455 453 case ACP71_PCI_REV: 454 + case ACP72_PCI_REV: 456 455 switch (stream->instance) { 457 456 case ACP_SDW0: 458 457 pos_low_reg = acp70_sdw0_dma_reg[stream->stream_id].pos_low_reg; ··· 532 529 break; 533 530 case ACP70_PCI_REV: 534 531 case ACP71_PCI_REV: 532 + case ACP72_PCI_REV: 535 533 switch (stream->instance) { 536 534 case ACP_SDW0: 537 535 sdw_data->acp70_sdw0_dma_stream[stream->stream_id] = NULL; ··· 578 574 break; 579 575 case ACP70_PCI_REV: 580 576 case ACP71_PCI_REV: 577 + case ACP72_PCI_REV: 581 578 switch (stream->instance) { 582 579 case ACP_SDW0: 583 580 sdw_dma_en_reg = acp70_sdw0_dma_enable_reg[stream_id];
+1 -1
sound/soc/codecs/idt821034.c
··· 1117 1117 idt821034->gpio_chip.direction_input = idt821034_chip_direction_input; 1118 1118 idt821034->gpio_chip.direction_output = idt821034_chip_direction_output; 1119 1119 idt821034->gpio_chip.get = idt821034_chip_gpio_get; 1120 - idt821034->gpio_chip.set_rv = idt821034_chip_gpio_set; 1120 + idt821034->gpio_chip.set = idt821034_chip_gpio_set; 1121 1121 idt821034->gpio_chip.can_sleep = true; 1122 1122 1123 1123 return devm_gpiochip_add_data(&idt821034->spi->dev, &idt821034->gpio_chip,
+1 -1
sound/soc/codecs/peb2466.c
··· 1945 1945 peb2466->gpio.gpio_chip.direction_input = peb2466_chip_direction_input; 1946 1946 peb2466->gpio.gpio_chip.direction_output = peb2466_chip_direction_output; 1947 1947 peb2466->gpio.gpio_chip.get = peb2466_chip_gpio_get; 1948 - peb2466->gpio.gpio_chip.set_rv = peb2466_chip_gpio_set; 1948 + peb2466->gpio.gpio_chip.set = peb2466_chip_gpio_set; 1949 1949 peb2466->gpio.gpio_chip.can_sleep = true; 1950 1950 1951 1951 return devm_gpiochip_add_data(&peb2466->spi->dev, &peb2466->gpio.gpio_chip,
+1 -1
sound/soc/codecs/rt5677.c
··· 4835 4835 .label = RT5677_DRV_NAME, 4836 4836 .owner = THIS_MODULE, 4837 4837 .direction_output = rt5677_gpio_direction_out, 4838 - .set_rv = rt5677_gpio_set, 4838 + .set = rt5677_gpio_set, 4839 4839 .direction_input = rt5677_gpio_direction_in, 4840 4840 .get = rt5677_gpio_get, 4841 4841 .to_irq = rt5677_to_irq,
+1 -1
sound/soc/codecs/tlv320adc3xxx.c
··· 1052 1052 .owner = THIS_MODULE, 1053 1053 .request = adc3xxx_gpio_request, 1054 1054 .direction_output = adc3xxx_gpio_direction_out, 1055 - .set_rv = adc3xxx_gpio_set, 1055 + .set = adc3xxx_gpio_set, 1056 1056 .get = adc3xxx_gpio_get, 1057 1057 .can_sleep = 1, 1058 1058 };
+1 -1
sound/soc/codecs/wm5100.c
··· 2290 2290 .label = "wm5100", 2291 2291 .owner = THIS_MODULE, 2292 2292 .direction_output = wm5100_gpio_direction_out, 2293 - .set_rv = wm5100_gpio_set, 2293 + .set = wm5100_gpio_set, 2294 2294 .direction_input = wm5100_gpio_direction_in, 2295 2295 .get = wm5100_gpio_get, 2296 2296 .can_sleep = 1,
+1 -1
sound/soc/codecs/wm8903.c
··· 1843 1843 .direction_input = wm8903_gpio_direction_in, 1844 1844 .get = wm8903_gpio_get, 1845 1845 .direction_output = wm8903_gpio_direction_out, 1846 - .set_rv = wm8903_gpio_set, 1846 + .set = wm8903_gpio_set, 1847 1847 .can_sleep = 1, 1848 1848 }; 1849 1849
+12 -1
sound/soc/codecs/wm8962.c
··· 82 82 #endif 83 83 84 84 int irq; 85 + bool master_flag; 85 86 }; 86 87 87 88 /* We can't use the same notifier block for more than one supply and ··· 2716 2715 static int wm8962_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) 2717 2716 { 2718 2717 struct snd_soc_component *component = dai->component; 2718 + struct wm8962_priv *wm8962 = snd_soc_component_get_drvdata(component); 2719 2719 int aif0 = 0; 2720 2720 2721 2721 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { ··· 2763 2761 return -EINVAL; 2764 2762 } 2765 2763 2764 + wm8962->master_flag = false; 2766 2765 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 2767 2766 case SND_SOC_DAIFMT_CBP_CFP: 2768 2767 aif0 |= WM8962_MSTR; 2768 + wm8962->master_flag = true; 2769 2769 break; 2770 2770 case SND_SOC_DAIFMT_CBC_CFC: 2771 2771 break; ··· 3446 3442 .owner = THIS_MODULE, 3447 3443 .request = wm8962_gpio_request, 3448 3444 .direction_output = wm8962_gpio_direction_out, 3449 - .set_rv = wm8962_gpio_set, 3445 + .set = wm8962_gpio_set, 3450 3446 .can_sleep = 1, 3451 3447 }; 3452 3448 ··· 3907 3903 WM8962_BIAS_ENA | WM8962_VMID_SEL_MASK, 3908 3904 WM8962_BIAS_ENA | 0x180); 3909 3905 3906 + if (wm8962->master_flag) 3907 + regmap_update_bits(wm8962->regmap, WM8962_AUDIO_INTERFACE_0, 3908 + WM8962_MSTR, WM8962_MSTR); 3910 3909 msleep(5); 3911 3910 3912 3911 return 0; ··· 3922 3915 static int wm8962_runtime_suspend(struct device *dev) 3923 3916 { 3924 3917 struct wm8962_priv *wm8962 = dev_get_drvdata(dev); 3918 + 3919 + if (wm8962->master_flag) 3920 + regmap_update_bits(wm8962->regmap, WM8962_AUDIO_INTERFACE_0, 3921 + WM8962_MSTR, 0); 3925 3922 3926 3923 regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1, 3927 3924 WM8962_VMID_SEL_MASK | WM8962_BIAS_ENA, 0);
+1 -1
sound/soc/codecs/wm8996.c
··· 2186 2186 .label = "wm8996", 2187 2187 .owner = THIS_MODULE, 2188 2188 .direction_output = wm8996_gpio_direction_out, 2189 - .set_rv = wm8996_gpio_set, 2189 + .set = wm8996_gpio_set, 2190 2190 .direction_input = wm8996_gpio_direction_in, 2191 2191 .get = wm8996_gpio_get, 2192 2192 .can_sleep = 1,
+1 -1
sound/soc/codecs/zl38060.c
··· 440 440 .direction_input = chip_direction_input, 441 441 .direction_output = chip_direction_output, 442 442 .get = chip_gpio_get, 443 - .set_rv = chip_gpio_set, 443 + .set = chip_gpio_set, 444 444 445 445 .can_sleep = true, 446 446 };
+2 -1
sound/soc/intel/avs/core.c
··· 445 445 adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL); 446 446 if (!adev) 447 447 return -ENOMEM; 448 + bus = &adev->base.core; 449 + 448 450 ret = avs_bus_init(adev, pci, id); 449 451 if (ret < 0) { 450 452 dev_err(dev, "failed to init avs bus: %d\n", ret); ··· 457 455 if (ret < 0) 458 456 return ret; 459 457 460 - bus = &adev->base.core; 461 458 bus->addr = pci_resource_start(pci, 0); 462 459 bus->remap_addr = pci_ioremap_bar(pci, 0); 463 460 if (!bus->remap_addr) {
+8
sound/soc/intel/boards/sof_sdw.c
··· 741 741 }, 742 742 .driver_data = (void *)(SOC_SDW_CODEC_SPKR), 743 743 }, 744 + { 745 + .callback = sof_sdw_quirk_cb, 746 + .matches = { 747 + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), 748 + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0CCC") 749 + }, 750 + .driver_data = (void *)(SOC_SDW_CODEC_SPKR), 751 + }, 744 752 /* Pantherlake devices*/ 745 753 { 746 754 .callback = sof_sdw_quirk_cb,
+1 -1
sound/soc/soc-ac97.c
··· 125 125 .direction_input = snd_soc_ac97_gpio_direction_in, 126 126 .get = snd_soc_ac97_gpio_get, 127 127 .direction_output = snd_soc_ac97_gpio_direction_out, 128 - .set_rv = snd_soc_ac97_gpio_set, 128 + .set = snd_soc_ac97_gpio_set, 129 129 .can_sleep = 1, 130 130 }; 131 131
+3 -3
sound/soc/sof/amd/acp-loader.c
··· 65 65 dma_size = page_count * ACP_PAGE_SIZE; 66 66 adata->bin_buf = dma_alloc_coherent(&pci->dev, dma_size, 67 67 &adata->sha_dma_addr, 68 - GFP_ATOMIC); 68 + GFP_KERNEL); 69 69 if (!adata->bin_buf) 70 70 return -ENOMEM; 71 71 } ··· 77 77 adata->data_buf = dma_alloc_coherent(&pci->dev, 78 78 ACP_DEFAULT_DRAM_LENGTH, 79 79 &adata->dma_addr, 80 - GFP_ATOMIC); 80 + GFP_KERNEL); 81 81 if (!adata->data_buf) 82 82 return -ENOMEM; 83 83 } ··· 90 90 adata->sram_data_buf = dma_alloc_coherent(&pci->dev, 91 91 ACP_DEFAULT_SRAM_LENGTH, 92 92 &adata->sram_dma_addr, 93 - GFP_ATOMIC); 93 + GFP_KERNEL); 94 94 if (!adata->sram_data_buf) 95 95 return -ENOMEM; 96 96 }
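These firmware buffers are allocated from process context, where sleeping is allowed, so GFP_KERNEL is the appropriate flag; GFP_ATOMIC is meant for callers that cannot sleep and draws on limited emergency reserves. A minimal sketch of the pattern, assuming a hypothetical driver's load-time setup:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch: coherent DMA allocation at probe/firmware-load time may
 * block, so pass GFP_KERNEL; reserve GFP_ATOMIC for atomic context. */
static void *alloc_fw_buf(struct device *dev, size_t size, dma_addr_t *dma)
{
    return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}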
+8
sound/soc/sof/amd/acp.c
··· 59 59 switch (acp_data->pci_rev) { 60 60 case ACP70_PCI_ID: 61 61 case ACP71_PCI_ID: 62 + case ACP72_PCI_ID: 62 63 acp_dma_desc_base_addr = ACP70_DMA_DESC_BASE_ADDR; 63 64 acp_dma_desc_max_num_dscr = ACP70_DMA_DESC_MAX_NUM_DSCR; 64 65 break; ··· 100 99 switch (acp_data->pci_rev) { 101 100 case ACP70_PCI_ID: 102 101 case ACP71_PCI_ID: 102 + case ACP72_PCI_ID: 103 103 acp_dma_cntl_0 = ACP70_DMA_CNTL_0; 104 104 acp_dma_ch_rst_sts = ACP70_DMA_CH_RST_STS; 105 105 acp_dma_dscr_err_sts_0 = ACP70_DMA_ERR_STS_0; ··· 341 339 switch (adata->pci_rev) { 342 340 case ACP70_PCI_ID: 343 341 case ACP71_PCI_ID: 342 + case ACP72_PCI_ID: 344 343 acp_dma_ch_sts = ACP70_DMA_CH_STS; 345 344 break; 346 345 default: ··· 525 522 switch (adata->pci_rev) { 526 523 case ACP70_PCI_ID: 527 524 case ACP71_PCI_ID: 525 + case ACP72_PCI_ID: 528 526 wake_irq_flag = amd_sof_check_and_handle_acp70_sdw_wake_irq(sdev); 529 527 break; 530 528 } ··· 563 559 break; 564 560 case ACP70_PCI_ID: 565 561 case ACP71_PCI_ID: 562 + case ACP72_PCI_ID: 566 563 acp_pgfsm_status_mask = ACP70_PGFSM_STATUS_MASK; 567 564 acp_pgfsm_cntl_mask = ACP70_PGFSM_CNTL_POWER_ON_MASK; 568 565 break; ··· 666 661 switch (acp_data->pci_rev) { 667 662 case ACP70_PCI_ID: 668 663 case ACP71_PCI_ID: 664 + case ACP72_PCI_ID: 669 665 sdw0_wake_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP70_SW0_WAKE_EN); 670 666 sdw1_wake_en = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP70_SW1_WAKE_EN); 671 667 if (sdw0_wake_en || sdw1_wake_en) ··· 718 712 switch (acp_data->pci_rev) { 719 713 case ACP70_PCI_ID: 720 714 case ACP71_PCI_ID: 715 + case ACP72_PCI_ID: 721 716 enable = true; 722 717 break; 723 718 } ··· 745 738 switch (acp_data->pci_rev) { 746 739 case ACP70_PCI_ID: 747 740 case ACP71_PCI_ID: 741 + case ACP72_PCI_ID: 748 742 snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP70_PME_EN, 1); 749 743 break; 750 744 }
+1
sound/soc/sof/amd/acp.h
··· 75 75 #define ACP63_PCI_ID 0x63 76 76 #define ACP70_PCI_ID 0x70 77 77 #define ACP71_PCI_ID 0x71 78 + #define ACP72_PCI_ID 0x72 78 79 79 80 #define HOST_BRIDGE_CZN 0x1630 80 81 #define HOST_BRIDGE_VGH 0x1645
+1
sound/soc/sof/amd/pci-acp70.c
··· 77 77 switch (pci->revision) { 78 78 case ACP70_PCI_ID: 79 79 case ACP71_PCI_ID: 80 + case ACP72_PCI_ID: 80 81 break; 81 82 default: 82 83 return -ENODEV;
+2 -1
sound/soc/sof/intel/Kconfig
··· 266 266 267 267 config SND_SOC_SOF_INTEL_LNL 268 268 tristate 269 + select SOUNDWIRE_INTEL if SND_SOC_SOF_INTEL_SOUNDWIRE != n 269 270 select SND_SOC_SOF_HDA_GENERIC 270 271 select SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE 271 - select SND_SOF_SOF_HDA_SDW_BPT if SND_SOC_SOF_INTEL_SOUNDWIRE 272 + select SND_SOF_SOF_HDA_SDW_BPT if SND_SOC_SOF_INTEL_SOUNDWIRE != n 272 273 select SND_SOC_SOF_IPC4 273 274 select SND_SOC_SOF_INTEL_MTL 274 275
+1 -1
sound/soc/ti/davinci-mcasp.c
··· 2218 2218 .request = davinci_mcasp_gpio_request, 2219 2219 .free = davinci_mcasp_gpio_free, 2220 2220 .direction_output = davinci_mcasp_gpio_direction_out, 2221 - .set_rv = davinci_mcasp_gpio_set, 2221 + .set = davinci_mcasp_gpio_set, 2222 2222 .direction_input = davinci_mcasp_gpio_direction_in, 2223 2223 .get = davinci_mcasp_gpio_get, 2224 2224 .get_direction = davinci_mcasp_gpio_get_direction,
+4 -5
sound/usb/pcm.c
··· 1336 1336 1337 1337 for (i = 0; i < urb->number_of_packets; i++) { 1338 1338 cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset + subs->pkt_offset_adj; 1339 - if (urb->iso_frame_desc[i].status && printk_ratelimit()) { 1340 - dev_dbg(&subs->dev->dev, "frame %d active: %d\n", 1341 - i, urb->iso_frame_desc[i].status); 1342 - // continue; 1343 - } 1339 + if (urb->iso_frame_desc[i].status) 1340 + dev_dbg_ratelimited(&subs->dev->dev, 1341 + "frame %d active: %d\n", i, 1342 + urb->iso_frame_desc[i].status); 1344 1343 bytes = urb->iso_frame_desc[i].actual_length; 1345 1344 if (subs->stream_offset_adj > 0) { 1346 1345 unsigned int adj = min(subs->stream_offset_adj, bytes);
+1 -1
sound/x86/intel_hdmi_audio.c
··· 1765 1765 /* setup private data which can be retrieved when required */ 1766 1766 pcm->private_data = ctx; 1767 1767 pcm->info_flags = 0; 1768 - strscpy(pcm->name, card->shortname, strlen(card->shortname)); 1768 + strscpy(pcm->name, card->shortname, sizeof(pcm->name)); 1769 1769 /* setup the ops for playback */ 1770 1770 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &had_pcm_ops); 1771 1771
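The sizing rule behind this one-liner: strscpy()'s third argument must bound the destination buffer. Deriving it from strlen() of the source gives no overflow protection, and since strscpy() reserves a byte for the terminator it also silently drops the last character. A minimal sketch with a hypothetical structure:

#include <linux/string.h>

struct label {
    char name[32];                   /* hypothetical destination */
};

/* Bound the copy by the destination, never by the source. */
static void label_set(struct label *l, const char *src)
{
    strscpy(l->name, src, sizeof(l->name));
}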
+8 -5
tools/lib/bpf/libbpf.c
··· 10965 10965 } 10966 10966 link->link.fd = pfd; 10967 10967 } 10968 - if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { 10969 - err = -errno; 10970 - pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n", 10971 - prog->name, pfd, errstr(err)); 10972 - goto err_out; 10968 + 10969 + if (!OPTS_GET(opts, dont_enable, false)) { 10970 + if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { 10971 + err = -errno; 10972 + pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n", 10973 + prog->name, pfd, errstr(err)); 10974 + goto err_out; 10975 + } 10973 10976 } 10974 10977 10975 10978 return &link->link;
+3 -1
tools/lib/bpf/libbpf.h
··· 499 499 __u64 bpf_cookie; 500 500 /* don't use BPF link when attach BPF program */ 501 501 bool force_ioctl_attach; 502 + /* don't automatically enable the event */ 503 + bool dont_enable; 502 504 size_t :0; 503 505 }; 504 - #define bpf_perf_event_opts__last_field force_ioctl_attach 506 + #define bpf_perf_event_opts__last_field dont_enable 505 507 506 508 LIBBPF_API struct bpf_link * 507 509 bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);
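The new opt lets a caller attach a BPF program to a perf event while leaving the event disabled, then arm it explicitly once the rest of the setup is in place; the tools/perf hunk below uses it this way for its sample filter, presumably so the filter cannot fire before every CPU's program is attached. A hedged usage sketch, assuming prog and perf_fd come from the usual skeleton and perf_event_open() plumbing:

#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_without_enabling(struct bpf_program *prog,
                                                int perf_fd)
{
    DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts,
                        .dont_enable = true);

    /* Attaches the program but skips PERF_EVENT_IOC_ENABLE. */
    return bpf_program__attach_perf_event_opts(prog, perf_fd, &opts);
}

static int arm_event(int perf_fd)
{
    /* Enable explicitly once all programs are in place. */
    return ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);
}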
+4 -1
tools/perf/util/bpf-filter.c
··· 451 451 struct bpf_link *link; 452 452 struct perf_bpf_filter_entry *entry; 453 453 bool needs_idx_hash = !target__has_cpu(target); 454 + DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts, 455 + .dont_enable = true); 454 456 455 457 entry = calloc(MAX_FILTERS, sizeof(*entry)); 456 458 if (entry == NULL) ··· 524 522 prog = skel->progs.perf_sample_filter; 525 523 for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) { 526 524 for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) { 527 - link = bpf_program__attach_perf_event(prog, FD(evsel, x, y)); 525 + link = bpf_program__attach_perf_event_opts(prog, FD(evsel, x, y), 526 + &pe_opts); 528 527 if (IS_ERR(link)) { 529 528 pr_err("Failed to attach perf sample-filter program\n"); 530 529 ret = PTR_ERR(link);
+10 -1
tools/power/x86/turbostat/turbostat.8
··· 47 47 MSRs are read as 64-bits, u32 truncates the displayed value to 32-bits. 48 48 default: u64 49 49 50 - format: {\fBraw\fP | \fBdelta\fP | \fBpercent\fP} 50 + format: {\fBraw\fP | \fBdelta\fP | \fBpercent\fP | \fBaverage\fP} 51 51 'raw' shows the MSR contents in hex. 52 52 'delta' shows the difference in values during the measurement interval. 53 53 'percent' shows the delta as a percentage of the cycles elapsed. 54 + 'average' similar to raw, but also averaged for node/package summaries (or when using -S). 54 55 default: delta 55 56 56 57 name: "name_string" ··· 186 185 \fBSAMMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/drm/card0/gt/gt1/rps_cur_freq_mhz or /sys/class/drm/card0/device/tile0/gtN/freq0/cur_freq depending on the graphics driver being used. 187 186 .PP 188 187 \fBSAMAMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/drm/card0/gt/gt1/rps_act_freq_mhz or /sys/class/drm/card0/device/tile0/gtN/freq0/act_freq depending on the graphics driver being used. 188 + .PP 189 + \fBTotl%C0\fP Weighted percentage of time that CPUs are busy. If N CPUs are busy during an interval, the percentage is N * 100%. 190 + .PP 191 + \fBAny%C0\fP Percentage of time that at least one CPU is busy. 192 + .PP 193 + \fBGFX%C0\fP Percentage of time that at least one GFX compute engine is busy. 194 + .PP 195 + \fBCPUGFX%\fP Percentage of time that at least one CPU is busy at the same time as at least one Graphics compute engine is busy. 189 196 .PP 190 197 \fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. 191 198 .PP
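The turbostat rework below is driven by the old column bitmask running out of room: the removed BIC_* defines occupy all 64 bit positions of an unsigned long long, so the new L3 column had nowhere to go. Columns become plain enum values that are tested and combined with the POSIX cpu_set_t macros instead, which scale to CPU_SETSIZE (1024 on glibc) entries. A standalone sketch of that idiom, using illustrative names rather than turbostat's own:

#define _GNU_SOURCE                /* CPU_OR is a glibc extension */
#include <sched.h>
#include <assert.h>
#include <stdio.h>

enum col { COL_BUSY, COL_MHZ, COL_TEMP, MAX_COL };
static const char *col_name[MAX_COL] = { "Busy", "MHz", "Temp" };

int main(void)
{
    cpu_set_t enabled, freq_group;

    assert(MAX_COL < CPU_SETSIZE);     /* far more headroom than 64 bits */

    CPU_ZERO(&freq_group);
    CPU_SET(COL_MHZ, &freq_group);     /* build a named column group */

    CPU_ZERO(&enabled);
    CPU_OR(&enabled, &enabled, &freq_group);   /* enable the whole group */

    for (int i = 0; i < MAX_COL; i++)
        if (CPU_ISSET(i, &enabled))
            printf("%s\n", col_name[i]);
    return 0;
}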
+540 -220
tools/power/x86/turbostat/turbostat.c
··· 67 67 #include <stdbool.h> 68 68 #include <assert.h> 69 69 #include <linux/kernel.h> 70 + #include <limits.h> 70 71 71 72 #define UNUSED(x) (void)(x) 72 73 ··· 195 194 { 0x0, "APIC", NULL, 0, 0, 0, NULL, 0 }, 196 195 { 0x0, "X2APIC", NULL, 0, 0, 0, NULL, 0 }, 197 196 { 0x0, "Die", NULL, 0, 0, 0, NULL, 0 }, 197 + { 0x0, "L3", NULL, 0, 0, 0, NULL, 0 }, 198 198 { 0x0, "GFXAMHz", NULL, 0, 0, 0, NULL, 0 }, 199 199 { 0x0, "IPC", NULL, 0, 0, 0, NULL, 0 }, 200 200 { 0x0, "CoreThr", NULL, 0, 0, 0, NULL, 0 }, ··· 211 209 { 0x0, "pct_idle", NULL, 0, 0, 0, NULL, 0 }, 212 210 }; 213 211 214 - #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter)) 215 - #define BIC_USEC (1ULL << 0) 216 - #define BIC_TOD (1ULL << 1) 217 - #define BIC_Package (1ULL << 2) 218 - #define BIC_Node (1ULL << 3) 219 - #define BIC_Avg_MHz (1ULL << 4) 220 - #define BIC_Busy (1ULL << 5) 221 - #define BIC_Bzy_MHz (1ULL << 6) 222 - #define BIC_TSC_MHz (1ULL << 7) 223 - #define BIC_IRQ (1ULL << 8) 224 - #define BIC_SMI (1ULL << 9) 225 - #define BIC_cpuidle (1ULL << 10) 226 - #define BIC_CPU_c1 (1ULL << 11) 227 - #define BIC_CPU_c3 (1ULL << 12) 228 - #define BIC_CPU_c6 (1ULL << 13) 229 - #define BIC_CPU_c7 (1ULL << 14) 230 - #define BIC_ThreadC (1ULL << 15) 231 - #define BIC_CoreTmp (1ULL << 16) 232 - #define BIC_CoreCnt (1ULL << 17) 233 - #define BIC_PkgTmp (1ULL << 18) 234 - #define BIC_GFX_rc6 (1ULL << 19) 235 - #define BIC_GFXMHz (1ULL << 20) 236 - #define BIC_Pkgpc2 (1ULL << 21) 237 - #define BIC_Pkgpc3 (1ULL << 22) 238 - #define BIC_Pkgpc6 (1ULL << 23) 239 - #define BIC_Pkgpc7 (1ULL << 24) 240 - #define BIC_Pkgpc8 (1ULL << 25) 241 - #define BIC_Pkgpc9 (1ULL << 26) 242 - #define BIC_Pkgpc10 (1ULL << 27) 243 - #define BIC_CPU_LPI (1ULL << 28) 244 - #define BIC_SYS_LPI (1ULL << 29) 245 - #define BIC_PkgWatt (1ULL << 30) 246 - #define BIC_CorWatt (1ULL << 31) 247 - #define BIC_GFXWatt (1ULL << 32) 248 - #define BIC_PkgCnt (1ULL << 33) 249 - #define BIC_RAMWatt (1ULL << 34) 250 - #define BIC_PKG__ (1ULL << 35) 251 - #define BIC_RAM__ (1ULL << 36) 252 - #define BIC_Pkg_J (1ULL << 37) 253 - #define BIC_Cor_J (1ULL << 38) 254 - #define BIC_GFX_J (1ULL << 39) 255 - #define BIC_RAM_J (1ULL << 40) 256 - #define BIC_Mod_c6 (1ULL << 41) 257 - #define BIC_Totl_c0 (1ULL << 42) 258 - #define BIC_Any_c0 (1ULL << 43) 259 - #define BIC_GFX_c0 (1ULL << 44) 260 - #define BIC_CPUGFX (1ULL << 45) 261 - #define BIC_Core (1ULL << 46) 262 - #define BIC_CPU (1ULL << 47) 263 - #define BIC_APIC (1ULL << 48) 264 - #define BIC_X2APIC (1ULL << 49) 265 - #define BIC_Die (1ULL << 50) 266 - #define BIC_GFXACTMHz (1ULL << 51) 267 - #define BIC_IPC (1ULL << 52) 268 - #define BIC_CORE_THROT_CNT (1ULL << 53) 269 - #define BIC_UNCORE_MHZ (1ULL << 54) 270 - #define BIC_SAM_mc6 (1ULL << 55) 271 - #define BIC_SAMMHz (1ULL << 56) 272 - #define BIC_SAMACTMHz (1ULL << 57) 273 - #define BIC_Diec6 (1ULL << 58) 274 - #define BIC_SysWatt (1ULL << 59) 275 - #define BIC_Sys_J (1ULL << 60) 276 - #define BIC_NMI (1ULL << 61) 277 - #define BIC_CPU_c1e (1ULL << 62) 278 - #define BIC_pct_idle (1ULL << 63) 212 + /* n.b. 
bic_names must match the order in bic[], above */ 213 + enum bic_names { 214 + BIC_USEC, 215 + BIC_TOD, 216 + BIC_Package, 217 + BIC_Node, 218 + BIC_Avg_MHz, 219 + BIC_Busy, 220 + BIC_Bzy_MHz, 221 + BIC_TSC_MHz, 222 + BIC_IRQ, 223 + BIC_SMI, 224 + BIC_cpuidle, 225 + BIC_CPU_c1, 226 + BIC_CPU_c3, 227 + BIC_CPU_c6, 228 + BIC_CPU_c7, 229 + BIC_ThreadC, 230 + BIC_CoreTmp, 231 + BIC_CoreCnt, 232 + BIC_PkgTmp, 233 + BIC_GFX_rc6, 234 + BIC_GFXMHz, 235 + BIC_Pkgpc2, 236 + BIC_Pkgpc3, 237 + BIC_Pkgpc6, 238 + BIC_Pkgpc7, 239 + BIC_Pkgpc8, 240 + BIC_Pkgpc9, 241 + BIC_Pkgpc10, 242 + BIC_CPU_LPI, 243 + BIC_SYS_LPI, 244 + BIC_PkgWatt, 245 + BIC_CorWatt, 246 + BIC_GFXWatt, 247 + BIC_PkgCnt, 248 + BIC_RAMWatt, 249 + BIC_PKG__, 250 + BIC_RAM__, 251 + BIC_Pkg_J, 252 + BIC_Cor_J, 253 + BIC_GFX_J, 254 + BIC_RAM_J, 255 + BIC_Mod_c6, 256 + BIC_Totl_c0, 257 + BIC_Any_c0, 258 + BIC_GFX_c0, 259 + BIC_CPUGFX, 260 + BIC_Core, 261 + BIC_CPU, 262 + BIC_APIC, 263 + BIC_X2APIC, 264 + BIC_Die, 265 + BIC_L3, 266 + BIC_GFXACTMHz, 267 + BIC_IPC, 268 + BIC_CORE_THROT_CNT, 269 + BIC_UNCORE_MHZ, 270 + BIC_SAM_mc6, 271 + BIC_SAMMHz, 272 + BIC_SAMACTMHz, 273 + BIC_Diec6, 274 + BIC_SysWatt, 275 + BIC_Sys_J, 276 + BIC_NMI, 277 + BIC_CPU_c1e, 278 + BIC_pct_idle, 279 + MAX_BIC 280 + }; 279 281 280 - #define BIC_GROUP_TOPOLOGY (BIC_Package | BIC_Node | BIC_CoreCnt | BIC_PkgCnt | BIC_Core | BIC_CPU | BIC_Die) 281 - #define BIC_GROUP_THERMAL_PWR (BIC_CoreTmp | BIC_PkgTmp | BIC_PkgWatt | BIC_CorWatt | BIC_GFXWatt | BIC_RAMWatt | BIC_PKG__ | BIC_RAM__ | BIC_SysWatt) 282 - #define BIC_GROUP_FREQUENCY (BIC_Avg_MHz | BIC_Busy | BIC_Bzy_MHz | BIC_TSC_MHz | BIC_GFXMHz | BIC_GFXACTMHz | BIC_SAMMHz | BIC_SAMACTMHz | BIC_UNCORE_MHZ) 283 - #define BIC_GROUP_HW_IDLE (BIC_Busy | BIC_CPU_c1 | BIC_CPU_c3 | BIC_CPU_c6 | BIC_CPU_c7 | BIC_GFX_rc6 | BIC_Pkgpc2 | BIC_Pkgpc3 | BIC_Pkgpc6 | BIC_Pkgpc7 | BIC_Pkgpc8 | BIC_Pkgpc9 | BIC_Pkgpc10 | BIC_CPU_LPI | BIC_SYS_LPI | BIC_Mod_c6 | BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX | BIC_SAM_mc6 | BIC_Diec6) 284 - #define BIC_GROUP_SW_IDLE (BIC_Busy | BIC_cpuidle | BIC_pct_idle ) 285 - #define BIC_GROUP_IDLE (BIC_GROUP_HW_IDLE | BIC_pct_idle) 286 - #define BIC_OTHER (BIC_IRQ | BIC_NMI | BIC_SMI | BIC_ThreadC | BIC_CoreTmp | BIC_IPC) 282 + void print_bic_set(char *s, cpu_set_t *set) 283 + { 284 + int i; 287 285 288 - #define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC | BIC_cpuidle) 286 + assert(MAX_BIC < CPU_SETSIZE); 289 287 290 - unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT); 291 - unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_cpuidle | BIC_pct_idle | BIC_APIC | BIC_X2APIC; 288 + printf("%s:", s); 292 289 293 - #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) 294 - #define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME) 295 - #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) 296 - #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) 297 - #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) 298 - #define BIC_IS_ENABLED(COUNTER_BIT) (bic_enabled & COUNTER_BIT) 290 + for (i = 0; i <= MAX_BIC; ++i) { 291 + 292 + if (CPU_ISSET(i, set)) { 293 + assert(i < MAX_BIC); 294 + printf(" %s", bic[i].name); 295 + } 296 + } 297 + putchar('\n'); 298 + } 299 + 300 + static cpu_set_t bic_group_topology; 301 + static cpu_set_t bic_group_thermal_pwr; 302 + static cpu_set_t bic_group_frequency; 303 + static cpu_set_t bic_group_hw_idle; 304 + static cpu_set_t bic_group_sw_idle; 305 + 
static cpu_set_t bic_group_idle; 306 + static cpu_set_t bic_group_other; 307 + static cpu_set_t bic_group_disabled_by_default; 308 + static cpu_set_t bic_enabled; 309 + static cpu_set_t bic_present; 310 + 311 + /* modify */ 312 + #define BIC_INIT(set) CPU_ZERO(set) 313 + 314 + #define SET_BIC(COUNTER_NUMBER, set) CPU_SET(COUNTER_NUMBER, set) 315 + #define CLR_BIC(COUNTER_NUMBER, set) CPU_CLR(COUNTER_NUMBER, set) 316 + 317 + #define BIC_PRESENT(COUNTER_NUMBER) SET_BIC(COUNTER_NUMBER, &bic_present) 318 + #define BIC_NOT_PRESENT(COUNTER_NUMBER) CPU_CLR(COUNTER_NUMBER, &bic_present) 319 + 320 + /* test */ 321 + #define BIC_IS_ENABLED(COUNTER_NUMBER) CPU_ISSET(COUNTER_NUMBER, &bic_enabled) 322 + #define DO_BIC_READ(COUNTER_NUMBER) CPU_ISSET(COUNTER_NUMBER, &bic_present) 323 + #define DO_BIC(COUNTER_NUMBER) (CPU_ISSET(COUNTER_NUMBER, &bic_enabled) && CPU_ISSET(COUNTER_NUMBER, &bic_present)) 324 + 325 + static void bic_set_all(cpu_set_t *set) 326 + { 327 + int i; 328 + 329 + assert(MAX_BIC < CPU_SETSIZE); 330 + 331 + for (i = 0; i < MAX_BIC; ++i) 332 + SET_BIC(i, set); 333 + } 334 + 335 + /* 336 + * bic_clear_bits() 337 + * clear all the bits from "clr" in "dst" 338 + */ 339 + static void bic_clear_bits(cpu_set_t *dst, cpu_set_t *clr) 340 + { 341 + int i; 342 + 343 + assert(MAX_BIC < CPU_SETSIZE); 344 + 345 + for (i = 0; i < MAX_BIC; ++i) 346 + if (CPU_ISSET(i, clr)) 347 + CLR_BIC(i, dst); 348 + } 349 + 350 + static void bic_groups_init(void) 351 + { 352 + BIC_INIT(&bic_group_topology); 353 + SET_BIC(BIC_Package, &bic_group_topology); 354 + SET_BIC(BIC_Node, &bic_group_topology); 355 + SET_BIC(BIC_CoreCnt, &bic_group_topology); 356 + SET_BIC(BIC_PkgCnt, &bic_group_topology); 357 + SET_BIC(BIC_Core, &bic_group_topology); 358 + SET_BIC(BIC_CPU, &bic_group_topology); 359 + SET_BIC(BIC_Die, &bic_group_topology); 360 + SET_BIC(BIC_L3, &bic_group_topology); 361 + 362 + BIC_INIT(&bic_group_thermal_pwr); 363 + SET_BIC(BIC_CoreTmp, &bic_group_thermal_pwr); 364 + SET_BIC(BIC_PkgTmp, &bic_group_thermal_pwr); 365 + SET_BIC(BIC_PkgWatt, &bic_group_thermal_pwr); 366 + SET_BIC(BIC_CorWatt, &bic_group_thermal_pwr); 367 + SET_BIC(BIC_GFXWatt, &bic_group_thermal_pwr); 368 + SET_BIC(BIC_RAMWatt, &bic_group_thermal_pwr); 369 + SET_BIC(BIC_PKG__, &bic_group_thermal_pwr); 370 + SET_BIC(BIC_RAM__, &bic_group_thermal_pwr); 371 + SET_BIC(BIC_SysWatt, &bic_group_thermal_pwr); 372 + 373 + BIC_INIT(&bic_group_frequency); 374 + SET_BIC(BIC_Avg_MHz, &bic_group_frequency); 375 + SET_BIC(BIC_Busy, &bic_group_frequency); 376 + SET_BIC(BIC_Bzy_MHz, &bic_group_frequency); 377 + SET_BIC(BIC_TSC_MHz, &bic_group_frequency); 378 + SET_BIC(BIC_GFXMHz, &bic_group_frequency); 379 + SET_BIC(BIC_GFXACTMHz, &bic_group_frequency); 380 + SET_BIC(BIC_SAMMHz, &bic_group_frequency); 381 + SET_BIC(BIC_SAMACTMHz, &bic_group_frequency); 382 + SET_BIC(BIC_UNCORE_MHZ, &bic_group_frequency); 383 + 384 + BIC_INIT(&bic_group_hw_idle); 385 + SET_BIC(BIC_Busy, &bic_group_hw_idle); 386 + SET_BIC(BIC_CPU_c1, &bic_group_hw_idle); 387 + SET_BIC(BIC_CPU_c3, &bic_group_hw_idle); 388 + SET_BIC(BIC_CPU_c6, &bic_group_hw_idle); 389 + SET_BIC(BIC_CPU_c7, &bic_group_hw_idle); 390 + SET_BIC(BIC_GFX_rc6, &bic_group_hw_idle); 391 + SET_BIC(BIC_Pkgpc2, &bic_group_hw_idle); 392 + SET_BIC(BIC_Pkgpc3, &bic_group_hw_idle); 393 + SET_BIC(BIC_Pkgpc6, &bic_group_hw_idle); 394 + SET_BIC(BIC_Pkgpc7, &bic_group_hw_idle); 395 + SET_BIC(BIC_Pkgpc8, &bic_group_hw_idle); 396 + SET_BIC(BIC_Pkgpc9, &bic_group_hw_idle); 397 + SET_BIC(BIC_Pkgpc10, &bic_group_hw_idle); 398 + 
SET_BIC(BIC_CPU_LPI, &bic_group_hw_idle); 399 + SET_BIC(BIC_SYS_LPI, &bic_group_hw_idle); 400 + SET_BIC(BIC_Mod_c6, &bic_group_hw_idle); 401 + SET_BIC(BIC_Totl_c0, &bic_group_hw_idle); 402 + SET_BIC(BIC_Any_c0, &bic_group_hw_idle); 403 + SET_BIC(BIC_GFX_c0, &bic_group_hw_idle); 404 + SET_BIC(BIC_CPUGFX, &bic_group_hw_idle); 405 + SET_BIC(BIC_SAM_mc6, &bic_group_hw_idle); 406 + SET_BIC(BIC_Diec6, &bic_group_hw_idle); 407 + 408 + BIC_INIT(&bic_group_sw_idle); 409 + SET_BIC(BIC_Busy, &bic_group_sw_idle); 410 + SET_BIC(BIC_cpuidle, &bic_group_sw_idle); 411 + SET_BIC(BIC_pct_idle, &bic_group_sw_idle); 412 + 413 + BIC_INIT(&bic_group_idle); 414 + CPU_OR(&bic_group_idle, &bic_group_idle, &bic_group_hw_idle); 415 + SET_BIC(BIC_pct_idle, &bic_group_idle); 416 + 417 + BIC_INIT(&bic_group_other); 418 + SET_BIC(BIC_IRQ, &bic_group_other); 419 + SET_BIC(BIC_NMI, &bic_group_other); 420 + SET_BIC(BIC_SMI, &bic_group_other); 421 + SET_BIC(BIC_ThreadC, &bic_group_other); 422 + SET_BIC(BIC_CoreTmp, &bic_group_other); 423 + SET_BIC(BIC_IPC, &bic_group_other); 424 + 425 + BIC_INIT(&bic_group_disabled_by_default); 426 + SET_BIC(BIC_USEC, &bic_group_disabled_by_default); 427 + SET_BIC(BIC_TOD, &bic_group_disabled_by_default); 428 + SET_BIC(BIC_cpuidle, &bic_group_disabled_by_default); 429 + SET_BIC(BIC_APIC, &bic_group_disabled_by_default); 430 + SET_BIC(BIC_X2APIC, &bic_group_disabled_by_default); 431 + 432 + BIC_INIT(&bic_enabled); 433 + bic_set_all(&bic_enabled); 434 + bic_clear_bits(&bic_enabled, &bic_group_disabled_by_default); 435 + 436 + BIC_INIT(&bic_present); 437 + SET_BIC(BIC_USEC, &bic_present); 438 + SET_BIC(BIC_TOD, &bic_present); 439 + SET_BIC(BIC_cpuidle, &bic_present); 440 + SET_BIC(BIC_APIC, &bic_present); 441 + SET_BIC(BIC_X2APIC, &bic_present); 442 + SET_BIC(BIC_pct_idle, &bic_present); 443 + } 299 444 300 445 /* 301 446 * MSR_PKG_CST_CONFIG_CONTROL decoding for pkg_cstate_limit: ··· 989 840 }; 990 841 991 842 static const struct platform_features dmr_features = { 992 - .has_msr_misc_feature_control = spr_features.has_msr_misc_feature_control, 993 - .has_msr_misc_pwr_mgmt = spr_features.has_msr_misc_pwr_mgmt, 994 - .has_nhm_msrs = spr_features.has_nhm_msrs, 995 - .has_config_tdp = spr_features.has_config_tdp, 996 - .bclk_freq = spr_features.bclk_freq, 997 - .supported_cstates = spr_features.supported_cstates, 998 - .cst_limit = spr_features.cst_limit, 999 - .has_msr_core_c1_res = spr_features.has_msr_core_c1_res, 1000 - .has_msr_module_c6_res_ms = 1, /* DMR has Dual Core Module and MC6 MSR */ 1001 - .has_irtl_msrs = spr_features.has_irtl_msrs, 1002 - .has_cst_prewake_bit = spr_features.has_cst_prewake_bit, 1003 - .has_fixed_rapl_psys_unit = spr_features.has_fixed_rapl_psys_unit, 1004 - .trl_msrs = spr_features.trl_msrs, 1005 - .rapl_msrs = 0, /* DMR does not have RAPL MSRs */ 843 + .has_msr_misc_feature_control = spr_features.has_msr_misc_feature_control, 844 + .has_msr_misc_pwr_mgmt = spr_features.has_msr_misc_pwr_mgmt, 845 + .has_nhm_msrs = spr_features.has_nhm_msrs, 846 + .bclk_freq = spr_features.bclk_freq, 847 + .supported_cstates = spr_features.supported_cstates, 848 + .cst_limit = spr_features.cst_limit, 849 + .has_msr_core_c1_res = spr_features.has_msr_core_c1_res, 850 + .has_cst_prewake_bit = spr_features.has_cst_prewake_bit, 851 + .has_fixed_rapl_psys_unit = spr_features.has_fixed_rapl_psys_unit, 852 + .trl_msrs = spr_features.trl_msrs, 853 + .has_msr_module_c6_res_ms = 1, /* DMR has Dual-Core-Module and MC6 MSR */ 854 + .rapl_msrs = 0, /* DMR does not have RAPL MSRs */ 855 + 
.plr_msrs = 0, /* DMR does not have PLR MSRs */ 856 + .has_irtl_msrs = 0, /* DMR does not have IRTL MSRs */ 857 + .has_config_tdp = 0, /* DMR does not have CTDP MSRs */ 1006 858 }; 1007 859 1008 860 static const struct platform_features srf_features = { ··· 1354 1204 int msr_shift; /* Positive mean shift right, negative mean shift left */ 1355 1205 double *platform_rapl_msr_scale; /* Scale applied to values read by MSR (platform dependent, filled at runtime) */ 1356 1206 unsigned int rci_index; /* Maps data from perf counters to global variables */ 1357 - unsigned long long bic; 1207 + unsigned int bic_number; 1358 1208 double compat_scale; /* Some counters require constant scaling to be in the same range as other, similar ones */ 1359 1209 unsigned long long flags; 1360 1210 }; ··· 1369 1219 .msr_shift = 0, 1370 1220 .platform_rapl_msr_scale = &rapl_energy_units, 1371 1221 .rci_index = RAPL_RCI_INDEX_ENERGY_PKG, 1372 - .bic = BIC_PkgWatt | BIC_Pkg_J, 1222 + .bic_number = BIC_PkgWatt, 1223 + .compat_scale = 1.0, 1224 + .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1225 + }, 1226 + { 1227 + .feature_mask = RAPL_PKG, 1228 + .perf_subsys = "power", 1229 + .perf_name = "energy-pkg", 1230 + .msr = MSR_PKG_ENERGY_STATUS, 1231 + .msr_mask = 0xFFFFFFFFFFFFFFFF, 1232 + .msr_shift = 0, 1233 + .platform_rapl_msr_scale = &rapl_energy_units, 1234 + .rci_index = RAPL_RCI_INDEX_ENERGY_PKG, 1235 + .bic_number = BIC_Pkg_J, 1373 1236 .compat_scale = 1.0, 1374 1237 .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1375 1238 }, ··· 1395 1232 .msr_shift = 0, 1396 1233 .platform_rapl_msr_scale = &rapl_energy_units, 1397 1234 .rci_index = RAPL_RCI_INDEX_ENERGY_PKG, 1398 - .bic = BIC_PkgWatt | BIC_Pkg_J, 1235 + .bic_number = BIC_PkgWatt, 1236 + .compat_scale = 1.0, 1237 + .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1238 + }, 1239 + { 1240 + .feature_mask = RAPL_AMD_F17H, 1241 + .perf_subsys = "power", 1242 + .perf_name = "energy-pkg", 1243 + .msr = MSR_PKG_ENERGY_STAT, 1244 + .msr_mask = 0xFFFFFFFFFFFFFFFF, 1245 + .msr_shift = 0, 1246 + .platform_rapl_msr_scale = &rapl_energy_units, 1247 + .rci_index = RAPL_RCI_INDEX_ENERGY_PKG, 1248 + .bic_number = BIC_Pkg_J, 1399 1249 .compat_scale = 1.0, 1400 1250 .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1401 1251 }, ··· 1421 1245 .msr_shift = 0, 1422 1246 .platform_rapl_msr_scale = &rapl_energy_units, 1423 1247 .rci_index = RAPL_RCI_INDEX_ENERGY_CORES, 1424 - .bic = BIC_CorWatt | BIC_Cor_J, 1248 + .bic_number = BIC_CorWatt, 1249 + .compat_scale = 1.0, 1250 + .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1251 + }, 1252 + { 1253 + .feature_mask = RAPL_CORE_ENERGY_STATUS, 1254 + .perf_subsys = "power", 1255 + .perf_name = "energy-cores", 1256 + .msr = MSR_PP0_ENERGY_STATUS, 1257 + .msr_mask = 0xFFFFFFFFFFFFFFFF, 1258 + .msr_shift = 0, 1259 + .platform_rapl_msr_scale = &rapl_energy_units, 1260 + .rci_index = RAPL_RCI_INDEX_ENERGY_CORES, 1261 + .bic_number = BIC_Cor_J, 1425 1262 .compat_scale = 1.0, 1426 1263 .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1427 1264 }, ··· 1447 1258 .msr_shift = 0, 1448 1259 .platform_rapl_msr_scale = &rapl_dram_energy_units, 1449 1260 .rci_index = RAPL_RCI_INDEX_DRAM, 1450 - .bic = BIC_RAMWatt | BIC_RAM_J, 1261 + .bic_number = BIC_RAMWatt, 1262 + .compat_scale = 1.0, 1263 + .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1264 + }, 1265 + { 1266 + .feature_mask = RAPL_DRAM, 1267 + .perf_subsys = "power", 1268 + .perf_name = "energy-ram", 1269 + .msr = MSR_DRAM_ENERGY_STATUS, 1270 + .msr_mask = 0xFFFFFFFFFFFFFFFF, 1271 + .msr_shift = 0, 1272 + .platform_rapl_msr_scale = 
&rapl_dram_energy_units, 1273 + .rci_index = RAPL_RCI_INDEX_DRAM, 1274 + .bic_number = BIC_RAM_J, 1451 1275 .compat_scale = 1.0, 1452 1276 .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1453 1277 }, ··· 1473 1271 .msr_shift = 0, 1474 1272 .platform_rapl_msr_scale = &rapl_energy_units, 1475 1273 .rci_index = RAPL_RCI_INDEX_GFX, 1476 - .bic = BIC_GFXWatt | BIC_GFX_J, 1274 + .bic_number = BIC_GFXWatt, 1275 + .compat_scale = 1.0, 1276 + .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1277 + }, 1278 + { 1279 + .feature_mask = RAPL_GFX, 1280 + .perf_subsys = "power", 1281 + .perf_name = "energy-gpu", 1282 + .msr = MSR_PP1_ENERGY_STATUS, 1283 + .msr_mask = 0xFFFFFFFFFFFFFFFF, 1284 + .msr_shift = 0, 1285 + .platform_rapl_msr_scale = &rapl_energy_units, 1286 + .rci_index = RAPL_RCI_INDEX_GFX, 1287 + .bic_number = BIC_GFX_J, 1477 1288 .compat_scale = 1.0, 1478 1289 .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1479 1290 }, ··· 1499 1284 .msr_shift = 0, 1500 1285 .platform_rapl_msr_scale = &rapl_time_units, 1501 1286 .rci_index = RAPL_RCI_INDEX_PKG_PERF_STATUS, 1502 - .bic = BIC_PKG__, 1287 + .bic_number = BIC_PKG__, 1503 1288 .compat_scale = 100.0, 1504 1289 .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1505 1290 }, ··· 1512 1297 .msr_shift = 0, 1513 1298 .platform_rapl_msr_scale = &rapl_time_units, 1514 1299 .rci_index = RAPL_RCI_INDEX_DRAM_PERF_STATUS, 1515 - .bic = BIC_RAM__, 1300 + .bic_number = BIC_RAM__, 1516 1301 .compat_scale = 100.0, 1517 1302 .flags = RAPL_COUNTER_FLAG_USE_MSR_SUM, 1518 1303 }, ··· 1525 1310 .msr_shift = 0, 1526 1311 .platform_rapl_msr_scale = &rapl_energy_units, 1527 1312 .rci_index = RAPL_RCI_INDEX_CORE_ENERGY, 1528 - .bic = BIC_CorWatt | BIC_Cor_J, 1313 + .bic_number = BIC_CorWatt, 1314 + .compat_scale = 1.0, 1315 + .flags = 0, 1316 + }, 1317 + { 1318 + .feature_mask = RAPL_AMD_F17H, 1319 + .perf_subsys = NULL, 1320 + .perf_name = NULL, 1321 + .msr = MSR_CORE_ENERGY_STAT, 1322 + .msr_mask = 0xFFFFFFFF, 1323 + .msr_shift = 0, 1324 + .platform_rapl_msr_scale = &rapl_energy_units, 1325 + .rci_index = RAPL_RCI_INDEX_CORE_ENERGY, 1326 + .bic_number = BIC_Cor_J, 1529 1327 .compat_scale = 1.0, 1530 1328 .flags = 0, 1531 1329 }, ··· 1551 1323 .msr_shift = 0, 1552 1324 .platform_rapl_msr_scale = &rapl_psys_energy_units, 1553 1325 .rci_index = RAPL_RCI_INDEX_ENERGY_PLATFORM, 1554 - .bic = BIC_SysWatt | BIC_Sys_J, 1326 + .bic_number = BIC_SysWatt, 1327 + .compat_scale = 1.0, 1328 + .flags = RAPL_COUNTER_FLAG_PLATFORM_COUNTER | RAPL_COUNTER_FLAG_USE_MSR_SUM, 1329 + }, 1330 + { 1331 + .feature_mask = RAPL_PSYS, 1332 + .perf_subsys = "power", 1333 + .perf_name = "energy-psys", 1334 + .msr = MSR_PLATFORM_ENERGY_STATUS, 1335 + .msr_mask = 0x00000000FFFFFFFF, 1336 + .msr_shift = 0, 1337 + .platform_rapl_msr_scale = &rapl_psys_energy_units, 1338 + .rci_index = RAPL_RCI_INDEX_ENERGY_PLATFORM, 1339 + .bic_number = BIC_Sys_J, 1555 1340 .compat_scale = 1.0, 1556 1341 .flags = RAPL_COUNTER_FLAG_PLATFORM_COUNTER | RAPL_COUNTER_FLAG_USE_MSR_SUM, 1557 1342 }, ··· 1613 1372 const char *perf_name; 1614 1373 unsigned long long msr; 1615 1374 unsigned int rci_index; /* Maps data from perf counters to global variables */ 1616 - unsigned long long bic; 1375 + unsigned int bic_number; 1617 1376 unsigned long long flags; 1618 1377 int pkg_cstate_limit; 1619 1378 }; ··· 1625 1384 .perf_name = "c1-residency", 1626 1385 .msr = MSR_CORE_C1_RES, 1627 1386 .rci_index = CCSTATE_RCI_INDEX_C1_RESIDENCY, 1628 - .bic = BIC_CPU_c1, 1387 + .bic_number = BIC_CPU_c1, 1629 1388 .flags = CSTATE_COUNTER_FLAG_COLLECT_PER_THREAD, 1630 1389 
.pkg_cstate_limit = 0, 1631 1390 }, ··· 1635 1394 .perf_name = "c3-residency", 1636 1395 .msr = MSR_CORE_C3_RESIDENCY, 1637 1396 .rci_index = CCSTATE_RCI_INDEX_C3_RESIDENCY, 1638 - .bic = BIC_CPU_c3, 1397 + .bic_number = BIC_CPU_c3, 1639 1398 .flags = CSTATE_COUNTER_FLAG_COLLECT_PER_CORE | CSTATE_COUNTER_FLAG_SOFT_C1_DEPENDENCY, 1640 1399 .pkg_cstate_limit = 0, 1641 1400 }, ··· 1645 1404 .perf_name = "c6-residency", 1646 1405 .msr = MSR_CORE_C6_RESIDENCY, 1647 1406 .rci_index = CCSTATE_RCI_INDEX_C6_RESIDENCY, 1648 - .bic = BIC_CPU_c6, 1407 + .bic_number = BIC_CPU_c6, 1649 1408 .flags = CSTATE_COUNTER_FLAG_COLLECT_PER_CORE | CSTATE_COUNTER_FLAG_SOFT_C1_DEPENDENCY, 1650 1409 .pkg_cstate_limit = 0, 1651 1410 }, ··· 1655 1414 .perf_name = "c7-residency", 1656 1415 .msr = MSR_CORE_C7_RESIDENCY, 1657 1416 .rci_index = CCSTATE_RCI_INDEX_C7_RESIDENCY, 1658 - .bic = BIC_CPU_c7, 1417 + .bic_number = BIC_CPU_c7, 1659 1418 .flags = CSTATE_COUNTER_FLAG_COLLECT_PER_CORE | CSTATE_COUNTER_FLAG_SOFT_C1_DEPENDENCY, 1660 1419 .pkg_cstate_limit = 0, 1661 1420 }, ··· 1665 1424 .perf_name = "c2-residency", 1666 1425 .msr = MSR_PKG_C2_RESIDENCY, 1667 1426 .rci_index = PCSTATE_RCI_INDEX_C2_RESIDENCY, 1668 - .bic = BIC_Pkgpc2, 1427 + .bic_number = BIC_Pkgpc2, 1669 1428 .flags = 0, 1670 1429 .pkg_cstate_limit = PCL__2, 1671 1430 }, ··· 1675 1434 .perf_name = "c3-residency", 1676 1435 .msr = MSR_PKG_C3_RESIDENCY, 1677 1436 .rci_index = PCSTATE_RCI_INDEX_C3_RESIDENCY, 1678 - .bic = BIC_Pkgpc3, 1437 + .bic_number = BIC_Pkgpc3, 1679 1438 .flags = 0, 1680 1439 .pkg_cstate_limit = PCL__3, 1681 1440 }, ··· 1685 1444 .perf_name = "c6-residency", 1686 1445 .msr = MSR_PKG_C6_RESIDENCY, 1687 1446 .rci_index = PCSTATE_RCI_INDEX_C6_RESIDENCY, 1688 - .bic = BIC_Pkgpc6, 1447 + .bic_number = BIC_Pkgpc6, 1689 1448 .flags = 0, 1690 1449 .pkg_cstate_limit = PCL__6, 1691 1450 }, ··· 1695 1454 .perf_name = "c7-residency", 1696 1455 .msr = MSR_PKG_C7_RESIDENCY, 1697 1456 .rci_index = PCSTATE_RCI_INDEX_C7_RESIDENCY, 1698 - .bic = BIC_Pkgpc7, 1457 + .bic_number = BIC_Pkgpc7, 1699 1458 .flags = 0, 1700 1459 .pkg_cstate_limit = PCL__7, 1701 1460 }, ··· 1705 1464 .perf_name = "c8-residency", 1706 1465 .msr = MSR_PKG_C8_RESIDENCY, 1707 1466 .rci_index = PCSTATE_RCI_INDEX_C8_RESIDENCY, 1708 - .bic = BIC_Pkgpc8, 1467 + .bic_number = BIC_Pkgpc8, 1709 1468 .flags = 0, 1710 1469 .pkg_cstate_limit = PCL__8, 1711 1470 }, ··· 1715 1474 .perf_name = "c9-residency", 1716 1475 .msr = MSR_PKG_C9_RESIDENCY, 1717 1476 .rci_index = PCSTATE_RCI_INDEX_C9_RESIDENCY, 1718 - .bic = BIC_Pkgpc9, 1477 + .bic_number = BIC_Pkgpc9, 1719 1478 .flags = 0, 1720 1479 .pkg_cstate_limit = PCL__9, 1721 1480 }, ··· 1725 1484 .perf_name = "c10-residency", 1726 1485 .msr = MSR_PKG_C10_RESIDENCY, 1727 1486 .rci_index = PCSTATE_RCI_INDEX_C10_RESIDENCY, 1728 - .bic = BIC_Pkgpc10, 1487 + .bic_number = BIC_Pkgpc10, 1729 1488 .flags = 0, 1730 1489 .pkg_cstate_limit = PCL_10, 1731 1490 }, ··· 2081 1840 ((node_no) * topo.cores_per_node) + \ 2082 1841 (core_no)) 2083 1842 2084 - #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no) 2085 - 2086 1843 /* 2087 1844 * The accumulated sum of MSR is defined as a monotonic 2088 1845 * increasing MSR, it will be accumulated periodically, ··· 2275 2036 struct cpu_topology { 2276 2037 int physical_package_id; 2277 2038 int die_id; 2039 + int l3_id; 2278 2040 int logical_cpu_id; 2279 2041 int physical_node_id; 2280 2042 int logical_node_id; /* 0-based count within the package */ ··· 2297 2057 int max_core_id; 2298 2058 int max_package_id; 2299 2059 
int max_die_id; 2060 + int max_l3_id; 2300 2061 int max_node_num; 2301 2062 int nodes_per_pkg; 2302 2063 int cores_per_node; ··· 2331 2090 * skip non-present cpus 2332 2091 */ 2333 2092 2093 + #define PER_THREAD_PARAMS struct thread_data *t, struct core_data *c, struct pkg_data *p 2094 + 2334 2095 int for_all_cpus(int (func) (struct thread_data *, struct core_data *, struct pkg_data *), 2335 2096 struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base) 2336 2097 { ··· 2346 2103 for (thread_no = 0; thread_no < topo.threads_per_core; ++thread_no) { 2347 2104 struct thread_data *t; 2348 2105 struct core_data *c; 2349 - struct pkg_data *p; 2106 + 2350 2107 t = GET_THREAD(thread_base, thread_no, core_no, node_no, pkg_no); 2351 2108 2352 2109 if (cpu_is_not_allowed(t->cpu_id)) 2353 2110 continue; 2354 2111 2355 2112 c = GET_CORE(core_base, core_no, node_no, pkg_no); 2356 - p = GET_PKG(pkg_base, pkg_no); 2357 2113 2358 - retval |= func(t, c, p); 2114 + retval |= func(t, c, &pkg_base[pkg_no]); 2359 2115 } 2360 2116 } 2361 2117 } ··· 2362 2120 return retval; 2363 2121 } 2364 2122 2365 - int is_cpu_first_thread_in_core(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2123 + int is_cpu_first_thread_in_core(PER_THREAD_PARAMS) 2366 2124 { 2367 2125 UNUSED(p); 2368 2126 2369 2127 return ((int)t->cpu_id == c->base_cpu || c->base_cpu < 0); 2370 2128 } 2371 2129 2372 - int is_cpu_first_core_in_package(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2130 + int is_cpu_first_core_in_package(PER_THREAD_PARAMS) 2373 2131 { 2374 2132 UNUSED(c); 2375 2133 2376 2134 return ((int)t->cpu_id == p->base_cpu || p->base_cpu < 0); 2377 2135 } 2378 2136 2379 - int is_cpu_first_thread_in_package(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2137 + int is_cpu_first_thread_in_package(PER_THREAD_PARAMS) 2380 2138 { 2381 2139 return is_cpu_first_thread_in_core(t, c, p) && is_cpu_first_core_in_package(t, c, p); 2382 2140 } ··· 2421 2179 2422 2180 static void bic_disable_msr_access(void) 2423 2181 { 2424 - const unsigned long bic_msrs = BIC_Mod_c6 | BIC_CoreTmp | 2425 - BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX | BIC_PkgTmp; 2426 - 2427 - bic_enabled &= ~bic_msrs; 2182 + CLR_BIC(BIC_Mod_c6, &bic_enabled); 2183 + CLR_BIC(BIC_CoreTmp, &bic_enabled); 2184 + CLR_BIC(BIC_Totl_c0, &bic_enabled); 2185 + CLR_BIC(BIC_Any_c0, &bic_enabled); 2186 + CLR_BIC(BIC_GFX_c0, &bic_enabled); 2187 + CLR_BIC(BIC_CPUGFX, &bic_enabled); 2188 + CLR_BIC(BIC_PkgTmp, &bic_enabled); 2428 2189 2429 2190 free_sys_msr_counters(); 2430 2191 } ··· 2555 2310 char *deferred_skip_names[MAX_DEFERRED]; 2556 2311 int deferred_add_index; 2557 2312 int deferred_skip_index; 2313 + unsigned int deferred_add_consumed; 2314 + unsigned int deferred_skip_consumed; 2558 2315 2559 2316 /* 2560 2317 * HIDE_LIST - hide this list of counters, show the rest [default] ··· 2627 2380 * for all the strings in comma separate name_list, 2628 2381 * set the approprate bit in return value. 
2629 2382 */ 2630 - unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode) 2383 + void bic_lookup(cpu_set_t *ret_set, char *name_list, enum show_hide_mode mode) 2631 2384 { 2632 2385 unsigned int i; 2633 - unsigned long long retval = 0; 2634 2386 2635 2387 while (name_list) { 2636 2388 char *comma; ··· 2641 2395 2642 2396 for (i = 0; i < MAX_BIC; ++i) { 2643 2397 if (!strcmp(name_list, bic[i].name)) { 2644 - retval |= (1ULL << i); 2398 + SET_BIC(i, ret_set); 2645 2399 break; 2646 2400 } 2647 2401 if (!strcmp(name_list, "all")) { 2648 - retval |= ~0; 2402 + bic_set_all(ret_set); 2649 2403 break; 2650 2404 } else if (!strcmp(name_list, "topology")) { 2651 - retval |= BIC_GROUP_TOPOLOGY; 2405 + CPU_OR(ret_set, ret_set, &bic_group_topology); 2652 2406 break; 2653 2407 } else if (!strcmp(name_list, "power")) { 2654 - retval |= BIC_GROUP_THERMAL_PWR; 2408 + CPU_OR(ret_set, ret_set, &bic_group_thermal_pwr); 2655 2409 break; 2656 2410 } else if (!strcmp(name_list, "idle")) { 2657 - retval |= BIC_GROUP_IDLE; 2411 + CPU_OR(ret_set, ret_set, &bic_group_idle); 2658 2412 break; 2659 2413 } else if (!strcmp(name_list, "swidle")) { 2660 - retval |= BIC_GROUP_SW_IDLE; 2414 + CPU_OR(ret_set, ret_set, &bic_group_sw_idle); 2661 2415 break; 2662 2416 } else if (!strcmp(name_list, "sysfs")) { /* legacy compatibility */ 2663 - retval |= BIC_GROUP_SW_IDLE; 2417 + CPU_OR(ret_set, ret_set, &bic_group_sw_idle); 2664 2418 break; 2665 2419 } else if (!strcmp(name_list, "hwidle")) { 2666 - retval |= BIC_GROUP_HW_IDLE; 2420 + CPU_OR(ret_set, ret_set, &bic_group_hw_idle); 2667 2421 break; 2668 2422 } else if (!strcmp(name_list, "frequency")) { 2669 - retval |= BIC_GROUP_FREQUENCY; 2423 + CPU_OR(ret_set, ret_set, &bic_group_frequency); 2670 2424 break; 2671 2425 } else if (!strcmp(name_list, "other")) { 2672 - retval |= BIC_OTHER; 2426 + CPU_OR(ret_set, ret_set, &bic_group_other); 2673 2427 break; 2674 2428 } 2675 - 2676 2429 } 2677 2430 if (i == MAX_BIC) { 2678 - fprintf(stderr, "deferred %s\n", name_list); 2679 2431 if (mode == SHOW_LIST) { 2680 2432 deferred_add_names[deferred_add_index++] = name_list; 2681 2433 if (deferred_add_index >= MAX_DEFERRED) { ··· 2700 2456 name_list++; 2701 2457 2702 2458 } 2703 - return retval; 2704 2459 } 2705 2460 2706 2461 void print_header(char *delim) ··· 2717 2474 outp += sprintf(outp, "%sPackage", (printed++ ? delim : "")); 2718 2475 if (DO_BIC(BIC_Die)) 2719 2476 outp += sprintf(outp, "%sDie", (printed++ ? delim : "")); 2477 + if (DO_BIC(BIC_L3)) 2478 + outp += sprintf(outp, "%sL3", (printed++ ? delim : "")); 2720 2479 if (DO_BIC(BIC_Node)) 2721 2480 outp += sprintf(outp, "%sNode", (printed++ ? delim : "")); 2722 2481 if (DO_BIC(BIC_Core)) ··· 2759 2514 2760 2515 for (mp = sys.tp; mp; mp = mp->next) { 2761 2516 2762 - if (mp->format == FORMAT_RAW) { 2517 + if (mp->format == FORMAT_RAW || mp->format == FORMAT_AVERAGE) { 2763 2518 if (mp->width == 64) 2764 2519 outp += sprintf(outp, "%s%18.18s", (printed++ ? delim : ""), mp->name); 2765 2520 else ··· 2834 2589 } 2835 2590 2836 2591 for (mp = sys.cp; mp; mp = mp->next) { 2837 - if (mp->format == FORMAT_RAW) { 2592 + if (mp->format == FORMAT_RAW || mp->format == FORMAT_AVERAGE) { 2838 2593 if (mp->width == 64) 2839 2594 outp += sprintf(outp, "%s%18.18s", delim, mp->name); 2840 2595 else ··· 2964 2719 outp += sprintf(outp, "%sUncMHz", (printed++ ? 
delim : "")); 2965 2720 2966 2721 for (mp = sys.pp; mp; mp = mp->next) { 2967 - if (mp->format == FORMAT_RAW) { 2722 + if (mp->format == FORMAT_RAW || mp->format == FORMAT_AVERAGE) { 2968 2723 if (mp->width == 64) 2969 2724 outp += sprintf(outp, "%s%18.18s", delim, mp->name); 2970 2725 else if (mp->width == 32) ··· 3022 2777 outp += sprintf(outp, "\n"); 3023 2778 } 3024 2779 3025 - int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2780 + int dump_counters(PER_THREAD_PARAMS) 3026 2781 { 3027 2782 int i; 3028 2783 struct msr_counter *mp; ··· 3137 2892 /* 3138 2893 * column formatting convention & formats 3139 2894 */ 3140 - int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 2895 + int format_counters(PER_THREAD_PARAMS) 3141 2896 { 3142 2897 static int count; 3143 2898 ··· 3190 2945 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 3191 2946 if (DO_BIC(BIC_Die)) 3192 2947 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 2948 + if (DO_BIC(BIC_L3)) 2949 + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 3193 2950 if (DO_BIC(BIC_Node)) 3194 2951 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 3195 2952 if (DO_BIC(BIC_Core)) ··· 3212 2965 if (DO_BIC(BIC_Die)) { 3213 2966 if (c) 3214 2967 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id); 2968 + else 2969 + outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 2970 + } 2971 + if (DO_BIC(BIC_L3)) { 2972 + if (c) 2973 + outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].l3_id); 3215 2974 else 3216 2975 outp += sprintf(outp, "%s-", (printed++ ? delim : "")); 3217 2976 } ··· 3285 3032 3286 3033 /* Added counters */ 3287 3034 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { 3288 - if (mp->format == FORMAT_RAW) { 3035 + if (mp->format == FORMAT_RAW || mp->format == FORMAT_AVERAGE) { 3289 3036 if (mp->width == 32) 3290 3037 outp += 3291 3038 sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int)t->counter[i]); ··· 3382 3129 outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->core_throt_cnt); 3383 3130 3384 3131 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 3385 - if (mp->format == FORMAT_RAW) { 3132 + if (mp->format == FORMAT_RAW || mp->format == FORMAT_AVERAGE) { 3386 3133 if (mp->width == 32) 3387 3134 outp += 3388 3135 sprintf(outp, "%s0x%08x", (printed++ ? delim : ""), (unsigned int)c->counter[i]); ··· 3581 3328 outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->uncore_mhz); 3582 3329 3583 3330 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { 3584 - if (mp->format == FORMAT_RAW) { 3331 + if (mp->format == FORMAT_RAW || mp->format == FORMAT_AVERAGE) { 3585 3332 if (mp->width == 32) 3586 3333 outp += 3587 3334 sprintf(outp, "%s0x%08x", (printed++ ? 
delim : ""), (unsigned int)p->counter[i]); ··· 3679 3426 outp = output_buffer; 3680 3427 } 3681 3428 3682 - void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3429 + void format_all_counters(PER_THREAD_PARAMS) 3683 3430 { 3684 3431 static int count; 3685 3432 ··· 3758 3505 new->rapl_dram_perf_status.raw_value - old->rapl_dram_perf_status.raw_value; 3759 3506 3760 3507 for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { 3761 - if (mp->format == FORMAT_RAW) 3508 + if (mp->format == FORMAT_RAW || mp->format == FORMAT_AVERAGE) 3762 3509 old->counter[i] = new->counter[i]; 3763 3510 else if (mp->format == FORMAT_AVERAGE) 3764 3511 old->counter[i] = new->counter[i]; ··· 3802 3549 DELTA_WRAP32(new->core_energy.raw_value, old->core_energy.raw_value); 3803 3550 3804 3551 for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) { 3805 - if (mp->format == FORMAT_RAW) 3552 + if (mp->format == FORMAT_RAW || mp->format == FORMAT_AVERAGE) 3806 3553 old->counter[i] = new->counter[i]; 3807 3554 else 3808 3555 old->counter[i] = new->counter[i] - old->counter[i]; ··· 3916 3663 old->smi_count = new->smi_count - old->smi_count; 3917 3664 3918 3665 for (i = 0, mp = sys.tp; mp; i++, mp = mp->next) { 3919 - if (mp->format == FORMAT_RAW) 3666 + if (mp->format == FORMAT_RAW || mp->format == FORMAT_AVERAGE) 3920 3667 old->counter[i] = new->counter[i]; 3921 3668 else 3922 3669 old->counter[i] = new->counter[i] - old->counter[i]; ··· 3970 3717 c->unit = RAPL_UNIT_INVALID; 3971 3718 } 3972 3719 3973 - void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3720 + void clear_counters(PER_THREAD_PARAMS) 3974 3721 { 3975 3722 int i; 3976 3723 struct msr_counter *mp; ··· 4067 3814 dst->raw_value += src->raw_value; 4068 3815 } 4069 3816 4070 - int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3817 + int sum_counters(PER_THREAD_PARAMS) 4071 3818 { 4072 3819 int i; 4073 3820 struct msr_counter *mp; ··· 4215 3962 * sum the counters for all cpus in the system 4216 3963 * compute the weighted average 4217 3964 */ 4218 - void compute_average(struct thread_data *t, struct core_data *c, struct pkg_data *p) 3965 + void compute_average(PER_THREAD_PARAMS) 4219 3966 { 4220 3967 int i; 4221 3968 struct msr_counter *mp; ··· 4798 4545 return NULL; 4799 4546 } 4800 4547 4801 - int get_cstate_counters(unsigned int cpu, struct thread_data *t, struct core_data *c, struct pkg_data *p) 4548 + int get_cstate_counters(unsigned int cpu, PER_THREAD_PARAMS) 4802 4549 { 4803 4550 /* 4804 4551 * Overcommit memory a little bit here, ··· 5098 4845 * migrate to cpu 5099 4846 * acquire and record local counters for that cpu 5100 4847 */ 5101 - int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) 4848 + int get_counters(PER_THREAD_PARAMS) 5102 4849 { 5103 4850 int cpu = t->cpu_id; 5104 4851 unsigned long long msr; ··· 5926 5673 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu); 5927 5674 } 5928 5675 5676 + int get_l3_id(int cpu) 5677 + { 5678 + return parse_int_file("/sys/devices/system/cpu/cpu%d/cache/index3/id", cpu); 5679 + } 5680 + 5929 5681 int get_core_id(int cpu) 5930 5682 { 5931 5683 return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu); ··· 6119 5861 for (thread_no = 0; thread_no < topo.threads_per_core; ++thread_no) { 6120 5862 struct thread_data *t, *t2; 6121 5863 struct core_data *c, *c2; 6122 - struct pkg_data *p, *p2; 6123 5864 6124 5865 t = GET_THREAD(thread_base, thread_no, 
core_no, node_no, pkg_no); 6125 5866 ··· 6130 5873 c = GET_CORE(core_base, core_no, node_no, pkg_no); 6131 5874 c2 = GET_CORE(core_base2, core_no, node_no, pkg_no); 6132 5875 6133 - p = GET_PKG(pkg_base, pkg_no); 6134 - p2 = GET_PKG(pkg_base2, pkg_no); 6135 - 6136 - retval |= func(t, c, p, t2, c2, p2); 5876 + retval |= func(t, c, &pkg_base[pkg_no], t2, c2, &pkg_base2[pkg_no]); 6137 5877 } 6138 5878 } 6139 5879 } ··· 6588 6334 timer_t timerid; 6589 6335 6590 6336 /* Timer callback, update the sum of MSRs periodically. */ 6591 - static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg_data *p) 6337 + static int update_msr_sum(PER_THREAD_PARAMS) 6592 6338 { 6593 6339 int i, ret; 6594 6340 int cpu = t->cpu_id; ··· 6826 6572 int ret = 0; 6827 6573 6828 6574 caps = cap_get_proc(); 6829 - if (caps == NULL) 6575 + if (caps == NULL) { 6576 + /* 6577 + * CONFIG_MULTIUSER=n kernels have no cap_get_proc() 6578 + * Allow them to continue and attempt to access MSRs 6579 + */ 6580 + if (errno == ENOSYS) 6581 + return 0; 6582 + 6830 6583 return 1; 6584 + } 6831 6585 6832 6586 if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &cap_flag_value)) { 6833 6587 ret = 1; ··· 7002 6740 sprintf(path_base, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d", i, 7003 6741 j); 7004 6742 7005 - if (access(path_base, R_OK)) 6743 + sprintf(path, "%s/current_freq_khz", path_base); 6744 + if (access(path, R_OK)) 7006 6745 continue; 7007 6746 7008 6747 BIC_PRESENT(BIC_UNCORE_MHZ); ··· 7335 7072 * print_epb() 7336 7073 * Decode the ENERGY_PERF_BIAS MSR 7337 7074 */ 7338 - int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) 7075 + int print_epb(PER_THREAD_PARAMS) 7339 7076 { 7340 7077 char *epb_string; 7341 7078 int cpu, epb; ··· 7384 7121 * print_hwp() 7385 7122 * Decode the MSR_HWP_CAPABILITIES 7386 7123 */ 7387 - int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) 7124 + int print_hwp(PER_THREAD_PARAMS) 7388 7125 { 7389 7126 unsigned long long msr; 7390 7127 int cpu; ··· 7473 7210 /* 7474 7211 * print_perf_limit() 7475 7212 */ 7476 - int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p) 7213 + int print_perf_limit(PER_THREAD_PARAMS) 7477 7214 { 7478 7215 unsigned long long msr; 7479 7216 int cpu; ··· 7598 7335 unsigned long long msr; 7599 7336 unsigned int time_unit; 7600 7337 double tdp; 7601 - const unsigned long long bic_watt_bits = BIC_SysWatt | BIC_PkgWatt | BIC_CorWatt | BIC_RAMWatt | BIC_GFXWatt; 7602 - const unsigned long long bic_joules_bits = BIC_Sys_J | BIC_Pkg_J | BIC_Cor_J | BIC_RAM_J | BIC_GFX_J; 7603 7338 7604 - if (rapl_joules) 7605 - bic_enabled &= ~bic_watt_bits; 7606 - else 7607 - bic_enabled &= ~bic_joules_bits; 7339 + if (rapl_joules) { 7340 + CLR_BIC(BIC_SysWatt, &bic_enabled); 7341 + CLR_BIC(BIC_PkgWatt, &bic_enabled); 7342 + CLR_BIC(BIC_CorWatt, &bic_enabled); 7343 + CLR_BIC(BIC_RAMWatt, &bic_enabled); 7344 + CLR_BIC(BIC_GFXWatt, &bic_enabled); 7345 + } else { 7346 + CLR_BIC(BIC_Sys_J, &bic_enabled); 7347 + CLR_BIC(BIC_Pkg_J, &bic_enabled); 7348 + CLR_BIC(BIC_Cor_J, &bic_enabled); 7349 + CLR_BIC(BIC_RAM_J, &bic_enabled); 7350 + CLR_BIC(BIC_GFX_J, &bic_enabled); 7351 + } 7608 7352 7609 7353 if (!platform->rapl_msrs || no_msr) 7610 7354 return; 7611 7355 7612 7356 if (!(platform->rapl_msrs & RAPL_PKG_PERF_STATUS)) 7613 - bic_enabled &= ~BIC_PKG__; 7357 + CLR_BIC(BIC_PKG__, &bic_enabled); 7614 7358 if (!(platform->rapl_msrs & RAPL_DRAM_PERF_STATUS)) 7615 - bic_enabled &= 
~BIC_RAM__; 7359 + CLR_BIC(BIC_RAM__, &bic_enabled); 7616 7360 7617 7361 /* units on package 0, verify later other packages match */ 7618 7362 if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr)) ··· 7658 7388 { 7659 7389 unsigned long long msr; 7660 7390 double tdp; 7661 - const unsigned long long bic_watt_bits = BIC_PkgWatt | BIC_CorWatt; 7662 - const unsigned long long bic_joules_bits = BIC_Pkg_J | BIC_Cor_J; 7663 7391 7664 - if (rapl_joules) 7665 - bic_enabled &= ~bic_watt_bits; 7666 - else 7667 - bic_enabled &= ~bic_joules_bits; 7392 + if (rapl_joules) { 7393 + CLR_BIC(BIC_SysWatt, &bic_enabled); 7394 + CLR_BIC(BIC_CorWatt, &bic_enabled); 7395 + } else { 7396 + CLR_BIC(BIC_Pkg_J, &bic_enabled); 7397 + CLR_BIC(BIC_Cor_J, &bic_enabled); 7398 + } 7668 7399 7669 7400 if (!platform->rapl_msrs || no_msr) 7670 7401 return; ··· 7848 7577 return 0; 7849 7578 } 7850 7579 7851 - int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) 7580 + int print_rapl(PER_THREAD_PARAMS) 7852 7581 { 7853 7582 unsigned long long msr; 7854 7583 const char *msr_name; ··· 8002 7731 * below this value, including the Digital Thermal Sensor (DTS), 8003 7732 * Package Thermal Management Sensor (PTM), and thermal event thresholds. 8004 7733 */ 8005 - int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p) 7734 + int set_temperature_target(PER_THREAD_PARAMS) 8006 7735 { 8007 7736 unsigned long long msr; 8008 7737 unsigned int tcc_default, tcc_offset; ··· 8070 7799 return 0; 8071 7800 } 8072 7801 8073 - int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) 7802 + int print_thermal(PER_THREAD_PARAMS) 8074 7803 { 8075 7804 unsigned long long msr; 8076 7805 unsigned int dts, dts2; ··· 8150 7879 for_all_cpus(print_thermal, ODD_COUNTERS); 8151 7880 } 8152 7881 8153 - int get_cpu_type(struct thread_data *t, struct core_data *c, struct pkg_data *p) 7882 + int get_cpu_type(PER_THREAD_PARAMS) 8154 7883 { 8155 7884 unsigned int eax, ebx, ecx, edx; 8156 7885 ··· 8412 8141 enum rapl_unit unit; 8413 8142 unsigned int next_domain; 8414 8143 8415 - if (!BIC_IS_ENABLED(cai->bic)) 8144 + if (!BIC_IS_ENABLED(cai->bic_number)) 8416 8145 continue; 8417 8146 8418 8147 memset(domain_visited, 0, num_domains * sizeof(*domain_visited)); ··· 8476 8205 8477 8206 /* If any CPU has access to the counter, make it present */ 8478 8207 if (has_counter) 8479 - BIC_PRESENT(cai->bic); 8208 + BIC_PRESENT(cai->bic_number); 8480 8209 } 8481 8210 8482 8211 free(domain_visited); ··· 8697 8426 if (!per_core && pkg_visited[pkg_id]) 8698 8427 continue; 8699 8428 8700 - const bool counter_needed = BIC_IS_ENABLED(cai->bic) || 8429 + const bool counter_needed = BIC_IS_ENABLED(cai->bic_number) || 8701 8430 (soft_c1 && (cai->flags & CSTATE_COUNTER_FLAG_SOFT_C1_DEPENDENCY)); 8702 8431 const bool counter_supported = (platform->supported_cstates & cai->feature_mask); 8703 8432 ··· 8724 8453 8725 8454 /* If any CPU has access to the counter, make it present */ 8726 8455 if (has_counter) 8727 - BIC_PRESENT(cai->bic); 8456 + BIC_PRESENT(cai->bic_number); 8728 8457 } 8729 8458 8730 8459 free(cores_visited); ··· 9220 8949 if (cpus[i].die_id > topo.max_die_id) 9221 8950 topo.max_die_id = cpus[i].die_id; 9222 8951 8952 + /* get l3 information */ 8953 + cpus[i].l3_id = get_l3_id(i); 8954 + if (cpus[i].l3_id > topo.max_l3_id) 8955 + topo.max_l3_id = cpus[i].l3_id; 8956 + 9223 8957 /* get numa node information */ 9224 8958 cpus[i].physical_node_id = get_physical_node_id(&cpus[i]); 9225 8959 if 
(cpus[i].physical_node_id > topo.max_node_num) ··· 9257 8981 if (!summary_only && topo.num_die > 1) 9258 8982 BIC_PRESENT(BIC_Die); 9259 8983 8984 + if (!summary_only && topo.max_l3_id > 0) 8985 + BIC_PRESENT(BIC_L3); 8986 + 9260 8987 topo.num_packages = max_package_id + 1; 9261 8988 if (debug > 1) 9262 8989 fprintf(outf, "max_package_id %d, sizing for %d packages\n", max_package_id, topo.num_packages); ··· 9283 9004 if (cpu_is_not_present(i)) 9284 9005 continue; 9285 9006 fprintf(outf, 9286 - "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n", 9287 - i, cpus[i].physical_package_id, cpus[i].die_id, 9007 + "cpu %d pkg %d die %d l3 %d node %d lnode %d core %d thread %d\n", 9008 + i, cpus[i].physical_package_id, cpus[i].die_id, cpus[i].l3_id, 9288 9009 cpus[i].physical_node_id, cpus[i].logical_node_id, cpus[i].physical_core_id, cpus[i].thread_id); 9289 9010 } 9290 9011 ··· 9339 9060 int thread_id = cpus[cpu_id].thread_id; 9340 9061 struct thread_data *t; 9341 9062 struct core_data *c; 9342 - struct pkg_data *p; 9343 9063 9344 9064 /* Workaround for systems where physical_node_id==-1 9345 9065 * and logical_node_id==(-1 - topo.num_cpus) ··· 9348 9070 9349 9071 t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id); 9350 9072 c = GET_CORE(core_base, core_id, node_id, pkg_id); 9351 - p = GET_PKG(pkg_base, pkg_id); 9352 9073 9353 9074 t->cpu_id = cpu_id; 9354 9075 if (!cpu_is_not_allowed(cpu_id)) { 9355 9076 if (c->base_cpu < 0) 9356 9077 c->base_cpu = t->cpu_id; 9357 - if (p->base_cpu < 0) 9358 - p->base_cpu = t->cpu_id; 9078 + if (pkg_base[pkg_id].base_cpu < 0) 9079 + pkg_base[pkg_id].base_cpu = t->cpu_id; 9359 9080 } 9360 9081 9361 9082 c->core_id = core_id; 9362 - p->package_id = pkg_id; 9083 + pkg_base[pkg_id].package_id = pkg_id; 9363 9084 } 9364 9085 9365 9086 int initialize_counters(int cpu_id) ··· 9398 9121 err(-1, "calloc %d NMI", topo.max_cpu_num + 1); 9399 9122 } 9400 9123 9401 - int update_topo(struct thread_data *t, struct core_data *c, struct pkg_data *p) 9124 + int update_topo(PER_THREAD_PARAMS) 9402 9125 { 9403 9126 topo.allowed_cpus++; 9404 9127 if ((int)t->cpu_id == c->base_cpu) ··· 9466 9189 void check_perf_access(void) 9467 9190 { 9468 9191 if (no_perf || !BIC_IS_ENABLED(BIC_IPC) || !has_instr_count_access()) 9469 - bic_enabled &= ~BIC_IPC; 9192 + CLR_BIC(BIC_IPC, &bic_enabled); 9470 9193 } 9471 9194 9472 9195 bool perf_has_hybrid_devices(void) ··· 10035 9758 * disable more BICs, since it can't be reported accurately. 
10036 9759 */ 10037 9760 if (platform->enable_tsc_tweak && !has_base_hz) { 10038 - bic_enabled &= ~BIC_Busy; 10039 - bic_enabled &= ~BIC_Bzy_MHz; 9761 + CLR_BIC(BIC_Busy, &bic_enabled); 9762 + CLR_BIC(BIC_Bzy_MHz, &bic_enabled); 10040 9763 } 10041 9764 } 10042 9765 ··· 10094 9817 timersub(&tv_odd, &tv_even, &tv_delta); 10095 9818 if (for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS)) 10096 9819 fprintf(outf, "%s: Counter reset detected\n", progname); 9820 + delta_platform(&platform_counters_odd, &platform_counters_even); 10097 9821 10098 9822 compute_average(EVEN_COUNTERS); 10099 9823 format_all_counters(EVEN_COUNTERS); ··· 10126 9848 10127 9849 void print_version() 10128 9850 { 10129 - fprintf(outf, "turbostat version 2025.06.08 - Len Brown <lenb@kernel.org>\n"); 9851 + fprintf(outf, "turbostat version 2025.09.09 - Len Brown <lenb@kernel.org>\n"); 10130 9852 } 10131 9853 10132 9854 #define COMMAND_LINE_SIZE 2048 ··· 10423 10145 format = FORMAT_RAW; 10424 10146 goto next; 10425 10147 } 10148 + if (!strncmp(add_command, "average", strlen("average"))) { 10149 + format = FORMAT_AVERAGE; 10150 + goto next; 10151 + } 10426 10152 if (!strncmp(add_command, "delta", strlen("delta"))) { 10427 10153 format = FORMAT_DELTA; 10428 10154 goto next; ··· 10699 10417 has_format = true; 10700 10418 } 10701 10419 10420 + if (strcmp("average", format_name) == 0) { 10421 + format = FORMAT_AVERAGE; 10422 + has_format = true; 10423 + } 10424 + 10702 10425 if (strcmp("delta", format_name) == 0) { 10703 10426 format = FORMAT_DELTA; 10704 10427 has_format = true; 10705 10428 } 10706 10429 10707 10430 if (!has_format) { 10708 - fprintf(stderr, "%s: Invalid format %s. Expected raw or delta\n", __func__, format_name); 10431 + fprintf(stderr, "%s: Invalid format %s. Expected raw, average or delta\n", 10432 + __func__, format_name); 10709 10433 exit(1); 10710 10434 } 10711 10435 } ··· 10801 10513 int i; 10802 10514 10803 10515 for (i = 0; i < deferred_add_index; ++i) 10804 - if (!strcmp(name, deferred_add_names[i])) 10516 + if (!strcmp(name, deferred_add_names[i])) { 10517 + deferred_add_consumed |= (1 << i); 10805 10518 return 1; 10519 + } 10806 10520 return 0; 10807 10521 } 10808 10522 ··· 10813 10523 int i; 10814 10524 10815 10525 for (i = 0; i < deferred_skip_index; ++i) 10816 - if (!strcmp(name, deferred_skip_names[i])) 10526 + if (!strcmp(name, deferred_skip_names[i])) { 10527 + deferred_skip_consumed |= (1 << i); 10817 10528 return 1; 10529 + } 10818 10530 return 0; 10531 + } 10532 + 10533 + void verify_deferred_consumed(void) 10534 + { 10535 + int i; 10536 + int fail = 0; 10537 + 10538 + for (i = 0; i < deferred_add_index; ++i) { 10539 + if (!(deferred_add_consumed & (1 << i))) { 10540 + warnx("Counter '%s' can not be added.", deferred_add_names[i]); 10541 + fail++; 10542 + } 10543 + } 10544 + for (i = 0; i < deferred_skip_index; ++i) { 10545 + if (!(deferred_skip_consumed & (1 << i))) { 10546 + warnx("Counter '%s' can not be skipped.", deferred_skip_names[i]); 10547 + fail++; 10548 + } 10549 + } 10550 + if (fail) 10551 + exit(-EINVAL); 10819 10552 } 10820 10553 10821 10554 void probe_cpuidle_residency(void) ··· 10849 10536 int state; 10850 10537 int min_state = 1024, max_state = 0; 10851 10538 char *sp; 10852 - 10853 - if (!DO_BIC(BIC_pct_idle)) 10854 - return; 10855 10539 10856 10540 for (state = 10; state >= 0; --state) { 10857 10541 ··· 11062 10752 no_perf = 1; 11063 10753 break; 11064 10754 case 'e': 11065 - /* --enable specified counter */ 11066 - bic_enabled = bic_enabled | bic_lookup(optarg, 
SHOW_LIST); 10755 + /* --enable specified counter, without clearing existing list */ 10756 + bic_lookup(&bic_enabled, optarg, SHOW_LIST); 11067 10757 break; 11068 10758 case 'f': 11069 10759 force_load++; 11070 10760 break; 11071 10761 case 'd': 11072 10762 debug++; 11073 - ENABLE_BIC(BIC_DISABLED_BY_DEFAULT); 10763 + bic_set_all(&bic_enabled); 11074 10764 break; 11075 10765 case 'H': 11076 10766 /* 11077 10767 * --hide: do not show those specified 11078 10768 * multiple invocations simply clear more bits in enabled mask 11079 10769 */ 11080 - bic_enabled &= ~bic_lookup(optarg, HIDE_LIST); 10770 + { 10771 + cpu_set_t bic_group_hide; 10772 + 10773 + BIC_INIT(&bic_group_hide); 10774 + 10775 + bic_lookup(&bic_group_hide, optarg, HIDE_LIST); 10776 + bic_clear_bits(&bic_enabled, &bic_group_hide); 10777 + } 11081 10778 break; 11082 10779 case 'h': 11083 10780 default: ··· 11108 10791 rapl_joules++; 11109 10792 break; 11110 10793 case 'l': 11111 - ENABLE_BIC(BIC_DISABLED_BY_DEFAULT); 10794 + bic_set_all(&bic_enabled); 11112 10795 list_header_only++; 11113 10796 quiet++; 11114 10797 break; ··· 11145 10828 * subsequent invocations can add to it. 11146 10829 */ 11147 10830 if (shown == 0) 11148 - bic_enabled = bic_lookup(optarg, SHOW_LIST); 11149 - else 11150 - bic_enabled |= bic_lookup(optarg, SHOW_LIST); 10831 + BIC_INIT(&bic_enabled); 10832 + bic_lookup(&bic_enabled, optarg, SHOW_LIST); 11151 10833 shown = 1; 11152 10834 break; 11153 10835 case 'S': ··· 11183 10867 { 11184 10868 int fd, ret; 11185 10869 10870 + bic_groups_init(); 10871 + 11186 10872 fd = open("/sys/fs/cgroup/cgroup.procs", O_WRONLY); 11187 10873 if (fd < 0) 11188 10874 goto skip_cgroup_setting; ··· 11206 10888 11207 10889 probe_cpuidle_residency(); 11208 10890 probe_cpuidle_counts(); 10891 + 10892 + verify_deferred_consumed(); 11209 10893 11210 10894 if (!getuid()) 11211 10895 set_rlimit();
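The largest change in the turbostat hunks above is mechanical but worth spelling out: the built-in-counter (BIC) bookkeeping moves from a single 64-bit bic_enabled mask, manipulated with &= ~... and |=, to a cpu_set_t-backed bitset, so the counter list can grow past 64 entries. SET_BIC()/CLR_BIC(), bic_set_all() and the bic_group_* sets in the diff then reduce to the standard glibc CPU-set macros. A minimal sketch of that pattern, assuming glibc's CPU_SET()/CPU_CLR()/CPU_OR(); the wrapper names mirror the diff, but these definitions and the counter enum are illustrative stand-ins, not turbostat's exact code:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Hypothetical counter indices, standing in for turbostat's real BIC list. */
enum { BIC_Busy, BIC_Bzy_MHz, BIC_IPC, MAX_BIC };

#define BIC_INIT(s)		CPU_ZERO(s)		/* start with an empty set */
#define SET_BIC(n, s)		CPU_SET((n), (s))	/* enable one counter */
#define CLR_BIC(n, s)		CPU_CLR((n), (s))	/* disable one counter */
#define BIC_IS_SET(n, s)	CPU_ISSET((n), (s))

int main(void)
{
	cpu_set_t bic_enabled, bic_group_frequency;

	BIC_INIT(&bic_enabled);
	BIC_INIT(&bic_group_frequency);
	SET_BIC(BIC_Bzy_MHz, &bic_group_frequency);

	/* "--show frequency" style: OR a whole group into the enabled set. */
	CPU_OR(&bic_enabled, &bic_enabled, &bic_group_frequency);

	SET_BIC(BIC_IPC, &bic_enabled);
	CLR_BIC(BIC_IPC, &bic_enabled);	/* e.g. perf access is missing */

	printf("Bzy_MHz=%d IPC=%d\n",
	       BIC_IS_SET(BIC_Bzy_MHz, &bic_enabled),
	       BIC_IS_SET(BIC_IPC, &bic_enabled));
	return 0;
}

A cpu_set_t is CPU_SETSIZE (1024) bits in glibc, which is why it comfortably outgrows the old unsigned long long mask.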
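The new per-CPU L3 column is read straight from sysfs: the diff's get_l3_id() parses /sys/devices/system/cpu/cpu%d/cache/index3/id via turbostat's parse_int_file() helper, and BIC_L3 is only made present when topo.max_l3_id > 0. A standalone sketch of the same lookup, with the path taken from the hunk; the -1 fallback for CPUs without an exposed L3 id is an assumption made here for illustration:

#include <stdio.h>

/* Return the L3 cache instance id for a CPU, or -1 if the sysfs
 * attribute is missing (no L3, or a kernel without cache ids). */
static int get_l3_id(int cpu)
{
	char path[128];
	FILE *f;
	int id = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/cache/index3/id", cpu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &id) != 1)
		id = -1;
	fclose(f);
	return id;
}

int main(void)
{
	printf("cpu0 is on l3 %d\n", get_l3_id(0));
	return 0;
}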
+6 -4
tools/testing/selftests/drivers/net/napi_threaded.py
··· 35 35 threaded = cmd(f"cat /sys/class/net/{cfg.ifname}/threaded").stdout 36 36 defer(_set_threaded_state, cfg, threaded) 37 37 38 + return combined 39 + 38 40 39 41 def enable_dev_threaded_disable_napi_threaded(cfg, nl) -> None: 40 42 """ ··· 51 49 napi0_id = napis[0]['id'] 52 50 napi1_id = napis[1]['id'] 53 51 54 - _setup_deferred_cleanup(cfg) 52 + qcnt = _setup_deferred_cleanup(cfg) 55 53 56 54 # set threaded 57 55 _set_threaded_state(cfg, 1) ··· 64 62 nl.napi_set({'id': napi1_id, 'threaded': 'disabled'}) 65 63 66 64 cmd(f"ethtool -L {cfg.ifname} combined 1") 67 - cmd(f"ethtool -L {cfg.ifname} combined 2") 65 + cmd(f"ethtool -L {cfg.ifname} combined {qcnt}") 68 66 _assert_napi_threaded_enabled(nl, napi0_id) 69 67 _assert_napi_threaded_disabled(nl, napi1_id) 70 68 ··· 82 80 napi0_id = napis[0]['id'] 83 81 napi1_id = napis[1]['id'] 84 82 85 - _setup_deferred_cleanup(cfg) 83 + qcnt = _setup_deferred_cleanup(cfg) 86 84 87 85 # set threaded 88 86 _set_threaded_state(cfg, 1) ··· 92 90 _assert_napi_threaded_enabled(nl, napi1_id) 93 91 94 92 cmd(f"ethtool -L {cfg.ifname} combined 1") 95 - cmd(f"ethtool -L {cfg.ifname} combined 2") 93 + cmd(f"ethtool -L {cfg.ifname} combined {qcnt}") 96 94 97 95 # check napi threaded is set for both napis 98 96 _assert_napi_threaded_enabled(nl, napi0_id)
+1
tools/testing/selftests/net/forwarding/sch_ets.sh
··· 11 11 ets_test_strict 12 12 ets_test_mixed 13 13 ets_test_dwrr 14 + ets_test_plug 14 15 classifier_mode 15 16 ets_test_strict 16 17 ets_test_mixed
+8
tools/testing/selftests/net/forwarding/sch_ets_tests.sh
··· 224 224 ets_set_dwrr_two_bands 225 225 xfail_on_slow ets_dwrr_test_01 226 226 } 227 + 228 + ets_test_plug() 229 + { 230 + ets_change_qdisc $put 2 "3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3" "1514 1514" 231 + tc qdisc add dev $put handle 20: parent 10:4 plug 232 + start_traffic_pktsize 100 $h1.10 192.0.2.1 192.0.2.2 00:c1:a0:c1:a0:00 "-c 1" 233 + ets_qdisc_setup $put 2 234 + }
+63
tools/testing/selftests/net/tls.c
··· 2708 2708 close(cfd); 2709 2709 } 2710 2710 2711 + TEST(data_steal) { 2712 + struct tls_crypto_info_keys tls; 2713 + char buf[20000], buf2[20000]; 2714 + struct sockaddr_in addr; 2715 + int sfd, cfd, ret, fd; 2716 + int pid, status; 2717 + socklen_t len; 2718 + 2719 + len = sizeof(addr); 2720 + memrnd(buf, sizeof(buf)); 2721 + 2722 + tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_256, &tls, 0); 2723 + 2724 + addr.sin_family = AF_INET; 2725 + addr.sin_addr.s_addr = htonl(INADDR_ANY); 2726 + addr.sin_port = 0; 2727 + 2728 + fd = socket(AF_INET, SOCK_STREAM, 0); 2729 + sfd = socket(AF_INET, SOCK_STREAM, 0); 2730 + 2731 + ASSERT_EQ(bind(sfd, &addr, sizeof(addr)), 0); 2732 + ASSERT_EQ(listen(sfd, 10), 0); 2733 + ASSERT_EQ(getsockname(sfd, &addr, &len), 0); 2734 + ASSERT_EQ(connect(fd, &addr, sizeof(addr)), 0); 2735 + ASSERT_GE(cfd = accept(sfd, &addr, &len), 0); 2736 + close(sfd); 2737 + 2738 + ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); 2739 + if (ret) { 2740 + ASSERT_EQ(errno, ENOENT); 2741 + SKIP(return, "no TLS support"); 2742 + } 2743 + ASSERT_EQ(setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")), 0); 2744 + 2745 + /* Spawn a child and get it into the read wait path of the underlying 2746 + * TCP socket. 2747 + */ 2748 + pid = fork(); 2749 + ASSERT_GE(pid, 0); 2750 + if (!pid) { 2751 + EXPECT_EQ(recv(cfd, buf, sizeof(buf), MSG_WAITALL), 2752 + sizeof(buf)); 2753 + exit(!__test_passed(_metadata)); 2754 + } 2755 + 2756 + usleep(2000); 2757 + ASSERT_EQ(setsockopt(fd, SOL_TLS, TLS_TX, &tls, tls.len), 0); 2758 + ASSERT_EQ(setsockopt(cfd, SOL_TLS, TLS_RX, &tls, tls.len), 0); 2759 + 2760 + EXPECT_EQ(send(fd, buf, sizeof(buf), 0), sizeof(buf)); 2761 + usleep(2000); 2762 + EXPECT_EQ(recv(cfd, buf2, sizeof(buf2), MSG_DONTWAIT), -1); 2763 + /* Don't check errno, the error will be different depending 2764 + * on what random bytes TLS interpreted as the record length. 2765 + */ 2766 + 2767 + close(fd); 2768 + close(cfd); 2769 + 2770 + EXPECT_EQ(wait(&status), pid); 2771 + EXPECT_EQ(status, 0); 2772 + } 2773 + 2711 2774 static void __attribute__((constructor)) fips_check(void) { 2712 2775 int res; 2713 2776 FILE *f;
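The new data_steal test exercises an ordering hazard rather than a crypto path: a forked child is parked inside the plain-TCP recv() wait path before the parent installs TLS on both ends, and the test then checks that the sleeping reader does not walk off with bytes that must be parsed as TLS records once TLS_RX is live (hence the deliberately unchecked errno). For reference, the kTLS enablement the test builds on boils down to two setsockopt() calls; a minimal sketch with zeroed key material for illustration only (real use needs keys from a userspace handshake), AES-GCM-128 shown for brevity where the test's helper programs AES-GCM-256, and a fallback SOL_TLS define assumed for older libcs:

#include <linux/tls.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

/* Attach the TLS ULP to a connected TCP socket and program one
 * direction (TLS_TX or TLS_RX). */
static int enable_ktls(int fd, int direction)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;	/* ENOENT: kernel lacks TLS support */

	memset(&ci, 0, sizeof(ci));	/* iv/key/salt/rec_seq left zeroed */
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;

	return setsockopt(fd, SOL_TLS, direction, &ci, sizeof(ci));
}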
+3 -3
tools/testing/selftests/proc/proc-maps-race.c
··· 202 202 int offs = end - text; 203 203 204 204 text[offs] = '\0'; 205 - printf(text); 205 + printf("%s", text); 206 206 text[offs] = '\n'; 207 207 printf("\n"); 208 208 } else { 209 - printf(text); 209 + printf("%s", text); 210 210 } 211 211 } 212 212 ··· 221 221 nr--; 222 222 start--; 223 223 } 224 - printf(start); 224 + printf("%s", start); 225 225 } 226 226 227 227 static void print_boundaries(const char *title, FIXTURE_DATA(proc_maps_race) *self)
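The proc-maps-race.c hunk is the classic format-string repair: text holds data read back from /proc/<pid>/maps, so any stray '%' in it would make printf(text) parse conversion specifiers and consume arguments that were never passed (undefined behavior, and exactly what gcc's -Wformat-security flags). A small contrast sketch; the sample string is hypothetical:

#include <stdio.h>

int main(void)
{
	/* Pretend this arrived from /proc or any external source. */
	const char *text = "region at 0x1000, 100% dirty\n";

	/* BAD: "% d" inside the data is a valid conversion (space flag
	 * plus %d) and would consume an int argument that was never
	 * passed; left commented out because it is undefined behavior. */
	/* printf(text); */

	/* GOOD: the data is an argument, never the format. */
	printf("%s", text);
	fputs(text, stdout);	/* equivalent, with no format parsing */
	return 0;
}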