Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge 5.12-rc6 into driver-core-next

We need the driver core fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+4536 -2128
+3
.mailmap
···
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
+Andrey Konovalov <andreyknvl@gmail.com> <andreyknvl@google.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
 Andy Adamson <andros@citi.umich.edu>
···
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Corey Minyard <minyard@acm.org>
+5 -5
Documentation/arm64/acpi_object_usage.rst
···
 
 - Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT
 
-- Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IORT,
-  MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, STAO,
-  TCPA, TPM2, UEFI, XENV
+- Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT,
+  IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT,
+  STAO, TCPA, TPM2, UEFI, XENV
 
-- Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IBFT, IVRS, LPIT,
-  MSDM, OEMx, PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+- Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx,
+  PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
 
 ====== ========================================================================
 Table Usage for ARMv8 Linux
+3
Documentation/arm64/silicon-errata.rst
···
 | Marvell        | ARM-MMU-500     | #582743         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+1 -1
Documentation/networking/device_drivers/ethernet/amazon/ena.rst
···
 Tx
 --
 
-end_start_xmit() is called by the stack. This function does the following:
+ena_start_xmit() is called by the stack. This function does the following:
 
 - Maps data buffers (skb->data and frags).
 - Populates ena_buf for the push buffer (if the driver and device are
+1 -1
Documentation/networking/xfrm_device.rst
···
 
 The NIC driver offering ipsec offload will need to implement these
 callbacks to make the offload available to the network stack's
-XFRM subsytem. Additionally, the feature bits NETIF_F_HW_ESP and
+XFRM subsystem. Additionally, the feature bits NETIF_F_HW_ESP and
 NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
 
 
+28 -31
MAINTAINERS
··· 2489 2489 N: sc2731 2490 2490 2491 2491 ARM/STI ARCHITECTURE 2492 - M: Patrice Chotard <patrice.chotard@st.com> 2492 + M: Patrice Chotard <patrice.chotard@foss.st.com> 2493 2493 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 2494 2494 S: Maintained 2495 2495 W: http://www.stlinux.com ··· 2522 2522 2523 2523 ARM/STM32 ARCHITECTURE 2524 2524 M: Maxime Coquelin <mcoquelin.stm32@gmail.com> 2525 - M: Alexandre Torgue <alexandre.torgue@st.com> 2525 + M: Alexandre Torgue <alexandre.torgue@foss.st.com> 2526 2526 L: linux-stm32@st-md-mailman.stormreply.com (moderated for non-subscribers) 2527 2527 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 2528 2528 S: Maintained ··· 3115 3115 F: drivers/md/bcache/ 3116 3116 3117 3117 BDISP ST MEDIA DRIVER 3118 - M: Fabien Dessenne <fabien.dessenne@st.com> 3118 + M: Fabien Dessenne <fabien.dessenne@foss.st.com> 3119 3119 L: linux-media@vger.kernel.org 3120 3120 S: Supported 3121 3121 W: https://linuxtv.org ··· 3675 3675 L: linux-pm@vger.kernel.org 3676 3676 S: Maintained 3677 3677 T: git git://github.com/broadcom/stblinux.git 3678 - F: drivers/soc/bcm/bcm-pmb.c 3678 + F: drivers/soc/bcm/bcm63xx/bcm-pmb.c 3679 3679 F: include/dt-bindings/soc/bcm-pmb.h 3680 3680 3681 3681 BROADCOM SPECIFIC AMBA DRIVER (BCMA) ··· 5080 5080 F: drivers/platform/x86/dell/dell-wmi.c 5081 5081 5082 5082 DELTA ST MEDIA DRIVER 5083 - M: Hugues Fruchet <hugues.fruchet@st.com> 5083 + M: Hugues Fruchet <hugues.fruchet@foss.st.com> 5084 5084 L: linux-media@vger.kernel.org 5085 5085 S: Supported 5086 5086 W: https://linuxtv.org ··· 6012 6012 6013 6013 DRM DRIVERS FOR STI 6014 6014 M: Benjamin Gaignard <benjamin.gaignard@linaro.org> 6015 - M: Vincent Abriou <vincent.abriou@st.com> 6016 6015 L: dri-devel@lists.freedesktop.org 6017 6016 S: Maintained 6018 6017 T: git git://anongit.freedesktop.org/drm/drm-misc ··· 6019 6020 F: drivers/gpu/drm/sti 6020 6021 6021 6022 DRM DRIVERS FOR STM 6022 - M: Yannick Fertre <yannick.fertre@st.com> 6023 - M: Philippe Cornu <philippe.cornu@st.com> 6023 + M: Yannick Fertre <yannick.fertre@foss.st.com> 6024 + M: Philippe Cornu <philippe.cornu@foss.st.com> 6024 6025 M: Benjamin Gaignard <benjamin.gaignard@linaro.org> 6025 - M: Vincent Abriou <vincent.abriou@st.com> 6026 6026 L: dri-devel@lists.freedesktop.org 6027 6027 S: Maintained 6028 6028 T: git git://anongit.freedesktop.org/drm/drm-misc ··· 7480 7482 GENERIC PHY FRAMEWORK 7481 7483 M: Kishon Vijay Abraham I <kishon@ti.com> 7482 7484 M: Vinod Koul <vkoul@kernel.org> 7483 - L: linux-kernel@vger.kernel.org 7485 + L: linux-phy@lists.infradead.org 7484 7486 S: Supported 7487 + Q: https://patchwork.kernel.org/project/linux-phy/list/ 7485 7488 T: git git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy.git 7486 7489 F: Documentation/devicetree/bindings/phy/ 7487 7490 F: drivers/phy/ ··· 8235 8236 F: mm/hugetlb.c 8236 8237 8237 8238 HVA ST MEDIA DRIVER 8238 - M: Jean-Christophe Trotin <jean-christophe.trotin@st.com> 8239 + M: Jean-Christophe Trotin <jean-christophe.trotin@foss.st.com> 8239 8240 L: linux-media@vger.kernel.org 8240 8241 S: Supported 8241 8242 W: https://linuxtv.org ··· 8525 8526 M: Dany Madden <drt@linux.ibm.com> 8526 8527 M: Lijun Pan <ljp@linux.ibm.com> 8527 8528 M: Sukadev Bhattiprolu <sukadev@linux.ibm.com> 8529 + R: Thomas Falcon <tlfalcon@linux.ibm.com> 8528 8530 L: netdev@vger.kernel.org 8529 8531 S: Supported 8530 8532 F: drivers/net/ethernet/ibm/ibmvnic.* ··· 10035 10035 10036 10036 LED SUBSYSTEM 10037 10037 M: Pavel Machek 
<pavel@ucw.cz> 10038 - R: Dan Murphy <dmurphy@ti.com> 10039 10038 L: linux-leds@vger.kernel.org 10040 10039 S: Maintained 10041 10040 T: git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git ··· 10910 10911 F: drivers/media/radio/radio-maxiradio* 10911 10912 10912 10913 MCAN MMIO DEVICE DRIVER 10913 - M: Dan Murphy <dmurphy@ti.com> 10914 10914 M: Pankaj Sharma <pankj.sharma@samsung.com> 10915 10915 L: linux-can@vger.kernel.org 10916 10916 S: Maintained ··· 11170 11172 F: drivers/media/dvb-frontends/stv6111* 11171 11173 11172 11174 MEDIA DRIVERS FOR STM32 - DCMI 11173 - M: Hugues Fruchet <hugues.fruchet@st.com> 11175 + M: Hugues Fruchet <hugues.fruchet@foss.st.com> 11174 11176 L: linux-media@vger.kernel.org 11175 11177 S: Supported 11176 11178 T: git git://linuxtv.org/media_tree.git ··· 12541 12543 M: Mat Martineau <mathew.j.martineau@linux.intel.com> 12542 12544 M: Matthieu Baerts <matthieu.baerts@tessares.net> 12543 12545 L: netdev@vger.kernel.org 12544 - L: mptcp@lists.01.org 12546 + L: mptcp@lists.linux.dev 12545 12547 S: Maintained 12546 12548 W: https://github.com/multipath-tcp/mptcp_net-next/wiki 12547 12549 B: https://github.com/multipath-tcp/mptcp_net-next/issues ··· 14712 14714 QLOGIC QLGE 10Gb ETHERNET DRIVER 14713 14715 M: Manish Chopra <manishc@marvell.com> 14714 14716 M: GR-Linux-NIC-Dev@marvell.com 14715 - L: netdev@vger.kernel.org 14716 - S: Supported 14717 - F: drivers/staging/qlge/ 14718 - 14719 - QLOGIC QLGE 10Gb ETHERNET DRIVER 14720 14717 M: Coiby Xu <coiby.xu@gmail.com> 14721 14718 L: netdev@vger.kernel.org 14722 - S: Maintained 14719 + S: Supported 14723 14720 F: Documentation/networking/device_drivers/qlogic/qlge.rst 14721 + F: drivers/staging/qlge/ 14724 14722 14725 14723 QM1D1B0004 MEDIA DRIVER 14726 14724 M: Akihiro Tsukada <tskd08@gmail.com> ··· 15634 15640 15635 15641 S390 VFIO AP DRIVER 15636 15642 M: Tony Krowiak <akrowiak@linux.ibm.com> 15637 - M: Pierre Morel <pmorel@linux.ibm.com> 15638 15643 M: Halil Pasic <pasic@linux.ibm.com> 15644 + M: Jason Herne <jjherne@linux.ibm.com> 15639 15645 L: linux-s390@vger.kernel.org 15640 15646 S: Supported 15641 15647 W: http://www.ibm.com/developerworks/linux/linux390/ ··· 15647 15653 S390 VFIO-CCW DRIVER 15648 15654 M: Cornelia Huck <cohuck@redhat.com> 15649 15655 M: Eric Farman <farman@linux.ibm.com> 15656 + M: Matthew Rosato <mjrosato@linux.ibm.com> 15650 15657 R: Halil Pasic <pasic@linux.ibm.com> 15651 15658 L: linux-s390@vger.kernel.org 15652 15659 L: kvm@vger.kernel.org ··· 15658 15663 15659 15664 S390 VFIO-PCI DRIVER 15660 15665 M: Matthew Rosato <mjrosato@linux.ibm.com> 15666 + M: Eric Farman <farman@linux.ibm.com> 15661 15667 L: linux-s390@vger.kernel.org 15662 15668 L: kvm@vger.kernel.org 15663 15669 S: Supported ··· 16888 16892 16889 16893 SPIDERNET NETWORK DRIVER for CELL 16890 16894 M: Ishizaki Kou <kou.ishizaki@toshiba.co.jp> 16895 + M: Geoff Levand <geoff@infradead.org> 16891 16896 L: netdev@vger.kernel.org 16892 - S: Supported 16897 + L: linuxppc-dev@lists.ozlabs.org 16898 + S: Maintained 16893 16899 F: Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst 16894 16900 F: drivers/net/ethernet/toshiba/spider_net* 16895 16901 ··· 16945 16947 F: drivers/media/i2c/st-mipid02.c 16946 16948 16947 16949 ST STM32 I2C/SMBUS DRIVER 16948 - M: Pierre-Yves MORDRET <pierre-yves.mordret@st.com> 16950 + M: Pierre-Yves MORDRET <pierre-yves.mordret@foss.st.com> 16951 + M: Alain Volmat <alain.volmat@foss.st.com> 16949 16952 L: linux-i2c@vger.kernel.org 16950 16953 S: Maintained 16951 16954 
F: drivers/i2c/busses/i2c-stm32* ··· 17071 17072 F: kernel/static_call.c 17072 17073 17073 17074 STI AUDIO (ASoC) DRIVERS 17074 - M: Arnaud Pouliquen <arnaud.pouliquen@st.com> 17075 + M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com> 17075 17076 L: alsa-devel@alsa-project.org (moderated for non-subscribers) 17076 17077 S: Maintained 17077 17078 F: Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt ··· 17091 17092 F: drivers/media/usb/stk1160/ 17092 17093 17093 17094 STM32 AUDIO (ASoC) DRIVERS 17094 - M: Olivier Moysan <olivier.moysan@st.com> 17095 - M: Arnaud Pouliquen <arnaud.pouliquen@st.com> 17095 + M: Olivier Moysan <olivier.moysan@foss.st.com> 17096 + M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com> 17096 17097 L: alsa-devel@alsa-project.org (moderated for non-subscribers) 17097 17098 S: Maintained 17098 17099 F: Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml 17099 17100 F: sound/soc/stm/ 17100 17101 17101 17102 STM32 TIMER/LPTIMER DRIVERS 17102 - M: Fabrice Gasnier <fabrice.gasnier@st.com> 17103 + M: Fabrice Gasnier <fabrice.gasnier@foss.st.com> 17103 17104 S: Maintained 17104 17105 F: Documentation/ABI/testing/*timer-stm32 17105 17106 F: Documentation/devicetree/bindings/*/*stm32-*timer* ··· 17109 17110 17110 17111 STMMAC ETHERNET DRIVER 17111 17112 M: Giuseppe Cavallaro <peppe.cavallaro@st.com> 17112 - M: Alexandre Torgue <alexandre.torgue@st.com> 17113 + M: Alexandre Torgue <alexandre.torgue@foss.st.com> 17113 17114 M: Jose Abreu <joabreu@synopsys.com> 17114 17115 L: netdev@vger.kernel.org 17115 17116 S: Supported ··· 17851 17852 F: drivers/thermal/ti-soc-thermal/ 17852 17853 17853 17854 TI BQ27XXX POWER SUPPLY DRIVER 17854 - R: Dan Murphy <dmurphy@ti.com> 17855 17855 F: drivers/power/supply/bq27xxx_battery.c 17856 17856 F: drivers/power/supply/bq27xxx_battery_i2c.c 17857 17857 F: include/linux/power/bq27xxx_battery.h ··· 17985 17987 F: sound/soc/codecs/tas571x* 17986 17988 17987 17989 TI TCAN4X5X DEVICE DRIVER 17988 - M: Dan Murphy <dmurphy@ti.com> 17989 17990 L: linux-can@vger.kernel.org 17990 17991 S: Maintained 17991 17992 F: Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+1 -1
Makefile
···
 VERSION = 5
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Frozen Wasteland
 
 # *DOCUMENTATION*
+3
arch/arm/boot/dts/am33xx.dtsi
···
 		ethernet1 = &cpsw_emac1;
 		spi0 = &spi0;
 		spi1 = &spi1;
+		mmc0 = &mmc1;
+		mmc1 = &mmc2;
+		mmc2 = &mmc3;
 	};
 
 	cpus {
-8
arch/arm/boot/dts/at91-sam9x60ek.dts
···
 };
 
 &pinctrl {
-	atmel,mux-mask = <
-			/* A          B          C         */
-			0xFFFFFE7F 0xC0E0397F 0xEF00019D   /* pioA */
-			0x03FFFFFF 0x02FC7E68 0x00780000   /* pioB */
-			0xffffffff 0xF83FFFFF 0xB800F3FC   /* pioC */
-			0x003FFFFF 0x003F8000 0x00000000   /* pioD */
-			>;
-
 	adc {
 		pinctrl_adc_default: adc_default {
 			atmel,pins = <AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
+2 -2
arch/arm/boot/dts/at91-sama5d27_som1.dtsi
···
 	pinctrl-0 = <&pinctrl_macb0_default>;
 	phy-mode = "rmii";
 
-	ethernet-phy@0 {
-		reg = <0x0>;
+	ethernet-phy@7 {
+		reg = <0x7>;
 		interrupt-parent = <&pioA>;
 		interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
 		pinctrl-names = "default";
+16 -6
arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
··· 210 210 micrel,led-mode = <1>; 211 211 clocks = <&clks IMX6UL_CLK_ENET_REF>; 212 212 clock-names = "rmii-ref"; 213 - reset-gpios = <&gpio_spi 1 GPIO_ACTIVE_LOW>; 214 - reset-assert-us = <10000>; 215 - reset-deassert-us = <100>; 216 213 217 214 }; 218 215 ··· 219 222 micrel,led-mode = <1>; 220 223 clocks = <&clks IMX6UL_CLK_ENET2_REF>; 221 224 clock-names = "rmii-ref"; 222 - reset-gpios = <&gpio_spi 2 GPIO_ACTIVE_LOW>; 223 - reset-assert-us = <10000>; 224 - reset-deassert-us = <100>; 225 225 }; 226 226 }; 227 227 }; ··· 235 241 pinctrl-0 = <&pinctrl_flexcan2>; 236 242 xceiver-supply = <&reg_can_3v3>; 237 243 status = "okay"; 244 + }; 245 + 246 + &gpio_spi { 247 + eth0-phy-hog { 248 + gpio-hog; 249 + gpios = <1 GPIO_ACTIVE_HIGH>; 250 + output-high; 251 + line-name = "eth0-phy"; 252 + }; 253 + 254 + eth1-phy-hog { 255 + gpio-hog; 256 + gpios = <2 GPIO_ACTIVE_HIGH>; 257 + output-high; 258 + line-name = "eth1-phy"; 259 + }; 238 260 }; 239 261 240 262 &i2c1 {
+1
arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
···
 };
 
 &gpmi {
+	fsl,use-minimum-ecc;
 	status = "okay";
 };
+9
arch/arm/boot/dts/sam9x60.dtsi
···
 	compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
 	ranges = <0xfffff400 0xfffff400 0x800>;
 
+	/* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */
+	atmel,mux-mask = <
+			/* A          B          C         */
+			0xffffffff 0xffe03fff 0xef00019d   /* pioA */
+			0x03ffffff 0x02fc7e7f 0x00780000   /* pioB */
+			0xffffffff 0xffffffff 0xf83fffff   /* pioC */
+			0x003fffff 0x003f8000 0x00000000   /* pioD */
+			>;
+
 	pioA: gpio@fffff400 {
 		compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
 		reg = <0xfffff400 0x200>;
+15 -1
arch/arm/mach-imx/avic.c
··· 7 7 #include <linux/module.h> 8 8 #include <linux/irq.h> 9 9 #include <linux/irqdomain.h> 10 + #include <linux/irqchip.h> 10 11 #include <linux/io.h> 11 12 #include <linux/of.h> 12 13 #include <linux/of_address.h> ··· 163 162 * interrupts. It registers the interrupt enable and disable functions 164 163 * to the kernel for each interrupt source. 165 164 */ 166 - void __init mxc_init_irq(void __iomem *irqbase) 165 + static void __init mxc_init_irq(void __iomem *irqbase) 167 166 { 168 167 struct device_node *np; 169 168 int irq_base; ··· 221 220 222 221 printk(KERN_INFO "MXC IRQ initialized\n"); 223 222 } 223 + 224 + static int __init imx_avic_init(struct device_node *node, 225 + struct device_node *parent) 226 + { 227 + void __iomem *avic_base; 228 + 229 + avic_base = of_iomap(node, 0); 230 + BUG_ON(!avic_base); 231 + mxc_init_irq(avic_base); 232 + return 0; 233 + } 234 + 235 + IRQCHIP_DECLARE(imx_avic, "fsl,avic", imx_avic_init);
-1
arch/arm/mach-imx/common.h
···
 void imx21_init_early(void);
 void imx31_init_early(void);
 void imx35_init_early(void);
-void mxc_init_irq(void __iomem *);
 void mx31_init_irq(void);
 void mx35_init_irq(void);
 void mxc_set_cpu_type(unsigned int type);
-11
arch/arm/mach-imx/mach-imx1.c
··· 17 17 mxc_set_cpu_type(MXC_CPU_MX1); 18 18 } 19 19 20 - static void __init imx1_init_irq(void) 21 - { 22 - void __iomem *avic_addr; 23 - 24 - avic_addr = ioremap(MX1_AVIC_ADDR, SZ_4K); 25 - WARN_ON(!avic_addr); 26 - 27 - mxc_init_irq(avic_addr); 28 - } 29 - 30 20 static const char * const imx1_dt_board_compat[] __initconst = { 31 21 "fsl,imx1", 32 22 NULL ··· 24 34 25 35 DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)") 26 36 .init_early = imx1_init_early, 27 - .init_irq = imx1_init_irq, 28 37 .dt_compat = imx1_dt_board_compat, 29 38 .restart = mxc_restart, 30 39 MACHINE_END
-12
arch/arm/mach-imx/mach-imx25.c
··· 22 22 imx_aips_allow_unprivileged_access("fsl,imx25-aips"); 23 23 } 24 24 25 - static void __init mx25_init_irq(void) 26 - { 27 - struct device_node *np; 28 - void __iomem *avic_base; 29 - 30 - np = of_find_compatible_node(NULL, NULL, "fsl,avic"); 31 - avic_base = of_iomap(np, 0); 32 - BUG_ON(!avic_base); 33 - mxc_init_irq(avic_base); 34 - } 35 - 36 25 static const char * const imx25_dt_board_compat[] __initconst = { 37 26 "fsl,imx25", 38 27 NULL ··· 31 42 .init_early = imx25_init_early, 32 43 .init_machine = imx25_dt_init, 33 44 .init_late = imx25_pm_init, 34 - .init_irq = mx25_init_irq, 35 45 .dt_compat = imx25_dt_board_compat, 36 46 MACHINE_END
-12
arch/arm/mach-imx/mach-imx27.c
··· 56 56 mxc_set_cpu_type(MXC_CPU_MX27); 57 57 } 58 58 59 - static void __init mx27_init_irq(void) 60 - { 61 - void __iomem *avic_base; 62 - struct device_node *np; 63 - 64 - np = of_find_compatible_node(NULL, NULL, "fsl,avic"); 65 - avic_base = of_iomap(np, 0); 66 - BUG_ON(!avic_base); 67 - mxc_init_irq(avic_base); 68 - } 69 - 70 59 static const char * const imx27_dt_board_compat[] __initconst = { 71 60 "fsl,imx27", 72 61 NULL ··· 64 75 DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)") 65 76 .map_io = mx27_map_io, 66 77 .init_early = imx27_init_early, 67 - .init_irq = mx27_init_irq, 68 78 .init_late = imx27_pm_init, 69 79 .dt_compat = imx27_dt_board_compat, 70 80 MACHINE_END
-1
arch/arm/mach-imx/mach-imx31.c
···
 DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)")
 	.map_io = mx31_map_io,
 	.init_early = imx31_init_early,
-	.init_irq = mx31_init_irq,
 	.dt_compat = imx31_dt_board_compat,
 MACHINE_END
-1
arch/arm/mach-imx/mach-imx35.c
···
 	.l2c_aux_mask = ~0,
 	.map_io = mx35_map_io,
 	.init_early = imx35_init_early,
-	.init_irq = mx35_init_irq,
 	.dt_compat = imx35_dt_board_compat,
 MACHINE_END
-24
arch/arm/mach-imx/mm-imx3.c
··· 109 109 mx3_ccm_base = of_iomap(np, 0); 110 110 BUG_ON(!mx3_ccm_base); 111 111 } 112 - 113 - void __init mx31_init_irq(void) 114 - { 115 - void __iomem *avic_base; 116 - struct device_node *np; 117 - 118 - np = of_find_compatible_node(NULL, NULL, "fsl,imx31-avic"); 119 - avic_base = of_iomap(np, 0); 120 - BUG_ON(!avic_base); 121 - 122 - mxc_init_irq(avic_base); 123 - } 124 112 #endif /* ifdef CONFIG_SOC_IMX31 */ 125 113 126 114 #ifdef CONFIG_SOC_IMX35 ··· 145 157 np = of_find_compatible_node(NULL, NULL, "fsl,imx35-ccm"); 146 158 mx3_ccm_base = of_iomap(np, 0); 147 159 BUG_ON(!mx3_ccm_base); 148 - } 149 - 150 - void __init mx35_init_irq(void) 151 - { 152 - void __iomem *avic_base; 153 - struct device_node *np; 154 - 155 - np = of_find_compatible_node(NULL, NULL, "fsl,imx35-avic"); 156 - avic_base = of_iomap(np, 0); 157 - BUG_ON(!avic_base); 158 - 159 - mxc_init_irq(avic_base); 160 160 } 161 161 #endif /* ifdef CONFIG_SOC_IMX35 */
+58 -17
arch/arm/mach-omap2/sr_device.c
··· 88 88 89 89 extern struct omap_sr_data omap_sr_pdata[]; 90 90 91 - static int __init sr_dev_init(struct omap_hwmod *oh, void *user) 91 + static int __init sr_init_by_name(const char *name, const char *voltdm) 92 92 { 93 93 struct omap_sr_data *sr_data = NULL; 94 94 struct omap_volt_data *volt_data; 95 - struct omap_smartreflex_dev_attr *sr_dev_attr; 96 95 static int i; 97 96 98 - if (!strncmp(oh->name, "smartreflex_mpu_iva", 20) || 99 - !strncmp(oh->name, "smartreflex_mpu", 16)) 97 + if (!strncmp(name, "smartreflex_mpu_iva", 20) || 98 + !strncmp(name, "smartreflex_mpu", 16)) 100 99 sr_data = &omap_sr_pdata[OMAP_SR_MPU]; 101 - else if (!strncmp(oh->name, "smartreflex_core", 17)) 100 + else if (!strncmp(name, "smartreflex_core", 17)) 102 101 sr_data = &omap_sr_pdata[OMAP_SR_CORE]; 103 - else if (!strncmp(oh->name, "smartreflex_iva", 16)) 102 + else if (!strncmp(name, "smartreflex_iva", 16)) 104 103 sr_data = &omap_sr_pdata[OMAP_SR_IVA]; 105 104 106 105 if (!sr_data) { 107 - pr_err("%s: Unknown instance %s\n", __func__, oh->name); 106 + pr_err("%s: Unknown instance %s\n", __func__, name); 108 107 return -EINVAL; 109 108 } 110 109 111 - sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr; 112 - if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) { 113 - pr_err("%s: No voltage domain specified for %s. Cannot initialize\n", 114 - __func__, oh->name); 115 - goto exit; 116 - } 117 - 118 - sr_data->name = oh->name; 110 + sr_data->name = name; 119 111 if (cpu_is_omap343x()) 120 112 sr_data->ip_type = 1; 121 113 else ··· 128 136 } 129 137 } 130 138 131 - sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name); 139 + sr_data->voltdm = voltdm_lookup(voltdm); 132 140 if (!sr_data->voltdm) { 133 141 pr_err("%s: Unable to get voltage domain pointer for VDD %s\n", 134 - __func__, sr_dev_attr->sensor_voltdm_name); 142 + __func__, voltdm); 135 143 goto exit; 136 144 } 137 145 ··· 152 160 return 0; 153 161 } 154 162 163 + static int __init sr_dev_init(struct omap_hwmod *oh, void *user) 164 + { 165 + struct omap_smartreflex_dev_attr *sr_dev_attr; 166 + 167 + sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr; 168 + if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) { 169 + pr_err("%s: No voltage domain specified for %s. Cannot initialize\n", 170 + __func__, oh->name); 171 + return 0; 172 + } 173 + 174 + return sr_init_by_name(oh->name, sr_dev_attr->sensor_voltdm_name); 175 + } 176 + 155 177 /* 156 178 * API to be called from board files to enable smartreflex 157 179 * autocompensation at init. ··· 175 169 sr_enable_on_init = true; 176 170 } 177 171 172 + static const char * const omap4_sr_instances[] = { 173 + "mpu", 174 + "iva", 175 + "core", 176 + }; 177 + 178 + static const char * const dra7_sr_instances[] = { 179 + "mpu", 180 + "core", 181 + }; 182 + 178 183 int __init omap_devinit_smartreflex(void) 179 184 { 185 + const char * const *sr_inst; 186 + int i, nr_sr = 0; 187 + 188 + if (soc_is_omap44xx()) { 189 + sr_inst = omap4_sr_instances; 190 + nr_sr = ARRAY_SIZE(omap4_sr_instances); 191 + 192 + } else if (soc_is_dra7xx()) { 193 + sr_inst = dra7_sr_instances; 194 + nr_sr = ARRAY_SIZE(dra7_sr_instances); 195 + } 196 + 197 + if (nr_sr) { 198 + const char *name, *voltdm; 199 + 200 + for (i = 0; i < nr_sr; i++) { 201 + name = kasprintf(GFP_KERNEL, "smartreflex_%s", sr_inst[i]); 202 + voltdm = sr_inst[i]; 203 + sr_init_by_name(name, voltdm); 204 + } 205 + 206 + return 0; 207 + } 208 + 180 209 return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL); 181 210 }
+10
arch/arm64/Kconfig
···
 
 	  If unsure, say Y.
 
+config NVIDIA_CARMEL_CNP_ERRATUM
+	bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+	default y
+	help
+	  If CNP is enabled on Carmel cores, non-sharable TLBIs on a core will not
+	  invalidate shared TLB entries installed by a different core, as it would
+	  on standard ARM cores.
+
+	  If unsure, say Y.
+
 config SOCIONEXT_SYNQUACER_PREITS
 	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
 	default y
+1
arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
···
 	ranges = <0x0 0x00 0x1700000 0x100000>;
 	reg = <0x00 0x1700000 0x0 0x100000>;
 	interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+	dma-coherent;
 
 	sec_jr0: jr@10000 {
 		compatible = "fsl,sec-v5.4-job-ring",
+1
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
···
 	ranges = <0x0 0x00 0x1700000 0x100000>;
 	reg = <0x00 0x1700000 0x0 0x100000>;
 	interrupts = <0 75 0x4>;
+	dma-coherent;
 
 	sec_jr0: jr@10000 {
 		compatible = "fsl,sec-v5.4-job-ring",
+1
arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
···
 	ranges = <0x0 0x00 0x1700000 0x100000>;
 	reg = <0x00 0x1700000 0x0 0x100000>;
 	interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+	dma-coherent;
 
 	sec_jr0: jr@10000 {
 		compatible = "fsl,sec-v5.4-job-ring",
+1 -1
arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
···
 
 &i2c2 {
 	clock-frequency = <400000>;
-	pinctrl-names = "default";
+	pinctrl-names = "default", "gpio";
 	pinctrl-0 = <&pinctrl_i2c2>;
 	pinctrl-1 = <&pinctrl_i2c2_gpio>;
 	sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+1 -1
arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
···
 
 &i2c1 {
 	clock-frequency = <400000>;
-	pinctrl-names = "default";
+	pinctrl-names = "default", "gpio";
 	pinctrl-0 = <&pinctrl_i2c1>;
 	pinctrl-1 = <&pinctrl_i2c1_gpio>;
 	sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+1 -1
arch/arm64/include/asm/checksum.h
···
 	} while (--n > 0);
 
 	sum += ((sum >> 32) | (sum << 32));
-	return csum_fold((__force u32)(sum >> 32));
+	return csum_fold((__force __wsum)(sum >> 32));
 }
 #define ip_fast_csum ip_fast_csum
 
+2 -1
arch/arm64/include/asm/cpucaps.h
···
 #define ARM64_WORKAROUND_1508412		58
 #define ARM64_HAS_LDAPR				59
 #define ARM64_KVM_PROTECTED_MODE		60
+#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP	61
 
-#define ARM64_NCAPS				61
+#define ARM64_NCAPS				62
 
 #endif /* __ASM_CPUCAPS_H */
+1
arch/arm64/include/asm/kvm_arm.h
···
 #define CPTR_EL2_DEFAULT	CPTR_EL2_RES1
 
 /* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TTRF		(1 << 19)
 #define MDCR_EL2_TPMS		(1 << 14)
 #define MDCR_EL2_E2PB_MASK	(UL(0x3))
 #define MDCR_EL2_E2PB_SHIFT	(UL(12))
+2
arch/arm64/include/asm/processor.h
···
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 					 struct task_struct *next);
 
+asmlinkage void arm64_preempt_schedule_irq(void);
+
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
 
+2
arch/arm64/include/asm/thread_info.h
···
 #define arch_setup_new_exec arch_setup_new_exec
 
 void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+			 struct task_struct *src);
 
 #endif
 
+8
arch/arm64/kernel/cpu_errata.c
···
 				1, 0),
 	},
 #endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+	{
+		/* NVIDIA Carmel */
+		.desc = "NVIDIA Carmel CNP erratum",
+		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+	},
+#endif
 	{
 	}
 };
+4 -2
arch/arm64/kernel/cpufeature.c
··· 383 383 * of support. 384 384 */ 385 385 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0), 386 - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0), 387 386 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6), 388 387 ARM64_FTR_END, 389 388 }; ··· 1320 1321 * may share TLB entries with a CPU stuck in the crashed 1321 1322 * kernel. 1322 1323 */ 1323 - if (is_kdump_kernel()) 1324 + if (is_kdump_kernel()) 1325 + return false; 1326 + 1327 + if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP)) 1324 1328 return false; 1325 1329 1326 1330 return has_cpuid_feature(entry, scope);
+1 -1
arch/arm64/kernel/cpuinfo.c
···
 	 * with the CLIDR_EL1 fields to avoid triggering false warnings
 	 * when there is a mismatch across the CPUs. Keep track of the
 	 * effective value of the CTR_EL0 in our internal records for
-	 * acurate sanity check and feature enablement.
+	 * accurate sanity check and feature enablement.
 	 */
 	info->reg_ctr = read_cpuid_effective_cachetype();
 	info->reg_dczid = read_cpuid(DCZID_EL0);
+2
arch/arm64/kernel/crash_dump.c
···
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
 	memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+	*ppos += count;
+
 	return count;
 }
+2
arch/arm64/kernel/process.c
···
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
 #include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
+5 -4
arch/arm64/kernel/stacktrace.c
··· 194 194 195 195 #ifdef CONFIG_STACKTRACE 196 196 197 - void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, 198 - struct task_struct *task, struct pt_regs *regs) 197 + noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, 198 + void *cookie, struct task_struct *task, 199 + struct pt_regs *regs) 199 200 { 200 201 struct stackframe frame; 201 202 ··· 204 203 start_backtrace(&frame, regs->regs[29], regs->pc); 205 204 else if (task == current) 206 205 start_backtrace(&frame, 207 - (unsigned long)__builtin_frame_address(0), 208 - (unsigned long)arch_stack_walk); 206 + (unsigned long)__builtin_frame_address(1), 207 + (unsigned long)__builtin_return_address(0)); 209 208 else 210 209 start_backtrace(&frame, thread_saved_fp(task), 211 210 thread_saved_pc(task));
+2
arch/arm64/kvm/debug.c
··· 89 89 * - Debug ROM Address (MDCR_EL2_TDRA) 90 90 * - OS related registers (MDCR_EL2_TDOSA) 91 91 * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB) 92 + * - Self-hosted Trace Filter controls (MDCR_EL2_TTRF) 92 93 * 93 94 * Additionally, KVM only traps guest accesses to the debug registers if 94 95 * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY ··· 113 112 vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK; 114 113 vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM | 115 114 MDCR_EL2_TPMS | 115 + MDCR_EL2_TTRF | 116 116 MDCR_EL2_TPMCR | 117 117 MDCR_EL2_TDRA | 118 118 MDCR_EL2_TDOSA);
+9
arch/arm64/kvm/hyp/vgic-v3-sr.c
··· 429 429 if (has_vhe()) 430 430 flags = local_daif_save(); 431 431 432 + /* 433 + * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates 434 + * that to be able to set ICC_SRE_EL1.SRE to 0, all the 435 + * interrupt overrides must be set. You've got to love this. 436 + */ 437 + sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO); 438 + isb(); 432 439 write_gicreg(0, ICC_SRE_EL1); 433 440 isb(); 434 441 435 442 val = read_gicreg(ICC_SRE_EL1); 436 443 437 444 write_gicreg(sre, ICC_SRE_EL1); 445 + isb(); 446 + sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0); 438 447 isb(); 439 448 440 449 if (has_vhe())
+19 -2
arch/arm64/mm/mmu.c
··· 1448 1448 struct range arch_get_mappable_range(void) 1449 1449 { 1450 1450 struct range mhp_range; 1451 + u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual)); 1452 + u64 end_linear_pa = __pa(PAGE_END - 1); 1453 + 1454 + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { 1455 + /* 1456 + * Check for a wrap, it is possible because of randomized linear 1457 + * mapping the start physical address is actually bigger than 1458 + * the end physical address. In this case set start to zero 1459 + * because [0, end_linear_pa] range must still be able to cover 1460 + * all addressable physical addresses. 1461 + */ 1462 + if (start_linear_pa > end_linear_pa) 1463 + start_linear_pa = 0; 1464 + } 1465 + 1466 + WARN_ON(start_linear_pa > end_linear_pa); 1451 1467 1452 1468 /* 1453 1469 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)] ··· 1471 1455 * range which can be mapped inside this linear mapping range, must 1472 1456 * also be derived from its end points. 1473 1457 */ 1474 - mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual)); 1475 - mhp_range.end = __pa(PAGE_END - 1); 1458 + mhp_range.start = start_linear_pa; 1459 + mhp_range.end = end_linear_pa; 1460 + 1476 1461 return mhp_range; 1477 1462 } 1478 1463
+11 -11
arch/ia64/kernel/err_inject.c
··· 59 59 char *buf) \ 60 60 { \ 61 61 u32 cpu=dev->id; \ 62 - return sprintf(buf, "%lx\n", name[cpu]); \ 62 + return sprintf(buf, "%llx\n", name[cpu]); \ 63 63 } 64 64 65 65 #define store(name) \ ··· 86 86 87 87 #ifdef ERR_INJ_DEBUG 88 88 printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu); 89 - printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]); 90 - printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]); 91 - printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n", 89 + printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]); 90 + printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]); 91 + printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n", 92 92 err_data_buffer[cpu].data1, 93 93 err_data_buffer[cpu].data2, 94 94 err_data_buffer[cpu].data3); ··· 117 117 118 118 #ifdef ERR_INJ_DEBUG 119 119 printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]); 120 - printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]); 121 - printk(KERN_DEBUG "resources=%lx\n", resources[cpu]); 120 + printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]); 121 + printk(KERN_DEBUG "resources=%llx\n", resources[cpu]); 122 122 #endif 123 123 return size; 124 124 } ··· 131 131 char *buf) 132 132 { 133 133 unsigned int cpu=dev->id; 134 - return sprintf(buf, "%lx\n", phys_addr[cpu]); 134 + return sprintf(buf, "%llx\n", phys_addr[cpu]); 135 135 } 136 136 137 137 static ssize_t ··· 145 145 ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL); 146 146 if (ret<=0) { 147 147 #ifdef ERR_INJ_DEBUG 148 - printk("Virtual address %lx is not existing.\n",virt_addr); 148 + printk("Virtual address %llx is not existing.\n", virt_addr); 149 149 #endif 150 150 return -EINVAL; 151 151 } ··· 163 163 { 164 164 unsigned int cpu=dev->id; 165 165 166 - return sprintf(buf, "%lx, %lx, %lx\n", 166 + return sprintf(buf, "%llx, %llx, %llx\n", 167 167 err_data_buffer[cpu].data1, 168 168 err_data_buffer[cpu].data2, 169 169 err_data_buffer[cpu].data3); ··· 178 178 int ret; 179 179 180 180 #ifdef ERR_INJ_DEBUG 181 - printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n", 181 + printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n", 182 182 err_data_buffer[cpu].data1, 183 183 err_data_buffer[cpu].data2, 184 184 err_data_buffer[cpu].data3, 185 185 cpu); 186 186 #endif 187 - ret=sscanf(buf, "%lx, %lx, %lx", 187 + ret = sscanf(buf, "%llx, %llx, %llx", 188 188 &err_data_buffer[cpu].data1, 189 189 &err_data_buffer[cpu].data2, 190 190 &err_data_buffer[cpu].data3);
+1 -1
arch/ia64/kernel/mca.c
···
 		data = mca_bootmem();
 		first_time = 0;
 	} else
-		data = (void *)__get_free_pages(GFP_KERNEL,
+		data = (void *)__get_free_pages(GFP_ATOMIC,
 						get_order(sz));
 	if (!data)
 		panic("Could not allocate MCA memory for cpu %d\n",
+1 -1
arch/mips/kernel/setup.c
···
 #include <asm/prom.h>
 
 #ifdef CONFIG_MIPS_ELF_APPENDED_DTB
-const char __section(".appended_dtb") __appended_dtb[0x100000];
+char __section(".appended_dtb") __appended_dtb[0x100000];
 #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 
 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
+2 -1
arch/powerpc/platforms/pseries/lpar.c
···
 
 	want_v = hpte_encode_avpn(vpn, psize, ssize);
 
-	flags = (newpp & 7) | H_AVPN;
+	flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+	flags |= (newpp & HPTE_R_KEY_HI) >> 48;
 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
 		/* Move pp0 into bit 8 (IBM 55) */
 		flags |= (newpp & HPTE_R_PP0) >> 55;
+44 -4
arch/powerpc/platforms/pseries/mobility.c
··· 452 452 return ret; 453 453 } 454 454 455 + /** 456 + * struct pseries_suspend_info - State shared between CPUs for join/suspend. 457 + * @counter: Threads are to increment this upon resuming from suspend 458 + * or if an error is received from H_JOIN. The thread which performs 459 + * the first increment (i.e. sets it to 1) is responsible for 460 + * waking the other threads. 461 + * @done: False if join/suspend is in progress. True if the operation is 462 + * complete (successful or not). 463 + */ 464 + struct pseries_suspend_info { 465 + atomic_t counter; 466 + bool done; 467 + }; 468 + 455 469 static int do_join(void *arg) 456 470 { 457 - atomic_t *counter = arg; 471 + struct pseries_suspend_info *info = arg; 472 + atomic_t *counter = &info->counter; 458 473 long hvrc; 459 474 int ret; 460 475 476 + retry: 461 477 /* Must ensure MSR.EE off for H_JOIN. */ 462 478 hard_irq_disable(); 463 479 hvrc = plpar_hcall_norets(H_JOIN); ··· 489 473 case H_SUCCESS: 490 474 /* 491 475 * The suspend is complete and this cpu has received a 492 - * prod. 476 + * prod, or we've received a stray prod from unrelated 477 + * code (e.g. paravirt spinlocks) and we need to join 478 + * again. 479 + * 480 + * This barrier orders the return from H_JOIN above vs 481 + * the load of info->done. It pairs with the barrier 482 + * in the wakeup/prod path below. 493 483 */ 484 + smp_mb(); 485 + if (READ_ONCE(info->done) == false) { 486 + pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying", 487 + smp_processor_id()); 488 + goto retry; 489 + } 494 490 ret = 0; 495 491 break; 496 492 case H_BAD_MODE: ··· 516 488 517 489 if (atomic_inc_return(counter) == 1) { 518 490 pr_info("CPU %u waking all threads\n", smp_processor_id()); 491 + WRITE_ONCE(info->done, true); 492 + /* 493 + * This barrier orders the store to info->done vs subsequent 494 + * H_PRODs to wake the other CPUs. It pairs with the barrier 495 + * in the H_SUCCESS case above. 496 + */ 497 + smp_mb(); 519 498 prod_others(); 520 499 } 521 500 /* ··· 570 535 int ret; 571 536 572 537 while (true) { 573 - atomic_t counter = ATOMIC_INIT(0); 538 + struct pseries_suspend_info info; 574 539 unsigned long vasi_state; 575 540 int vasi_err; 576 541 577 - ret = stop_machine(do_join, &counter, cpu_online_mask); 542 + info = (struct pseries_suspend_info) { 543 + .counter = ATOMIC_INIT(0), 544 + .done = false, 545 + }; 546 + 547 + ret = stop_machine(do_join, &info, cpu_online_mask); 578 548 if (ret == 0) 579 549 break; 580 550 /*
+1 -1
arch/riscv/Kconfig
···
 # Common NUMA Features
 config NUMA
 	bool "NUMA Memory Allocation and Scheduler Support"
-	depends on SMP
+	depends on SMP && MMU
 	select GENERIC_ARCH_NUMA
 	select OF_NUMA
 	select ARCH_SUPPORTS_NUMA_BALANCING
+5 -2
arch/riscv/include/asm/uaccess.h
··· 306 306 * data types like structures or arrays. 307 307 * 308 308 * @ptr must have pointer-to-simple-variable type, and @x must be assignable 309 - * to the result of dereferencing @ptr. 309 + * to the result of dereferencing @ptr. The value of @x is copied to avoid 310 + * re-ordering where @x is evaluated inside the block that enables user-space 311 + * access (thus bypassing user space protection if @x is a function). 310 312 * 311 313 * Caller must check the pointer with access_ok() before calling this 312 314 * function. ··· 318 316 #define __put_user(x, ptr) \ 319 317 ({ \ 320 318 __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ 319 + __typeof__(*__gu_ptr) __val = (x); \ 321 320 long __pu_err = 0; \ 322 321 \ 323 322 __chk_user_ptr(__gu_ptr); \ 324 323 \ 325 324 __enable_user_access(); \ 326 - __put_user_nocheck(x, __gu_ptr, __pu_err); \ 325 + __put_user_nocheck(__val, __gu_ptr, __pu_err); \ 327 326 __disable_user_access(); \ 328 327 \ 329 328 __pu_err; \
+1
arch/riscv/kernel/entry.S
···
 #endif
 
 	.section ".rodata"
+	.align LGREG
 	/* Exception vector table */
 ENTRY(excp_vect_table)
 	RISCV_PTR do_trap_insn_misaligned
+1 -1
arch/riscv/kernel/stacktrace.c
···
 
 #include <asm/stacktrace.h>
 
-register const unsigned long sp_in_global __asm__("sp");
+register unsigned long sp_in_global __asm__("sp");
 
 #ifdef CONFIG_FRAME_POINTER
 
+1 -1
arch/riscv/mm/kasan_init.c
···
 			break;
 
 		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
-	};
+	}
 
 	for (i = 0; i < PTRS_PER_PTE; i++)
 		set_pte(&kasan_early_shadow_pte[i],
+1 -1
arch/s390/include/asm/vdso/data.h
···
 #include <vdso/datapage.h>
 
 struct arch_vdso_data {
-	__u64 tod_steering_delta;
+	__s64 tod_steering_delta;
 	__u64 tod_steering_end;
 };
 
+8 -2
arch/s390/kernel/time.c
··· 80 80 { 81 81 struct ptff_qto qto; 82 82 struct ptff_qui qui; 83 + int cs; 83 84 84 85 /* Initialize TOD steering parameters */ 85 86 tod_steering_end = tod_clock_base.tod; 86 - vdso_data->arch_data.tod_steering_end = tod_steering_end; 87 + for (cs = 0; cs < CS_BASES; cs++) 88 + vdso_data[cs].arch_data.tod_steering_end = tod_steering_end; 87 89 88 90 if (!test_facility(28)) 89 91 return; ··· 368 366 { 369 367 unsigned long now, adj; 370 368 struct ptff_qto qto; 369 + int cs; 371 370 372 371 /* Fixup the monotonic sched clock. */ 373 372 tod_clock_base.eitod += delta; ··· 384 381 panic("TOD clock sync offset %li is too large to drift\n", 385 382 tod_steering_delta); 386 383 tod_steering_end = now + (abs(tod_steering_delta) << 15); 387 - vdso_data->arch_data.tod_steering_end = tod_steering_end; 384 + for (cs = 0; cs < CS_BASES; cs++) { 385 + vdso_data[cs].arch_data.tod_steering_end = tod_steering_end; 386 + vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta; 387 + } 388 388 389 389 /* Update LPAR offset. */ 390 390 if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
+1 -1
arch/x86/Makefile
···
 REALMODE_CFLAGS	:= -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
 		   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
 		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
-		   -mno-mmx -mno-sse
+		   -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
 
 REALMODE_CFLAGS += -ffreestanding
 REALMODE_CFLAGS += -fno-stack-protector
+1
arch/x86/include/asm/smp.h
···
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
+bool wakeup_cpu0(void);
 
 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
-12
arch/x86/include/asm/xen/page.h
···
 #endif
 
 /*
- * The maximum amount of extra memory compared to the base size. The
- * main scaling factor is the size of struct page. At extreme ratios
- * of base:extra, all the base memory can be filled with page
- * structures for the extra memory, leaving no space for anything
- * else.
- *
- * 10x seems like a reasonable balance between scaling flexibility and
- * leaving a practically usable system.
- */
-#define XEN_EXTRA_MEM_RATIO (10)
-
-/*
  * Helper functions to write or read unsigned long values to/from
  * memory, when the access may fault.
  */
+12 -13
arch/x86/kernel/acpi/boot.c
··· 1554 1554 /* 1555 1555 * Initialize the ACPI boot-time table parser. 1556 1556 */ 1557 - if (acpi_table_init()) { 1557 + if (acpi_locate_initial_tables()) 1558 1558 disable_acpi(); 1559 - return; 1560 - } 1559 + else 1560 + acpi_reserve_initial_tables(); 1561 + } 1562 + 1563 + int __init early_acpi_boot_init(void) 1564 + { 1565 + if (acpi_disabled) 1566 + return 1; 1567 + 1568 + acpi_table_init_complete(); 1561 1569 1562 1570 acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); 1563 1571 ··· 1578 1570 } else { 1579 1571 printk(KERN_WARNING PREFIX "Disabling ACPI support\n"); 1580 1572 disable_acpi(); 1581 - return; 1573 + return 1; 1582 1574 } 1583 1575 } 1584 - } 1585 - 1586 - int __init early_acpi_boot_init(void) 1587 - { 1588 - /* 1589 - * If acpi_disabled, bail out 1590 - */ 1591 - if (acpi_disabled) 1592 - return 1; 1593 1576 1594 1577 /* 1595 1578 * Process the Multiple APIC Description Table (MADT), if present
+3 -5
arch/x86/kernel/setup.c
··· 1045 1045 1046 1046 cleanup_highmap(); 1047 1047 1048 + /* Look for ACPI tables and reserve memory occupied by them. */ 1049 + acpi_boot_table_init(); 1050 + 1048 1051 memblock_set_current_limit(ISA_END_ADDRESS); 1049 1052 e820__memblock_setup(); 1050 1053 ··· 1138 1135 io_delay_init(); 1139 1136 1140 1137 early_platform_quirks(); 1141 - 1142 - /* 1143 - * Parse the ACPI tables for possible boot-time SMP configuration. 1144 - */ 1145 - acpi_boot_table_init(); 1146 1138 1147 1139 early_acpi_boot_init(); 1148 1140
+1 -1
arch/x86/kernel/smpboot.c
···
 	local_irq_disable();
 }
 
-static bool wakeup_cpu0(void)
+bool wakeup_cpu0(void)
 {
 	if (smp_processor_id() == 0 && enable_start_cpu0)
 		return true;
+1 -1
arch/x86/kvm/Makefile
···
 # SPDX-License-Identifier: GPL-2.0
 
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -I $(srctree)/arch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
+5 -4
arch/x86/kvm/mmu/mmu.c
··· 5884 5884 struct kvm_mmu_page *sp; 5885 5885 unsigned int ratio; 5886 5886 LIST_HEAD(invalid_list); 5887 + bool flush = false; 5887 5888 ulong to_zap; 5888 5889 5889 5890 rcu_idx = srcu_read_lock(&kvm->srcu); ··· 5906 5905 lpage_disallowed_link); 5907 5906 WARN_ON_ONCE(!sp->lpage_disallowed); 5908 5907 if (is_tdp_mmu_page(sp)) { 5909 - kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, 5910 - sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level)); 5908 + flush = kvm_tdp_mmu_zap_sp(kvm, sp); 5911 5909 } else { 5912 5910 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); 5913 5911 WARN_ON_ONCE(sp->lpage_disallowed); 5914 5912 } 5915 5913 5916 5914 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { 5917 - kvm_mmu_commit_zap_page(kvm, &invalid_list); 5915 + kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); 5918 5916 cond_resched_rwlock_write(&kvm->mmu_lock); 5917 + flush = false; 5919 5918 } 5920 5919 } 5921 - kvm_mmu_commit_zap_page(kvm, &invalid_list); 5920 + kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); 5922 5921 5923 5922 write_unlock(&kvm->mmu_lock); 5924 5923 srcu_read_unlock(&kvm->srcu, rcu_idx);
+14 -12
arch/x86/kvm/mmu/tdp_mmu.c
··· 86 86 list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) 87 87 88 88 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, 89 - gfn_t start, gfn_t end, bool can_yield); 89 + gfn_t start, gfn_t end, bool can_yield, bool flush); 90 90 91 91 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root) 92 92 { ··· 99 99 100 100 list_del(&root->link); 101 101 102 - zap_gfn_range(kvm, root, 0, max_gfn, false); 102 + zap_gfn_range(kvm, root, 0, max_gfn, false, false); 103 103 104 104 free_page((unsigned long)root->spt); 105 105 kmem_cache_free(mmu_page_header_cache, root); ··· 668 668 * scheduler needs the CPU or there is contention on the MMU lock. If this 669 669 * function cannot yield, it will not release the MMU lock or reschedule and 670 670 * the caller must ensure it does not supply too large a GFN range, or the 671 - * operation can cause a soft lockup. 671 + * operation can cause a soft lockup. Note, in some use cases a flush may be 672 + * required by prior actions. Ensure the pending flush is performed prior to 673 + * yielding. 672 674 */ 673 675 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, 674 - gfn_t start, gfn_t end, bool can_yield) 676 + gfn_t start, gfn_t end, bool can_yield, bool flush) 675 677 { 676 678 struct tdp_iter iter; 677 - bool flush_needed = false; 678 679 679 680 rcu_read_lock(); 680 681 681 682 tdp_root_for_each_pte(iter, root, start, end) { 682 683 if (can_yield && 683 - tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) { 684 - flush_needed = false; 684 + tdp_mmu_iter_cond_resched(kvm, &iter, flush)) { 685 + flush = false; 685 686 continue; 686 687 } 687 688 ··· 700 699 continue; 701 700 702 701 tdp_mmu_set_spte(kvm, &iter, 0); 703 - flush_needed = true; 702 + flush = true; 704 703 } 705 704 706 705 rcu_read_unlock(); 707 - return flush_needed; 706 + return flush; 708 707 } 709 708 710 709 /* ··· 713 712 * SPTEs have been cleared and a TLB flush is needed before releasing the 714 713 * MMU lock. 715 714 */ 716 - bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end) 715 + bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end, 716 + bool can_yield) 717 717 { 718 718 struct kvm_mmu_page *root; 719 719 bool flush = false; 720 720 721 721 for_each_tdp_mmu_root_yield_safe(kvm, root) 722 - flush |= zap_gfn_range(kvm, root, start, end, true); 722 + flush = zap_gfn_range(kvm, root, start, end, can_yield, flush); 723 723 724 724 return flush; 725 725 } ··· 932 930 struct kvm_mmu_page *root, gfn_t start, 933 931 gfn_t end, unsigned long unused) 934 932 { 935 - return zap_gfn_range(kvm, root, start, end, false); 933 + return zap_gfn_range(kvm, root, start, end, false, false); 936 934 } 937 935 938 936 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
+23 -1
arch/x86/kvm/mmu/tdp_mmu.h
··· 8 8 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu); 9 9 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root); 10 10 11 - bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end); 11 + bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end, 12 + bool can_yield); 13 + static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, 14 + gfn_t end) 15 + { 16 + return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true); 17 + } 18 + static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) 19 + { 20 + gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level); 21 + 22 + /* 23 + * Don't allow yielding, as the caller may have a flush pending. Note, 24 + * if mmu_lock is held for write, zapping will never yield in this case, 25 + * but explicitly disallow it for safety. The TDP MMU does not yield 26 + * until it has made forward progress (steps sideways), and when zapping 27 + * a single shadow page that it's guaranteed to see (thus the mmu_lock 28 + * requirement), its "step sideways" will always step beyond the bounds 29 + * of the shadow page's gfn range and stop iterating before yielding. 30 + */ 31 + lockdep_assert_held_write(&kvm->mmu_lock); 32 + return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false); 33 + } 12 34 void kvm_tdp_mmu_zap_all(struct kvm *kvm); 13 35 14 36 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+23 -5
arch/x86/kvm/svm/nested.c
··· 246 246 return true; 247 247 } 248 248 249 - static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12) 249 + static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12) 250 250 { 251 251 struct kvm_vcpu *vcpu = &svm->vcpu; 252 252 bool vmcb12_lma; 253 253 254 + /* 255 + * FIXME: these should be done after copying the fields, 256 + * to avoid TOC/TOU races. For these save area checks 257 + * the possible damage is limited since kvm_set_cr0 and 258 + * kvm_set_cr4 handle failure; EFER_SVME is an exception 259 + * so it is force-set later in nested_prepare_vmcb_save. 260 + */ 254 261 if ((vmcb12->save.efer & EFER_SVME) == 0) 255 262 return false; 256 263 ··· 278 271 if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4)) 279 272 return false; 280 273 281 - return nested_vmcb_check_controls(&vmcb12->control); 274 + return true; 282 275 } 283 276 284 277 static void load_nested_vmcb_control(struct vcpu_svm *svm, ··· 403 396 svm->vmcb->save.gdtr = vmcb12->save.gdtr; 404 397 svm->vmcb->save.idtr = vmcb12->save.idtr; 405 398 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED); 406 - svm_set_efer(&svm->vcpu, vmcb12->save.efer); 399 + 400 + /* 401 + * Force-set EFER_SVME even though it is checked earlier on the 402 + * VMCB12, because the guest can flip the bit between the check 403 + * and now. Clearing EFER_SVME would call svm_free_nested. 404 + */ 405 + svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME); 406 + 407 407 svm_set_cr0(&svm->vcpu, vmcb12->save.cr0); 408 408 svm_set_cr4(&svm->vcpu, vmcb12->save.cr4); 409 409 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2; ··· 482 468 483 469 484 470 svm->nested.vmcb12_gpa = vmcb12_gpa; 485 - load_nested_vmcb_control(svm, &vmcb12->control); 486 471 nested_prepare_vmcb_control(svm); 487 472 nested_prepare_vmcb_save(svm, vmcb12); 488 473 ··· 528 515 if (WARN_ON_ONCE(!svm->nested.initialized)) 529 516 return -EINVAL; 530 517 531 - if (!nested_vmcb_checks(svm, vmcb12)) { 518 + load_nested_vmcb_control(svm, &vmcb12->control); 519 + 520 + if (!nested_vmcb_check_save(svm, vmcb12) || 521 + !nested_vmcb_check_controls(&svm->nested.ctl)) { 532 522 vmcb12->control.exit_code = SVM_EXIT_ERR; 533 523 vmcb12->control.exit_code_hi = 0; 534 524 vmcb12->control.exit_info_1 = 0; ··· 1224 1208 * TODO: validate reserved bits for all saved state. 1225 1209 */ 1226 1210 if (!(save->cr0 & X86_CR0_PG)) 1211 + goto out_free; 1212 + if (!(save->efer & EFER_SVME)) 1227 1213 goto out_free; 1228 1214 1229 1215 /*
+8
arch/x86/kvm/svm/pmu.c
··· 98 98 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, 99 99 enum pmu_type type) 100 100 { 101 + struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); 102 + 101 103 switch (msr) { 102 104 case MSR_F15H_PERF_CTL0: 103 105 case MSR_F15H_PERF_CTL1: ··· 107 105 case MSR_F15H_PERF_CTL3: 108 106 case MSR_F15H_PERF_CTL4: 109 107 case MSR_F15H_PERF_CTL5: 108 + if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) 109 + return NULL; 110 + fallthrough; 110 111 case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: 111 112 if (type != PMU_TYPE_EVNTSEL) 112 113 return NULL; ··· 120 115 case MSR_F15H_PERF_CTR3: 121 116 case MSR_F15H_PERF_CTR4: 122 117 case MSR_F15H_PERF_CTR5: 118 + if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) 119 + return NULL; 120 + fallthrough; 123 121 case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3: 124 122 if (type != PMU_TYPE_COUNTER) 125 123 return NULL;
+36 -21
arch/x86/kvm/x86.c
··· 271 271 * When called, it means the previous get/set msr reached an invalid msr. 272 272 * Return true if we want to ignore/silent this failed msr access. 273 273 */ 274 - static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr, 275 - u64 data, bool write) 274 + static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write) 276 275 { 277 276 const char *op = write ? "wrmsr" : "rdmsr"; 278 277 ··· 1444 1445 if (r == KVM_MSR_RET_INVALID) { 1445 1446 /* Unconditionally clear the output for simplicity */ 1446 1447 *data = 0; 1447 - if (kvm_msr_ignored_check(vcpu, index, 0, false)) 1448 + if (kvm_msr_ignored_check(index, 0, false)) 1448 1449 r = 0; 1449 1450 } 1450 1451 ··· 1619 1620 int ret = __kvm_set_msr(vcpu, index, data, host_initiated); 1620 1621 1621 1622 if (ret == KVM_MSR_RET_INVALID) 1622 - if (kvm_msr_ignored_check(vcpu, index, data, true)) 1623 + if (kvm_msr_ignored_check(index, data, true)) 1623 1624 ret = 0; 1624 1625 1625 1626 return ret; ··· 1657 1658 if (ret == KVM_MSR_RET_INVALID) { 1658 1659 /* Unconditionally clear *data for simplicity */ 1659 1660 *data = 0; 1660 - if (kvm_msr_ignored_check(vcpu, index, 0, false)) 1661 + if (kvm_msr_ignored_check(index, 0, false)) 1661 1662 ret = 0; 1662 1663 } 1663 1664 ··· 2328 2329 kvm_vcpu_write_tsc_offset(vcpu, offset); 2329 2330 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); 2330 2331 2331 - spin_lock(&kvm->arch.pvclock_gtod_sync_lock); 2332 + spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags); 2332 2333 if (!matched) { 2333 2334 kvm->arch.nr_vcpus_matched_tsc = 0; 2334 2335 } else if (!already_matched) { ··· 2336 2337 } 2337 2338 2338 2339 kvm_track_tsc_matching(vcpu); 2339 - spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); 2340 + spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags); 2340 2341 } 2341 2342 2342 2343 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, ··· 2558 2559 int i; 2559 2560 struct kvm_vcpu *vcpu; 2560 2561 struct kvm_arch *ka = &kvm->arch; 2562 + unsigned long flags; 2561 2563 2562 2564 kvm_hv_invalidate_tsc_page(kvm); 2563 2565 2564 - spin_lock(&ka->pvclock_gtod_sync_lock); 2565 2566 kvm_make_mclock_inprogress_request(kvm); 2567 + 2566 2568 /* no guest entries from this point */ 2569 + spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); 2567 2570 pvclock_update_vm_gtod_copy(kvm); 2571 + spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); 2568 2572 2569 2573 kvm_for_each_vcpu(i, vcpu, kvm) 2570 2574 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); ··· 2575 2573 /* guest entries allowed */ 2576 2574 kvm_for_each_vcpu(i, vcpu, kvm) 2577 2575 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); 2578 - 2579 - spin_unlock(&ka->pvclock_gtod_sync_lock); 2580 2576 #endif 2581 2577 } 2582 2578 ··· 2582 2582 { 2583 2583 struct kvm_arch *ka = &kvm->arch; 2584 2584 struct pvclock_vcpu_time_info hv_clock; 2585 + unsigned long flags; 2585 2586 u64 ret; 2586 2587 2587 - spin_lock(&ka->pvclock_gtod_sync_lock); 2588 + spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); 2588 2589 if (!ka->use_master_clock) { 2589 - spin_unlock(&ka->pvclock_gtod_sync_lock); 2590 + spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); 2590 2591 return get_kvmclock_base_ns() + ka->kvmclock_offset; 2591 2592 } 2592 2593 2593 2594 hv_clock.tsc_timestamp = ka->master_cycle_now; 2594 2595 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; 2595 - spin_unlock(&ka->pvclock_gtod_sync_lock); 2596 + spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); 2596 
2597 2597 2598 /* both __this_cpu_read() and rdtsc() should be on the same cpu */ 2598 2599 get_cpu(); ··· 2687 2686 * If the host uses TSC clock, then passthrough TSC as stable 2688 2687 * to the guest. 2689 2688 */ 2690 - spin_lock(&ka->pvclock_gtod_sync_lock); 2689 + spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); 2691 2690 use_master_clock = ka->use_master_clock; 2692 2691 if (use_master_clock) { 2693 2692 host_tsc = ka->master_cycle_now; 2694 2693 kernel_ns = ka->master_kernel_ns; 2695 2694 } 2696 - spin_unlock(&ka->pvclock_gtod_sync_lock); 2695 + spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); 2697 2696 2698 2697 /* Keep irq disabled to prevent changes to the clock */ 2699 2698 local_irq_save(flags); ··· 5727 5726 } 5728 5727 #endif 5729 5728 case KVM_SET_CLOCK: { 5729 + struct kvm_arch *ka = &kvm->arch; 5730 5730 struct kvm_clock_data user_ns; 5731 5731 u64 now_ns; 5732 5732 ··· 5746 5744 * pvclock_update_vm_gtod_copy(). 5747 5745 */ 5748 5746 kvm_gen_update_masterclock(kvm); 5749 - now_ns = get_kvmclock_ns(kvm); 5750 - kvm->arch.kvmclock_offset += user_ns.clock - now_ns; 5747 + 5748 + /* 5749 + * This pairs with kvm_guest_time_update(): when masterclock is 5750 + * in use, we use master_kernel_ns + kvmclock_offset to set 5751 + * unsigned 'system_time' so if we use get_kvmclock_ns() (which 5752 + * is slightly ahead) here we risk going negative on unsigned 5753 + * 'system_time' when 'user_ns.clock' is very small. 5754 + */ 5755 + spin_lock_irq(&ka->pvclock_gtod_sync_lock); 5756 + if (kvm->arch.use_master_clock) 5757 + now_ns = ka->master_kernel_ns; 5758 + else 5759 + now_ns = get_kvmclock_base_ns(); 5760 + ka->kvmclock_offset = user_ns.clock - now_ns; 5761 + spin_unlock_irq(&ka->pvclock_gtod_sync_lock); 5762 + 5751 5763 kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE); 5752 5764 break; 5753 5765 } ··· 7740 7724 struct kvm *kvm; 7741 7725 struct kvm_vcpu *vcpu; 7742 7726 int cpu; 7727 + unsigned long flags; 7743 7728 7744 7729 mutex_lock(&kvm_lock); 7745 7730 list_for_each_entry(kvm, &vm_list, vm_list) ··· 7756 7739 list_for_each_entry(kvm, &vm_list, vm_list) { 7757 7740 struct kvm_arch *ka = &kvm->arch; 7758 7741 7759 - spin_lock(&ka->pvclock_gtod_sync_lock); 7760 - 7742 + spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags); 7761 7743 pvclock_update_vm_gtod_copy(kvm); 7744 + spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags); 7762 7745 7763 7746 kvm_for_each_vcpu(cpu, vcpu, kvm) 7764 7747 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); 7765 7748 7766 7749 kvm_for_each_vcpu(cpu, vcpu, kvm) 7767 7750 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); 7768 - 7769 - spin_unlock(&ka->pvclock_gtod_sync_lock); 7770 7751 } 7771 7752 mutex_unlock(&kvm_lock); 7772 7753 }
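The x86.c hunk above converts the pvclock_gtod_sync_lock users from plain spin_lock()/spin_unlock() to the IRQ-safe variants, presumably so the lock can also be taken from paths that run with interrupts disabled. A minimal, generic sketch of that locking pattern (not the KVM code itself):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(example_lock);
    static u64 example_counter;

    /* Safe to call whether or not the caller already has interrupts disabled. */
    static void example_update(u64 delta)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);        /* saves IRQ state, then disables IRQs */
            example_counter += delta;
            spin_unlock_irqrestore(&example_lock, flags);   /* restores the saved IRQ state */
    }

Because the previous interrupt state is kept in flags, the same helper works from ordinary process context and from sections that already run with interrupts off.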
-1
arch/x86/kvm/x86.h
··· 250 250 void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs); 251 251 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); 252 252 253 - void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); 254 253 u64 get_kvmclock_ns(struct kvm *kvm); 255 254 256 255 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+1 -1
arch/x86/mm/mem_encrypt.c
··· 262 262 if (pgprot_val(old_prot) == pgprot_val(new_prot)) 263 263 return; 264 264 265 - pa = pfn << page_level_shift(level); 265 + pa = pfn << PAGE_SHIFT; 266 266 size = page_level_size(level); 267 267 268 268 /*
+25 -6
arch/x86/net/bpf_jit_comp.c
··· 1936 1936 * add rsp, 8 // skip eth_type_trans's frame 1937 1937 * ret // return to its caller 1938 1938 */ 1939 - int arch_prepare_bpf_trampoline(void *image, void *image_end, 1939 + int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, 1940 1940 const struct btf_func_model *m, u32 flags, 1941 1941 struct bpf_tramp_progs *tprogs, 1942 1942 void *orig_call) ··· 1975 1975 1976 1976 save_regs(m, &prog, nr_args, stack_size); 1977 1977 1978 + if (flags & BPF_TRAMP_F_CALL_ORIG) { 1979 + /* arg1: mov rdi, im */ 1980 + emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 1981 + if (emit_call(&prog, __bpf_tramp_enter, prog)) { 1982 + ret = -EINVAL; 1983 + goto cleanup; 1984 + } 1985 + } 1986 + 1978 1987 if (fentry->nr_progs) 1979 1988 if (invoke_bpf(m, &prog, fentry, stack_size)) 1980 1989 return -EINVAL; ··· 2002 1993 } 2003 1994 2004 1995 if (flags & BPF_TRAMP_F_CALL_ORIG) { 2005 - if (fentry->nr_progs || fmod_ret->nr_progs) 2006 - restore_regs(m, &prog, nr_args, stack_size); 1996 + restore_regs(m, &prog, nr_args, stack_size); 2007 1997 2008 1998 /* call original function */ 2009 1999 if (emit_call(&prog, orig_call, prog)) { ··· 2011 2003 } 2012 2004 /* remember return value in a stack for bpf prog to access */ 2013 2005 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 2006 + im->ip_after_call = prog; 2007 + memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE); 2008 + prog += X86_PATCH_SIZE; 2014 2009 } 2015 2010 2016 2011 if (fmod_ret->nr_progs) { ··· 2044 2033 * the return value is only updated on the stack and still needs to be 2045 2034 * restored to R0. 2046 2035 */ 2047 - if (flags & BPF_TRAMP_F_CALL_ORIG) 2036 + if (flags & BPF_TRAMP_F_CALL_ORIG) { 2037 + im->ip_epilogue = prog; 2038 + /* arg1: mov rdi, im */ 2039 + emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); 2040 + if (emit_call(&prog, __bpf_tramp_exit, prog)) { 2041 + ret = -EINVAL; 2042 + goto cleanup; 2043 + } 2048 2044 /* restore original return value back into RAX */ 2049 2045 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 2046 + } 2050 2047 2051 2048 EMIT1(0x5B); /* pop rbx */ 2052 2049 EMIT1(0xC9); /* leave */ ··· 2244 2225 padding = true; 2245 2226 goto skip_init_addrs; 2246 2227 } 2247 - addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); 2228 + addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); 2248 2229 if (!addrs) { 2249 2230 prog = orig_prog; 2250 2231 goto out_addrs; ··· 2336 2317 if (image) 2337 2318 bpf_prog_fill_jited_linfo(prog, addrs + 1); 2338 2319 out_addrs: 2339 - kfree(addrs); 2320 + kvfree(addrs); 2340 2321 kfree(jit_data); 2341 2322 prog->aux->jit_data = NULL; 2342 2323 }
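In the trampoline changes above, the image pointer is handed to emit_mov_imm64() as two 32-bit halves, `(long) im >> 32` and `(u32) (long) im`. A standalone illustration of that split-and-recombine convention, in ordinary userspace C with a made-up address, purely for clarity:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t im = 0x00007f80deadbeefULL;        /* stand-in for the image address */
            uint32_t hi = im >> 32;                     /* upper half, like (long) im >> 32 */
            uint32_t lo = (uint32_t)im;                 /* lower half, like (u32) (long) im */
            uint64_t rebuilt = ((uint64_t)hi << 32) | lo;

            printf("hi=%#" PRIx32 " lo=%#" PRIx32 " rebuilt=%#" PRIx64 "\n", hi, lo, rebuilt);
            return rebuilt == im ? 0 : 1;
    }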
+2 -5
arch/x86/xen/p2m.c
··· 98 98 unsigned long xen_max_p2m_pfn __read_mostly; 99 99 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); 100 100 101 - #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT 102 - #define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT 101 + #ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT 102 + #define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT 103 103 #else 104 104 #define P2M_LIMIT 0 105 105 #endif ··· 416 416 xen_p2m_last_pfn = xen_max_p2m_pfn; 417 417 418 418 p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; 419 - if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC)) 420 - p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO; 421 - 422 419 vm.flags = VM_ALLOC; 423 420 vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), 424 421 PMD_SIZE * PMDS_PER_MID_PAGE);
+14 -2
arch/x86/xen/setup.c
··· 59 59 } xen_remap_buf __initdata __aligned(PAGE_SIZE); 60 60 static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY; 61 61 62 + /* 63 + * The maximum amount of extra memory compared to the base size. The 64 + * main scaling factor is the size of struct page. At extreme ratios 65 + * of base:extra, all the base memory can be filled with page 66 + * structures for the extra memory, leaving no space for anything 67 + * else. 68 + * 69 + * 10x seems like a reasonable balance between scaling flexibility and 70 + * leaving a practically usable system. 71 + */ 72 + #define EXTRA_MEM_RATIO (10) 73 + 62 74 static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB); 63 75 64 76 static void __init xen_parse_512gb(void) ··· 790 778 extra_pages += max_pages - max_pfn; 791 779 792 780 /* 793 - * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO 781 + * Clamp the amount of extra memory to a EXTRA_MEM_RATIO 794 782 * factor the base size. 795 783 * 796 784 * Make sure we have no memory above max_pages, as this area 797 785 * isn't handled by the p2m management. 798 786 */ 799 - extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), 787 + extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), 800 788 extra_pages, max_pages - max_pfn); 801 789 i = 0; 802 790 addr = xen_e820_table.entries[0].addr;
+33 -31
arch/xtensa/kernel/coprocessor.S
··· 100 100 LOAD_CP_REGS_TAB(7) 101 101 102 102 /* 103 - * coprocessor_flush(struct thread_info*, index) 104 - * a2 a3 105 - * 106 - * Save coprocessor registers for coprocessor 'index'. 107 - * The register values are saved to or loaded from the coprocessor area 108 - * inside the task_info structure. 109 - * 110 - * Note that this function doesn't update the coprocessor_owner information! 111 - * 112 - */ 113 - 114 - ENTRY(coprocessor_flush) 115 - 116 - /* reserve 4 bytes on stack to save a0 */ 117 - abi_entry(4) 118 - 119 - s32i a0, a1, 0 120 - movi a0, .Lsave_cp_regs_jump_table 121 - addx8 a3, a3, a0 122 - l32i a4, a3, 4 123 - l32i a3, a3, 0 124 - add a2, a2, a4 125 - beqz a3, 1f 126 - callx0 a3 127 - 1: l32i a0, a1, 0 128 - 129 - abi_ret(4) 130 - 131 - ENDPROC(coprocessor_flush) 132 - 133 - /* 134 103 * Entry condition: 135 104 * 136 105 * a0: trashed, original value saved on stack (PT_AREG0) ··· 213 244 rfe 214 245 215 246 ENDPROC(fast_coprocessor) 247 + 248 + .text 249 + 250 + /* 251 + * coprocessor_flush(struct thread_info*, index) 252 + * a2 a3 253 + * 254 + * Save coprocessor registers for coprocessor 'index'. 255 + * The register values are saved to or loaded from the coprocessor area 256 + * inside the task_info structure. 257 + * 258 + * Note that this function doesn't update the coprocessor_owner information! 259 + * 260 + */ 261 + 262 + ENTRY(coprocessor_flush) 263 + 264 + /* reserve 4 bytes on stack to save a0 */ 265 + abi_entry(4) 266 + 267 + s32i a0, a1, 0 268 + movi a0, .Lsave_cp_regs_jump_table 269 + addx8 a3, a3, a0 270 + l32i a4, a3, 4 271 + l32i a3, a3, 0 272 + add a2, a2, a4 273 + beqz a3, 1f 274 + callx0 a3 275 + 1: l32i a0, a1, 0 276 + 277 + abi_ret(4) 278 + 279 + ENDPROC(coprocessor_flush) 216 280 217 281 .data 218 282
+4 -1
arch/xtensa/mm/fault.c
··· 112 112 */ 113 113 fault = handle_mm_fault(vma, address, flags, regs); 114 114 115 - if (fault_signal_pending(fault, regs)) 115 + if (fault_signal_pending(fault, regs)) { 116 + if (!user_mode(regs)) 117 + goto bad_page_fault; 116 118 return; 119 + } 117 120 118 121 if (unlikely(fault & VM_FAULT_ERROR)) { 119 122 if (fault & VM_FAULT_OOM)
+19 -4
block/bio.c
··· 277 277 { 278 278 struct bio *parent = bio->bi_private; 279 279 280 - if (!parent->bi_status) 280 + if (bio->bi_status && !parent->bi_status) 281 281 parent->bi_status = bio->bi_status; 282 282 bio_put(bio); 283 283 return parent; ··· 949 949 } 950 950 EXPORT_SYMBOL_GPL(bio_release_pages); 951 951 952 - static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) 952 + static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) 953 953 { 954 954 WARN_ON_ONCE(bio->bi_max_vecs); 955 955 ··· 959 959 bio->bi_iter.bi_size = iter->count; 960 960 bio_set_flag(bio, BIO_NO_PAGE_REF); 961 961 bio_set_flag(bio, BIO_CLONED); 962 + } 962 963 964 + static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) 965 + { 966 + __bio_iov_bvec_set(bio, iter); 963 967 iov_iter_advance(iter, iter->count); 968 + return 0; 969 + } 970 + 971 + static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter) 972 + { 973 + struct request_queue *q = bio->bi_bdev->bd_disk->queue; 974 + struct iov_iter i = *iter; 975 + 976 + iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9); 977 + __bio_iov_bvec_set(bio, &i); 978 + iov_iter_advance(iter, i.count); 964 979 return 0; 965 980 } 966 981 ··· 1109 1094 int ret = 0; 1110 1095 1111 1096 if (iov_iter_is_bvec(iter)) { 1112 - if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) 1113 - return -EINVAL; 1097 + if (bio_op(bio) == REQ_OP_ZONE_APPEND) 1098 + return bio_iov_bvec_set_append(bio, iter); 1114 1099 return bio_iov_bvec_set(bio, iter); 1115 1100 } 1116 1101
+8
block/blk-merge.c
··· 382 382 switch (bio_op(rq->bio)) { 383 383 case REQ_OP_DISCARD: 384 384 case REQ_OP_SECURE_ERASE: 385 + if (queue_max_discard_segments(rq->q) > 1) { 386 + struct bio *bio = rq->bio; 387 + 388 + for_each_bio(bio) 389 + nr_phys_segs++; 390 + return nr_phys_segs; 391 + } 392 + return 1; 385 393 case REQ_OP_WRITE_ZEROES: 386 394 return 0; 387 395 case REQ_OP_WRITE_SAME:
-1
block/blk-mq-debugfs.c
··· 302 302 RQF_NAME(QUIET), 303 303 RQF_NAME(ELVPRIV), 304 304 RQF_NAME(IO_STAT), 305 - RQF_NAME(ALLOCED), 306 305 RQF_NAME(PM), 307 306 RQF_NAME(HASHED), 308 307 RQF_NAME(STATS),
+7
block/partitions/core.c
··· 323 323 int err; 324 324 325 325 /* 326 + * disk_max_parts() won't be zero, either GENHD_FL_EXT_DEVT is set 327 + * or 'minors' is passed to alloc_disk(). 328 + */ 329 + if (partno >= disk_max_parts(disk)) 330 + return ERR_PTR(-EINVAL); 331 + 332 + /* 326 333 * Partitions are not supported on zoned block devices that are used as 327 334 * such. 328 335 */
+1 -2
drivers/acpi/acpica/nsaccess.c
··· 99 99 * just create and link the new node(s) here. 100 100 */ 101 101 new_node = 102 - ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node)); 102 + acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name)); 103 103 if (!new_node) { 104 104 status = AE_NO_MEMORY; 105 105 goto unlock_and_exit; 106 106 } 107 107 108 - ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name); 109 108 new_node->descriptor_type = ACPI_DESC_TYPE_NAMED; 110 109 new_node->type = init_val->type; 111 110
+5 -1
drivers/acpi/internal.h
··· 9 9 #ifndef _ACPI_INTERNAL_H_ 10 10 #define _ACPI_INTERNAL_H_ 11 11 12 + #include <linux/idr.h> 13 + 12 14 #define PREFIX "ACPI: " 13 15 14 16 int early_acpi_osi_init(void); ··· 98 96 99 97 extern struct list_head acpi_bus_id_list; 100 98 99 + #define ACPI_MAX_DEVICE_INSTANCES 4096 100 + 101 101 struct acpi_device_bus_id { 102 102 const char *bus_id; 103 - unsigned int instance_no; 103 + struct ida instance_ida; 104 104 struct list_head node; 105 105 }; 106 106
+7
drivers/acpi/processor_idle.c
··· 29 29 */ 30 30 #ifdef CONFIG_X86 31 31 #include <asm/apic.h> 32 + #include <asm/cpu.h> 32 33 #endif 33 34 34 35 #define _COMPONENT ACPI_PROCESSOR_COMPONENT ··· 542 541 wait_for_freeze(); 543 542 } else 544 543 return -ENODEV; 544 + 545 + #if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU) 546 + /* If NMI wants to wake up CPU0, start CPU0. */ 547 + if (wakeup_cpu0()) 548 + start_cpu0(); 549 + #endif 545 550 } 546 551 547 552 /* Never reached */
+39 -6
drivers/acpi/scan.c
··· 479 479 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) 480 480 if (!strcmp(acpi_device_bus_id->bus_id, 481 481 acpi_device_hid(device))) { 482 - if (acpi_device_bus_id->instance_no > 0) 483 - acpi_device_bus_id->instance_no--; 484 - else { 482 + ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no); 483 + if (ida_is_empty(&acpi_device_bus_id->instance_ida)) { 485 484 list_del(&acpi_device_bus_id->node); 486 485 kfree_const(acpi_device_bus_id->bus_id); 487 486 kfree(acpi_device_bus_id); ··· 630 631 return NULL; 631 632 } 632 633 634 + static int acpi_device_set_name(struct acpi_device *device, 635 + struct acpi_device_bus_id *acpi_device_bus_id) 636 + { 637 + struct ida *instance_ida = &acpi_device_bus_id->instance_ida; 638 + int result; 639 + 640 + result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL); 641 + if (result < 0) 642 + return result; 643 + 644 + device->pnp.instance_no = result; 645 + dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result); 646 + return 0; 647 + } 648 + 633 649 int acpi_device_add(struct acpi_device *device, 634 650 void (*release)(struct device *)) 635 651 { ··· 679 665 680 666 acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device)); 681 667 if (acpi_device_bus_id) { 682 - acpi_device_bus_id->instance_no++; 668 + result = acpi_device_set_name(device, acpi_device_bus_id); 669 + if (result) 670 + goto err_unlock; 683 671 } else { 684 672 acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id), 685 673 GFP_KERNEL); ··· 697 681 goto err_unlock; 698 682 } 699 683 684 + ida_init(&acpi_device_bus_id->instance_ida); 685 + 686 + result = acpi_device_set_name(device, acpi_device_bus_id); 687 + if (result) { 688 + kfree(acpi_device_bus_id); 689 + goto err_unlock; 690 + } 691 + 700 692 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); 701 693 } 702 - dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no); 703 694 704 695 if (device->parent) 705 696 list_add_tail(&device->node, &device->parent->children); ··· 1670 1647 device_initialize(&device->dev); 1671 1648 dev_set_uevent_suppress(&device->dev, true); 1672 1649 acpi_init_coherency(device); 1650 + /* Assume there are unmet deps to start with. */ 1651 + device->dep_unmet = 1; 1673 1652 } 1674 1653 1675 1654 void acpi_device_add_finalize(struct acpi_device *device) ··· 1935 1910 { 1936 1911 struct acpi_dep_data *dep; 1937 1912 1913 + adev->dep_unmet = 0; 1914 + 1938 1915 mutex_lock(&acpi_dep_list_lock); 1939 1916 1940 1917 list_for_each_entry(dep, &acpi_dep_list, node) { ··· 1984 1957 return AE_CTRL_DEPTH; 1985 1958 1986 1959 acpi_scan_init_hotplug(device); 1987 - if (!check_dep) 1960 + /* 1961 + * If check_dep is true at this point, the device has no dependencies, 1962 + * or the creation of the device object would have been postponed above. 1963 + */ 1964 + if (check_dep) 1965 + device->dep_unmet = 0; 1966 + else 1988 1967 acpi_scan_dep_init(device); 1989 1968 1990 1969 out:
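The scan.c hunk replaces the per-HID instance counter with an IDA so instance numbers can be reused once devices go away. A minimal sketch of the allocation pattern, with a hypothetical MAX_INSTANCES bound standing in for ACPI_MAX_DEVICE_INSTANCES (illustrative only, not the actual ACPI code):

    #include <linux/idr.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>

    #define MAX_INSTANCES   4096    /* hypothetical upper bound, exclusive */

    static DEFINE_IDA(example_instance_ida);

    static int example_alloc_instance(void)
    {
            /* Smallest free id in [0, MAX_INSTANCES), or -ENOSPC/-ENOMEM on failure. */
            return ida_simple_get(&example_instance_ida, 0, MAX_INSTANCES, GFP_KERNEL);
    }

    static void example_free_instance(int id)
    {
            ida_simple_remove(&example_instance_ida, id);

            /* Once the last id is released the whole pool can be torn down. */
            if (ida_is_empty(&example_instance_ida))
                    pr_debug("no instances left\n");
    }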
+39 -3
drivers/acpi/tables.c
··· 780 780 } 781 781 782 782 /* 783 - * acpi_table_init() 783 + * acpi_locate_initial_tables() 784 784 * 785 785 * find RSDP, find and checksum SDT/XSDT. 786 786 * checksum all tables, print SDT/XSDT ··· 788 788 * result: sdt_entry[] is initialized 789 789 */ 790 790 791 - int __init acpi_table_init(void) 791 + int __init acpi_locate_initial_tables(void) 792 792 { 793 793 acpi_status status; 794 794 ··· 803 803 status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0); 804 804 if (ACPI_FAILURE(status)) 805 805 return -EINVAL; 806 - acpi_table_initrd_scan(); 807 806 807 + return 0; 808 + } 809 + 810 + void __init acpi_reserve_initial_tables(void) 811 + { 812 + int i; 813 + 814 + for (i = 0; i < ACPI_MAX_TABLES; i++) { 815 + struct acpi_table_desc *table_desc = &initial_tables[i]; 816 + u64 start = table_desc->address; 817 + u64 size = table_desc->length; 818 + 819 + if (!start || !size) 820 + break; 821 + 822 + pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n", 823 + table_desc->signature.ascii, start, start + size - 1); 824 + 825 + memblock_reserve(start, size); 826 + } 827 + } 828 + 829 + void __init acpi_table_init_complete(void) 830 + { 831 + acpi_table_initrd_scan(); 808 832 check_multiple_madt(); 833 + } 834 + 835 + int __init acpi_table_init(void) 836 + { 837 + int ret; 838 + 839 + ret = acpi_locate_initial_tables(); 840 + if (ret) 841 + return ret; 842 + 843 + acpi_table_init_complete(); 844 + 809 845 return 0; 810 846 } 811 847
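The tables.c change splits acpi_table_init() into locate, reserve and complete steps so an architecture can reserve the static tables' memory early and only later run the initrd override scan. A hedged sketch of the intended call order, assuming the three prototypes are visible to arch setup code; the real call sites live in architecture code, and this is only the ordering the new acpi_table_init() itself wires up:

    #include <linux/acpi.h>
    #include <linux/init.h>

    void __init example_arch_acpi_setup(void)
    {
            /* 1. Find the RSDP and checksum the static tables. */
            if (acpi_locate_initial_tables())
                    return;

            /* 2. Keep the located table memory away from early allocations. */
            acpi_reserve_initial_tables();

            /* ... early memory setup continues here ... */

            /* 3. Apply initrd table overrides and check for multiple MADTs. */
            acpi_table_init_complete();
    }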
+1
drivers/acpi/video_detect.c
··· 147 147 }, 148 148 }, 149 149 { 150 + .callback = video_detect_force_vendor, 150 151 .ident = "Sony VPCEH3U1E", 151 152 .matches = { 152 153 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+8 -10
drivers/auxdisplay/charlcd.c
··· 470 470 char c; 471 471 472 472 for (; count-- > 0; (*ppos)++, tmp++) { 473 - if (!in_interrupt() && (((count + 1) & 0x1f) == 0)) 473 + if (((count + 1) & 0x1f) == 0) { 474 474 /* 475 - * let's be a little nice with other processes 476 - * that need some CPU 475 + * charlcd_write() is invoked as a VFS->write() callback 476 + * and as such it is always invoked from preemptible 477 + * context and may sleep. 477 478 */ 478 - schedule(); 479 + cond_resched(); 480 + } 479 481 480 482 if (get_user(c, tmp)) 481 483 return -EFAULT; ··· 539 537 int count = strlen(s); 540 538 541 539 for (; count-- > 0; tmp++) { 542 - if (!in_interrupt() && (((count + 1) & 0x1f) == 0)) 543 - /* 544 - * let's be a little nice with other processes 545 - * that need some CPU 546 - */ 547 - schedule(); 540 + if (((count + 1) & 0x1f) == 0) 541 + cond_resched(); 548 542 549 543 charlcd_write_char(lcd, *tmp); 550 544 }
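The charlcd hunk drops the in_interrupt() test because the write path only runs in preemptible VFS context, so it can always yield with cond_resched(). A generic sketch of that "yield every 32 iterations" pattern, with a hypothetical example_write_byte() standing in for the slow LCD write:

    #include <linux/sched.h>
    #include <linux/types.h>

    static void example_write_byte(char c)
    {
            (void)c;        /* stand-in for a slow per-character device write */
    }

    /* Push a buffer to a slow device, yielding the CPU every 32 characters. */
    static void example_push(const char *buf, size_t count)
    {
            size_t i;

            for (i = 0; i < count; i++) {
                    if ((i & 0x1f) == 0x1f)
                            cond_resched(); /* only legal in preemptible, sleepable context */

                    example_write_byte(buf[i]);
            }
    }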
+3
drivers/base/dd.c
··· 96 96 97 97 get_device(dev); 98 98 99 + kfree(dev->p->deferred_probe_reason); 100 + dev->p->deferred_probe_reason = NULL; 101 + 99 102 /* 100 103 * Drop the mutex while probing each device; the probe path may 101 104 * manipulate the deferred list
+47 -8
drivers/base/power/runtime.c
··· 305 305 return 0; 306 306 } 307 307 308 - static void rpm_put_suppliers(struct device *dev) 308 + static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) 309 309 { 310 310 struct device_link *link; 311 311 ··· 313 313 device_links_read_lock_held()) { 314 314 315 315 while (refcount_dec_not_one(&link->rpm_active)) 316 - pm_runtime_put(link->supplier); 316 + pm_runtime_put_noidle(link->supplier); 317 + 318 + if (try_to_suspend) 319 + pm_request_idle(link->supplier); 317 320 } 321 + } 322 + 323 + static void rpm_put_suppliers(struct device *dev) 324 + { 325 + __rpm_put_suppliers(dev, true); 326 + } 327 + 328 + static void rpm_suspend_suppliers(struct device *dev) 329 + { 330 + struct device_link *link; 331 + int idx = device_links_read_lock(); 332 + 333 + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, 334 + device_links_read_lock_held()) 335 + pm_request_idle(link->supplier); 336 + 337 + device_links_read_unlock(idx); 318 338 } 319 339 320 340 /** ··· 364 344 idx = device_links_read_lock(); 365 345 366 346 retval = rpm_get_suppliers(dev); 367 - if (retval) 347 + if (retval) { 348 + rpm_put_suppliers(dev); 368 349 goto fail; 350 + } 369 351 370 352 device_links_read_unlock(idx); 371 353 } ··· 390 368 || (dev->power.runtime_status == RPM_RESUMING && retval))) { 391 369 idx = device_links_read_lock(); 392 370 393 - fail: 394 - rpm_put_suppliers(dev); 371 + __rpm_put_suppliers(dev, false); 395 372 373 + fail: 396 374 device_links_read_unlock(idx); 397 375 } 398 376 ··· 664 642 goto out; 665 643 } 666 644 645 + if (dev->power.irq_safe) 646 + goto out; 647 + 667 648 /* Maybe the parent is now able to suspend. */ 668 - if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { 649 + if (parent && !parent->power.ignore_children) { 669 650 spin_unlock(&dev->power.lock); 670 651 671 652 spin_lock(&parent->power.lock); ··· 676 651 spin_unlock(&parent->power.lock); 677 652 678 653 spin_lock(&dev->power.lock); 654 + } 655 + /* Maybe the suppliers are now able to suspend. */ 656 + if (dev->power.links_count > 0) { 657 + spin_unlock_irq(&dev->power.lock); 658 + 659 + rpm_suspend_suppliers(dev); 660 + 661 + spin_lock_irq(&dev->power.lock); 679 662 } 680 663 681 664 out: ··· 1690 1657 device_links_read_lock_held()) 1691 1658 if (link->flags & DL_FLAG_PM_RUNTIME) { 1692 1659 link->supplier_preactivated = true; 1693 - refcount_inc(&link->rpm_active); 1694 1660 pm_runtime_get_sync(link->supplier); 1661 + refcount_inc(&link->rpm_active); 1695 1662 } 1696 1663 1697 1664 device_links_read_unlock(idx); ··· 1704 1671 void pm_runtime_put_suppliers(struct device *dev) 1705 1672 { 1706 1673 struct device_link *link; 1674 + unsigned long flags; 1675 + bool put; 1707 1676 int idx; 1708 1677 1709 1678 idx = device_links_read_lock(); ··· 1714 1679 device_links_read_lock_held()) 1715 1680 if (link->supplier_preactivated) { 1716 1681 link->supplier_preactivated = false; 1717 - if (refcount_dec_not_one(&link->rpm_active)) 1682 + spin_lock_irqsave(&dev->power.lock, flags); 1683 + put = pm_runtime_status_suspended(dev) && 1684 + refcount_dec_not_one(&link->rpm_active); 1685 + spin_unlock_irqrestore(&dev->power.lock, flags); 1686 + if (put) 1718 1687 pm_runtime_put(link->supplier); 1719 1688 } 1720 1689
+21 -5
drivers/block/null_blk/main.c
··· 1369 1369 } 1370 1370 1371 1371 if (dev->zoned) 1372 - cmd->error = null_process_zoned_cmd(cmd, op, 1373 - sector, nr_sectors); 1372 + sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors); 1374 1373 else 1375 - cmd->error = null_process_cmd(cmd, op, sector, nr_sectors); 1374 + sts = null_process_cmd(cmd, op, sector, nr_sectors); 1375 + 1376 + /* Do not overwrite errors (e.g. timeout errors) */ 1377 + if (cmd->error == BLK_STS_OK) 1378 + cmd->error = sts; 1376 1379 1377 1380 out: 1378 1381 nullb_complete_cmd(cmd); ··· 1454 1451 1455 1452 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res) 1456 1453 { 1454 + struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); 1455 + 1457 1456 pr_info("rq %p timed out\n", rq); 1458 - blk_mq_complete_request(rq); 1457 + 1458 + /* 1459 + * If the device is marked as blocking (i.e. memory backed or zoned 1460 + * device), the submission path may be blocked waiting for resources 1461 + * and cause real timeouts. For these real timeouts, the submission 1462 + * path will complete the request using blk_mq_complete_request(). 1463 + * Only fake timeouts need to execute blk_mq_complete_request() here. 1464 + */ 1465 + cmd->error = BLK_STS_TIMEOUT; 1466 + if (cmd->fake_timeout) 1467 + blk_mq_complete_request(rq); 1459 1468 return BLK_EH_DONE; 1460 1469 } 1461 1470 ··· 1488 1473 cmd->rq = bd->rq; 1489 1474 cmd->error = BLK_STS_OK; 1490 1475 cmd->nq = nq; 1476 + cmd->fake_timeout = should_timeout_request(bd->rq); 1491 1477 1492 1478 blk_mq_start_request(bd->rq); 1493 1479 ··· 1505 1489 return BLK_STS_OK; 1506 1490 } 1507 1491 } 1508 - if (should_timeout_request(bd->rq)) 1492 + if (cmd->fake_timeout) 1509 1493 return BLK_STS_OK; 1510 1494 1511 1495 return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
+1
drivers/block/null_blk/null_blk.h
··· 22 22 blk_status_t error; 23 23 struct nullb_queue *nq; 24 24 struct hrtimer timer; 25 + bool fake_timeout; 25 26 }; 26 27 27 28 struct nullb_queue {
+1 -1
drivers/block/xen-blkback/blkback.c
··· 891 891 out: 892 892 for (i = last_map; i < num; i++) { 893 893 /* Don't zap current batch's valid persistent grants. */ 894 - if(i >= last_map + segs_to_map) 894 + if(i >= map_until) 895 895 pages[i]->persistent_gnt = NULL; 896 896 pages[i]->handle = BLKBACK_INVALID_HANDLE; 897 897 }
+2 -2
drivers/bus/omap_l3_noc.c
··· 285 285 */ 286 286 l3->debug_irq = platform_get_irq(pdev, 0); 287 287 ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler, 288 - 0x0, "l3-dbg-irq", l3); 288 + IRQF_NO_THREAD, "l3-dbg-irq", l3); 289 289 if (ret) { 290 290 dev_err(l3->dev, "request_irq failed for %d\n", 291 291 l3->debug_irq); ··· 294 294 295 295 l3->app_irq = platform_get_irq(pdev, 1); 296 296 ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler, 297 - 0x0, "l3-app-irq", l3); 297 + IRQF_NO_THREAD, "l3-app-irq", l3); 298 298 if (ret) 299 299 dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq); 300 300
+3 -1
drivers/bus/ti-sysc.c
··· 3053 3053 3054 3054 pm_runtime_put_sync(&pdev->dev); 3055 3055 pm_runtime_disable(&pdev->dev); 3056 - reset_control_assert(ddata->rsts); 3056 + 3057 + if (!reset_control_status(ddata->rsts)) 3058 + reset_control_assert(ddata->rsts); 3057 3059 3058 3060 unprepare: 3059 3061 sysc_unprepare(ddata);
+9 -8
drivers/clk/qcom/clk-rcg2.c
··· 730 730 struct clk_rate_request parent_req = { }; 731 731 struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw); 732 732 struct clk_hw *xo, *p0, *p1, *p2; 733 - unsigned long request, p0_rate; 733 + unsigned long p0_rate; 734 + u8 mux_div = cgfx->div; 734 735 int ret; 735 736 736 737 p0 = cgfx->hws[0]; ··· 751 750 return 0; 752 751 } 753 752 754 - request = req->rate; 755 - if (cgfx->div > 1) 756 - parent_req.rate = request = request * cgfx->div; 753 + if (mux_div == 0) 754 + mux_div = 1; 755 + 756 + parent_req.rate = req->rate * mux_div; 757 757 758 758 /* This has to be a fixed rate PLL */ 759 759 p0_rate = clk_hw_get_rate(p0); 760 760 761 - if (request == p0_rate) { 761 + if (parent_req.rate == p0_rate) { 762 762 req->rate = req->best_parent_rate = p0_rate; 763 763 req->best_parent_hw = p0; 764 764 return 0; ··· 767 765 768 766 if (req->best_parent_hw == p0) { 769 767 /* Are we going back to a previously used rate? */ 770 - if (clk_hw_get_rate(p2) == request) 768 + if (clk_hw_get_rate(p2) == parent_req.rate) 771 769 req->best_parent_hw = p2; 772 770 else 773 771 req->best_parent_hw = p1; ··· 782 780 return ret; 783 781 784 782 req->rate = req->best_parent_rate = parent_req.rate; 785 - if (cgfx->div > 1) 786 - req->rate /= cgfx->div; 783 + req->rate /= mux_div; 787 784 788 785 return 0; 789 786 }
+5 -2
drivers/clk/qcom/clk-rpmh.c
··· 510 510 .num_clks = ARRAY_SIZE(sm8350_rpmh_clocks), 511 511 }; 512 512 513 + /* Resource name must match resource id present in cmd-db */ 514 + DEFINE_CLK_RPMH_ARC(sc7280, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 4); 515 + 513 516 static struct clk_hw *sc7280_rpmh_clocks[] = { 514 - [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw, 515 - [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw, 517 + [RPMH_CXO_CLK] = &sc7280_bi_tcxo.hw, 518 + [RPMH_CXO_CLK_A] = &sc7280_bi_tcxo_ao.hw, 516 519 [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw, 517 520 [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw, 518 521 [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+2 -2
drivers/clk/qcom/gcc-sc7180.c
··· 620 620 .name = "gcc_sdcc1_apps_clk_src", 621 621 .parent_data = gcc_parent_data_1, 622 622 .num_parents = 5, 623 - .ops = &clk_rcg2_ops, 623 + .ops = &clk_rcg2_floor_ops, 624 624 }, 625 625 }; 626 626 ··· 642 642 .name = "gcc_sdcc1_ice_core_clk_src", 643 643 .parent_data = gcc_parent_data_0, 644 644 .num_parents = 4, 645 - .ops = &clk_rcg2_floor_ops, 645 + .ops = &clk_rcg2_ops, 646 646 }, 647 647 }; 648 648
+2 -2
drivers/cpufreq/freq_table.c
··· 267 267 __ATTR_RO(_name##_frequencies) 268 268 269 269 /* 270 - * show_scaling_available_frequencies - show available normal frequencies for 270 + * scaling_available_frequencies_show - show available normal frequencies for 271 271 * the specified CPU 272 272 */ 273 273 static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy, ··· 279 279 EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); 280 280 281 281 /* 282 - * show_available_boost_freqs - show available boost frequencies for 282 + * scaling_boost_frequencies_show - show available boost frequencies for 283 283 * the specified CPU 284 284 */ 285 285 static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
+1
drivers/extcon/extcon.c
··· 1241 1241 sizeof(*edev->nh), GFP_KERNEL); 1242 1242 if (!edev->nh) { 1243 1243 ret = -ENOMEM; 1244 + device_unregister(&edev->dev); 1244 1245 goto err_dev; 1245 1246 } 1246 1247
+7 -2
drivers/firewire/nosy.c
··· 346 346 struct client *client = file->private_data; 347 347 spinlock_t *client_list_lock = &client->lynx->client_list_lock; 348 348 struct nosy_stats stats; 349 + int ret; 349 350 350 351 switch (cmd) { 351 352 case NOSY_IOC_GET_STATS: ··· 361 360 return 0; 362 361 363 362 case NOSY_IOC_START: 363 + ret = -EBUSY; 364 364 spin_lock_irq(client_list_lock); 365 - list_add_tail(&client->link, &client->lynx->client_list); 365 + if (list_empty(&client->link)) { 366 + list_add_tail(&client->link, &client->lynx->client_list); 367 + ret = 0; 368 + } 366 369 spin_unlock_irq(client_list_lock); 367 370 368 - return 0; 371 + return ret; 369 372 370 373 case NOSY_IOC_STOP: 371 374 spin_lock_irq(client_list_lock);
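The nosy.c fix only links a client into the capture list when it is not already on it, making NOSY_IOC_START safe against repeated calls. A generic sketch of the pattern, assuming the node is set up with INIT_LIST_HEAD() and taken off with list_del_init() so that list_empty() remains a reliable "not registered" test:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    struct example_client {
            struct list_head link;  /* INIT_LIST_HEAD() at creation time */
    };

    static LIST_HEAD(example_clients);
    static DEFINE_SPINLOCK(example_lock);

    static int example_start(struct example_client *c)
    {
            int ret = -EBUSY;

            spin_lock_irq(&example_lock);
            if (list_empty(&c->link)) {             /* not registered yet */
                    list_add_tail(&c->link, &example_clients);
                    ret = 0;
            }
            spin_unlock_irq(&example_lock);
            return ret;
    }

    static void example_stop(struct example_client *c)
    {
            spin_lock_irq(&example_lock);
            list_del_init(&c->link);                /* keeps list_empty() accurate */
            spin_unlock_irq(&example_lock);
    }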
+3 -7
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1007 1007 1008 1008 /* s3/s4 mask */ 1009 1009 bool in_suspend; 1010 - bool in_hibernate; 1011 - 1012 - /* 1013 - * The combination flag in_poweroff_reboot_com used to identify the poweroff 1014 - * and reboot opt in the s0i3 system-wide suspend. 1015 - */ 1016 - bool in_poweroff_reboot_com; 1010 + bool in_s3; 1011 + bool in_s4; 1012 + bool in_s0ix; 1017 1013 1018 1014 atomic_t in_gpu_reset; 1019 1015 enum pp_mp1_state mp1_state;
+34 -98
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 2371 2371 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 2372 2372 if (!adev->ip_blocks[i].status.late_initialized) 2373 2373 continue; 2374 + /* skip CG for GFX on S0ix */ 2375 + if (adev->in_s0ix && 2376 + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2377 + continue; 2374 2378 /* skip CG for VCE/UVD, it's handled specially */ 2375 2379 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 2376 2380 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && ··· 2405 2401 for (j = 0; j < adev->num_ip_blocks; j++) { 2406 2402 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 2407 2403 if (!adev->ip_blocks[i].status.late_initialized) 2404 + continue; 2405 + /* skip PG for GFX on S0ix */ 2406 + if (adev->in_s0ix && 2407 + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2408 2408 continue; 2409 2409 /* skip CG for VCE/UVD, it's handled specially */ 2410 2410 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && ··· 2686 2678 { 2687 2679 int i, r; 2688 2680 2689 - if (adev->in_poweroff_reboot_com || 2690 - !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) { 2691 - amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2692 - amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2693 - } 2681 + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2682 + amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2694 2683 2695 2684 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2696 2685 if (!adev->ip_blocks[i].status.valid) ··· 2727 2722 { 2728 2723 int i, r; 2729 2724 2725 + if (adev->in_s0ix) 2726 + amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry); 2727 + 2730 2728 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 2731 2729 if (!adev->ip_blocks[i].status.valid) 2732 2730 continue; ··· 2742 2734 adev->ip_blocks[i].status.hw = false; 2743 2735 continue; 2744 2736 } 2737 + 2738 + /* skip suspend of gfx and psp for S0ix 2739 + * gfx is in gfxoff state, so on resume it will exit gfxoff just 2740 + * like at runtime. PSP is also part of the always on hardware 2741 + * so no need to suspend it. 
2742 + */ 2743 + if (adev->in_s0ix && 2744 + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || 2745 + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)) 2746 + continue; 2747 + 2745 2748 /* XXX handle errors */ 2746 2749 r = adev->ip_blocks[i].version->funcs->suspend(adev); 2747 2750 /* XXX handle errors */ ··· 3692 3673 */ 3693 3674 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) 3694 3675 { 3695 - struct amdgpu_device *adev; 3696 - struct drm_crtc *crtc; 3697 - struct drm_connector *connector; 3698 - struct drm_connector_list_iter iter; 3676 + struct amdgpu_device *adev = drm_to_adev(dev); 3699 3677 int r; 3700 - 3701 - adev = drm_to_adev(dev); 3702 3678 3703 3679 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 3704 3680 return 0; ··· 3706 3692 3707 3693 cancel_delayed_work_sync(&adev->delayed_init_work); 3708 3694 3709 - if (!amdgpu_device_has_dc_support(adev)) { 3710 - /* turn off display hw */ 3711 - drm_modeset_lock_all(dev); 3712 - drm_connector_list_iter_begin(dev, &iter); 3713 - drm_for_each_connector_iter(connector, &iter) 3714 - drm_helper_connector_dpms(connector, 3715 - DRM_MODE_DPMS_OFF); 3716 - drm_connector_list_iter_end(&iter); 3717 - drm_modeset_unlock_all(dev); 3718 - /* unpin the front buffers and cursors */ 3719 - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 3720 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 3721 - struct drm_framebuffer *fb = crtc->primary->fb; 3722 - struct amdgpu_bo *robj; 3723 - 3724 - if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 3725 - struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 3726 - r = amdgpu_bo_reserve(aobj, true); 3727 - if (r == 0) { 3728 - amdgpu_bo_unpin(aobj); 3729 - amdgpu_bo_unreserve(aobj); 3730 - } 3731 - } 3732 - 3733 - if (fb == NULL || fb->obj[0] == NULL) { 3734 - continue; 3735 - } 3736 - robj = gem_to_amdgpu_bo(fb->obj[0]); 3737 - /* don't unpin kernel fb objects */ 3738 - if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { 3739 - r = amdgpu_bo_reserve(robj, true); 3740 - if (r == 0) { 3741 - amdgpu_bo_unpin(robj); 3742 - amdgpu_bo_unreserve(robj); 3743 - } 3744 - } 3745 - } 3746 - } 3747 - 3748 3695 amdgpu_ras_suspend(adev); 3749 3696 3750 3697 r = amdgpu_device_ip_suspend_phase1(adev); 3751 3698 3752 - amdgpu_amdkfd_suspend(adev, adev->in_runpm); 3699 + if (!adev->in_s0ix) 3700 + amdgpu_amdkfd_suspend(adev, adev->in_runpm); 3753 3701 3754 3702 /* evict vram memory */ 3755 3703 amdgpu_bo_evict_vram(adev); 3756 3704 3757 3705 amdgpu_fence_driver_suspend(adev); 3758 3706 3759 - if (adev->in_poweroff_reboot_com || 3760 - !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) 3761 - r = amdgpu_device_ip_suspend_phase2(adev); 3762 - else 3763 - amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry); 3707 + r = amdgpu_device_ip_suspend_phase2(adev); 3764 3708 /* evict remaining vram memory 3765 3709 * This second call to evict vram is to evict the gart page table 3766 3710 * using the CPU. 
··· 3740 3768 */ 3741 3769 int amdgpu_device_resume(struct drm_device *dev, bool fbcon) 3742 3770 { 3743 - struct drm_connector *connector; 3744 - struct drm_connector_list_iter iter; 3745 3771 struct amdgpu_device *adev = drm_to_adev(dev); 3746 - struct drm_crtc *crtc; 3747 3772 int r = 0; 3748 3773 3749 3774 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 3750 3775 return 0; 3751 3776 3752 - if (amdgpu_acpi_is_s0ix_supported(adev)) 3777 + if (adev->in_s0ix) 3753 3778 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry); 3754 3779 3755 3780 /* post card */ ··· 3771 3802 queue_delayed_work(system_wq, &adev->delayed_init_work, 3772 3803 msecs_to_jiffies(AMDGPU_RESUME_MS)); 3773 3804 3774 - if (!amdgpu_device_has_dc_support(adev)) { 3775 - /* pin cursors */ 3776 - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 3777 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 3778 - 3779 - if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 3780 - struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 3781 - r = amdgpu_bo_reserve(aobj, true); 3782 - if (r == 0) { 3783 - r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); 3784 - if (r != 0) 3785 - dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r); 3786 - amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); 3787 - amdgpu_bo_unreserve(aobj); 3788 - } 3789 - } 3790 - } 3805 + if (!adev->in_s0ix) { 3806 + r = amdgpu_amdkfd_resume(adev, adev->in_runpm); 3807 + if (r) 3808 + return r; 3791 3809 } 3792 - r = amdgpu_amdkfd_resume(adev, adev->in_runpm); 3793 - if (r) 3794 - return r; 3795 3810 3796 3811 /* Make sure IB tests flushed */ 3797 3812 flush_delayed_work(&adev->delayed_init_work); 3798 3813 3799 - /* blat the mode back in */ 3800 - if (fbcon) { 3801 - if (!amdgpu_device_has_dc_support(adev)) { 3802 - /* pre DCE11 */ 3803 - drm_helper_resume_force_mode(dev); 3804 - 3805 - /* turn on display hw */ 3806 - drm_modeset_lock_all(dev); 3807 - 3808 - drm_connector_list_iter_begin(dev, &iter); 3809 - drm_for_each_connector_iter(connector, &iter) 3810 - drm_helper_connector_dpms(connector, 3811 - DRM_MODE_DPMS_ON); 3812 - drm_connector_list_iter_end(&iter); 3813 - 3814 - drm_modeset_unlock_all(dev); 3815 - } 3814 + if (fbcon) 3816 3815 amdgpu_fbdev_set_suspend(adev, 0); 3817 - } 3818 3816 3819 3817 drm_kms_helper_poll_enable(dev); 3820 3818
+89
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 1310 1310 return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos, 1311 1311 stime, etime, mode); 1312 1312 } 1313 + 1314 + int amdgpu_display_suspend_helper(struct amdgpu_device *adev) 1315 + { 1316 + struct drm_device *dev = adev_to_drm(adev); 1317 + struct drm_crtc *crtc; 1318 + struct drm_connector *connector; 1319 + struct drm_connector_list_iter iter; 1320 + int r; 1321 + 1322 + /* turn off display hw */ 1323 + drm_modeset_lock_all(dev); 1324 + drm_connector_list_iter_begin(dev, &iter); 1325 + drm_for_each_connector_iter(connector, &iter) 1326 + drm_helper_connector_dpms(connector, 1327 + DRM_MODE_DPMS_OFF); 1328 + drm_connector_list_iter_end(&iter); 1329 + drm_modeset_unlock_all(dev); 1330 + /* unpin the front buffers and cursors */ 1331 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1332 + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1333 + struct drm_framebuffer *fb = crtc->primary->fb; 1334 + struct amdgpu_bo *robj; 1335 + 1336 + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 1337 + struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 1338 + r = amdgpu_bo_reserve(aobj, true); 1339 + if (r == 0) { 1340 + amdgpu_bo_unpin(aobj); 1341 + amdgpu_bo_unreserve(aobj); 1342 + } 1343 + } 1344 + 1345 + if (fb == NULL || fb->obj[0] == NULL) { 1346 + continue; 1347 + } 1348 + robj = gem_to_amdgpu_bo(fb->obj[0]); 1349 + /* don't unpin kernel fb objects */ 1350 + if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { 1351 + r = amdgpu_bo_reserve(robj, true); 1352 + if (r == 0) { 1353 + amdgpu_bo_unpin(robj); 1354 + amdgpu_bo_unreserve(robj); 1355 + } 1356 + } 1357 + } 1358 + return r; 1359 + } 1360 + 1361 + int amdgpu_display_resume_helper(struct amdgpu_device *adev) 1362 + { 1363 + struct drm_device *dev = adev_to_drm(adev); 1364 + struct drm_connector *connector; 1365 + struct drm_connector_list_iter iter; 1366 + struct drm_crtc *crtc; 1367 + int r; 1368 + 1369 + /* pin cursors */ 1370 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1371 + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1372 + 1373 + if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) { 1374 + struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 1375 + r = amdgpu_bo_reserve(aobj, true); 1376 + if (r == 0) { 1377 + r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); 1378 + if (r != 0) 1379 + dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r); 1380 + amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); 1381 + amdgpu_bo_unreserve(aobj); 1382 + } 1383 + } 1384 + } 1385 + 1386 + drm_helper_resume_force_mode(dev); 1387 + 1388 + /* turn on display hw */ 1389 + drm_modeset_lock_all(dev); 1390 + 1391 + drm_connector_list_iter_begin(dev, &iter); 1392 + drm_for_each_connector_iter(connector, &iter) 1393 + drm_helper_connector_dpms(connector, 1394 + DRM_MODE_DPMS_ON); 1395 + drm_connector_list_iter_end(&iter); 1396 + 1397 + drm_modeset_unlock_all(dev); 1398 + 1399 + return 0; 1400 + } 1401 +
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
··· 47 47 const struct drm_format_info * 48 48 amdgpu_lookup_format_info(u32 format, uint64_t modifier); 49 49 50 + int amdgpu_display_suspend_helper(struct amdgpu_device *adev); 51 + int amdgpu_display_resume_helper(struct amdgpu_device *adev); 52 + 50 53 #endif
+19 -12
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 1107 1107 {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 1108 1108 {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 1109 1109 {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 1110 + {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 1110 1111 {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, 1111 1112 1112 1113 /* Van Gogh */ ··· 1275 1274 */ 1276 1275 if (!amdgpu_passthrough(adev)) 1277 1276 adev->mp1_state = PP_MP1_STATE_UNLOAD; 1278 - adev->in_poweroff_reboot_com = true; 1279 1277 amdgpu_device_ip_suspend(adev); 1280 - adev->in_poweroff_reboot_com = false; 1281 1278 adev->mp1_state = PP_MP1_STATE_NONE; 1282 1279 } 1283 1280 1284 1281 static int amdgpu_pmops_suspend(struct device *dev) 1285 1282 { 1286 1283 struct drm_device *drm_dev = dev_get_drvdata(dev); 1284 + struct amdgpu_device *adev = drm_to_adev(drm_dev); 1285 + int r; 1287 1286 1288 - return amdgpu_device_suspend(drm_dev, true); 1287 + if (amdgpu_acpi_is_s0ix_supported(adev)) 1288 + adev->in_s0ix = true; 1289 + adev->in_s3 = true; 1290 + r = amdgpu_device_suspend(drm_dev, true); 1291 + adev->in_s3 = false; 1292 + 1293 + return r; 1289 1294 } 1290 1295 1291 1296 static int amdgpu_pmops_resume(struct device *dev) 1292 1297 { 1293 1298 struct drm_device *drm_dev = dev_get_drvdata(dev); 1299 + struct amdgpu_device *adev = drm_to_adev(drm_dev); 1300 + int r; 1294 1301 1295 - return amdgpu_device_resume(drm_dev, true); 1302 + r = amdgpu_device_resume(drm_dev, true); 1303 + if (amdgpu_acpi_is_s0ix_supported(adev)) 1304 + adev->in_s0ix = false; 1305 + return r; 1296 1306 } 1297 1307 1298 1308 static int amdgpu_pmops_freeze(struct device *dev) ··· 1312 1300 struct amdgpu_device *adev = drm_to_adev(drm_dev); 1313 1301 int r; 1314 1302 1315 - adev->in_hibernate = true; 1303 + adev->in_s4 = true; 1316 1304 r = amdgpu_device_suspend(drm_dev, true); 1317 - adev->in_hibernate = false; 1305 + adev->in_s4 = false; 1318 1306 if (r) 1319 1307 return r; 1320 1308 return amdgpu_asic_reset(adev); ··· 1330 1318 static int amdgpu_pmops_poweroff(struct device *dev) 1331 1319 { 1332 1320 struct drm_device *drm_dev = dev_get_drvdata(dev); 1333 - struct amdgpu_device *adev = drm_to_adev(drm_dev); 1334 - int r; 1335 1321 1336 - adev->in_poweroff_reboot_com = true; 1337 - r = amdgpu_device_suspend(drm_dev, true); 1338 - adev->in_poweroff_reboot_com = false; 1339 - return r; 1322 + return amdgpu_device_suspend(drm_dev, true); 1340 1323 } 1341 1324 1342 1325 static int amdgpu_pmops_restore(struct device *dev)
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 778 778 dev_info->high_va_offset = AMDGPU_GMC_HOLE_END; 779 779 dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size; 780 780 } 781 - dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); 781 + dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); 782 782 dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; 783 - dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE; 783 + dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); 784 784 dev_info->cu_active_number = adev->gfx.cu_info.number; 785 785 dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; 786 786 dev_info->ce_ram_size = adev->gfx.ce_ram_size;
+2 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 1028 1028 { 1029 1029 struct ttm_resource_manager *man; 1030 1030 1031 - /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ 1032 - #ifndef CONFIG_HIBERNATION 1033 - if (adev->flags & AMD_IS_APU) { 1034 - /* Useless to evict on IGP chips */ 1031 + if (adev->in_s3 && (adev->flags & AMD_IS_APU)) { 1032 + /* No need to evict vram on APUs for suspend to ram */ 1035 1033 return 0; 1036 1034 } 1037 - #endif 1038 1035 1039 1036 man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); 1040 1037 return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+5 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 2197 2197 uint64_t eaddr; 2198 2198 2199 2199 /* validate the parameters */ 2200 - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || 2201 - size == 0 || size & AMDGPU_GPU_PAGE_MASK) 2200 + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || 2201 + size == 0 || size & ~PAGE_MASK) 2202 2202 return -EINVAL; 2203 2203 2204 2204 /* make sure object fit at this offset */ ··· 2263 2263 int r; 2264 2264 2265 2265 /* validate the parameters */ 2266 - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || 2267 - size == 0 || size & AMDGPU_GPU_PAGE_MASK) 2266 + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || 2267 + size == 0 || size & ~PAGE_MASK) 2268 2268 return -EINVAL; 2269 2269 2270 2270 /* make sure object fit at this offset */ ··· 2409 2409 after->start = eaddr + 1; 2410 2410 after->last = tmp->last; 2411 2411 after->offset = tmp->offset; 2412 - after->offset += after->start - tmp->start; 2412 + after->offset += (after->start - tmp->start) << PAGE_SHIFT; 2413 2413 after->flags = tmp->flags; 2414 2414 after->bo_va = tmp->bo_va; 2415 2415 list_add(&after->list, &tmp->bo_va->invalids);
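The amdgpu_vm hunk validates mapping parameters against the CPU page mask (~PAGE_MASK) rather than the GPU page mask. As a reminder of why that expression tests alignment: PAGE_MASK is ~(PAGE_SIZE - 1), so any low bit set means the address is not page aligned. A tiny standalone illustration with a hypothetical 4 KiB page size:

    #include <stdint.h>
    #include <stdio.h>

    #define EX_PAGE_SIZE    4096UL                  /* hypothetical 4 KiB pages */
    #define EX_PAGE_MASK    (~(EX_PAGE_SIZE - 1))   /* 0xfffffffffffff000 on 64-bit */

    static int page_aligned(uint64_t addr)
    {
            return (addr & ~EX_PAGE_MASK) == 0;     /* low 12 bits must be zero */
    }

    int main(void)
    {
            printf("%d %d\n", page_aligned(0x200000), page_aligned(0x200010));  /* prints: 1 0 */
            return 0;
    }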
+8 -1
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 2897 2897 static int dce_v10_0_suspend(void *handle) 2898 2898 { 2899 2899 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2900 + int r; 2901 + 2902 + r = amdgpu_display_suspend_helper(adev); 2903 + if (r) 2904 + return r; 2900 2905 2901 2906 adev->mode_info.bl_level = 2902 2907 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); ··· 2926 2921 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 2927 2922 bl_level); 2928 2923 } 2924 + if (ret) 2925 + return ret; 2929 2926 2930 - return ret; 2927 + return amdgpu_display_resume_helper(adev); 2931 2928 } 2932 2929 2933 2930 static bool dce_v10_0_is_idle(void *handle)
+8 -1
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 3027 3027 static int dce_v11_0_suspend(void *handle) 3028 3028 { 3029 3029 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3030 + int r; 3031 + 3032 + r = amdgpu_display_suspend_helper(adev); 3033 + if (r) 3034 + return r; 3030 3035 3031 3036 adev->mode_info.bl_level = 3032 3037 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); ··· 3056 3051 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 3057 3052 bl_level); 3058 3053 } 3054 + if (ret) 3055 + return ret; 3059 3056 3060 - return ret; 3057 + return amdgpu_display_resume_helper(adev); 3061 3058 } 3062 3059 3063 3060 static bool dce_v11_0_is_idle(void *handle)
+7 -1
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
··· 2770 2770 static int dce_v6_0_suspend(void *handle) 2771 2771 { 2772 2772 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2773 + int r; 2773 2774 2775 + r = amdgpu_display_suspend_helper(adev); 2776 + if (r) 2777 + return r; 2774 2778 adev->mode_info.bl_level = 2775 2779 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); 2776 2780 ··· 2798 2794 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 2799 2795 bl_level); 2800 2796 } 2797 + if (ret) 2798 + return ret; 2801 2799 2802 - return ret; 2800 + return amdgpu_display_resume_helper(adev); 2803 2801 } 2804 2802 2805 2803 static bool dce_v6_0_is_idle(void *handle)
+8 -1
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
··· 2796 2796 static int dce_v8_0_suspend(void *handle) 2797 2797 { 2798 2798 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2799 + int r; 2800 + 2801 + r = amdgpu_display_suspend_helper(adev); 2802 + if (r) 2803 + return r; 2799 2804 2800 2805 adev->mode_info.bl_level = 2801 2806 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); ··· 2825 2820 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 2826 2821 bl_level); 2827 2822 } 2823 + if (ret) 2824 + return ret; 2828 2825 2829 - return ret; 2826 + return amdgpu_display_resume_helper(adev); 2830 2827 } 2831 2828 2832 2829 static bool dce_v8_0_is_idle(void *handle)
+14 -1
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
··· 39 39 #include "dce_v11_0.h" 40 40 #include "dce_virtual.h" 41 41 #include "ivsrcid/ivsrcid_vislands30.h" 42 + #include "amdgpu_display.h" 42 43 43 44 #define DCE_VIRTUAL_VBLANK_PERIOD 16666666 44 45 ··· 492 491 493 492 static int dce_virtual_suspend(void *handle) 494 493 { 494 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 495 + int r; 496 + 497 + r = amdgpu_display_suspend_helper(adev); 498 + if (r) 499 + return r; 495 500 return dce_virtual_hw_fini(handle); 496 501 } 497 502 498 503 static int dce_virtual_resume(void *handle) 499 504 { 500 - return dce_virtual_hw_init(handle); 505 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 506 + int r; 507 + 508 + r = dce_virtual_hw_init(handle); 509 + if (r) 510 + return r; 511 + return amdgpu_display_resume_helper(adev); 501 512 } 502 513 503 514 static bool dce_virtual_is_idle(void *handle)
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
··· 155 155 156 156 /* Wait till CP writes sync code: */ 157 157 status = amdkfd_fence_wait_timeout( 158 - (unsigned int *) rm_state, 158 + rm_state, 159 159 QUEUESTATE__ACTIVE, 1500); 160 160 161 161 kfd_gtt_sa_free(dbgdev->dev, mem_obj);
+3 -3
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 1167 1167 if (retval) 1168 1168 goto fail_allocate_vidmem; 1169 1169 1170 - dqm->fence_addr = dqm->fence_mem->cpu_ptr; 1170 + dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr; 1171 1171 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr; 1172 1172 1173 1173 init_interrupts(dqm); ··· 1340 1340 return retval; 1341 1341 } 1342 1342 1343 - int amdkfd_fence_wait_timeout(unsigned int *fence_addr, 1344 - unsigned int fence_value, 1343 + int amdkfd_fence_wait_timeout(uint64_t *fence_addr, 1344 + uint64_t fence_value, 1345 1345 unsigned int timeout_ms) 1346 1346 { 1347 1347 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
··· 192 192 uint16_t vmid_pasid[VMID_NUM]; 193 193 uint64_t pipelines_addr; 194 194 uint64_t fence_gpu_addr; 195 - unsigned int *fence_addr; 195 + uint64_t *fence_addr; 196 196 struct kfd_mem_obj *fence_mem; 197 197 bool active_runlist; 198 198 int sched_policy;
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
··· 347 347 } 348 348 349 349 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, 350 - uint32_t fence_value) 350 + uint64_t fence_value) 351 351 { 352 352 uint32_t *buffer, size; 353 353 int retval = 0;
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
··· 283 283 } 284 284 285 285 static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer, 286 - uint64_t fence_address, uint32_t fence_value) 286 + uint64_t fence_address, uint64_t fence_value) 287 287 { 288 288 struct pm4_mes_query_status *packet; 289 289
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
··· 263 263 } 264 264 265 265 static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer, 266 - uint64_t fence_address, uint32_t fence_value) 266 + uint64_t fence_address, uint64_t fence_value) 267 267 { 268 268 struct pm4_mes_query_status *packet; 269 269
+4 -4
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 1003 1003 u32 *ctl_stack_used_size, 1004 1004 u32 *save_area_used_size); 1005 1005 1006 - int amdkfd_fence_wait_timeout(unsigned int *fence_addr, 1007 - unsigned int fence_value, 1006 + int amdkfd_fence_wait_timeout(uint64_t *fence_addr, 1007 + uint64_t fence_value, 1008 1008 unsigned int timeout_ms); 1009 1009 1010 1010 /* Packet Manager */ ··· 1040 1040 uint32_t filter_param, bool reset, 1041 1041 unsigned int sdma_engine); 1042 1042 int (*query_status)(struct packet_manager *pm, uint32_t *buffer, 1043 - uint64_t fence_address, uint32_t fence_value); 1043 + uint64_t fence_address, uint64_t fence_value); 1044 1044 int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer); 1045 1045 1046 1046 /* Packet sizes */ ··· 1062 1062 struct scheduling_resources *res); 1063 1063 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues); 1064 1064 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, 1065 - uint32_t fence_value); 1065 + uint64_t fence_value); 1066 1066 1067 1067 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, 1068 1068 enum kfd_unmap_queues_filter mode,
+56 -1
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
··· 587 587 tmp, MC_CG_ARB_FREQ_F0); 588 588 } 589 589 590 + static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr) 591 + { 592 + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); 593 + uint16_t pcie_gen = 0; 594 + 595 + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 && 596 + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4) 597 + pcie_gen = 3; 598 + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 && 599 + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3) 600 + pcie_gen = 2; 601 + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 && 602 + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2) 603 + pcie_gen = 1; 604 + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 && 605 + adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1) 606 + pcie_gen = 0; 607 + 608 + return pcie_gen; 609 + } 610 + 611 + static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr) 612 + { 613 + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); 614 + uint16_t pcie_width = 0; 615 + 616 + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) 617 + pcie_width = 16; 618 + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) 619 + pcie_width = 12; 620 + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) 621 + pcie_width = 8; 622 + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) 623 + pcie_width = 4; 624 + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) 625 + pcie_width = 2; 626 + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) 627 + pcie_width = 1; 628 + 629 + return pcie_width; 630 + } 631 + 590 632 static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr) 591 633 { 592 634 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); ··· 725 683 PP_Min_PCIEGen), 726 684 get_pcie_lane_support(data->pcie_lane_cap, 727 685 PP_Max_PCIELane)); 686 + 687 + if (data->pcie_dpm_key_disabled) 688 + phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 689 + data->dpm_table.pcie_speed_table.count, 690 + smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr)); 728 691 } 729 692 return 0; 730 693 } ··· 1294 1247 PPSMC_MSG_PCIeDPM_Enable, 1295 1248 NULL)), 1296 1249 "Failed to enable pcie DPM during DPM Start Function!", 1250 + return -EINVAL); 1251 + } else { 1252 + PP_ASSERT_WITH_CODE( 1253 + (0 == smum_send_msg_to_smc(hwmgr, 1254 + PPSMC_MSG_PCIeDPM_Disable, 1255 + NULL)), 1256 + "Failed to disble pcie DPM during DPM Start Function!", 1297 1257 return -EINVAL); 1298 1258 } 1299 1259 ··· 3330 3276 3331 3277 disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) && 3332 3278 !hwmgr->display_config->multi_monitor_in_sync) || 3333 - smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time); 3279 + (hwmgr->display_config->num_display && 3280 + smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time)); 3334 3281 3335 3282 disable_mclk_switching = disable_mclk_switching_for_frame_lock || 3336 3283 disable_mclk_switching_for_display;
+62 -10
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
··· 54 54 #include "smuio/smuio_9_0_offset.h" 55 55 #include "smuio/smuio_9_0_sh_mask.h" 56 56 57 + #define smnPCIE_LC_SPEED_CNTL 0x11140290 58 + #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 59 + 57 60 #define HBM_MEMORY_CHANNEL_WIDTH 128 58 61 59 62 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; ··· 446 443 if (PP_CAP(PHM_PlatformCaps_VCEDPM)) 447 444 data->smu_features[GNLD_DPM_VCE].supported = true; 448 445 449 - if (!data->registry_data.pcie_dpm_key_disabled) 450 - data->smu_features[GNLD_DPM_LINK].supported = true; 446 + data->smu_features[GNLD_DPM_LINK].supported = true; 451 447 452 448 if (!data->registry_data.dcefclk_dpm_key_disabled) 453 449 data->smu_features[GNLD_DPM_DCEFCLK].supported = true; ··· 1544 1542 1545 1543 if (pp_table->PcieLaneCount[i] > pcie_width) 1546 1544 pp_table->PcieLaneCount[i] = pcie_width; 1545 + } 1546 + 1547 + if (data->registry_data.pcie_dpm_key_disabled) { 1548 + for (i = 0; i < NUM_LINK_LEVELS; i++) { 1549 + pp_table->PcieGenSpeed[i] = pcie_gen; 1550 + pp_table->PcieLaneCount[i] = pcie_width; 1551 + } 1547 1552 } 1548 1553 1549 1554 return 0; ··· 2973 2964 return -1); 2974 2965 data->smu_features[GNLD_ACDC].enabled = true; 2975 2966 } 2967 + } 2968 + 2969 + if (data->registry_data.pcie_dpm_key_disabled) { 2970 + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, 2971 + false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap), 2972 + "Attempt to Disable Link DPM feature Failed!", return -EINVAL); 2973 + data->smu_features[GNLD_DPM_LINK].enabled = false; 2974 + data->smu_features[GNLD_DPM_LINK].supported = false; 2976 2975 } 2977 2976 2978 2977 return 0; ··· 4601 4584 return 0; 4602 4585 } 4603 4586 4587 + static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr) 4588 + { 4589 + struct amdgpu_device *adev = hwmgr->adev; 4590 + 4591 + return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 4592 + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) 4593 + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; 4594 + } 4595 + 4596 + static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr) 4597 + { 4598 + struct amdgpu_device *adev = hwmgr->adev; 4599 + 4600 + return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 4601 + PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 4602 + >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 4603 + } 4604 + 4604 4605 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, 4605 4606 enum pp_clock_type type, char *buf) 4606 4607 { ··· 4627 4592 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); 4628 4593 struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table); 4629 4594 struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table); 4630 - struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); 4631 4595 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL; 4596 + uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; 4597 + PPTable_t *pptable = &(data->smc_state_table.pp_table); 4632 4598 4633 4599 int i, now, size = 0, count = 0; 4634 4600 ··· 4686 4650 "*" : ""); 4687 4651 break; 4688 4652 case PP_PCIE: 4689 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now); 4653 + current_gen_speed = 4654 + vega10_get_current_pcie_link_speed_level(hwmgr); 4655 + current_lane_width = 4656 + vega10_get_current_pcie_link_width_level(hwmgr); 4657 + for (i = 0; i < NUM_LINK_LEVELS; i++) { 4658 + gen_speed = pptable->PcieGenSpeed[i]; 4659 + lane_width = 
pptable->PcieLaneCount[i]; 4690 4660 4691 - for (i = 0; i < pcie_table->count; i++) 4692 - size += sprintf(buf + size, "%d: %s %s\n", i, 4693 - (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" : 4694 - (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" : 4695 - (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "", 4696 - (i == now) ? "*" : ""); 4661 + size += sprintf(buf + size, "%d: %s %s %s\n", i, 4662 + (gen_speed == 0) ? "2.5GT/s," : 4663 + (gen_speed == 1) ? "5.0GT/s," : 4664 + (gen_speed == 2) ? "8.0GT/s," : 4665 + (gen_speed == 3) ? "16.0GT/s," : "", 4666 + (lane_width == 1) ? "x1" : 4667 + (lane_width == 2) ? "x2" : 4668 + (lane_width == 3) ? "x4" : 4669 + (lane_width == 4) ? "x8" : 4670 + (lane_width == 5) ? "x12" : 4671 + (lane_width == 6) ? "x16" : "", 4672 + (current_gen_speed == gen_speed) && 4673 + (current_lane_width == lane_width) ? 4674 + "*" : ""); 4675 + } 4697 4676 break; 4677 + 4698 4678 case OD_SCLK: 4699 4679 if (hwmgr->od_enabled) { 4700 4680 size = sprintf(buf, "%s:\n", "OD_SCLK");
+24
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
··· 133 133 data->registry_data.auto_wattman_debug = 0; 134 134 data->registry_data.auto_wattman_sample_period = 100; 135 135 data->registry_data.auto_wattman_threshold = 50; 136 + data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); 136 137 } 137 138 138 139 static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr) ··· 540 539 pp_table->PcieLaneCount[i] = pcie_width_arg; 541 540 } 542 541 542 + /* override to the highest if it's disabled from ppfeaturmask */ 543 + if (data->registry_data.pcie_dpm_key_disabled) { 544 + for (i = 0; i < NUM_LINK_LEVELS; i++) { 545 + smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width; 546 + ret = smum_send_msg_to_smc_with_parameter(hwmgr, 547 + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, 548 + NULL); 549 + PP_ASSERT_WITH_CODE(!ret, 550 + "[OverridePcieParameters] Attempt to override pcie params failed!", 551 + return ret); 552 + 553 + pp_table->PcieGenSpeed[i] = pcie_gen; 554 + pp_table->PcieLaneCount[i] = pcie_width; 555 + } 556 + ret = vega12_enable_smc_features(hwmgr, 557 + false, 558 + data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap); 559 + PP_ASSERT_WITH_CODE(!ret, 560 + "Attempt to Disable DPM LINK Failed!", 561 + return ret); 562 + data->smu_features[GNLD_DPM_LINK].enabled = false; 563 + data->smu_features[GNLD_DPM_LINK].supported = false; 564 + } 543 565 return 0; 544 566 } 545 567
+25
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
··· 171 171 data->registry_data.gfxoff_controlled_by_driver = 1; 172 172 data->gfxoff_allowed = false; 173 173 data->counter_gfxoff = 0; 174 + data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK); 174 175 } 175 176 176 177 static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) ··· 883 882 /* update the pptable */ 884 883 pp_table->PcieGenSpeed[i] = pcie_gen_arg; 885 884 pp_table->PcieLaneCount[i] = pcie_width_arg; 885 + } 886 + 887 + /* override to the highest if it's disabled from ppfeaturmask */ 888 + if (data->registry_data.pcie_dpm_key_disabled) { 889 + for (i = 0; i < NUM_LINK_LEVELS; i++) { 890 + smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width; 891 + ret = smum_send_msg_to_smc_with_parameter(hwmgr, 892 + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, 893 + NULL); 894 + PP_ASSERT_WITH_CODE(!ret, 895 + "[OverridePcieParameters] Attempt to override pcie params failed!", 896 + return ret); 897 + 898 + pp_table->PcieGenSpeed[i] = pcie_gen; 899 + pp_table->PcieLaneCount[i] = pcie_width; 900 + } 901 + ret = vega20_enable_smc_features(hwmgr, 902 + false, 903 + data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap); 904 + PP_ASSERT_WITH_CODE(!ret, 905 + "Attempt to Disable DPM LINK Failed!", 906 + return ret); 907 + data->smu_features[GNLD_DPM_LINK].enabled = false; 908 + data->smu_features[GNLD_DPM_LINK].supported = false; 886 909 } 887 910 888 911 return 0;
+3 -2
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 1294 1294 bool use_baco = !smu->is_apu && 1295 1295 ((amdgpu_in_reset(adev) && 1296 1296 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || 1297 - ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev))); 1297 + ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev))); 1298 1298 1299 1299 /* 1300 1300 * For custom pptable uploading, skip the DPM features ··· 1431 1431 1432 1432 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); 1433 1433 1434 - if (smu->is_apu) 1434 + /* skip CGPG when in S0ix */ 1435 + if (smu->is_apu && !adev->in_s0ix) 1435 1436 smu_set_gfx_cgpg(&adev->smu, false); 1436 1437 1437 1438 return 0;
+5
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 384 384 385 385 static bool vangogh_is_dpm_running(struct smu_context *smu) 386 386 { 387 + struct amdgpu_device *adev = smu->adev; 387 388 int ret = 0; 388 389 uint32_t feature_mask[2]; 389 390 uint64_t feature_enabled; 391 + 392 + /* we need to re-init after suspend so return false */ 393 + if (adev->in_suspend) 394 + return false; 390 395 391 396 ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); 392 397
+2 -1
drivers/gpu/drm/etnaviv/etnaviv_gem.c
··· 689 689 struct page **pages = pvec + pinned; 690 690 691 691 ret = pin_user_pages_fast(ptr, num_pages, 692 - !userptr->ro ? FOLL_WRITE : 0, pages); 692 + FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM, 693 + pages); 693 694 if (ret < 0) { 694 695 unpin_user_pages(pvec, pinned); 695 696 kvfree(pvec);
-1
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
··· 13 13 #include <linux/irq.h> 14 14 #include <linux/mfd/syscon.h> 15 15 #include <linux/of_device.h> 16 - #include <linux/of_gpio.h> 17 16 #include <linux/platform_device.h> 18 17 #include <linux/pm_runtime.h> 19 18 #include <linux/regmap.h>
+3 -2
drivers/gpu/drm/i915/display/intel_atomic_plane.c
··· 317 317 if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc) 318 318 return 0; 319 319 320 - new_crtc_state->enabled_planes |= BIT(plane->id); 321 - 322 320 ret = plane->check_plane(new_crtc_state, new_plane_state); 323 321 if (ret) 324 322 return ret; 323 + 324 + if (fb) 325 + new_crtc_state->enabled_planes |= BIT(plane->id); 325 326 326 327 /* FIXME pre-g4x don't work like this */ 327 328 if (new_plane_state->uapi.visible)
+1 -3
drivers/gpu/drm/i915/display/intel_dp.c
··· 3619 3619 { 3620 3620 int ret; 3621 3621 3622 - intel_dp_lttpr_init(intel_dp); 3623 - 3624 - if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) 3622 + if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0) 3625 3623 return false; 3626 3624 3627 3625 /*
+7
drivers/gpu/drm/i915/display/intel_dp_aux.c
··· 133 133 else 134 134 precharge = 5; 135 135 136 + /* Max timeout value on G4x-BDW: 1.6ms */ 136 137 if (IS_BROADWELL(dev_priv)) 137 138 timeout = DP_AUX_CH_CTL_TIME_OUT_600us; 138 139 else ··· 160 159 enum phy phy = intel_port_to_phy(i915, dig_port->base.port); 161 160 u32 ret; 162 161 162 + /* 163 + * Max timeout values: 164 + * SKL-GLK: 1.6ms 165 + * CNL: 3.2ms 166 + * ICL+: 4ms 167 + */ 163 168 ret = DP_AUX_CH_CTL_SEND_BUSY | 164 169 DP_AUX_CH_CTL_DONE | 165 170 DP_AUX_CH_CTL_INTERRUPT |
+2 -8
drivers/gpu/drm/i915/display/intel_vdsc.c
··· 1014 1014 { 1015 1015 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; 1016 1016 1017 - if (crtc_state->cpu_transcoder == TRANSCODER_EDP) 1018 - return DSS_CTL1; 1019 - 1020 - return ICL_PIPE_DSS_CTL1(pipe); 1017 + return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL1(pipe) : DSS_CTL1; 1021 1018 } 1022 1019 1023 1020 static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state) 1024 1021 { 1025 1022 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; 1026 1023 1027 - if (crtc_state->cpu_transcoder == TRANSCODER_EDP) 1028 - return DSS_CTL2; 1029 - 1030 - return ICL_PIPE_DSS_CTL2(pipe); 1024 + return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2; 1031 1025 } 1032 1026 1033 1027 void intel_dsc_enable(struct intel_encoder *encoder,
+12 -1
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
··· 316 316 WRITE_ONCE(fence->vma, NULL); 317 317 vma->fence = NULL; 318 318 319 - with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref) 319 + /* 320 + * Skip the write to HW if and only if the device is currently 321 + * suspended. 322 + * 323 + * If the driver does not currently hold a wakeref (if_in_use == 0), 324 + * the device may currently be runtime suspended, or it may be woken 325 + * up before the suspend takes place. If the device is not suspended 326 + * (powered down) and we skip clearing the fence register, the HW is 327 + * left in an undefined state where we may end up with multiple 328 + * registers overlapping. 329 + */ 330 + with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref) 320 331 fence_write(fence); 321 332 } 322 333
+24 -5
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 412 412 } 413 413 414 414 /** 415 - * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use 415 + * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active 416 416 * @rpm: the intel_runtime_pm structure 417 + * @ignore_usecount: get a ref even if dev->power.usage_count is 0 417 418 * 418 419 * This function grabs a device-level runtime pm reference if the device is 419 - * already in use and ensures that it is powered up. It is illegal to try 420 - * and access the HW should intel_runtime_pm_get_if_in_use() report failure. 420 + * already active and ensures that it is powered up. It is illegal to try 421 + * and access the HW should intel_runtime_pm_get_if_active() report failure. 422 + * 423 + * If @ignore_usecount=true, a reference will be acquired even if there is no 424 + * user requiring the device to be powered up (dev->power.usage_count == 0). 425 + * If the function returns false in this case then it's guaranteed that the 426 + * device's runtime suspend hook has been called already or that it will be 427 + * called (and hence it's also guaranteed that the device's runtime resume 428 + * hook will be called eventually). 421 429 * 422 430 * Any runtime pm reference obtained by this function must have a symmetric 423 431 * call to intel_runtime_pm_put() to release the reference again. ··· 433 425 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates 434 426 * as True if the wakeref was acquired, or False otherwise. 435 427 */ 436 - intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) 428 + static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm, 429 + bool ignore_usecount) 437 430 { 438 431 if (IS_ENABLED(CONFIG_PM)) { 439 432 /* ··· 443 434 * function, since the power state is undefined. This applies 444 435 * atm to the late/early system suspend/resume handlers. 445 436 */ 446 - if (pm_runtime_get_if_in_use(rpm->kdev) <= 0) 437 + if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0) 447 438 return 0; 448 439 } 449 440 450 441 intel_runtime_pm_acquire(rpm, true); 451 442 452 443 return track_intel_runtime_pm_wakeref(rpm); 444 + } 445 + 446 + intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) 447 + { 448 + return __intel_runtime_pm_get_if_active(rpm, false); 449 + } 450 + 451 + intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm) 452 + { 453 + return __intel_runtime_pm_get_if_active(rpm, true); 453 454 } 454 455 455 456 /**
+5
drivers/gpu/drm/i915/intel_runtime_pm.h
··· 177 177 178 178 intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm); 179 179 intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm); 180 + intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm); 180 181 intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm); 181 182 intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm); 182 183 ··· 187 186 188 187 #define with_intel_runtime_pm_if_in_use(rpm, wf) \ 189 188 for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \ 189 + intel_runtime_pm_put((rpm), (wf)), (wf) = 0) 190 + 191 + #define with_intel_runtime_pm_if_active(rpm, wf) \ 192 + for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \ 190 193 intel_runtime_pm_put((rpm), (wf)), (wf) = 0) 191 194 192 195 void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
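Unlike the existing _if_in_use variant, the new _if_active helper can also take a wakeref while dev->power.usage_count is zero, as long as the device has not actually runtime-suspended yet; the fence code above relies on exactly that to decide whether the register write can be skipped. A usage sketch of the new macro (my_write_hw() is a placeholder, not an i915 function):

    static void my_flush_if_powered(struct intel_runtime_pm *rpm)
    {
        intel_wakeref_t wakeref;

        /*
         * The body runs only while the device is still powered; if it
         * has already runtime-suspended, the write is skipped and the
         * resume/init path is expected to restore the state instead.
         */
        with_intel_runtime_pm_if_active(rpm, wakeref)
            my_write_hw();
    }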
+1 -1
drivers/gpu/drm/imx/imx-drm-core.c
··· 215 215 216 216 ret = drmm_mode_config_init(drm); 217 217 if (ret) 218 - return ret; 218 + goto err_kms; 219 219 220 220 ret = drm_vblank_init(drm, MAX_CRTC); 221 221 if (ret)
+11 -1
drivers/gpu/drm/imx/imx-ldb.c
··· 197 197 int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; 198 198 int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); 199 199 200 + if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { 201 + dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); 202 + return; 203 + } 204 + 200 205 drm_panel_prepare(imx_ldb_ch->panel); 201 206 202 207 if (dual) { ··· 259 254 unsigned long di_clk = mode->clock * 1000; 260 255 int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); 261 256 u32 bus_format = imx_ldb_ch->bus_format; 257 + 258 + if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { 259 + dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); 260 + return; 261 + } 262 262 263 263 if (mode->clock > 170000) { 264 264 dev_warn(ldb->dev, ··· 593 583 struct imx_ldb_channel *channel = &imx_ldb->channel[i]; 594 584 595 585 if (!channel->ldb) 596 - break; 586 + continue; 597 587 598 588 ret = imx_ldb_register(drm, channel); 599 589 if (ret)
+1 -1
drivers/gpu/drm/msm/adreno/a5xx_power.c
··· 304 304 /* Set up the limits management */ 305 305 if (adreno_is_a530(adreno_gpu)) 306 306 a530_lm_setup(gpu); 307 - else 307 + else if (adreno_is_a540(adreno_gpu)) 308 308 a540_lm_setup(gpu); 309 309 310 310 /* Set up SP/TP power collpase */
+1 -1
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
··· 339 339 else 340 340 bit = a6xx_gmu_oob_bits[state].ack_new; 341 341 342 - gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit); 342 + gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit); 343 343 } 344 344 345 345 /* Enable CPU control of SPTP power power collapse */
+72 -36
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
··· 522 522 return a6xx_idle(gpu, ring) ? 0 : -EINVAL; 523 523 } 524 524 525 - static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, 525 + /* 526 + * Check that the microcode version is new enough to include several key 527 + * security fixes. Return true if the ucode is safe. 528 + */ 529 + static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu, 526 530 struct drm_gem_object *obj) 527 531 { 532 + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; 533 + struct msm_gpu *gpu = &adreno_gpu->base; 528 534 u32 *buf = msm_gem_get_vaddr(obj); 535 + bool ret = false; 529 536 530 537 if (IS_ERR(buf)) 531 - return; 538 + return false; 532 539 533 540 /* 534 - * If the lowest nibble is 0xa that is an indication that this microcode 535 - * has been patched. The actual version is in dword [3] but we only care 536 - * about the patchlevel which is the lowest nibble of dword [3] 537 - * 538 - * Otherwise check that the firmware is greater than or equal to 1.90 539 - * which was the first version that had this fix built in 541 + * Targets up to a640 (a618, a630 and a640) need to check for a 542 + * microcode version that is patched to support the whereami opcode or 543 + * one that is new enough to include it by default. 540 544 */ 541 - if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) 542 - a6xx_gpu->has_whereami = true; 543 - else if ((buf[0] & 0xfff) > 0x190) 544 - a6xx_gpu->has_whereami = true; 545 + if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) || 546 + adreno_is_a640(adreno_gpu)) { 547 + /* 548 + * If the lowest nibble is 0xa that is an indication that this 549 + * microcode has been patched. The actual version is in dword 550 + * [3] but we only care about the patchlevel which is the lowest 551 + * nibble of dword [3] 552 + * 553 + * Otherwise check that the firmware is greater than or equal 554 + * to 1.90 which was the first version that had this fix built 555 + * in 556 + */ 557 + if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) || 558 + (buf[0] & 0xfff) >= 0x190) { 559 + a6xx_gpu->has_whereami = true; 560 + ret = true; 561 + goto out; 562 + } 545 563 564 + DRM_DEV_ERROR(&gpu->pdev->dev, 565 + "a630 SQE ucode is too old. Have version %x need at least %x\n", 566 + buf[0] & 0xfff, 0x190); 567 + } else { 568 + /* 569 + * a650 tier targets don't need whereami but still need to be 570 + * equal to or newer than 1.95 for other security fixes 571 + */ 572 + if (adreno_is_a650(adreno_gpu)) { 573 + if ((buf[0] & 0xfff) >= 0x195) { 574 + ret = true; 575 + goto out; 576 + } 577 + 578 + DRM_DEV_ERROR(&gpu->pdev->dev, 579 + "a650 SQE ucode is too old. 
Have version %x need at least %x\n", 580 + buf[0] & 0xfff, 0x195); 581 + } 582 + 583 + /* 584 + * When a660 is added those targets should return true here 585 + * since those have all the critical security fixes built in 586 + * from the start 587 + */ 588 + } 589 + out: 546 590 msm_gem_put_vaddr(obj); 591 + return ret; 547 592 } 548 593 549 594 static int a6xx_ucode_init(struct msm_gpu *gpu) ··· 611 566 } 612 567 613 568 msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw"); 614 - a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo); 569 + if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) { 570 + msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace); 571 + drm_gem_object_put(a6xx_gpu->sqe_bo); 572 + 573 + a6xx_gpu->sqe_bo = NULL; 574 + return -EPERM; 575 + } 615 576 } 616 577 617 578 gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO, ··· 1401 1350 u32 revn) 1402 1351 { 1403 1352 struct opp_table *opp_table; 1404 - struct nvmem_cell *cell; 1405 1353 u32 supp_hw = UINT_MAX; 1406 - void *buf; 1354 + u16 speedbin; 1355 + int ret; 1407 1356 1408 - cell = nvmem_cell_get(dev, "speed_bin"); 1409 - /* 1410 - * -ENOENT means that the platform doesn't support speedbin which is 1411 - * fine 1412 - */ 1413 - if (PTR_ERR(cell) == -ENOENT) 1414 - return 0; 1415 - else if (IS_ERR(cell)) { 1357 + ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin); 1358 + if (ret) { 1416 1359 DRM_DEV_ERROR(dev, 1417 - "failed to read speed-bin. Some OPPs may not be supported by hardware"); 1360 + "failed to read speed-bin (%d). Some OPPs may not be supported by hardware", 1361 + ret); 1418 1362 goto done; 1419 1363 } 1364 + speedbin = le16_to_cpu(speedbin); 1420 1365 1421 - buf = nvmem_cell_read(cell, NULL); 1422 - if (IS_ERR(buf)) { 1423 - nvmem_cell_put(cell); 1424 - DRM_DEV_ERROR(dev, 1425 - "failed to read speed-bin. Some OPPs may not be supported by hardware"); 1426 - goto done; 1427 - } 1428 - 1429 - supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf)); 1430 - 1431 - kfree(buf); 1432 - nvmem_cell_put(cell); 1366 + supp_hw = fuse_to_supp_hw(dev, revn, speedbin); 1433 1367 1434 1368 done: 1435 1369 opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
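The version check above keys off fields packed into the SQE firmware header: the low 12 bits of the first dword carry the version (e.g. 0x190 for 1.90), a low nibble of 0xa marks a patched build, and a patched build's patchlevel sits in the low nibble of the third dword. A small standalone sketch of the a630-class check, treating that layout as described in the in-line comments rather than as an official firmware format:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool a630_class_sqe_ok(const uint32_t *buf)
    {
        bool patched = (buf[0] & 0xf) == 0xa;
        unsigned int version = buf[0] & 0xfff;
        unsigned int patchlevel = buf[2] & 0xf;

        return (patched && patchlevel >= 1) || version >= 0x190;
    }

    int main(void)
    {
        uint32_t hdr[3] = { 0x18a, 0, 0x2 };    /* patched build, patchlevel 2 */

        printf("%s\n", a630_class_sqe_ok(hdr) ? "ok" : "too old");
        return 0;
    }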
+7 -5
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
··· 43 43 #define DPU_DEBUGFS_DIR "msm_dpu" 44 44 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask" 45 45 46 + #define MIN_IB_BW 400000000ULL /* Min ib vote 400MB */ 47 + 46 48 static int dpu_kms_hw_init(struct msm_kms *kms); 47 49 static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms); 48 50 ··· 933 931 DPU_DEBUG("REG_DMA is not defined"); 934 932 } 935 933 934 + if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) 935 + dpu_kms_parse_data_bus_icc_path(dpu_kms); 936 + 936 937 pm_runtime_get_sync(&dpu_kms->pdev->dev); 937 938 938 939 dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0); ··· 1036 1031 } 1037 1032 1038 1033 dpu_vbif_init_memtypes(dpu_kms); 1039 - 1040 - if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) 1041 - dpu_kms_parse_data_bus_icc_path(dpu_kms); 1042 1034 1043 1035 pm_runtime_put_sync(&dpu_kms->pdev->dev); 1044 1036 ··· 1193 1191 1194 1192 ddev = dpu_kms->dev; 1195 1193 1194 + WARN_ON(!(dpu_kms->num_paths)); 1196 1195 /* Min vote of BW is required before turning on AXI clk */ 1197 1196 for (i = 0; i < dpu_kms->num_paths; i++) 1198 - icc_set_bw(dpu_kms->path[i], 0, 1199 - dpu_kms->catalog->perf.min_dram_ib); 1197 + icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW)); 1200 1198 1201 1199 rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true); 1202 1200 if (rc) {
+7
drivers/gpu/drm/msm/dp/dp_aux.c
··· 32 32 struct drm_dp_aux dp_aux; 33 33 }; 34 34 35 + #define MAX_AUX_RETRIES 5 36 + 35 37 static const char *dp_aux_get_error(u32 aux_error) 36 38 { 37 39 switch (aux_error) { ··· 379 377 ret = dp_aux_cmd_fifo_tx(aux, msg); 380 378 381 379 if (ret < 0) { 380 + if (aux->native) { 381 + aux->retry_cnt++; 382 + if (!(aux->retry_cnt % MAX_AUX_RETRIES)) 383 + dp_catalog_aux_update_cfg(aux->catalog); 384 + } 382 385 usleep_range(400, 500); /* at least 400us to next try */ 383 386 goto unlock_exit; 384 387 }
+1 -1
drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
··· 163 163 break; 164 164 case MSM_DSI_PHY_7NM: 165 165 case MSM_DSI_PHY_7NM_V4_1: 166 - pll = msm_dsi_pll_7nm_init(pdev, id); 166 + pll = msm_dsi_pll_7nm_init(pdev, type, id); 167 167 break; 168 168 default: 169 169 pll = ERR_PTR(-ENXIO);
+4 -2
drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
··· 117 117 } 118 118 #endif 119 119 #ifdef CONFIG_DRM_MSM_DSI_7NM_PHY 120 - struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id); 120 + struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, 121 + enum msm_dsi_phy_type type, int id); 121 122 #else 122 123 static inline struct msm_dsi_pll * 123 - msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) 124 + msm_dsi_pll_7nm_init(struct platform_device *pdev, 125 + enum msm_dsi_phy_type type, int id) 124 126 { 125 127 return ERR_PTR(-ENODEV); 126 128 }
+6 -5
drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
··· 325 325 pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low); 326 326 pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid); 327 327 pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high); 328 - pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40); 328 + pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate); 329 329 pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); 330 330 pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */ 331 331 pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters); ··· 509 509 { 510 510 struct msm_dsi_pll *pll = hw_clk_to_pll(hw); 511 511 struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll); 512 + struct dsi_pll_config *config = &pll_7nm->pll_configuration; 512 513 void __iomem *base = pll_7nm->mmio; 513 514 u64 ref_clk = pll_7nm->vco_ref_clk_rate; 514 515 u64 vco_rate = 0x0; ··· 530 529 /* 531 530 * TODO: 532 531 * 1. Assumes prescaler is disabled 533 - * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits) 534 532 */ 535 - multiplier = 1 << 18; 533 + multiplier = 1 << config->frac_bits; 536 534 pll_freq = dec * (ref_clk * 2); 537 535 tmp64 = (ref_clk * 2 * frac); 538 536 pll_freq += div_u64(tmp64, multiplier); ··· 852 852 return ret; 853 853 } 854 854 855 - struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id) 855 + struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, 856 + enum msm_dsi_phy_type type, int id) 856 857 { 857 858 struct dsi_pll_7nm *pll_7nm; 858 859 struct msm_dsi_pll *pll; ··· 886 885 pll = &pll_7nm->base; 887 886 pll->min_rate = 1000000000UL; 888 887 pll->max_rate = 3500000000UL; 889 - if (pll->type == MSM_DSI_PHY_7NM_V4_1) { 888 + if (type == MSM_DSI_PHY_7NM_V4_1) { 890 889 pll->min_rate = 600000000UL; 891 890 pll->max_rate = (unsigned long)5000000000ULL; 892 891 /* workaround for max rate overflowing on 32-bit builds: */
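With the hard-coded 2^18 multiplier replaced by the configured fractional width, the rate recovered above is effectively vco_rate = 2 * ref_clk * (dec + frac / 2^frac_bits), so the same readback stays correct for any PLL configuration whose fractional divider uses a different number of bits.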
+5 -2
drivers/gpu/drm/msm/msm_atomic.c
··· 57 57 58 58 static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask) 59 59 { 60 + int crtc_index; 60 61 struct drm_crtc *crtc; 61 62 62 - for_each_crtc_mask(kms->dev, crtc, crtc_mask) 63 - mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]); 63 + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { 64 + crtc_index = drm_crtc_index(crtc); 65 + mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index); 66 + } 64 67 } 65 68 66 69 static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
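Once the dedicated lockdep keys are dropped (see the msm_kms.h hunk further down), every per-CRTC commit_lock shares a single lock class, so taking several of them in one loop needs mutex_lock_nested() with a distinct subclass per lock to keep lockdep from flagging false recursion. A kernel-context sketch of that pattern, independent of the driver (array size arbitrary, subclass values must stay below the lockdep limit of 8):

    #include <linux/mutex.h>

    #define NR_LOCKS 4

    static struct mutex locks[NR_LOCKS];

    static void lock_some(unsigned int mask)
    {
        int i;

        for (i = 0; i < NR_LOCKS; i++)
            if (mask & (1U << i))
                mutex_lock_nested(&locks[i], i);
    }

    static void unlock_some(unsigned int mask)
    {
        int i;

        for (i = 0; i < NR_LOCKS; i++)
            if (mask & (1U << i))
                mutex_unlock(&locks[i]);
    }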
+12
drivers/gpu/drm/msm/msm_drv.c
··· 1072 1072 static int __maybe_unused msm_pm_prepare(struct device *dev) 1073 1073 { 1074 1074 struct drm_device *ddev = dev_get_drvdata(dev); 1075 + struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL; 1076 + 1077 + if (!priv || !priv->kms) 1078 + return 0; 1075 1079 1076 1080 return drm_mode_config_helper_suspend(ddev); 1077 1081 } ··· 1083 1079 static void __maybe_unused msm_pm_complete(struct device *dev) 1084 1080 { 1085 1081 struct drm_device *ddev = dev_get_drvdata(dev); 1082 + struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL; 1083 + 1084 + if (!priv || !priv->kms) 1085 + return; 1086 1086 1087 1087 drm_mode_config_helper_resume(ddev); 1088 1088 } ··· 1319 1311 static void msm_pdev_shutdown(struct platform_device *pdev) 1320 1312 { 1321 1313 struct drm_device *drm = platform_get_drvdata(pdev); 1314 + struct msm_drm_private *priv = drm ? drm->dev_private : NULL; 1315 + 1316 + if (!priv || !priv->kms) 1317 + return; 1322 1318 1323 1319 drm_atomic_helper_shutdown(drm); 1324 1320 }
+1 -1
drivers/gpu/drm/msm/msm_fence.c
··· 45 45 int ret; 46 46 47 47 if (fence > fctx->last_fence) { 48 - DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n", 48 + DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n", 49 49 fctx->name, fence, fctx->last_fence); 50 50 return -EINVAL; 51 51 }
+2 -6
drivers/gpu/drm/msm/msm_kms.h
··· 157 157 * from the crtc's pending_timer close to end of the frame: 158 158 */ 159 159 struct mutex commit_lock[MAX_CRTCS]; 160 - struct lock_class_key commit_lock_keys[MAX_CRTCS]; 161 160 unsigned pending_crtc_mask; 162 161 struct msm_pending_timer pending_timers[MAX_CRTCS]; 163 162 }; ··· 166 167 { 167 168 unsigned i, ret; 168 169 169 - for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) { 170 - lockdep_register_key(&kms->commit_lock_keys[i]); 171 - __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]", 172 - &kms->commit_lock_keys[i]); 173 - } 170 + for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) 171 + mutex_init(&kms->commit_lock[i]); 174 172 175 173 kms->funcs = funcs; 176 174
+12 -1
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 2693 2693 else 2694 2694 nouveau_display(dev)->format_modifiers = disp50xx_modifiers; 2695 2695 2696 - if (disp->disp->object.oclass >= GK104_DISP) { 2696 + /* FIXME: 256x256 cursors are supported on Kepler, however unlike Maxwell and later 2697 + * generations Kepler requires that we use small pages (4K) for cursor scanout surfaces. The 2698 + * proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to 2699 + * small page allocations in prepare_fb(). When this is implemented, we should also force 2700 + * large pages (128K) for ovly fbs in order to fix Kepler ovlys. 2701 + * But until then, just limit cursors to 128x128 - which is small enough to avoid ever using 2702 + * large pages. 2703 + */ 2704 + if (disp->disp->object.oclass >= GM107_DISP) { 2697 2705 dev->mode_config.cursor_width = 256; 2698 2706 dev->mode_config.cursor_height = 256; 2707 + } else if (disp->disp->object.oclass >= GK104_DISP) { 2708 + dev->mode_config.cursor_width = 128; 2709 + dev->mode_config.cursor_height = 128; 2699 2710 } else { 2700 2711 dev->mode_config.cursor_width = 64; 2701 2712 dev->mode_config.cursor_height = 64;
+6 -25
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
··· 48 48 static const struct drm_encoder_funcs rcar_du_encoder_funcs = { 49 49 }; 50 50 51 - static void rcar_du_encoder_release(struct drm_device *dev, void *res) 52 - { 53 - struct rcar_du_encoder *renc = res; 54 - 55 - drm_encoder_cleanup(&renc->base); 56 - kfree(renc); 57 - } 58 - 59 51 int rcar_du_encoder_init(struct rcar_du_device *rcdu, 60 52 enum rcar_du_output output, 61 53 struct device_node *enc_node) 62 54 { 63 55 struct rcar_du_encoder *renc; 64 56 struct drm_bridge *bridge; 65 - int ret; 66 57 67 58 /* 68 59 * Locate the DRM bridge from the DT node. For the DPAD outputs, if the ··· 92 101 return -ENOLINK; 93 102 } 94 103 95 - renc = kzalloc(sizeof(*renc), GFP_KERNEL); 96 - if (renc == NULL) 97 - return -ENOMEM; 98 - 99 - renc->output = output; 100 - 101 104 dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n", 102 105 enc_node, output); 103 106 104 - ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs, 105 - DRM_MODE_ENCODER_NONE, NULL); 106 - if (ret < 0) { 107 - kfree(renc); 108 - return ret; 109 - } 107 + renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base, 108 + &rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE, 109 + NULL); 110 + if (!renc) 111 + return -ENOMEM; 110 112 111 - ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release, 112 - renc); 113 - if (ret) 114 - return ret; 113 + renc->output = output; 115 114 116 115 /* 117 116 * Attach the bridge to the encoder. The bridge will create the
+13 -17
drivers/gpu/drm/tegra/dc.c
··· 1688 1688 dev_err(dc->dev, 1689 1689 "failed to set clock rate to %lu Hz\n", 1690 1690 state->pclk); 1691 + 1692 + err = clk_set_rate(dc->clk, state->pclk); 1693 + if (err < 0) 1694 + dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n", 1695 + dc->clk, state->pclk, err); 1691 1696 } 1692 1697 1693 1698 DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), ··· 1703 1698 value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; 1704 1699 tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); 1705 1700 } 1706 - 1707 - err = clk_set_rate(dc->clk, state->pclk); 1708 - if (err < 0) 1709 - dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n", 1710 - dc->clk, state->pclk, err); 1711 1701 } 1712 1702 1713 1703 static void tegra_dc_stop(struct tegra_dc *dc) ··· 2501 2501 * POWER_CONTROL registers during CRTC enabling. 2502 2502 */ 2503 2503 if (dc->soc->coupled_pm && dc->pipe == 1) { 2504 - u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER; 2505 - struct device_link *link; 2506 - struct device *partner; 2504 + struct device *companion; 2505 + struct tegra_dc *parent; 2507 2506 2508 - partner = driver_find_device(dc->dev->driver, NULL, NULL, 2509 - tegra_dc_match_by_pipe); 2510 - if (!partner) 2507 + companion = driver_find_device(dc->dev->driver, NULL, (const void *)0, 2508 + tegra_dc_match_by_pipe); 2509 + if (!companion) 2511 2510 return -EPROBE_DEFER; 2512 2511 2513 - link = device_link_add(dc->dev, partner, flags); 2514 - if (!link) { 2515 - dev_err(dc->dev, "failed to link controllers\n"); 2516 - return -EINVAL; 2517 - } 2512 + parent = dev_get_drvdata(companion); 2513 + dc->client.parent = &parent->client; 2518 2514 2519 - dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner)); 2515 + dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion)); 2520 2516 } 2521 2517 2522 2518 return 0;
+7
drivers/gpu/drm/tegra/sor.c
··· 3115 3115 * kernel is possible. 3116 3116 */ 3117 3117 if (sor->rst) { 3118 + err = pm_runtime_resume_and_get(sor->dev); 3119 + if (err < 0) { 3120 + dev_err(sor->dev, "failed to get runtime PM: %d\n", err); 3121 + return err; 3122 + } 3123 + 3118 3124 err = reset_control_acquire(sor->rst); 3119 3125 if (err < 0) { 3120 3126 dev_err(sor->dev, "failed to acquire SOR reset: %d\n", ··· 3154 3148 } 3155 3149 3156 3150 reset_control_release(sor->rst); 3151 + pm_runtime_put(sor->dev); 3157 3152 } 3158 3153 3159 3154 err = clk_prepare_enable(sor->clk_safe);
+6 -4
drivers/gpu/host1x/bus.c
··· 705 705 EXPORT_SYMBOL(host1x_driver_unregister); 706 706 707 707 /** 708 - * host1x_client_register() - register a host1x client 708 + * __host1x_client_register() - register a host1x client 709 709 * @client: host1x client 710 + * @key: lock class key for the client-specific mutex 710 711 * 711 712 * Registers a host1x client with each host1x controller instance. Note that 712 713 * each client will only match their parent host1x controller and will only be ··· 716 715 * device and call host1x_device_init(), which will in turn call each client's 717 716 * &host1x_client_ops.init implementation. 718 717 */ 719 - int host1x_client_register(struct host1x_client *client) 718 + int __host1x_client_register(struct host1x_client *client, 719 + struct lock_class_key *key) 720 720 { 721 721 struct host1x *host1x; 722 722 int err; 723 723 724 724 INIT_LIST_HEAD(&client->list); 725 - mutex_init(&client->lock); 725 + __mutex_init(&client->lock, "host1x client lock", key); 726 726 client->usecount = 0; 727 727 728 728 mutex_lock(&devices_lock); ··· 744 742 745 743 return 0; 746 744 } 747 - EXPORT_SYMBOL(host1x_client_register); 745 + EXPORT_SYMBOL(__host1x_client_register); 748 746 749 747 /** 750 748 * host1x_client_unregister() - unregister a host1x client
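Passing a struct lock_class_key into __host1x_client_register() lets every registration site get its own lockdep class for client->lock instead of one class shared by all clients. The matching header change is not part of this excerpt, but the usual idiom is to keep the old name as a wrapper that supplies a static per-call-site key, along these lines (hypothetical sketch, not the verbatim header):

    #define host1x_client_register(client)                      \
        ({                                                      \
            static struct lock_class_key __key;                 \
            __host1x_client_register(client, &__key);           \
        })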
+2 -2
drivers/infiniband/hw/cxgb4/cm.c
··· 3610 3610 ep->com.local_addr.ss_family == AF_INET) { 3611 3611 err = cxgb4_remove_server_filter( 3612 3612 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3613 - ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3613 + ep->com.dev->rdev.lldi.rxq_ids[0], false); 3614 3614 } else { 3615 3615 struct sockaddr_in6 *sin6; 3616 3616 c4iw_init_wr_wait(ep->com.wr_waitp); 3617 3617 err = cxgb4_remove_server( 3618 3618 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3619 - ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3619 + ep->com.dev->rdev.lldi.rxq_ids[0], true); 3620 3620 if (err) 3621 3621 goto done; 3622 3622 err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
+3 -1
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
··· 1194 1194 upper_32_bits(dma)); 1195 1195 roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG, 1196 1196 (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S); 1197 - roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); 1197 + 1198 + /* Make sure to write tail first and then head */ 1198 1199 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0); 1200 + roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0); 1199 1201 } else { 1200 1202 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma); 1201 1203 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
+1 -1
drivers/infiniband/hw/mlx5/devx.c
··· 1116 1116 case MLX5_CMD_OP_CREATE_MKEY: 1117 1117 MLX5_SET(destroy_mkey_in, din, opcode, 1118 1118 MLX5_CMD_OP_DESTROY_MKEY); 1119 - MLX5_SET(destroy_mkey_in, in, mkey_index, *obj_id); 1119 + MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id); 1120 1120 break; 1121 1121 case MLX5_CMD_OP_CREATE_CQ: 1122 1122 MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+14 -4
drivers/infiniband/hw/mlx5/qp.c
··· 1078 1078 1079 1079 qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); 1080 1080 MLX5_SET(qpc, qpc, uar_page, uar_index); 1081 - MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT); 1081 + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); 1082 1082 MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); 1083 1083 1084 1084 /* Set "fast registration enabled" for all kernel QPs */ ··· 1188 1188 } 1189 1189 return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING; 1190 1190 } 1191 - return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT; 1191 + return fr_supported ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING : 1192 + MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT; 1192 1193 } 1193 1194 1194 1195 static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) ··· 1207 1206 } 1208 1207 return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING; 1209 1208 } 1210 - return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT; 1209 + return fr_supported ? MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING : 1210 + MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT; 1211 1211 } 1212 1212 1213 1213 static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq, ··· 1219 1217 MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING || 1220 1218 MLX5_CAP_ROCE(dev->mdev, qp_ts_format) == 1221 1219 MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME; 1222 - int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; 1220 + int ts_format = fr_supported ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING : 1221 + MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; 1223 1222 1224 1223 if (recv_cq && 1225 1224 recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) ··· 1933 1930 if (qp->flags & IB_QP_CREATE_MANAGED_RECV) 1934 1931 MLX5_SET(qpc, qpc, cd_slave_receive, 1); 1935 1932 1933 + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); 1936 1934 MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ); 1937 1935 MLX5_SET(qpc, qpc, no_sq, 1); 1938 1936 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); ··· 4877 4873 struct mlx5_ib_dev *dev; 4878 4874 int has_net_offloads; 4879 4875 __be64 *rq_pas0; 4876 + int ts_format; 4880 4877 void *in; 4881 4878 void *rqc; 4882 4879 void *wq; ··· 4885 4880 int err; 4886 4881 4887 4882 dev = to_mdev(pd->device); 4883 + 4884 + ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq)); 4885 + if (ts_format < 0) 4886 + return ts_format; 4888 4887 4889 4888 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas; 4890 4889 in = kvzalloc(inlen, GFP_KERNEL); ··· 4899 4890 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); 4900 4891 MLX5_SET(rqc, rqc, mem_rq_type, 4901 4892 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE); 4893 + MLX5_SET(rqc, rqc, ts_format, ts_format); 4902 4894 MLX5_SET(rqc, rqc, user_index, rwq->user_index); 4903 4895 MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn); 4904 4896 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+1 -1
drivers/interconnect/bulk.c
··· 53 53 EXPORT_SYMBOL_GPL(icc_bulk_put); 54 54 55 55 /** 56 - * icc_bulk_set() - set bandwidth to a set of paths 56 + * icc_bulk_set_bw() - set bandwidth to a set of paths 57 57 * @num_paths: the number of icc_bulk_data 58 58 * @paths: the icc_bulk_data table containing the paths and bandwidth 59 59 *
+2
drivers/interconnect/core.c
··· 942 942 GFP_KERNEL); 943 943 if (new) 944 944 src->links = new; 945 + else 946 + ret = -ENOMEM; 945 947 946 948 out: 947 949 mutex_unlock(&icc_lock);
+8 -8
drivers/interconnect/qcom/msm8939.c
··· 131 131 DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8939_MASTER_SDCC_2, 8, -1, -1, MSM8939_PNOC_INT_1); 132 132 DEFINE_QNODE(mas_qdss_bam, MSM8939_MASTER_QDSS_BAM, 8, -1, -1, MSM8939_SNOC_QDSS_INT); 133 133 DEFINE_QNODE(mas_qdss_etr, MSM8939_MASTER_QDSS_ETR, 8, -1, -1, MSM8939_SNOC_QDSS_INT); 134 - DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, 20, -1, MSM8939_SLAVE_SRVC_SNOC); 134 + DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, -1, -1, MSM8939_SLAVE_SRVC_SNOC); 135 135 DEFINE_QNODE(mas_spdm, MSM8939_MASTER_SPDM, 4, -1, -1, MSM8939_PNOC_MAS_0); 136 136 DEFINE_QNODE(mas_tcu0, MSM8939_MASTER_TCU0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2); 137 137 DEFINE_QNODE(mas_usb_hs1, MSM8939_MASTER_USB_HS1, 4, -1, -1, MSM8939_PNOC_MAS_1); ··· 156 156 DEFINE_QNODE(pcnoc_snoc_slv, MSM8939_PNOC_SNOC_SLV, 8, -1, 45, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC, MSM8939_SNOC_INT_1); 157 157 DEFINE_QNODE(qdss_int, MSM8939_SNOC_QDSS_INT, 8, -1, -1, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC); 158 158 DEFINE_QNODE(slv_apps_l2, MSM8939_SLAVE_AMPSS_L2, 16, -1, -1, 0); 159 - DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, 20, 0); 159 + DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, -1, 0); 160 160 DEFINE_QNODE(slv_audio, MSM8939_SLAVE_LPASS, 4, -1, -1, 0); 161 161 DEFINE_QNODE(slv_bimc_cfg, MSM8939_SLAVE_BIMC_CFG, 4, -1, -1, 0); 162 162 DEFINE_QNODE(slv_blsp_1, MSM8939_SLAVE_BLSP_1, 4, -1, -1, 0); 163 163 DEFINE_QNODE(slv_boot_rom, MSM8939_SLAVE_BOOT_ROM, 4, -1, -1, 0); 164 164 DEFINE_QNODE(slv_camera_cfg, MSM8939_SLAVE_CAMERA_CFG, 4, -1, -1, 0); 165 - DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, 106, 0); 166 - DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, 107, 0); 165 + DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, -1, 0); 166 + DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, -1, 0); 167 167 DEFINE_QNODE(slv_clk_ctl, MSM8939_SLAVE_CLK_CTL, 4, -1, -1, 0); 168 168 DEFINE_QNODE(slv_crypto_0_cfg, MSM8939_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0); 169 169 DEFINE_QNODE(slv_dehr_cfg, MSM8939_SLAVE_DEHR_CFG, 4, -1, -1, 0); ··· 187 187 DEFINE_QNODE(slv_security, MSM8939_SLAVE_SECURITY, 4, -1, -1, 0); 188 188 DEFINE_QNODE(slv_snoc_cfg, MSM8939_SLAVE_SNOC_CFG, 4, -1, -1, 0); 189 189 DEFINE_QNODE(slv_spdm, MSM8939_SLAVE_SPDM, 4, -1, -1, 0); 190 - DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, 29, 0); 190 + DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, -1, 0); 191 191 DEFINE_QNODE(slv_tcsr, MSM8939_SLAVE_TCSR, 4, -1, -1, 0); 192 192 DEFINE_QNODE(slv_tlmm, MSM8939_SLAVE_TLMM, 4, -1, -1, 0); 193 193 DEFINE_QNODE(slv_usb_hs1, MSM8939_SLAVE_USB_HS1, 4, -1, -1, 0); 194 194 DEFINE_QNODE(slv_usb_hs2, MSM8939_SLAVE_USB_HS2, 4, -1, -1, 0); 195 195 DEFINE_QNODE(slv_venus_cfg, MSM8939_SLAVE_VENUS_CFG, 4, -1, -1, 0); 196 - DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, 3, -1, MSM8939_SNOC_BIMC_0_SLV); 197 - DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, 24, MSM8939_SLAVE_EBI_CH0); 196 + DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_0_SLV); 197 + DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0); 198 198 DEFINE_QNODE(snoc_bimc_1_mas, MSM8939_SNOC_BIMC_1_MAS, 16, 76, -1, MSM8939_SNOC_BIMC_1_SLV); 199 199 DEFINE_QNODE(snoc_bimc_1_slv, MSM8939_SNOC_BIMC_1_SLV, 16, -1, 104, MSM8939_SLAVE_EBI_CH0); 200 200 DEFINE_QNODE(snoc_bimc_2_mas, MSM8939_SNOC_BIMC_2_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_2_SLV); 201 201 
DEFINE_QNODE(snoc_bimc_2_slv, MSM8939_SNOC_BIMC_2_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0); 202 202 DEFINE_QNODE(snoc_int_0, MSM8939_SNOC_INT_0, 8, 99, 130, MSM8939_SLAVE_QDSS_STM, MSM8939_SLAVE_IMEM, MSM8939_SNOC_PNOC_MAS); 203 - DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, 100, 131, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64); 203 + DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, -1, -1, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64); 204 204 DEFINE_QNODE(snoc_int_bimc, MSM8939_SNOC_INT_BIMC, 8, 101, 132, MSM8939_SNOC_BIMC_1_MAS); 205 205 DEFINE_QNODE(snoc_pcnoc_mas, MSM8939_SNOC_PNOC_MAS, 8, -1, -1, MSM8939_SNOC_PNOC_SLV); 206 206 DEFINE_QNODE(snoc_pcnoc_slv, MSM8939_SNOC_PNOC_SLV, 8, -1, -1, MSM8939_PNOC_INT_0);
+2 -2
drivers/isdn/capi/kcapi.c
··· 721 721 * Return value: CAPI result code 722 722 */ 723 723 724 - u16 capi20_get_manufacturer(u32 contr, u8 *buf) 724 + u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]) 725 725 { 726 726 struct capi_ctr *ctr; 727 727 u16 ret; ··· 787 787 * Return value: CAPI result code 788 788 */ 789 789 790 - u16 capi20_get_serial(u32 contr, u8 *serial) 790 + u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]) 791 791 { 792 792 struct capi_ctr *ctr; 793 793 u16 ret;
+1 -1
drivers/isdn/hardware/mISDN/mISDNipac.c
··· 694 694 { 695 695 if (isac->type & IPAC_TYPE_ISACX) 696 696 WriteISAC(isac, ISACX_MASK, 0xff); 697 - else 697 + else if (isac->type != 0) 698 698 WriteISAC(isac, ISAC_MASK, 0xff); 699 699 if (isac->dch.timer.function != NULL) { 700 700 del_timer(&isac->dch.timer);
+1 -1
drivers/md/dm-ioctl.c
··· 529 529 * Grab our output buffer. 530 530 */ 531 531 nl = orig_nl = get_result_buffer(param, param_size, &len); 532 - if (len < needed) { 532 + if (len < needed || len < sizeof(nl->dev)) { 533 533 param->flags |= DM_BUFFER_FULL_FLAG; 534 534 goto out; 535 535 }
+25 -8
drivers/md/dm-table.c
··· 1594 1594 return blk_queue_zoned_model(q) != *zoned_model; 1595 1595 } 1596 1596 1597 + /* 1598 + * Check the device zoned model based on the target feature flag. If the target 1599 + * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are 1600 + * also accepted but all devices must have the same zoned model. If the target 1601 + * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any 1602 + * zoned model with all zoned devices having the same zone size. 1603 + */ 1597 1604 static bool dm_table_supports_zoned_model(struct dm_table *t, 1598 1605 enum blk_zoned_model zoned_model) 1599 1606 { ··· 1610 1603 for (i = 0; i < dm_table_get_num_targets(t); i++) { 1611 1604 ti = dm_table_get_target(t, i); 1612 1605 1613 - if (zoned_model == BLK_ZONED_HM && 1614 - !dm_target_supports_zoned_hm(ti->type)) 1615 - return false; 1616 - 1617 - if (!ti->type->iterate_devices || 1618 - ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model)) 1619 - return false; 1606 + if (dm_target_supports_zoned_hm(ti->type)) { 1607 + if (!ti->type->iterate_devices || 1608 + ti->type->iterate_devices(ti, device_not_zoned_model, 1609 + &zoned_model)) 1610 + return false; 1611 + } else if (!dm_target_supports_mixed_zoned_model(ti->type)) { 1612 + if (zoned_model == BLK_ZONED_HM) 1613 + return false; 1614 + } 1620 1615 } 1621 1616 1622 1617 return true; ··· 1630 1621 struct request_queue *q = bdev_get_queue(dev->bdev); 1631 1622 unsigned int *zone_sectors = data; 1632 1623 1624 + if (!blk_queue_is_zoned(q)) 1625 + return 0; 1626 + 1633 1627 return blk_queue_zone_sectors(q) != *zone_sectors; 1634 1628 } 1635 1629 1630 + /* 1631 + * Check consistency of zoned model and zone sectors across all targets. For 1632 + * zone sectors, if the destination device is a zoned block device, it shall 1633 + * have the specified zone_sectors. 1634 + */ 1636 1635 static int validate_hardware_zoned_model(struct dm_table *table, 1637 1636 enum blk_zoned_model zoned_model, 1638 1637 unsigned int zone_sectors) ··· 1659 1642 return -EINVAL; 1660 1643 1661 1644 if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) { 1662 - DMERR("%s: zone sectors is not consistent across all devices", 1645 + DMERR("%s: zone sectors is not consistent across all zoned devices", 1663 1646 dm_device_name(table->md)); 1664 1647 return -EINVAL; 1665 1648 }
+1 -1
drivers/md/dm-verity-target.c
··· 34 34 #define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks" 35 35 #define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once" 36 36 37 - #define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC + \ 37 + #define DM_VERITY_OPTS_MAX (3 + DM_VERITY_OPTS_FEC + \ 38 38 DM_VERITY_ROOT_HASH_VERIFICATION_OPTS) 39 39 40 40 static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
+1 -1
drivers/md/dm-zoned-target.c
··· 1143 1143 static struct target_type dmz_type = { 1144 1144 .name = "zoned", 1145 1145 .version = {2, 0, 0}, 1146 - .features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM, 1146 + .features = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL, 1147 1147 .module = THIS_MODULE, 1148 1148 .ctr = dmz_ctr, 1149 1149 .dtr = dmz_dtr,
+4 -1
drivers/md/dm.c
··· 2036 2036 if (size != dm_get_size(md)) 2037 2037 memset(&md->geometry, 0, sizeof(md->geometry)); 2038 2038 2039 - set_capacity_and_notify(md->disk, size); 2039 + if (!get_capacity(md->disk)) 2040 + set_capacity(md->disk, size); 2041 + else 2042 + set_capacity_and_notify(md->disk, size); 2040 2043 2041 2044 dm_table_event_callback(t, event_callback, md); 2042 2045
+4 -2
drivers/mfd/intel_quark_i2c_gpio.c
··· 72 72 {} 73 73 }; 74 74 75 - static const struct resource intel_quark_i2c_res[] = { 75 + /* This is used as a place holder and will be modified at run-time */ 76 + static struct resource intel_quark_i2c_res[] = { 76 77 [INTEL_QUARK_IORES_MEM] = { 77 78 .flags = IORESOURCE_MEM, 78 79 }, ··· 86 85 .adr = MFD_ACPI_MATCH_I2C, 87 86 }; 88 87 89 - static const struct resource intel_quark_gpio_res[] = { 88 + /* This is used as a place holder and will be modified at run-time */ 89 + static struct resource intel_quark_gpio_res[] = { 90 90 [INTEL_QUARK_IORES_MEM] = { 91 91 .flags = IORESOURCE_MEM, 92 92 },
+7 -10
drivers/misc/mei/client.c
··· 2286 2286 if (buffer_id == 0) 2287 2287 return -EINVAL; 2288 2288 2289 - if (!mei_cl_is_connected(cl)) 2290 - return -ENODEV; 2289 + if (mei_cl_is_connected(cl)) 2290 + return -EPROTO; 2291 2291 2292 2292 if (cl->dma_mapped) 2293 2293 return -EPROTO; ··· 2327 2327 2328 2328 mutex_unlock(&dev->device_lock); 2329 2329 wait_event_timeout(cl->wait, 2330 - cl->dma_mapped || 2331 - cl->status || 2332 - !mei_cl_is_connected(cl), 2330 + cl->dma_mapped || cl->status, 2333 2331 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 2334 2332 mutex_lock(&dev->device_lock); 2335 2333 ··· 2374 2376 return -EOPNOTSUPP; 2375 2377 } 2376 2378 2377 - if (!mei_cl_is_connected(cl)) 2378 - return -ENODEV; 2379 + /* do not allow unmap for connected client */ 2380 + if (mei_cl_is_connected(cl)) 2381 + return -EPROTO; 2379 2382 2380 2383 if (!cl->dma_mapped) 2381 2384 return -EPROTO; ··· 2404 2405 2405 2406 mutex_unlock(&dev->device_lock); 2406 2407 wait_event_timeout(cl->wait, 2407 - !cl->dma_mapped || 2408 - cl->status || 2409 - !mei_cl_is_connected(cl), 2408 + !cl->dma_mapped || cl->status, 2410 2409 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 2411 2410 mutex_lock(&dev->device_lock); 2412 2411
+19 -15
drivers/net/arcnet/com20020-pci.c
··· 127 127 int i, ioaddr, ret; 128 128 struct resource *r; 129 129 130 + ret = 0; 131 + 130 132 if (pci_enable_device(pdev)) 131 133 return -EIO; 132 134 ··· 140 138 ci = (struct com20020_pci_card_info *)id->driver_data; 141 139 priv->ci = ci; 142 140 mm = &ci->misc_map; 141 + 142 + pci_set_drvdata(pdev, priv); 143 143 144 144 INIT_LIST_HEAD(&priv->list_dev); 145 145 ··· 165 161 dev = alloc_arcdev(device); 166 162 if (!dev) { 167 163 ret = -ENOMEM; 168 - goto out_port; 164 + break; 169 165 } 170 166 dev->dev_port = i; 171 167 ··· 182 178 pr_err("IO region %xh-%xh already allocated\n", 183 179 ioaddr, ioaddr + cm->size - 1); 184 180 ret = -EBUSY; 185 - goto out_port; 181 + goto err_free_arcdev; 186 182 } 187 183 188 184 /* Dummy access after Reset ··· 220 216 if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) { 221 217 pr_err("IO address %Xh is empty!\n", ioaddr); 222 218 ret = -EIO; 223 - goto out_port; 219 + goto err_free_arcdev; 224 220 } 225 221 if (com20020_check(dev)) { 226 222 ret = -EIO; 227 - goto out_port; 223 + goto err_free_arcdev; 228 224 } 229 225 230 226 card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev), 231 227 GFP_KERNEL); 232 228 if (!card) { 233 229 ret = -ENOMEM; 234 - goto out_port; 230 + goto err_free_arcdev; 235 231 } 236 232 237 233 card->index = i; ··· 257 253 258 254 ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); 259 255 if (ret) 260 - goto out_port; 256 + goto err_free_arcdev; 261 257 262 258 ret = devm_led_classdev_register(&pdev->dev, &card->recon_led); 263 259 if (ret) 264 - goto out_port; 260 + goto err_free_arcdev; 265 261 266 262 dev_set_drvdata(&dev->dev, card); 267 263 268 264 ret = com20020_found(dev, IRQF_SHARED); 269 265 if (ret) 270 - goto out_port; 266 + goto err_free_arcdev; 271 267 272 268 devm_arcnet_led_init(dev, dev->dev_id, i); 273 269 274 270 list_add(&card->list, &priv->list_dev); 271 + continue; 272 + 273 + err_free_arcdev: 274 + free_arcdev(dev); 275 + break; 275 276 } 276 - 277 - pci_set_drvdata(pdev, priv); 278 - 279 - return 0; 280 - 281 - out_port: 282 - com20020pci_remove(pdev); 277 + if (ret) 278 + com20020pci_remove(pdev); 283 279 return ret; 284 280 } 285 281
+2 -6
drivers/net/bonding/bond_main.c
··· 3978 3978 3979 3979 rcu_read_lock(); 3980 3980 slave = bond_first_slave_rcu(bond); 3981 - if (!slave) { 3982 - ret = -EINVAL; 3981 + if (!slave) 3983 3982 goto out; 3984 - } 3985 3983 slave_ops = slave->dev->netdev_ops; 3986 - if (!slave_ops->ndo_neigh_setup) { 3987 - ret = -EINVAL; 3984 + if (!slave_ops->ndo_neigh_setup) 3988 3985 goto out; 3989 - } 3990 3986 3991 3987 /* TODO: find another way [1] to implement this. 3992 3988 * Passing a zeroed structure is fragile,
+1 -23
drivers/net/can/c_can/c_can.c
··· 212 212 .brp_inc = 1, 213 213 }; 214 214 215 - static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv) 216 - { 217 - if (priv->device) 218 - pm_runtime_enable(priv->device); 219 - } 220 - 221 - static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv) 222 - { 223 - if (priv->device) 224 - pm_runtime_disable(priv->device); 225 - } 226 - 227 215 static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv) 228 216 { 229 217 if (priv->device) ··· 1323 1335 1324 1336 int register_c_can_dev(struct net_device *dev) 1325 1337 { 1326 - struct c_can_priv *priv = netdev_priv(dev); 1327 1338 int err; 1328 1339 1329 1340 /* Deactivate pins to prevent DRA7 DCAN IP from being ··· 1332 1345 */ 1333 1346 pinctrl_pm_select_sleep_state(dev->dev.parent); 1334 1347 1335 - c_can_pm_runtime_enable(priv); 1336 - 1337 1348 dev->flags |= IFF_ECHO; /* we support local echo */ 1338 1349 dev->netdev_ops = &c_can_netdev_ops; 1339 1350 1340 1351 err = register_candev(dev); 1341 - if (err) 1342 - c_can_pm_runtime_disable(priv); 1343 - else 1352 + if (!err) 1344 1353 devm_can_led_init(dev); 1345 - 1346 1354 return err; 1347 1355 } 1348 1356 EXPORT_SYMBOL_GPL(register_c_can_dev); 1349 1357 1350 1358 void unregister_c_can_dev(struct net_device *dev) 1351 1359 { 1352 - struct c_can_priv *priv = netdev_priv(dev); 1353 - 1354 1360 unregister_candev(dev); 1355 - 1356 - c_can_pm_runtime_disable(priv); 1357 1361 } 1358 1362 EXPORT_SYMBOL_GPL(unregister_c_can_dev); 1359 1363
+2 -1
drivers/net/can/c_can/c_can_pci.c
··· 239 239 { 240 240 struct net_device *dev = pci_get_drvdata(pdev); 241 241 struct c_can_priv *priv = netdev_priv(dev); 242 + void __iomem *addr = priv->base; 242 243 243 244 unregister_c_can_dev(dev); 244 245 245 246 free_c_can_dev(dev); 246 247 247 - pci_iounmap(pdev, priv->base); 248 + pci_iounmap(pdev, addr); 248 249 pci_disable_msi(pdev); 249 250 pci_clear_master(pdev); 250 251 pci_release_regions(pdev);
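
The remove path above copies priv->base into a local before free_c_can_dev() releases the netdev (and the priv embedded in it), so the later pci_iounmap() no longer reads freed memory. A small userspace sketch of the same ordering, with illustrative struct and field names.

    #include <stdio.h>
    #include <stdlib.h>

    struct priv {
        void *base;
    };

    int main(void)
    {
        struct priv *priv = malloc(sizeof(*priv));
        void *addr;

        if (!priv)
            return 1;
        priv->base = malloc(16);

        addr = priv->base;  /* take the copy first ...                  */
        free(priv);         /* ... so freeing the container is harmless */
        free(addr);         /* stand-in for pci_iounmap() on the copy   */

        printf("done\n");
        return 0;
    }
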
+5 -1
drivers/net/can/c_can/c_can_platform.c
··· 29 29 #include <linux/list.h> 30 30 #include <linux/io.h> 31 31 #include <linux/platform_device.h> 32 + #include <linux/pm_runtime.h> 32 33 #include <linux/clk.h> 33 34 #include <linux/of.h> 34 35 #include <linux/of_device.h> ··· 387 386 platform_set_drvdata(pdev, dev); 388 387 SET_NETDEV_DEV(dev, &pdev->dev); 389 388 389 + pm_runtime_enable(priv->device); 390 390 ret = register_c_can_dev(dev); 391 391 if (ret) { 392 392 dev_err(&pdev->dev, "registering %s failed (err=%d)\n", ··· 400 398 return 0; 401 399 402 400 exit_free_device: 401 + pm_runtime_disable(priv->device); 403 402 free_c_can_dev(dev); 404 403 exit: 405 404 dev_err(&pdev->dev, "probe failed\n"); ··· 411 408 static int c_can_plat_remove(struct platform_device *pdev) 412 409 { 413 410 struct net_device *dev = platform_get_drvdata(pdev); 411 + struct c_can_priv *priv = netdev_priv(dev); 414 412 415 413 unregister_c_can_dev(dev); 416 - 414 + pm_runtime_disable(priv->device); 417 415 free_c_can_dev(dev); 418 416 419 417 return 0;
+1
drivers/net/can/dev/netlink.c
··· 355 355 356 356 struct rtnl_link_ops can_link_ops __read_mostly = { 357 357 .kind = "can", 358 + .netns_refund = true, 358 359 .maxtype = IFLA_CAN_MAX, 359 360 .policy = can_policy, 360 361 .setup = can_setup,
+7 -1
drivers/net/can/flexcan.c
··· 697 697 static int flexcan_chip_freeze(struct flexcan_priv *priv) 698 698 { 699 699 struct flexcan_regs __iomem *regs = priv->regs; 700 - unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; 700 + unsigned int timeout; 701 + u32 bitrate = priv->can.bittiming.bitrate; 701 702 u32 reg; 703 + 704 + if (bitrate) 705 + timeout = 1000 * 1000 * 10 / bitrate; 706 + else 707 + timeout = FLEXCAN_TIMEOUT_US / 10; 702 708 703 709 reg = priv->read(&regs->mcr); 704 710 reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT;
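
flexcan_chip_freeze() can now be reached before the bit timing is configured, so the hunk guards the timeout division against a zero bitrate and falls back to a fixed value. A rough userspace sketch of that calculation; FALLBACK_TIMEOUT_US stands in for FLEXCAN_TIMEOUT_US and its value here is only illustrative.

    #include <stdio.h>

    #define FALLBACK_TIMEOUT_US 250   /* illustrative, not the driver constant */

    static unsigned int freeze_timeout(unsigned int bitrate)
    {
        if (bitrate)
            return 1000 * 1000 * 10 / bitrate;
        return FALLBACK_TIMEOUT_US / 10;
    }

    int main(void)
    {
        printf("%u\n", freeze_timeout(500000)); /* 20 */
        printf("%u\n", freeze_timeout(0));      /* 25, no division by zero */
        return 0;
    }
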
+4
drivers/net/can/kvaser_pciefd.c
··· 57 57 #define KVASER_PCIEFD_KCAN_STAT_REG 0x418 58 58 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c 59 59 #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 60 + #define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 60 61 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 61 62 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430 62 63 /* Loopback control register */ ··· 949 948 init_completion(&can->flush_comp); 950 949 timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 951 950 0); 951 + 952 + /* Disable Bus load reporting */ 953 + iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG); 952 954 953 955 tx_npackets = ioread32(can->reg_base + 954 956 KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
+1 -4
drivers/net/can/m_can/m_can.c
··· 501 501 } 502 502 503 503 while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { 504 - if (rxfs & RXFS_RFL) 505 - netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); 506 - 507 504 m_can_read_fifo(dev, rxfs); 508 505 509 506 quota--; ··· 873 876 { 874 877 struct m_can_classdev *cdev = netdev_priv(dev); 875 878 876 - m_can_rx_handler(dev, 1); 879 + m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT); 877 880 878 881 m_can_enable_all_interrupts(cdev); 879 882
+1
drivers/net/can/usb/Kconfig
··· 73 73 - Kvaser Memorator Pro 5xHS 74 74 - Kvaser USBcan Light 4xHS 75 75 - Kvaser USBcan Pro 2xHS v2 76 + - Kvaser USBcan Pro 4xHS 76 77 - Kvaser USBcan Pro 5xHS 77 78 - Kvaser U100 78 79 - Kvaser U100P
+3 -1
drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
··· 86 86 #define USB_U100_PRODUCT_ID 273 87 87 #define USB_U100P_PRODUCT_ID 274 88 88 #define USB_U100S_PRODUCT_ID 275 89 + #define USB_USBCAN_PRO_4HS_PRODUCT_ID 276 89 90 #define USB_HYDRA_PRODUCT_ID_END \ 90 - USB_U100S_PRODUCT_ID 91 + USB_USBCAN_PRO_4HS_PRODUCT_ID 91 92 92 93 static inline bool kvaser_is_leaf(const struct usb_device_id *id) 93 94 { ··· 194 193 { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) }, 195 194 { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) }, 196 195 { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) }, 196 + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) }, 197 197 { } 198 198 }; 199 199 MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
+7 -7
drivers/net/dsa/b53/b53_common.c
··· 1105 1105 b53_disable_port(ds, port); 1106 1106 } 1107 1107 1108 - /* Let DSA handle the case were multiple bridges span the same switch 1109 - * device and different VLAN awareness settings are requested, which 1110 - * would be breaking filtering semantics for any of the other bridge 1111 - * devices. (not hardware supported) 1112 - */ 1113 - ds->vlan_filtering_is_global = true; 1114 - 1115 1108 return b53_setup_devlink_resources(ds); 1116 1109 } 1117 1110 ··· 2657 2664 ds->ops = &b53_switch_ops; 2658 2665 ds->untag_bridge_pvid = true; 2659 2666 dev->vlan_enabled = true; 2667 + /* Let DSA handle the case were multiple bridges span the same switch 2668 + * device and different VLAN awareness settings are requested, which 2669 + * would be breaking filtering semantics for any of the other bridge 2670 + * devices. (not hardware supported) 2671 + */ 2672 + ds->vlan_filtering_is_global = true; 2673 + 2660 2674 mutex_init(&dev->reg_mutex); 2661 2675 mutex_init(&dev->stats_mutex); 2662 2676
+8 -3
drivers/net/dsa/bcm_sf2.c
··· 114 114 /* Force link status for IMP port */ 115 115 reg = core_readl(priv, offset); 116 116 reg |= (MII_SW_OR | LINK_STS); 117 - reg &= ~GMII_SPEED_UP_2G; 117 + if (priv->type == BCM4908_DEVICE_ID) 118 + reg |= GMII_SPEED_UP_2G; 119 + else 120 + reg &= ~GMII_SPEED_UP_2G; 118 121 core_writel(priv, reg, offset); 119 122 120 123 /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ ··· 588 585 * in bits 15:8 and the patch level in bits 7:0 which is exactly what 589 586 * the REG_PHY_REVISION register layout is. 590 587 */ 591 - 592 - return priv->hw_params.gphy_rev; 588 + if (priv->int_phy_mask & BIT(port)) 589 + return priv->hw_params.gphy_rev; 590 + else 591 + return 0; 593 592 } 594 593 595 594 static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
+22 -24
drivers/net/dsa/mt7530.c
··· 436 436 TD_DM_DRVP(8) | TD_DM_DRVN(8)); 437 437 438 438 /* Setup core clock for MT7530 */ 439 - if (!trgint) { 440 - /* Disable MT7530 core clock */ 441 - core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); 439 + /* Disable MT7530 core clock */ 440 + core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); 442 441 443 - /* Disable PLL, since phy_device has not yet been created 444 - * provided for phy_[read,write]_mmd_indirect is called, we 445 - * provide our own core_write_mmd_indirect to complete this 446 - * function. 447 - */ 448 - core_write_mmd_indirect(priv, 449 - CORE_GSWPLL_GRP1, 450 - MDIO_MMD_VEND2, 451 - 0); 442 + /* Disable PLL, since phy_device has not yet been created 443 + * provided for phy_[read,write]_mmd_indirect is called, we 444 + * provide our own core_write_mmd_indirect to complete this 445 + * function. 446 + */ 447 + core_write_mmd_indirect(priv, 448 + CORE_GSWPLL_GRP1, 449 + MDIO_MMD_VEND2, 450 + 0); 452 451 453 - /* Set core clock into 500Mhz */ 454 - core_write(priv, CORE_GSWPLL_GRP2, 455 - RG_GSWPLL_POSDIV_500M(1) | 456 - RG_GSWPLL_FBKDIV_500M(25)); 452 + /* Set core clock into 500Mhz */ 453 + core_write(priv, CORE_GSWPLL_GRP2, 454 + RG_GSWPLL_POSDIV_500M(1) | 455 + RG_GSWPLL_FBKDIV_500M(25)); 457 456 458 - /* Enable PLL */ 459 - core_write(priv, CORE_GSWPLL_GRP1, 460 - RG_GSWPLL_EN_PRE | 461 - RG_GSWPLL_POSDIV_200M(2) | 462 - RG_GSWPLL_FBKDIV_200M(32)); 457 + /* Enable PLL */ 458 + core_write(priv, CORE_GSWPLL_GRP1, 459 + RG_GSWPLL_EN_PRE | 460 + RG_GSWPLL_POSDIV_200M(2) | 461 + RG_GSWPLL_FBKDIV_200M(32)); 463 462 464 - /* Enable MT7530 core clock */ 465 - core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); 466 - } 463 + /* Enable MT7530 core clock */ 464 + core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); 467 465 468 466 /* Setup the MT7530 TRGMII Tx Clock */ 469 467 core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+1 -1
drivers/net/ethernet/broadcom/Kconfig
··· 54 54 config BCM4908_ENET 55 55 tristate "Broadcom BCM4908 internal mac support" 56 56 depends on ARCH_BCM4908 || COMPILE_TEST 57 - default y 57 + default y if ARCH_BCM4908 58 58 help 59 59 This driver supports Ethernet controller integrated into Broadcom 60 60 BCM4908 family SoCs.
+1 -1
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
··· 722 722 kvfree(tx_info); 723 723 return 0; 724 724 } 725 - tx_info->open_state = false; 725 + tx_info->open_state = CH_KTLS_OPEN_SUCCESS; 726 726 spin_unlock(&tx_info->lock); 727 727 728 728 complete(&tx_info->completion);
+1
drivers/net/ethernet/faraday/ftgmac100.c
··· 1337 1337 */ 1338 1338 if (unlikely(priv->need_mac_restart)) { 1339 1339 ftgmac100_start_hw(priv); 1340 + priv->need_mac_restart = false; 1340 1341 1341 1342 /* Re-enable "bad" interrupts */ 1342 1343 iowrite32(FTGMAC100_INT_BAD,
+2
drivers/net/ethernet/intel/e1000e/82571.c
··· 899 899 } else { 900 900 data &= ~IGP02E1000_PM_D0_LPLU; 901 901 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); 902 + if (ret_val) 903 + return ret_val; 902 904 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 903 905 * during Dx states where the power conservation is most 904 906 * important. During driver activity we should enable
+3 -3
drivers/net/ethernet/intel/e1000e/hw.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* Copyright(c) 1999 - 2018 Intel Corporation. */ 3 3 4 - #ifndef _E1000_HW_H_ 5 - #define _E1000_HW_H_ 4 + #ifndef _E1000E_HW_H_ 5 + #define _E1000E_HW_H_ 6 6 7 7 #include "regs.h" 8 8 #include "defines.h" ··· 714 714 #include "80003es2lan.h" 715 715 #include "ich8lan.h" 716 716 717 - #endif 717 + #endif /* _E1000E_HW_H_ */
+5 -1
drivers/net/ethernet/intel/e1000e/netdev.c
··· 5974 5974 struct e1000_adapter *adapter; 5975 5975 adapter = container_of(work, struct e1000_adapter, reset_task); 5976 5976 5977 + rtnl_lock(); 5977 5978 /* don't run the task if already down */ 5978 - if (test_bit(__E1000_DOWN, &adapter->state)) 5979 + if (test_bit(__E1000_DOWN, &adapter->state)) { 5980 + rtnl_unlock(); 5979 5981 return; 5982 + } 5980 5983 5981 5984 if (!(adapter->flags & FLAG_RESTART_NOW)) { 5982 5985 e1000e_dump(adapter); 5983 5986 e_err("Reset adapter unexpectedly\n"); 5984 5987 } 5985 5988 e1000e_reinit_locked(adapter); 5989 + rtnl_unlock(); 5986 5990 } 5987 5991 5988 5992 /**
+13
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 3259 3259 } 3260 3260 3261 3261 /** 3262 + * i40e_rx_offset - Return expected offset into page to access data 3263 + * @rx_ring: Ring we are requesting offset of 3264 + * 3265 + * Returns the offset value for ring into the data buffer. 3266 + */ 3267 + static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) 3268 + { 3269 + return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; 3270 + } 3271 + 3272 + /** 3262 3273 * i40e_configure_rx_ring - Configure a receive ring context 3263 3274 * @ring: The Rx ring to configure 3264 3275 * ··· 3379 3368 clear_ring_build_skb_enabled(ring); 3380 3369 else 3381 3370 set_ring_build_skb_enabled(ring); 3371 + 3372 + ring->rx_offset = i40e_rx_offset(ring); 3382 3373 3383 3374 /* cache tail for quicker writes, and clear the reg before use */ 3384 3375 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
-12
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 1570 1570 } 1571 1571 1572 1572 /** 1573 - * i40e_rx_offset - Return expected offset into page to access data 1574 - * @rx_ring: Ring we are requesting offset of 1575 - * 1576 - * Returns the offset value for ring into the data buffer. 1577 - */ 1578 - static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring) 1579 - { 1580 - return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0; 1581 - } 1582 - 1583 - /** 1584 1573 * i40e_setup_rx_descriptors - Allocate Rx descriptors 1585 1574 * @rx_ring: Rx descriptor ring (for a specific queue) to setup 1586 1575 * ··· 1597 1608 rx_ring->next_to_alloc = 0; 1598 1609 rx_ring->next_to_clean = 0; 1599 1610 rx_ring->next_to_use = 0; 1600 - rx_ring->rx_offset = i40e_rx_offset(rx_ring); 1601 1611 1602 1612 /* XDP RX-queue info only needed for RX rings exposed to XDP */ 1603 1613 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
+22 -2
drivers/net/ethernet/intel/ice/ice_base.c
··· 275 275 } 276 276 277 277 /** 278 + * ice_rx_offset - Return expected offset into page to access data 279 + * @rx_ring: Ring we are requesting offset of 280 + * 281 + * Returns the offset value for ring into the data buffer. 282 + */ 283 + static unsigned int ice_rx_offset(struct ice_ring *rx_ring) 284 + { 285 + if (ice_ring_uses_build_skb(rx_ring)) 286 + return ICE_SKB_PAD; 287 + else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) 288 + return XDP_PACKET_HEADROOM; 289 + 290 + return 0; 291 + } 292 + 293 + /** 278 294 * ice_setup_rx_ctx - Configure a receive ring context 279 295 * @ring: The Rx ring to configure 280 296 * ··· 429 413 else 430 414 ice_set_ring_build_skb_ena(ring); 431 415 416 + ring->rx_offset = ice_rx_offset(ring); 417 + 432 418 /* init queue specific tail register */ 433 419 ring->tail = hw->hw_addr + QRX_TAIL(pf_q); 434 420 writel(0, ring->tail); 435 421 436 422 if (ring->xsk_pool) { 423 + bool ok; 424 + 437 425 if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) { 438 426 dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n", 439 427 num_bufs, ring->q_index); ··· 446 426 return 0; 447 427 } 448 428 449 - err = ice_alloc_rx_bufs_zc(ring, num_bufs); 450 - if (err) 429 + ok = ice_alloc_rx_bufs_zc(ring, num_bufs); 430 + if (!ok) 451 431 dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n", 452 432 ring->q_index, pf_q); 453 433 return 0;
-17
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 444 444 } 445 445 446 446 /** 447 - * ice_rx_offset - Return expected offset into page to access data 448 - * @rx_ring: Ring we are requesting offset of 449 - * 450 - * Returns the offset value for ring into the data buffer. 451 - */ 452 - static unsigned int ice_rx_offset(struct ice_ring *rx_ring) 453 - { 454 - if (ice_ring_uses_build_skb(rx_ring)) 455 - return ICE_SKB_PAD; 456 - else if (ice_is_xdp_ena_vsi(rx_ring->vsi)) 457 - return XDP_PACKET_HEADROOM; 458 - 459 - return 0; 460 - } 461 - 462 - /** 463 447 * ice_setup_rx_ring - Allocate the Rx descriptors 464 448 * @rx_ring: the Rx ring to set up 465 449 * ··· 477 493 478 494 rx_ring->next_to_use = 0; 479 495 rx_ring->next_to_clean = 0; 480 - rx_ring->rx_offset = ice_rx_offset(rx_ring); 481 496 482 497 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) 483 498 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
+5 -5
drivers/net/ethernet/intel/ice/ice_xsk.c
··· 358 358 * This function allocates a number of Rx buffers from the fill ring 359 359 * or the internal recycle mechanism and places them on the Rx ring. 360 360 * 361 - * Returns false if all allocations were successful, true if any fail. 361 + * Returns true if all allocations were successful, false if any fail. 362 362 */ 363 363 bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) 364 364 { 365 365 union ice_32b_rx_flex_desc *rx_desc; 366 366 u16 ntu = rx_ring->next_to_use; 367 367 struct ice_rx_buf *rx_buf; 368 - bool ret = false; 368 + bool ok = true; 369 369 dma_addr_t dma; 370 370 371 371 if (!count) 372 - return false; 372 + return true; 373 373 374 374 rx_desc = ICE_RX_DESC(rx_ring, ntu); 375 375 rx_buf = &rx_ring->rx_buf[ntu]; ··· 377 377 do { 378 378 rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool); 379 379 if (!rx_buf->xdp) { 380 - ret = true; 380 + ok = false; 381 381 break; 382 382 } 383 383 ··· 402 402 ice_release_rx_desc(rx_ring, ntu); 403 403 } 404 404 405 - return ret; 405 + return ok; 406 406 } 407 407 408 408 /**
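
With this change ice_alloc_rx_bufs_zc() reports true when every buffer was placed on the ring, so the ice_base.c caller above tests a plainly named ok instead of inverting an error-style return. A tiny sketch of that convention; fill_buffers() is a made-up helper that just simulates a partial failure.

    #include <stdbool.h>
    #include <stdio.h>

    static bool fill_buffers(int want, int available)
    {
        int filled = (want < available) ? want : available;

        return filled == want;  /* true only if every buffer was filled */
    }

    int main(void)
    {
        bool ok = fill_buffers(64, 32);

        if (!ok)
            printf("failed to allocate some buffers\n");
        return 0;
    }
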
+3 -3
drivers/net/ethernet/intel/igb/e1000_hw.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* Copyright(c) 2007 - 2018 Intel Corporation. */ 3 3 4 - #ifndef _E1000_HW_H_ 5 - #define _E1000_HW_H_ 4 + #ifndef _E1000_IGB_HW_H_ 5 + #define _E1000_IGB_HW_H_ 6 6 7 7 #include <linux/types.h> 8 8 #include <linux/delay.h> ··· 551 551 552 552 void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); 553 553 void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); 554 - #endif /* _E1000_HW_H_ */ 554 + #endif /* _E1000_IGB_HW_H_ */
+2 -2
drivers/net/ethernet/intel/igb/igb.h
··· 748 748 void igb_ptp_rx_hang(struct igb_adapter *adapter); 749 749 void igb_ptp_tx_hang(struct igb_adapter *adapter); 750 750 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); 751 - void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, 752 - struct sk_buff *skb); 751 + int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, 752 + struct sk_buff *skb); 753 753 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); 754 754 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); 755 755 void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
+21 -12
drivers/net/ethernet/intel/igb/igb_main.c
··· 8214 8214 new_buff->pagecnt_bias = old_buff->pagecnt_bias; 8215 8215 } 8216 8216 8217 - static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer) 8217 + static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, 8218 + int rx_buf_pgcnt) 8218 8219 { 8219 8220 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; 8220 8221 struct page *page = rx_buffer->page; ··· 8226 8225 8227 8226 #if (PAGE_SIZE < 8192) 8228 8227 /* if we are only owner of page we can reuse it */ 8229 - if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) 8228 + if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1)) 8230 8229 return false; 8231 8230 #else 8232 8231 #define IGB_LAST_OFFSET \ ··· 8302 8301 return NULL; 8303 8302 8304 8303 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { 8305 - igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb); 8306 - xdp->data += IGB_TS_HDR_LEN; 8307 - size -= IGB_TS_HDR_LEN; 8304 + if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) { 8305 + xdp->data += IGB_TS_HDR_LEN; 8306 + size -= IGB_TS_HDR_LEN; 8307 + } 8308 8308 } 8309 8309 8310 8310 /* Determine available headroom for copy */ ··· 8366 8364 8367 8365 /* pull timestamp out of packet data */ 8368 8366 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { 8369 - igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); 8370 - __skb_pull(skb, IGB_TS_HDR_LEN); 8367 + if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb)) 8368 + __skb_pull(skb, IGB_TS_HDR_LEN); 8371 8369 } 8372 8370 8373 8371 /* update buffer offset */ ··· 8616 8614 } 8617 8615 8618 8616 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring, 8619 - const unsigned int size) 8617 + const unsigned int size, int *rx_buf_pgcnt) 8620 8618 { 8621 8619 struct igb_rx_buffer *rx_buffer; 8622 8620 8623 8621 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 8622 + *rx_buf_pgcnt = 8623 + #if (PAGE_SIZE < 8192) 8624 + page_count(rx_buffer->page); 8625 + #else 8626 + 0; 8627 + #endif 8624 8628 prefetchw(rx_buffer->page); 8625 8629 8626 8630 /* we are reusing so sync this buffer for CPU use */ ··· 8642 8634 } 8643 8635 8644 8636 static void igb_put_rx_buffer(struct igb_ring *rx_ring, 8645 - struct igb_rx_buffer *rx_buffer) 8637 + struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt) 8646 8638 { 8647 - if (igb_can_reuse_rx_page(rx_buffer)) { 8639 + if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) { 8648 8640 /* hand second half of page back to the ring */ 8649 8641 igb_reuse_rx_page(rx_ring, rx_buffer); 8650 8642 } else { ··· 8672 8664 unsigned int xdp_xmit = 0; 8673 8665 struct xdp_buff xdp; 8674 8666 u32 frame_sz = 0; 8667 + int rx_buf_pgcnt; 8675 8668 8676 8669 /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ 8677 8670 #if (PAGE_SIZE < 8192) ··· 8702 8693 */ 8703 8694 dma_rmb(); 8704 8695 8705 - rx_buffer = igb_get_rx_buffer(rx_ring, size); 8696 + rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); 8706 8697 8707 8698 /* retrieve a buffer from the ring */ 8708 8699 if (!skb) { ··· 8745 8736 break; 8746 8737 } 8747 8738 8748 - igb_put_rx_buffer(rx_ring, rx_buffer); 8739 + igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt); 8749 8740 cleaned_count++; 8750 8741 8751 8742 /* fetch next buffer in frame if non-eop */
+24 -7
drivers/net/ethernet/intel/igb/igb_ptp.c
··· 856 856 dev_kfree_skb_any(skb); 857 857 } 858 858 859 + #define IGB_RET_PTP_DISABLED 1 860 + #define IGB_RET_PTP_INVALID 2 861 + 859 862 /** 860 863 * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp 861 864 * @q_vector: Pointer to interrupt specific structure ··· 867 864 * 868 865 * This function is meant to retrieve a timestamp from the first buffer of an 869 866 * incoming frame. The value is stored in little endian format starting on 870 - * byte 8. 867 + * byte 8 868 + * 869 + * Returns: 0 if success, nonzero if failure 871 870 **/ 872 - void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, 873 - struct sk_buff *skb) 871 + int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, 872 + struct sk_buff *skb) 874 873 { 875 - __le64 *regval = (__le64 *)va; 876 874 struct igb_adapter *adapter = q_vector->adapter; 875 + __le64 *regval = (__le64 *)va; 877 876 int adjust = 0; 877 + 878 + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) 879 + return IGB_RET_PTP_DISABLED; 878 880 879 881 /* The timestamp is recorded in little endian format. 880 882 * DWORD: 0 1 2 3 881 883 * Field: Reserved Reserved SYSTIML SYSTIMH 882 884 */ 885 + 886 + /* check reserved dwords are zero, be/le doesn't matter for zero */ 887 + if (regval[0]) 888 + return IGB_RET_PTP_INVALID; 889 + 883 890 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), 884 891 le64_to_cpu(regval[1])); 885 892 ··· 909 896 } 910 897 skb_hwtstamps(skb)->hwtstamp = 911 898 ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); 899 + 900 + return 0; 912 901 } 913 902 914 903 /** ··· 921 906 * This function is meant to retrieve a timestamp from the internal registers 922 907 * of the adapter and store it in the skb. 923 908 **/ 924 - void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, 925 - struct sk_buff *skb) 909 + void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) 926 910 { 927 911 struct igb_adapter *adapter = q_vector->adapter; 928 912 struct e1000_hw *hw = &adapter->hw; 929 - u64 regval; 930 913 int adjust = 0; 914 + u64 regval; 915 + 916 + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) 917 + return; 931 918 932 919 /* If this bit is set, then the RX registers contain the time stamp. No 933 920 * other packet will be time stamped until we read these registers, so
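
igb_ptp_rx_pktstamp() now returns a status and rejects the per-packet timestamp when PTP is disabled or when the reserved leading 64 bits are non-zero, and callers only strip IGB_TS_HDR_LEN on success. A userspace sketch of that validation on a 16-byte header laid out as described in the hunk; the constant name and sample bytes are illustrative.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define RET_PTP_INVALID 2   /* illustrative stand-in for IGB_RET_PTP_INVALID */

    static int read_pktstamp(const uint8_t *hdr, uint64_t *ts)
    {
        uint64_t reserved, value = 0;

        memcpy(&reserved, hdr, 8);
        if (reserved)                       /* reserved words must read as zero */
            return RET_PTP_INVALID;

        for (int i = 0; i < 8; i++)         /* assemble the little-endian value */
            value |= (uint64_t)hdr[8 + i] << (8 * i);
        *ts = value;
        return 0;
    }

    int main(void)
    {
        uint8_t hdr[16] = { [8] = 0x10, [9] = 0x27 };   /* timestamp 0x2710 */
        uint64_t ts;

        if (!read_pktstamp(hdr, &ts))       /* only pull the header on success */
            printf("timestamp %llu\n", (unsigned long long)ts);
        return 0;
    }
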
+1 -1
drivers/net/ethernet/intel/igc/igc.h
··· 547 547 void igc_ptp_reset(struct igc_adapter *adapter); 548 548 void igc_ptp_suspend(struct igc_adapter *adapter); 549 549 void igc_ptp_stop(struct igc_adapter *adapter); 550 - void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, 550 + void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, 551 551 struct sk_buff *skb); 552 552 int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); 553 553 int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+4 -3
drivers/net/ethernet/intel/igc/igc_ethtool.c
··· 1711 1711 Autoneg); 1712 1712 } 1713 1713 1714 + /* Set pause flow control settings */ 1715 + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); 1716 + 1714 1717 switch (hw->fc.requested_mode) { 1715 1718 case igc_fc_full: 1716 1719 ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); ··· 1728 1725 Asym_Pause); 1729 1726 break; 1730 1727 default: 1731 - ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); 1732 - ethtool_link_ksettings_add_link_mode(cmd, advertising, 1733 - Asym_Pause); 1728 + break; 1734 1729 } 1735 1730 1736 1731 status = pm_runtime_suspended(&adapter->pdev->dev) ?
+9
drivers/net/ethernet/intel/igc/igc_main.c
··· 3831 3831 3832 3832 adapter = container_of(work, struct igc_adapter, reset_task); 3833 3833 3834 + rtnl_lock(); 3835 + /* If we're already down or resetting, just bail */ 3836 + if (test_bit(__IGC_DOWN, &adapter->state) || 3837 + test_bit(__IGC_RESETTING, &adapter->state)) { 3838 + rtnl_unlock(); 3839 + return; 3840 + } 3841 + 3834 3842 igc_rings_dump(adapter); 3835 3843 igc_regs_dump(adapter); 3836 3844 netdev_err(adapter->netdev, "Reset adapter\n"); 3837 3845 igc_reinit_locked(adapter); 3846 + rtnl_unlock(); 3838 3847 } 3839 3848 3840 3849 /**
+38 -30
drivers/net/ethernet/intel/igc/igc_ptp.c
··· 152 152 } 153 153 154 154 /** 155 - * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp 155 + * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer 156 156 * @q_vector: Pointer to interrupt specific structure 157 157 * @va: Pointer to address containing Rx buffer 158 158 * @skb: Buffer containing timestamp and packet 159 159 * 160 - * This function is meant to retrieve the first timestamp from the 161 - * first buffer of an incoming frame. The value is stored in little 162 - * endian format starting on byte 0. There's a second timestamp 163 - * starting on byte 8. 164 - **/ 165 - void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, 160 + * This function retrieves the timestamp saved in the beginning of packet 161 + * buffer. While two timestamps are available, one in timer0 reference and the 162 + * other in timer1 reference, this function considers only the timestamp in 163 + * timer0 reference. 164 + */ 165 + void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va, 166 166 struct sk_buff *skb) 167 167 { 168 168 struct igc_adapter *adapter = q_vector->adapter; 169 - __le64 *regval = (__le64 *)va; 170 - int adjust = 0; 169 + u64 regval; 170 + int adjust; 171 171 172 - /* The timestamp is recorded in little endian format. 173 - * DWORD: | 0 | 1 | 2 | 3 174 - * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High 172 + /* Timestamps are saved in little endian at the beginning of the packet 173 + * buffer following the layout: 174 + * 175 + * DWORD: | 0 | 1 | 2 | 3 | 176 + * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH | 177 + * 178 + * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds 179 + * part of the timestamp. 175 180 */ 176 - igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), 177 - le64_to_cpu(regval[0])); 181 + regval = le32_to_cpu(va[2]); 182 + regval |= (u64)le32_to_cpu(va[3]) << 32; 183 + igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 178 184 179 - /* adjust timestamp for the RX latency based on link speed */ 180 - if (adapter->hw.mac.type == igc_i225) { 181 - switch (adapter->link_speed) { 182 - case SPEED_10: 183 - adjust = IGC_I225_RX_LATENCY_10; 184 - break; 185 - case SPEED_100: 186 - adjust = IGC_I225_RX_LATENCY_100; 187 - break; 188 - case SPEED_1000: 189 - adjust = IGC_I225_RX_LATENCY_1000; 190 - break; 191 - case SPEED_2500: 192 - adjust = IGC_I225_RX_LATENCY_2500; 193 - break; 194 - } 185 + /* Adjust timestamp for the RX latency based on link speed */ 186 + switch (adapter->link_speed) { 187 + case SPEED_10: 188 + adjust = IGC_I225_RX_LATENCY_10; 189 + break; 190 + case SPEED_100: 191 + adjust = IGC_I225_RX_LATENCY_100; 192 + break; 193 + case SPEED_1000: 194 + adjust = IGC_I225_RX_LATENCY_1000; 195 + break; 196 + case SPEED_2500: 197 + adjust = IGC_I225_RX_LATENCY_2500; 198 + break; 199 + default: 200 + adjust = 0; 201 + netdev_warn_once(adapter->netdev, "Imprecise timestamp\n"); 202 + break; 195 203 } 196 204 skb_hwtstamps(skb)->hwtstamp = 197 205 ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
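
The igc hunk documents that the packet buffer starts with Timer1 and then Timer0, each as little-endian SYSTIML/SYSTIMH words, and builds the 64-bit Timer0 value from words 2 and 3. A small sketch of that assembly; endianness conversion is omitted and the sample values are made up.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t timer0_from_buffer(const uint32_t *va)
    {
        uint64_t regval;

        regval = va[2];                     /* Timer0 SYSTIML, low 32 bits  */
        regval |= (uint64_t)va[3] << 32;    /* Timer0 SYSTIMH, high 32 bits */
        return regval;
    }

    int main(void)
    {
        /* words: Timer1 low, Timer1 high, Timer0 low, Timer0 high */
        uint32_t va[4] = { 0x11111111, 0x22222222, 0xdeadbeef, 0x00000001 };

        printf("%#llx\n", (unsigned long long)timer0_from_buffer(va));
        /* prints 0x1deadbeef */
        return 0;
    }
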
+2 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 4118 4118 #endif 4119 4119 } 4120 4120 4121 + ring->rx_offset = ixgbe_rx_offset(ring); 4122 + 4121 4123 if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) { 4122 4124 u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); 4123 4125 ··· 6580 6578 6581 6579 rx_ring->next_to_clean = 0; 6582 6580 rx_ring->next_to_use = 0; 6583 - rx_ring->rx_offset = ixgbe_rx_offset(rx_ring); 6584 6581 6585 6582 /* XDP RX-queue info */ 6586 6583 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
+2 -2
drivers/net/ethernet/marvell/Kconfig
··· 6 6 config NET_VENDOR_MARVELL 7 7 bool "Marvell devices" 8 8 default y 9 - depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET || COMPILE_TEST 9 + depends on PCI || CPU_PXA168 || PPC32 || PLAT_ORION || INET || COMPILE_TEST 10 10 help 11 11 If you have a network (Ethernet) card belonging to this class, say Y. 12 12 ··· 19 19 20 20 config MV643XX_ETH 21 21 tristate "Marvell Discovery (643XX) and Orion ethernet support" 22 - depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST 22 + depends on PPC32 || PLAT_ORION || COMPILE_TEST 23 23 depends on INET 24 24 select PHYLIB 25 25 select MVMDIO
+1 -1
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 2684 2684 MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids); 2685 2685 #endif 2686 2686 2687 - #if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60) 2687 + #ifdef CONFIG_OF_IRQ 2688 2688 #define mv643xx_eth_property(_np, _name, _v) \ 2689 2689 do { \ 2690 2690 u32 tmp; \
-2
drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
··· 13499 13499 [NPC_LT_LC_IP] = { 13500 13500 /* SIP+DIP: 8 bytes, KW2[63:0] */ 13501 13501 KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10), 13502 - /* TOS: 1 byte, KW1[63:56] */ 13503 - KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf), 13504 13502 }, 13505 13503 /* Layer C: IPv6 */ 13506 13504 [NPC_LT_LC_IP6] = {
+4 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
··· 2462 2462 INTR_MASK(rvu->hw->total_pfs) & ~1ULL); 2463 2463 2464 2464 for (irq = 0; irq < rvu->num_vec; irq++) { 2465 - if (rvu->irq_allocated[irq]) 2465 + if (rvu->irq_allocated[irq]) { 2466 2466 free_irq(pci_irq_vector(rvu->pdev, irq), rvu); 2467 + rvu->irq_allocated[irq] = false; 2468 + } 2467 2469 } 2468 2470 2469 2471 pci_free_irq_vectors(rvu->pdev); ··· 2977 2975 struct rvu *rvu = pci_get_drvdata(pdev); 2978 2976 2979 2977 rvu_dbg_exit(rvu); 2980 - rvu_unregister_interrupts(rvu); 2981 2978 rvu_unregister_dl(rvu); 2979 + rvu_unregister_interrupts(rvu); 2982 2980 rvu_flr_wq_destroy(rvu); 2983 2981 rvu_cgx_exit(rvu); 2984 2982 rvu_fwdata_exit(rvu);
+1
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
··· 678 678 u8 *intf, u8 *ena); 679 679 bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); 680 680 u32 rvu_cgx_get_fifolen(struct rvu *rvu); 681 + void *rvu_first_cgx_pdata(struct rvu *rvu); 681 682 682 683 /* CPT APIs */ 683 684 int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+16 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
··· 89 89 return rvu->cgx_idmap[cgx_id]; 90 90 } 91 91 92 + /* Return first enabled CGX instance if none are enabled then return NULL */ 93 + void *rvu_first_cgx_pdata(struct rvu *rvu) 94 + { 95 + int first_enabled_cgx = 0; 96 + void *cgxd = NULL; 97 + 98 + for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) { 99 + cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu); 100 + if (cgxd) 101 + break; 102 + } 103 + 104 + return cgxd; 105 + } 106 + 92 107 /* Based on P2X connectivity find mapped NIX block for a PF */ 93 108 static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf, 94 109 int cgx_id, int lmac_id) ··· 726 711 u32 rvu_cgx_get_fifolen(struct rvu *rvu) 727 712 { 728 713 struct mac_ops *mac_ops; 729 - int rvu_def_cgx_id = 0; 730 714 u32 fifo_len; 731 715 732 - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); 716 + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); 733 717 fifo_len = mac_ops ? mac_ops->fifo_len : 0; 734 718 735 719 return fifo_len;
+36 -21
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
··· 234 234 char __user *buffer, 235 235 size_t count, loff_t *ppos) 236 236 { 237 - int index, off = 0, flag = 0, go_back = 0, off_prev; 237 + int index, off = 0, flag = 0, go_back = 0, len = 0; 238 238 struct rvu *rvu = filp->private_data; 239 239 int lf, pf, vf, pcifunc; 240 240 struct rvu_block block; 241 241 int bytes_not_copied; 242 + int lf_str_size = 12; 242 243 int buf_size = 2048; 244 + char *lfs; 243 245 char *buf; 244 246 245 247 /* don't allow partial reads */ ··· 251 249 buf = kzalloc(buf_size, GFP_KERNEL); 252 250 if (!buf) 253 251 return -ENOSPC; 254 - off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t"); 252 + 253 + lfs = kzalloc(lf_str_size, GFP_KERNEL); 254 + if (!lfs) { 255 + kfree(buf); 256 + return -ENOMEM; 257 + } 258 + off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size, 259 + "pcifunc"); 255 260 for (index = 0; index < BLK_COUNT; index++) 256 261 if (strlen(rvu->hw->block[index].name)) { 257 - off += scnprintf(&buf[off], buf_size - 1 - off, 258 - "%*s\t", (index - 1) * 2, 259 - rvu->hw->block[index].name); 262 + off += scnprintf(&buf[off], buf_size - 1 - off, 263 + "%-*s", lf_str_size, 264 + rvu->hw->block[index].name); 265 + } 260 266 off += scnprintf(&buf[off], buf_size - 1 - off, "\n"); 261 267 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { 262 268 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) { ··· 273 263 continue; 274 264 275 265 if (vf) { 266 + sprintf(lfs, "PF%d:VF%d", pf, vf - 1); 276 267 go_back = scnprintf(&buf[off], 277 268 buf_size - 1 - off, 278 - "PF%d:VF%d\t\t", pf, 279 - vf - 1); 269 + "%-*s", lf_str_size, lfs); 280 270 } else { 271 + sprintf(lfs, "PF%d", pf); 281 272 go_back = scnprintf(&buf[off], 282 273 buf_size - 1 - off, 283 - "PF%d\t\t", pf); 274 + "%-*s", lf_str_size, lfs); 284 275 } 285 276 286 277 off += go_back; ··· 289 278 block = rvu->hw->block[index]; 290 279 if (!strlen(block.name)) 291 280 continue; 292 - off_prev = off; 281 + len = 0; 282 + lfs[len] = '\0'; 293 283 for (lf = 0; lf < block.lf.max; lf++) { 294 284 if (block.fn_map[lf] != pcifunc) 295 285 continue; 296 286 flag = 1; 297 - off += scnprintf(&buf[off], buf_size - 1 298 - - off, "%3d,", lf); 287 + len += sprintf(&lfs[len], "%d,", lf); 299 288 } 300 - if (flag && off_prev != off) 301 - off--; 302 - else 303 - go_back++; 289 + 290 + if (flag) 291 + len--; 292 + lfs[len] = '\0'; 304 293 off += scnprintf(&buf[off], buf_size - 1 - off, 305 - "\t"); 294 + "%-*s", lf_str_size, lfs); 295 + if (!strlen(lfs)) 296 + go_back += lf_str_size; 306 297 } 307 298 if (!flag) 308 299 off -= go_back; ··· 316 303 } 317 304 318 305 bytes_not_copied = copy_to_user(buffer, buf, off); 306 + kfree(lfs); 319 307 kfree(buf); 320 308 321 309 if (bytes_not_copied) ··· 333 319 struct rvu *rvu = filp->private; 334 320 struct pci_dev *pdev = NULL; 335 321 struct mac_ops *mac_ops; 336 - int rvu_def_cgx_id = 0; 337 322 char cgx[10], lmac[10]; 338 323 struct rvu_pfvf *pfvf; 339 324 int pf, domain, blkid; ··· 340 327 u16 pcifunc; 341 328 342 329 domain = 2; 343 - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); 330 + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); 331 + /* There can be no CGX devices at all */ 332 + if (!mac_ops) 333 + return 0; 344 334 seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n", 345 335 mac_ops->name); 346 336 for (pf = 0; pf < rvu->hw->total_pfs; pf++) { ··· 1834 1818 { 1835 1819 struct mac_ops *mac_ops; 1836 1820 unsigned long lmac_bmap; 1837 - int rvu_def_cgx_id = 0; 1838 1821 int i, lmac_id; 1839 1822 char dname[20]; void *cgx; ··· 1841 1826 if (!cgx_get_cgxcnt_max()) 1842 1827 return; 1843 1828 1844 - mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); 1829 + mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); 1845 1830 if (!mac_ops) 1846 1831 return; 1847 1832
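
The debugfs table above switches from hand-counted tabs to "%-*s", printing every cell left-justified in an lf_str_size-wide column so rows stay aligned regardless of cell length. A short demonstration of the format specifier with illustrative widths and cell contents.

    #include <stdio.h>

    int main(void)
    {
        int lf_str_size = 12;   /* runtime column width, as in the hunk */

        printf("%-*s%-*s%-*s\n", lf_str_size, "pcifunc",
               lf_str_size, "NPA", lf_str_size, "NIX0");
        printf("%-*s%-*s%-*s\n", lf_str_size, "PF0",
               lf_str_size, "0,", lf_str_size, "0,1,");
        printf("%-*s%-*s%-*s\n", lf_str_size, "PF1:VF0",
               lf_str_size, "3,", lf_str_size, "");
        return 0;
    }
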
+1 -1
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
··· 2629 2629 struct nix_rx_flowkey_alg *field; 2630 2630 struct nix_rx_flowkey_alg tmp; 2631 2631 u32 key_type, valid_key; 2632 - int l4_key_offset; 2632 + int l4_key_offset = 0; 2633 2633 2634 2634 if (!alg) 2635 2635 return -EINVAL;
+1 -1
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
··· 2490 2490 index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); 2491 2491 if (index >= mcam->bmap_entries) 2492 2492 break; 2493 + entry = index + 1; 2493 2494 if (mcam->entry2cntr_map[index] != req->cntr) 2494 2495 continue; 2495 2496 2496 - entry = index + 1; 2497 2497 npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, 2498 2498 index, req->cntr); 2499 2499 }
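
The rvu_npc fix moves entry = index + 1 ahead of the continue so the find_next_bit() cursor always advances; otherwise a skipped entry is rediscovered on every pass and the loop never terminates. A userspace sketch of the pattern, with find_next_set() standing in for find_next_bit().

    #include <stdio.h>

    static int find_next_set(const int *map, int size, int start)
    {
        for (int i = start; i < size; i++)
            if (map[i])
                return i;
        return size;
    }

    int main(void)
    {
        int map[8] = { 0, 1, 0, 1, 1, 0, 0, 1 };
        int entry = 0, index;

        for (;;) {
            index = find_next_set(map, 8, entry);
            if (index >= 8)
                break;
            entry = index + 1;      /* advance first ...                     */
            if (index == 3)
                continue;           /* ... so skipping cannot loop forever   */
            printf("processing %d\n", index);
        }
        return 0;
    }
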
+3 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
··· 257 257 int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, 258 258 u32 *rule_locs) 259 259 { 260 + u32 rule_cnt = nfc->rule_cnt; 260 261 u32 location = 0; 261 262 int idx = 0; 262 263 int err = 0; 263 264 264 265 nfc->data = pfvf->flow_cfg->ntuple_max_flows; 265 - while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) { 266 + while ((!err || err == -ENOENT) && idx < rule_cnt) { 266 267 err = otx2_get_flow(pfvf, nfc, location); 267 268 if (!err) 268 269 rule_locs[idx++] = location; 269 270 location++; 270 271 } 272 + nfc->rule_cnt = rule_cnt; 271 273 272 274 return err; 273 275 }
+5
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 1672 1672 struct otx2_nic *pf = netdev_priv(netdev); 1673 1673 struct otx2_cq_poll *cq_poll = NULL; 1674 1674 struct otx2_qset *qset = &pf->qset; 1675 + struct otx2_rss_info *rss; 1675 1676 int qidx, vec, wrk; 1676 1677 1677 1678 netif_carrier_off(netdev); ··· 1684 1683 1685 1684 /* First stop packet Rx/Tx */ 1686 1685 otx2_rxtx_enable(pf, false); 1686 + 1687 + /* Clear RSS enable flag */ 1688 + rss = &pf->hw.rss_info; 1689 + rss->enable = false; 1687 1690 1688 1691 /* Cleanup Queue IRQ */ 1689 1692 vec = pci_irq_vector(pf->pdev,
+1 -1
drivers/net/ethernet/marvell/pxa168_eth.c
··· 1544 1544 clk_disable_unprepare(pep->clk); 1545 1545 mdiobus_unregister(pep->smi_bus); 1546 1546 mdiobus_free(pep->smi_bus); 1547 - unregister_netdev(dev); 1548 1547 cancel_work_sync(&pep->tx_timeout_task); 1548 + unregister_netdev(dev); 1549 1549 free_netdev(dev); 1550 1550 return 0; 1551 1551 }
+4 -3
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 92 92 MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) 93 93 #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) 94 94 95 - #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) 95 + #define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8)) 96 + #define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2) 97 + #define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts))) 96 98 /* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between 97 99 * WQEs, This page will absorb write overflow by the hardware, when 98 100 * receiving packets larger than MTU. These oversize packets are 99 101 * dropped by the driver at a later stage. 100 102 */ 101 - #define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8)) 102 - #define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS)) 103 + #define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1)) 103 104 #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) 104 105 #define MLX5E_MAX_RQ_NUM_MTTS \ 105 106 ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
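
The new MLX5_ALIGN_MTTS / MLX5_ALIGNED_MTTS_OCTW / MLX5_MTT_OCTW split separates rounding the MTT count up to a multiple of 8 from converting an already-aligned count into octwords (two MTTs per octword), which the en_rx.c hunk further down uses to compute xlt_offset. A quick sketch of just that arithmetic, with informal macro names; the values printed are only examples.

    #include <stdio.h>

    #define ALIGN_UP(x, a)          (((x) + (a) - 1) / (a) * (a))
    #define ALIGN_MTTS(mtts)        ALIGN_UP(mtts, 8)
    #define ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2)
    #define MTT_OCTW(mtts)          ALIGNED_MTTS_OCTW(ALIGN_MTTS(mtts))

    int main(void)
    {
        for (unsigned int mtts = 1; mtts <= 9; mtts += 4)
            printf("%u MTTs -> %u aligned -> %u octwords\n",
                   mtts, ALIGN_MTTS(mtts), MTT_OCTW(mtts));
        /* 1 -> 8 -> 4, 5 -> 8 -> 4, 9 -> 16 -> 8 */
        return 0;
    }
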
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
··· 1181 1181 1182 1182 mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG, 1183 1183 &ctstate, &ctstate_mask); 1184 - if (ctstate_mask) 1184 + 1185 + if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT) 1185 1186 return -EOPNOTSUPP; 1186 1187 1187 1188 ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
··· 685 685 u16 vport_num; 686 686 int err = 0; 687 687 688 - if (flow_attr->ip_version == 4) { 688 + if (flow_attr->tun_ip_version == 4) { 689 689 /* Addresses are swapped for decap */ 690 690 attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4; 691 691 attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4; 692 692 err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr); 693 693 } 694 694 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) 695 - else if (flow_attr->ip_version == 6) { 695 + else if (flow_attr->tun_ip_version == 6) { 696 696 /* Addresses are swapped for decap */ 697 697 attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6; 698 698 attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6; ··· 718 718 esw_attr->rx_tun_attr->decap_vport = vport_num; 719 719 720 720 out: 721 - if (flow_attr->ip_version == 4) 721 + if (flow_attr->tun_ip_version == 4) 722 722 mlx5e_route_lookup_ipv4_put(&attr); 723 723 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) 724 - else if (flow_attr->ip_version == 6) 724 + else if (flow_attr->tun_ip_version == 6) 725 725 mlx5e_route_lookup_ipv6_put(&attr); 726 726 #endif 727 727 return err;
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
··· 89 89 * required to establish routing. 90 90 */ 91 91 flow_flag_set(flow, TUN_RX); 92 + flow->attr->tun_ip_version = ip_version; 92 93 return 0; 93 94 } 94 95 ··· 1092 1091 if (err || !esw_attr->rx_tun_attr->decap_vport) 1093 1092 goto out; 1094 1093 1095 - key.ip_version = attr->ip_version; 1094 + key.ip_version = attr->tun_ip_version; 1096 1095 if (key.ip_version == 4) 1097 1096 key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4; 1098 1097 else
+4
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
··· 227 227 option_key = (struct geneve_opt *)&enc_opts.key->data[0]; 228 228 option_mask = (struct geneve_opt *)&enc_opts.mask->data[0]; 229 229 230 + if (option_mask->opt_class == 0 && option_mask->type == 0 && 231 + !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4)) 232 + return 0; 233 + 230 234 if (option_key->length > max_tlv_option_data_len) { 231 235 NL_SET_ERR_MSG_MOD(extack, 232 236 "Matching on GENEVE options: unsupported option len");
+10 -1
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 1887 1887 { 1888 1888 struct mlx5e_priv *priv = netdev_priv(netdev); 1889 1889 struct mlx5_core_dev *mdev = priv->mdev; 1890 + int err; 1890 1891 1891 1892 if (!MLX5_CAP_GEN(mdev, cqe_compression)) 1892 1893 return -EOPNOTSUPP; ··· 1897 1896 return -EINVAL; 1898 1897 } 1899 1898 1900 - mlx5e_modify_rx_cqe_compression_locked(priv, enable); 1899 + err = mlx5e_modify_rx_cqe_compression_locked(priv, enable); 1900 + if (err) 1901 + return err; 1902 + 1901 1903 priv->channels.params.rx_cqe_compress_def = enable; 1902 1904 1903 1905 return 0; ··· 2018 2014 */ 2019 2015 2020 2016 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { 2017 + struct mlx5e_params old_params; 2018 + 2019 + old_params = priv->channels.params; 2021 2020 priv->channels.params = new_channels.params; 2022 2021 err = mlx5e_num_channels_changed(priv); 2022 + if (err) 2023 + priv->channels.params = old_params; 2023 2024 goto out; 2024 2025 } 2025 2026
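
The second part of this hunk snapshots the current channel parameters before applying the new ones and restores the snapshot if mlx5e_num_channels_changed() fails, instead of leaving half-applied settings behind. A minimal sketch of that save-apply-restore shape; the struct, field, and apply() helper are placeholders.

    #include <stdio.h>

    struct params {
        int num_channels;
    };

    static int apply(const struct params *p)
    {
        return p->num_channels > 8 ? -1 : 0;    /* simulate a failure */
    }

    int main(void)
    {
        struct params cfg = { .num_channels = 4 };
        struct params old_params, next = { .num_channels = 16 };
        int err;

        old_params = cfg;       /* snapshot before touching anything */
        cfg = next;
        err = apply(&cfg);
        if (err)
            cfg = old_params;   /* roll back on failure */

        printf("err=%d channels=%d\n", err, cfg.num_channels);
        return 0;
    }
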
+57 -24
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 334 334 rq->wqe_overflow.addr); 335 335 } 336 336 337 - static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) 337 + static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix) 338 338 { 339 - return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT; 339 + return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT; 340 340 } 341 341 342 342 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) ··· 577 577 mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i); 578 578 u32 byte_count = 579 579 rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; 580 - u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i); 580 + u64 dma_offset = mlx5e_get_mpwqe_offset(i); 581 581 582 582 wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom); 583 583 wqe->data[0].byte_count = cpu_to_be32(byte_count); ··· 2368 2368 { 2369 2369 switch (params->rq_wq_type) { 2370 2370 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: 2371 - return order_base_2(MLX5E_UMR_WQEBBS) + 2372 - mlx5e_get_rq_log_wq_sz(rqp->rqc); 2371 + return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, 2372 + order_base_2(MLX5E_UMR_WQEBBS) + 2373 + mlx5e_get_rq_log_wq_sz(rqp->rqc)); 2373 2374 default: /* MLX5_WQ_TYPE_CYCLIC */ 2374 2375 return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; 2375 2376 } ··· 2503 2502 { 2504 2503 int i; 2505 2504 2506 - if (chs->port_ptp) 2505 + if (chs->port_ptp) { 2507 2506 mlx5e_port_ptp_close(chs->port_ptp); 2507 + chs->port_ptp = NULL; 2508 + } 2508 2509 2509 2510 for (i = 0; i < chs->num; i++) 2510 2511 mlx5e_close_channel(chs->c[i]); ··· 3818 3815 s->tx_dropped += sq_stats->dropped; 3819 3816 } 3820 3817 } 3818 + if (priv->port_ptp_opened) { 3819 + for (i = 0; i < priv->max_opened_tc; i++) { 3820 + struct mlx5e_sq_stats *sq_stats = &priv->port_ptp_stats.sq[i]; 3821 + 3822 + s->tx_packets += sq_stats->packets; 3823 + s->tx_bytes += sq_stats->bytes; 3824 + s->tx_dropped += sq_stats->dropped; 3825 + } 3826 + } 3821 3827 } 3822 3828 3823 3829 void ··· 3846 3834 } 3847 3835 3848 3836 if (mlx5e_is_uplink_rep(priv)) { 3837 + struct mlx5e_vport_stats *vstats = &priv->stats.vport; 3838 + 3849 3839 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); 3850 3840 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); 3851 3841 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); 3852 3842 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); 3843 + 3844 + /* vport multicast also counts packets that are dropped due to steering 3845 + * or rx out of buffer 3846 + */ 3847 + stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); 3853 3848 } else { 3854 3849 mlx5e_fold_sw_stats64(priv, stats); 3855 3850 } ··· 4702 4683 struct mlx5e_channel *c = priv->channels.c[i]; 4703 4684 4704 4685 mlx5e_rq_replace_xdp_prog(&c->rq, prog); 4705 - if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) 4686 + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) { 4687 + bpf_prog_inc(prog); 4706 4688 mlx5e_rq_replace_xdp_prog(&c->xskrq, prog); 4689 + } 4707 4690 } 4708 4691 4709 4692 unlock: ··· 4978 4957 params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2, 4979 4958 priv->max_nch); 4980 4959 params->num_tc = 1; 4960 + 4961 + /* Set an initial non-zero value, so that mlx5e_select_queue won't 4962 + * divide by zero if called before first activating channels. 4963 + */ 4964 + priv->num_tc_x_num_ch = params->num_channels * params->num_tc; 4981 4965 4982 4966 /* SQ */ 4983 4967 params->log_sq_size = is_kdump_kernel() ? 
··· 5500 5474 struct net_device *netdev, 5501 5475 struct mlx5_core_dev *mdev) 5502 5476 { 5503 - memset(priv, 0, sizeof(*priv)); 5504 - 5505 5477 /* priv init */ 5506 5478 priv->mdev = mdev; 5507 5479 priv->netdev = netdev; ··· 5532 5508 { 5533 5509 int i; 5534 5510 5511 + /* bail if change profile failed and also rollback failed */ 5512 + if (!priv->mdev) 5513 + return; 5514 + 5535 5515 destroy_workqueue(priv->wq); 5536 5516 free_cpumask_var(priv->scratchpad.cpumask); 5537 5517 5538 5518 for (i = 0; i < priv->htb.max_qos_sqs; i++) 5539 5519 kfree(priv->htb.qos_sq_stats[i]); 5540 5520 kvfree(priv->htb.qos_sq_stats); 5521 + 5522 + memset(priv, 0, sizeof(*priv)); 5541 5523 } 5542 5524 5543 5525 struct net_device * ··· 5660 5630 } 5661 5631 5662 5632 static int 5663 - mlx5e_netdev_attach_profile(struct mlx5e_priv *priv, 5633 + mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, 5664 5634 const struct mlx5e_profile *new_profile, void *new_ppriv) 5665 5635 { 5666 - struct net_device *netdev = priv->netdev; 5667 - struct mlx5_core_dev *mdev = priv->mdev; 5636 + struct mlx5e_priv *priv = netdev_priv(netdev); 5668 5637 int err; 5669 5638 5670 5639 err = mlx5e_priv_init(priv, netdev, mdev); ··· 5676 5647 priv->ppriv = new_ppriv; 5677 5648 err = new_profile->init(priv->mdev, priv->netdev); 5678 5649 if (err) 5679 - return err; 5650 + goto priv_cleanup; 5680 5651 err = mlx5e_attach_netdev(priv); 5681 5652 if (err) 5682 - new_profile->cleanup(priv); 5653 + goto profile_cleanup; 5654 + return err; 5655 + 5656 + profile_cleanup: 5657 + new_profile->cleanup(priv); 5658 + priv_cleanup: 5659 + mlx5e_priv_cleanup(priv); 5683 5660 return err; 5684 5661 } 5685 5662 ··· 5694 5659 { 5695 5660 unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile); 5696 5661 const struct mlx5e_profile *orig_profile = priv->profile; 5662 + struct net_device *netdev = priv->netdev; 5663 + struct mlx5_core_dev *mdev = priv->mdev; 5697 5664 void *orig_ppriv = priv->ppriv; 5698 5665 int err, rollback_err; 5699 5666 5700 5667 /* sanity */ 5701 5668 if (new_max_nch != priv->max_nch) { 5702 - netdev_warn(priv->netdev, 5703 - "%s: Replacing profile with different max channels\n", 5669 + netdev_warn(netdev, "%s: Replacing profile with different max channels\n", 5704 5670 __func__); 5705 5671 return -EINVAL; 5706 5672 } ··· 5711 5675 priv->profile->cleanup(priv); 5712 5676 mlx5e_priv_cleanup(priv); 5713 5677 5714 - err = mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv); 5678 + err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv); 5715 5679 if (err) { /* roll back to original profile */ 5716 - netdev_warn(priv->netdev, "%s: new profile init failed, %d\n", 5717 - __func__, err); 5680 + netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err); 5718 5681 goto rollback; 5719 5682 } 5720 5683 5721 5684 return 0; 5722 5685 5723 5686 rollback: 5724 - rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv); 5725 - if (rollback_err) { 5726 - netdev_err(priv->netdev, 5727 - "%s: failed to rollback to orig profile, %d\n", 5687 + rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv); 5688 + if (rollback_err) 5689 + netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n", 5728 5690 __func__, rollback_err); 5729 - } 5730 5691 return err; 5731 5692 } 5732 5693
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 500 500 struct mlx5e_icosq *sq = rq->icosq; 501 501 struct mlx5_wq_cyc *wq = &sq->wq; 502 502 struct mlx5e_umr_wqe *umr_wqe; 503 - u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); 504 503 u16 pi; 505 504 int err; 506 505 int i; ··· 530 531 umr_wqe->ctrl.opmod_idx_opcode = 531 532 cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | 532 533 MLX5_OPCODE_UMR); 533 - umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset); 534 + umr_wqe->uctrl.xlt_offset = 535 + cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix))); 534 536 535 537 sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { 536 538 .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
+45 -12
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 2296 2296 *match_level = MLX5_MATCH_L4; 2297 2297 } 2298 2298 2299 + /* Currenlty supported only for MPLS over UDP */ 2300 + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) && 2301 + !netif_is_bareudp(filter_dev)) { 2302 + NL_SET_ERR_MSG_MOD(extack, 2303 + "Matching on MPLS is supported only for MPLS over UDP"); 2304 + netdev_err(priv->netdev, 2305 + "Matching on MPLS is supported only for MPLS over UDP\n"); 2306 + return -EOPNOTSUPP; 2307 + } 2308 + 2299 2309 return 0; 2300 2310 } 2301 2311 ··· 2909 2899 return 0; 2910 2900 } 2911 2901 2902 + static bool modify_tuple_supported(bool modify_tuple, bool ct_clear, 2903 + bool ct_flow, struct netlink_ext_ack *extack, 2904 + struct mlx5e_priv *priv, 2905 + struct mlx5_flow_spec *spec) 2906 + { 2907 + if (!modify_tuple || ct_clear) 2908 + return true; 2909 + 2910 + if (ct_flow) { 2911 + NL_SET_ERR_MSG_MOD(extack, 2912 + "can't offload tuple modification with non-clear ct()"); 2913 + netdev_info(priv->netdev, 2914 + "can't offload tuple modification with non-clear ct()"); 2915 + return false; 2916 + } 2917 + 2918 + /* Add ct_state=-trk match so it will be offloaded for non ct flows 2919 + * (or after clear action), as otherwise, since the tuple is changed, 2920 + * we can't restore ct state 2921 + */ 2922 + if (mlx5_tc_ct_add_no_trk_match(spec)) { 2923 + NL_SET_ERR_MSG_MOD(extack, 2924 + "can't offload tuple modification with ct matches and no ct(clear) action"); 2925 + netdev_info(priv->netdev, 2926 + "can't offload tuple modification with ct matches and no ct(clear) action"); 2927 + return false; 2928 + } 2929 + 2930 + return true; 2931 + } 2932 + 2912 2933 static bool modify_header_match_supported(struct mlx5e_priv *priv, 2913 2934 struct mlx5_flow_spec *spec, 2914 2935 struct flow_action *flow_action, ··· 2978 2937 return err; 2979 2938 } 2980 2939 2981 - /* Add ct_state=-trk match so it will be offloaded for non ct flows 2982 - * (or after clear action), as otherwise, since the tuple is changed, 2983 - * we can't restore ct state 2984 - */ 2985 - if (!ct_clear && modify_tuple && 2986 - mlx5_tc_ct_add_no_trk_match(spec)) { 2987 - NL_SET_ERR_MSG_MOD(extack, 2988 - "can't offload tuple modify header with ct matches"); 2989 - netdev_info(priv->netdev, 2990 - "can't offload tuple modify header with ct matches"); 2940 + if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack, 2941 + priv, spec)) 2991 2942 return false; 2992 - } 2993 2943 2994 2944 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); 2995 2945 if (modify_ip_header && ip_proto != IPPROTO_TCP && ··· 4477 4445 */ 4478 4446 if (rate) { 4479 4447 rate = (rate * BITS_PER_BYTE) + 500000; 4480 - rate_mbps = max_t(u64, do_div(rate, 1000000), 1); 4448 + do_div(rate, 1000000); 4449 + rate_mbps = max_t(u32, rate, 1); 4481 4450 } 4482 4451 4483 4452 err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
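
The policer fix at the end of this hunk works because do_div() divides its 64-bit argument in place and returns the remainder, so the quotient must be read back from the variable; the old code handed the remainder to max_t(). A userspace sketch of the intended bytes-per-second to Mbit/s conversion using plain division; the function name and sample rates are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bytes_per_sec_to_mbps(uint64_t rate)
    {
        if (!rate)
            return 0;

        rate = rate * 8 + 500000;   /* to bits/s, plus half a Mbit for rounding   */
        rate /= 1000000;            /* the quotient do_div() leaves in the variable */
        return rate > 1 ? (uint32_t)rate : 1;
    }

    int main(void)
    {
        printf("%u\n", bytes_per_sec_to_mbps(125000));    /* 1 Mbit/s   */
        printf("%u\n", bytes_per_sec_to_mbps(40000000));  /* 320 Mbit/s */
        return 0;
    }
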
+1
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
··· 79 79 u8 inner_match_level; 80 80 u8 outer_match_level; 81 81 u8 ip_version; 82 + u8 tun_ip_version; 82 83 u32 flags; 83 84 union { 84 85 struct mlx5_esw_flow_attr esw_attr[0];
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 551 551 552 552 if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) && 553 553 MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) && 554 - mlx5_eswitch_vport_match_metadata_enabled(esw)) 554 + mlx5_eswitch_vport_match_metadata_enabled(esw) && 555 + MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) 555 556 attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE; 556 557 557 558 if (attr->dest_ft) {
+1
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
··· 575 575 MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size)); 576 576 MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn); 577 577 MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn); 578 + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev)); 578 579 MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma); 579 580 if (MLX5_CAP_GEN(mdev, cqe_version) == 1) 580 581 MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 233 233 } 234 234 235 235 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 236 + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev)); 236 237 MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD); 237 238 MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 238 239 MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, ··· 695 694 static void mlx5_rdma_netdev_free(struct net_device *netdev) 696 695 { 697 696 struct mlx5e_priv *priv = mlx5i_epriv(netdev); 697 + struct mlx5_core_dev *mdev = priv->mdev; 698 698 struct mlx5i_priv *ipriv = priv->ppriv; 699 699 const struct mlx5e_profile *profile = priv->profile; 700 700 ··· 704 702 705 703 if (!ipriv->sub_interface) { 706 704 mlx5i_pkey_qpn_ht_cleanup(netdev); 707 - mlx5e_destroy_mdev_resources(priv->mdev); 705 + mlx5e_destroy_mdev_resources(mdev); 708 706 } 709 707 } 710 708
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
··· 495 495 return -EINVAL; 496 496 497 497 field_select = MLX5_MTPPS_FS_ENABLE; 498 + pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index); 499 + if (pin < 0) 500 + return -EBUSY; 501 + 498 502 if (on) { 499 503 bool rt_mode = mlx5_real_time_mode(mdev); 500 504 u32 nsec; 501 505 s64 sec; 502 - 503 - pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index); 504 - if (pin < 0) 505 - return -EBUSY; 506 506 507 507 pin_mode = MLX5_PIN_MODE_OUT; 508 508 pattern = MLX5_OUT_PATTERN_PERIODIC;
+1 -3
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
··· 181 181 u16 max_functions; 182 182 u16 function_id; 183 183 int err = 0; 184 - bool ecpu; 185 184 int i; 186 185 187 186 max_functions = mlx5_sf_max_functions(dev); 188 187 function_id = MLX5_CAP_GEN(dev, sf_base_id); 189 - ecpu = mlx5_read_embedded_cpu(dev); 190 188 /* Arm the vhca context as the vhca event notifier */ 191 189 for (i = 0; i < max_functions; i++) { 192 - err = mlx5_vhca_event_arm(dev, function_id, ecpu); 190 + err = mlx5_vhca_event_arm(dev, function_id); 193 191 if (err) 194 192 return err; 195 193
+4 -6
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
··· 6 6 #include "sf.h" 7 7 #include "mlx5_ifc_vhca_event.h" 8 8 #include "vhca_event.h" 9 - #include "ecpf.h" 9 + #include "mlx5_core.h" 10 10 11 11 struct mlx5_sf_hw { 12 12 u32 usr_sfnum; ··· 18 18 struct mlx5_core_dev *dev; 19 19 struct mlx5_sf_hw *sfs; 20 20 int max_local_functions; 21 - u8 ecpu: 1; 22 21 struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */ 23 22 struct notifier_block vhca_nb; 24 23 }; ··· 63 64 } 64 65 if (sw_id == -ENOSPC) { 65 66 err = -ENOSPC; 66 - goto err; 67 + goto exist_err; 67 68 } 68 69 69 70 hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id); ··· 71 72 if (err) 72 73 goto err; 73 74 74 - err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum); 75 + err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum); 75 76 if (err) 76 77 goto vhca_err; 77 78 ··· 117 118 118 119 hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id); 119 120 mutex_lock(&table->table_lock); 120 - err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out)); 121 + err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out)); 121 122 if (err) 122 123 goto err; 123 124 state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state); ··· 163 164 table->dev = dev; 164 165 table->sfs = sfs; 165 166 table->max_local_functions = max_functions; 166 - table->ecpu = mlx5_read_embedded_cpu(dev); 167 167 dev->priv.sf_hw_table = table; 168 168 mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions); 169 169 return 0;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h
··· 20 20 21 21 u8 sw_function_id[0x20]; 22 22 23 - u8 reserved_at_40[0x80]; 23 + u8 reserved_at_40[0x40]; 24 24 }; 25 25 26 26 struct mlx5_ifc_query_vhca_state_out_bits {
+11 -12
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
··· 19 19 struct mlx5_vhca_state_event event; 20 20 }; 21 21 22 - int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, 23 - bool ecpu, u32 *out, u32 outlen) 22 + int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen) 24 23 { 25 24 u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {}; 26 25 27 26 MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE); 28 27 MLX5_SET(query_vhca_state_in, in, function_id, function_id); 29 - MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu); 28 + MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, 0); 30 29 31 30 return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); 32 31 } 33 32 34 33 static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id, 35 - bool ecpu, u32 *in, u32 inlen) 34 + u32 *in, u32 inlen) 36 35 { 37 36 u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; 38 37 39 38 MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE); 40 39 MLX5_SET(modify_vhca_state_in, in, function_id, function_id); 41 - MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); 40 + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0); 42 41 43 42 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 44 43 } 45 44 46 - int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id) 45 + int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id) 47 46 { 48 47 u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {}; 49 48 u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; 50 49 51 50 MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE); 52 51 MLX5_SET(modify_vhca_state_in, in, function_id, function_id); 53 - MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu); 52 + MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0); 54 53 MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1); 55 54 MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id); 56 55 57 56 return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out); 58 57 } 59 58 60 - int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu) 59 + int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id) 61 60 { 62 61 u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {}; 63 62 64 63 MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1); 65 64 MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1); 66 65 67 - return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in)); 66 + return mlx5_cmd_modify_vhca_state(dev, function_id, in, sizeof(in)); 68 67 } 69 68 70 69 static void ··· 72 73 u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {}; 73 74 int err; 74 75 75 - err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out)); 76 + err = mlx5_cmd_query_vhca_state(dev, event->function_id, out, sizeof(out)); 76 77 if (err) 77 78 return; 78 79 ··· 81 82 event->new_vhca_state = MLX5_GET(query_vhca_state_out, out, 82 83 vhca_state_context.vhca_state); 83 84 84 - mlx5_vhca_event_arm(dev, event->function_id, event->ecpu); 85 + mlx5_vhca_event_arm(dev, event->function_id); 85 86 86 87 blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event); 87 88 } ··· 93 94 struct mlx5_core_dev *dev = notifier->dev; 94 95 95 96 mlx5_vhca_event_notify(dev, &work->event); 97 + kfree(work); 96 98 } 97 99 98 100 static int ··· 110 110 
INIT_WORK(&work->work, &mlx5_vhca_state_work_handler); 111 111 work->notifier = notifier; 112 112 work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id); 113 - work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function); 114 113 mlx5_events_work_enqueue(notifier->dev, &work->work); 115 114 return NOTIFY_OK; 116 115 }
+3 -4
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
··· 10 10 u16 function_id; 11 11 u16 sw_function_id; 12 12 u8 new_vhca_state; 13 - bool ecpu; 14 13 }; 15 14 16 15 static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev) ··· 24 25 void mlx5_vhca_event_stop(struct mlx5_core_dev *dev); 25 26 int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); 26 27 void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); 27 - int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id); 28 - int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu); 28 + int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id); 29 + int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id); 29 30 int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, 30 - bool ecpu, u32 *out, u32 outlen); 31 + u32 *out, u32 outlen); 31 32 #else 32 33 33 34 static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
+1
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
··· 169 169 MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt)); 170 170 MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ); 171 171 MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt)); 172 + MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev)); 172 173 MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma); 173 174 if (MLX5_CAP_GEN(mdev, cqe_version) == 1) 174 175 MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
··· 264 264 static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p) 265 265 { 266 266 u64 index = 267 - (MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | 268 - MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32) << 26); 267 + ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) | 268 + ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26); 269 269 270 270 return index << 6; 271 271 }
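The dr_ste_v1 hunk above only adds u64 casts, but the reason is easy to miss: miss_address_39_32 is shifted left by 26 bits, which does not fit in 32-bit arithmetic, so without widening the upper bits of the miss address were silently dropped before the final << 6. A minimal standalone sketch of the effect, in plain userspace C with made-up field values (this is not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t miss_31_6  = 0x03ffffff;  /* 26-bit low part, example value */
    uint32_t miss_39_32 = 0xff;        /* 8-bit high part, example value */

    /* 32-bit arithmetic: the << 26 overflows and drops the top bits */
    uint64_t truncated = (uint64_t)(miss_31_6 | miss_39_32 << 26) << 6;

    /* widen to 64 bits first, as the fix does */
    uint64_t widened = ((uint64_t)miss_31_6 |
                        ((uint64_t)miss_39_32) << 26) << 6;

    printf("truncated = 0x%llx\nwidened   = 0x%llx\n",
           (unsigned long long)truncated, (unsigned long long)widened);
    return 0;
}

With the widened form, bits 32-39 of the miss address survive into the returned index.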
+17 -7
drivers/net/ethernet/netronome/nfp/flower/metadata.c
··· 327 327 goto err_free_ctx_entry; 328 328 } 329 329 330 + /* Do net allocate a mask-id for pre_tun_rules. These flows are used to 331 + * configure the pre_tun table and are never actually send to the 332 + * firmware as an add-flow message. This causes the mask-id allocation 333 + * on the firmware to get out of sync if allocated here. 334 + */ 330 335 new_mask_id = 0; 331 - if (!nfp_check_mask_add(app, nfp_flow->mask_data, 336 + if (!nfp_flow->pre_tun_rule.dev && 337 + !nfp_check_mask_add(app, nfp_flow->mask_data, 332 338 nfp_flow->meta.mask_len, 333 339 &nfp_flow->meta.flags, &new_mask_id)) { 334 340 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id"); ··· 365 359 goto err_remove_mask; 366 360 } 367 361 368 - if (!nfp_check_mask_remove(app, nfp_flow->mask_data, 362 + if (!nfp_flow->pre_tun_rule.dev && 363 + !nfp_check_mask_remove(app, nfp_flow->mask_data, 369 364 nfp_flow->meta.mask_len, 370 365 NULL, &new_mask_id)) { 371 366 NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id"); ··· 381 374 return 0; 382 375 383 376 err_remove_mask: 384 - nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len, 385 - NULL, &new_mask_id); 377 + if (!nfp_flow->pre_tun_rule.dev) 378 + nfp_check_mask_remove(app, nfp_flow->mask_data, 379 + nfp_flow->meta.mask_len, 380 + NULL, &new_mask_id); 386 381 err_remove_rhash: 387 382 WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table, 388 383 &ctx_entry->ht_node, ··· 415 406 416 407 __nfp_modify_flow_metadata(priv, nfp_flow); 417 408 418 - nfp_check_mask_remove(app, nfp_flow->mask_data, 419 - nfp_flow->meta.mask_len, &nfp_flow->meta.flags, 420 - &new_mask_id); 409 + if (!nfp_flow->pre_tun_rule.dev) 410 + nfp_check_mask_remove(app, nfp_flow->mask_data, 411 + nfp_flow->meta.mask_len, &nfp_flow->meta.flags, 412 + &new_mask_id); 421 413 422 414 /* Update flow payload with mask ids. */ 423 415 nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
+18
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 1142 1142 return -EOPNOTSUPP; 1143 1143 } 1144 1144 1145 + if (!(key_layer & NFP_FLOWER_LAYER_IPV4) && 1146 + !(key_layer & NFP_FLOWER_LAYER_IPV6)) { 1147 + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present"); 1148 + return -EOPNOTSUPP; 1149 + } 1150 + 1145 1151 /* Skip fields known to exist. */ 1146 1152 mask += sizeof(struct nfp_flower_meta_tci); 1147 1153 ext += sizeof(struct nfp_flower_meta_tci); ··· 1158 1152 mask += sizeof(struct nfp_flower_in_port); 1159 1153 ext += sizeof(struct nfp_flower_in_port); 1160 1154 1155 + /* Ensure destination MAC address matches pre_tun_dev. */ 1156 + mac = (struct nfp_flower_mac_mpls *)ext; 1157 + if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) { 1158 + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); 1159 + return -EOPNOTSUPP; 1160 + } 1161 + 1161 1162 /* Ensure destination MAC address is fully matched. */ 1162 1163 mac = (struct nfp_flower_mac_mpls *)mask; 1163 1164 if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { 1164 1165 NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked"); 1166 + return -EOPNOTSUPP; 1167 + } 1168 + 1169 + if (mac->mpls_lse) { 1170 + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported"); 1165 1171 return -EOPNOTSUPP; 1166 1172 } 1167 1173
+13 -2
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
··· 16 16 #define NFP_FL_MAX_ROUTES 32 17 17 18 18 #define NFP_TUN_PRE_TUN_RULE_LIMIT 32 19 - #define NFP_TUN_PRE_TUN_RULE_DEL 0x1 20 - #define NFP_TUN_PRE_TUN_IDX_BIT 0x8 19 + #define NFP_TUN_PRE_TUN_RULE_DEL BIT(0) 20 + #define NFP_TUN_PRE_TUN_IDX_BIT BIT(3) 21 + #define NFP_TUN_PRE_TUN_IPV6_BIT BIT(7) 21 22 22 23 /** 23 24 * struct nfp_tun_pre_run_rule - rule matched before decap ··· 1269 1268 { 1270 1269 struct nfp_flower_priv *app_priv = app->priv; 1271 1270 struct nfp_tun_offloaded_mac *mac_entry; 1271 + struct nfp_flower_meta_tci *key_meta; 1272 1272 struct nfp_tun_pre_tun_rule payload; 1273 1273 struct net_device *internal_dev; 1274 1274 int err; ··· 1291 1289 internal_dev->dev_addr); 1292 1290 if (!mac_entry) 1293 1291 return -ENOENT; 1292 + 1293 + /* Set/clear IPV6 bit. cpu_to_be16() swap will lead to MSB being 1294 + * set/clear for port_idx. 1295 + */ 1296 + key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data; 1297 + if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6) 1298 + mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT; 1299 + else 1300 + mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT; 1294 1301 1295 1302 payload.port_idx = cpu_to_be16(mac_entry->index); 1296 1303
+7 -6
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
··· 1079 1079 { 1080 1080 int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems; 1081 1081 struct ionic_tx_stats *stats = q_to_tx_stats(q); 1082 + int ndescs; 1082 1083 int err; 1083 1084 1084 - /* If TSO, need roundup(skb->len/mss) descs */ 1085 + /* Each desc is mss long max, so a descriptor for each gso_seg */ 1085 1086 if (skb_is_gso(skb)) 1086 - return (skb->len / skb_shinfo(skb)->gso_size) + 1; 1087 + ndescs = skb_shinfo(skb)->gso_segs; 1088 + else 1089 + ndescs = 1; 1087 1090 1088 - /* If non-TSO, just need 1 desc and nr_frags sg elems */ 1089 1091 if (skb_shinfo(skb)->nr_frags <= sg_elems) 1090 - return 1; 1092 + return ndescs; 1091 1093 1092 1094 /* Too many frags, so linearize */ 1093 1095 err = skb_linearize(skb); ··· 1098 1096 1099 1097 stats->linearize++; 1100 1098 1101 - /* Need 1 desc and zero sg elems */ 1102 - return 1; 1099 + return ndescs; 1103 1100 } 1104 1101 1105 1102 static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
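In the ionic change above, the key point is that the networking stack has already computed gso_segs for a TSO skb, so the driver no longer re-derives the descriptor count from skb->len, which also includes protocol headers and can disagree with the real segment count. A rough userspace illustration with hypothetical numbers (not driver code):

#include <stdio.h>

int main(void)
{
    unsigned int skb_len  = 64 + 9000; /* headers + payload, made-up example */
    unsigned int mss      = 1500;
    unsigned int gso_segs = 6;         /* what the stack computed for this skb */

    /* old estimate: derive the count from the total length */
    unsigned int old_estimate = skb_len / mss + 1;

    /* new estimate: trust the segment count the stack already worked out */
    unsigned int new_estimate = gso_segs;

    printf("old=%u new=%u\n", old_estimate, new_estimate); /* old=7 new=6 */
    return 0;
}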
+3
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
··· 1425 1425 1426 1426 if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { 1427 1427 vfree(fw_dump->tmpl_hdr); 1428 + fw_dump->tmpl_hdr = NULL; 1428 1429 1429 1430 if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) 1430 1431 extended = !qlcnic_83xx_extend_md_capab(adapter); ··· 1444 1443 struct qlcnic_83xx_dump_template_hdr *hdr; 1445 1444 1446 1445 hdr = fw_dump->tmpl_hdr; 1446 + if (!hdr) 1447 + return; 1447 1448 hdr->drv_cap_mask = 0x1f; 1448 1449 fw_dump->cap_mask = 0x1f; 1449 1450 dev_info(&pdev->dev,
+4 -2
drivers/net/ethernet/realtek/r8169_main.c
··· 4646 4646 4647 4647 rtl8169_update_counters(tp); 4648 4648 4649 + pci_clear_master(tp->pci_dev); 4650 + rtl_pci_commit(tp); 4651 + 4649 4652 rtl8169_cleanup(tp, true); 4650 4653 4651 4654 rtl_prepare_power_down(tp); ··· 4656 4653 4657 4654 static void rtl8169_up(struct rtl8169_private *tp) 4658 4655 { 4656 + pci_set_master(tp->pci_dev); 4659 4657 phy_resume(tp->phydev); 4660 4658 rtl8169_init_phy(tp); 4661 4659 napi_enable(&tp->napi); ··· 5310 5306 rtl_hw_initialize(tp); 5311 5307 5312 5308 rtl_hw_reset(tp); 5313 - 5314 - pci_set_master(pdev); 5315 5309 5316 5310 rc = rtl_alloc_irq(tp); 5317 5311 if (rc < 0) {
+6 -3
drivers/net/ethernet/socionext/netsec.c
··· 1715 1715 goto err1; 1716 1716 1717 1717 /* set phy power down */ 1718 - data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) | 1719 - BMCR_PDOWN; 1720 - netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); 1718 + data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR); 1719 + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, 1720 + data | BMCR_PDOWN); 1721 1721 1722 1722 ret = netsec_reset_hardware(priv, true); 1723 1723 if (ret) 1724 1724 goto err2; 1725 + 1726 + /* Restore phy power state */ 1727 + netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data); 1725 1728 1726 1729 spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock); 1727 1730 spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
+2
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
··· 1214 1214 plat_dat->init = sun8i_dwmac_init; 1215 1215 plat_dat->exit = sun8i_dwmac_exit; 1216 1216 plat_dat->setup = sun8i_dwmac_setup; 1217 + plat_dat->tx_fifo_size = 4096; 1218 + plat_dat->rx_fifo_size = 16384; 1217 1219 1218 1220 ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat); 1219 1221 if (ret)
+25 -12
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 1880 1880 if (IS_ERR(lp->regs)) { 1881 1881 dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); 1882 1882 ret = PTR_ERR(lp->regs); 1883 - goto free_netdev; 1883 + goto cleanup_clk; 1884 1884 } 1885 1885 lp->regs_start = ethres->start; 1886 1886 ··· 1958 1958 break; 1959 1959 default: 1960 1960 ret = -EINVAL; 1961 - goto free_netdev; 1961 + goto cleanup_clk; 1962 1962 } 1963 1963 } else { 1964 1964 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); 1965 1965 if (ret) 1966 - goto free_netdev; 1966 + goto cleanup_clk; 1967 1967 } 1968 1968 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && 1969 1969 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { 1970 1970 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); 1971 1971 ret = -EINVAL; 1972 - goto free_netdev; 1972 + goto cleanup_clk; 1973 1973 } 1974 1974 1975 1975 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ ··· 1982 1982 dev_err(&pdev->dev, 1983 1983 "unable to get DMA resource\n"); 1984 1984 of_node_put(np); 1985 - goto free_netdev; 1985 + goto cleanup_clk; 1986 1986 } 1987 1987 lp->dma_regs = devm_ioremap_resource(&pdev->dev, 1988 1988 &dmares); ··· 2002 2002 if (IS_ERR(lp->dma_regs)) { 2003 2003 dev_err(&pdev->dev, "could not map DMA regs\n"); 2004 2004 ret = PTR_ERR(lp->dma_regs); 2005 - goto free_netdev; 2005 + goto cleanup_clk; 2006 2006 } 2007 2007 if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) { 2008 2008 dev_err(&pdev->dev, "could not determine irqs\n"); 2009 2009 ret = -ENOMEM; 2010 - goto free_netdev; 2010 + goto cleanup_clk; 2011 2011 } 2012 2012 2013 2013 /* Autodetect the need for 64-bit DMA pointers. ··· 2037 2037 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); 2038 2038 if (ret) { 2039 2039 dev_err(&pdev->dev, "No suitable DMA available\n"); 2040 - goto free_netdev; 2040 + goto cleanup_clk; 2041 2041 } 2042 2042 2043 2043 /* Check for Ethernet core IRQ (optional) */ ··· 2068 2068 if (!lp->phy_node) { 2069 2069 dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); 2070 2070 ret = -EINVAL; 2071 - goto free_netdev; 2071 + goto cleanup_mdio; 2072 2072 } 2073 2073 lp->pcs_phy = of_mdio_find_device(lp->phy_node); 2074 2074 if (!lp->pcs_phy) { 2075 2075 ret = -EPROBE_DEFER; 2076 - goto free_netdev; 2076 + goto cleanup_mdio; 2077 2077 } 2078 2078 lp->phylink_config.pcs_poll = true; 2079 2079 } ··· 2087 2087 if (IS_ERR(lp->phylink)) { 2088 2088 ret = PTR_ERR(lp->phylink); 2089 2089 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); 2090 - goto free_netdev; 2090 + goto cleanup_mdio; 2091 2091 } 2092 2092 2093 2093 ret = register_netdev(lp->ndev); 2094 2094 if (ret) { 2095 2095 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); 2096 - goto free_netdev; 2096 + goto cleanup_phylink; 2097 2097 } 2098 2098 2099 2099 return 0; 2100 + 2101 + cleanup_phylink: 2102 + phylink_destroy(lp->phylink); 2103 + 2104 + cleanup_mdio: 2105 + if (lp->pcs_phy) 2106 + put_device(&lp->pcs_phy->dev); 2107 + if (lp->mii_bus) 2108 + axienet_mdio_teardown(lp); 2109 + of_node_put(lp->phy_node); 2110 + 2111 + cleanup_clk: 2112 + clk_disable_unprepare(lp->clk); 2100 2113 2101 2114 free_netdev: 2102 2115 free_netdev(ndev);
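The axienet hunk replaces a single free_netdev error label with a reverse-order unwind ladder, so a failure at any point in probe releases exactly what was acquired before it (clock, then MDIO, then phylink) and nothing more. A runnable toy version of the same goto pattern, with stub functions standing in for the real clk/MDIO/phylink/register_netdev calls (hypothetical names, not axienet code):

#include <stdio.h>

static int step(const char *what, int fail) { printf("do %s\n", what); return fail ? -1 : 0; }
static void undo(const char *what) { printf("undo %s\n", what); }

static int probe_example(void)
{
    int ret;

    if ((ret = step("clk", 0)))
        goto free_netdev;           /* nothing else to undo yet */
    if ((ret = step("mdio", 0)))
        goto cleanup_clk;
    if ((ret = step("phylink", 1))) /* pretend phylink_create() failed */
        goto cleanup_mdio;
    if ((ret = step("register_netdev", 0)))
        goto cleanup_phylink;
    return 0;

cleanup_phylink:
    undo("phylink");
cleanup_mdio:
    undo("mdio");
cleanup_clk:
    undo("clk");
free_netdev:
    undo("netdev");
    return ret;
}

int main(void) { return probe_example() ? 1 : 0; }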
+33 -17
drivers/net/ipa/ipa_cmd.c
··· 175 175 : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK); 176 176 if (mem->offset > offset_max || 177 177 ipa->mem_offset > offset_max - mem->offset) { 178 - dev_err(dev, "IPv%c %s%s table region offset too large " 179 - "(0x%04x + 0x%04x > 0x%04x)\n", 180 - ipv6 ? '6' : '4', hashed ? "hashed " : "", 181 - route ? "route" : "filter", 182 - ipa->mem_offset, mem->offset, offset_max); 178 + dev_err(dev, "IPv%c %s%s table region offset too large\n", 179 + ipv6 ? '6' : '4', hashed ? "hashed " : "", 180 + route ? "route" : "filter"); 181 + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", 182 + ipa->mem_offset, mem->offset, offset_max); 183 + 183 184 return false; 184 185 } 185 186 186 187 if (mem->offset > ipa->mem_size || 187 188 mem->size > ipa->mem_size - mem->offset) { 188 - dev_err(dev, "IPv%c %s%s table region out of range " 189 - "(0x%04x + 0x%04x > 0x%04x)\n", 190 - ipv6 ? '6' : '4', hashed ? "hashed " : "", 191 - route ? "route" : "filter", 192 - mem->offset, mem->size, ipa->mem_size); 189 + dev_err(dev, "IPv%c %s%s table region out of range\n", 190 + ipv6 ? '6' : '4', hashed ? "hashed " : "", 191 + route ? "route" : "filter"); 192 + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", 193 + mem->offset, mem->size, ipa->mem_size); 194 + 193 195 return false; 194 196 } 195 197 ··· 207 205 u32 size_max; 208 206 u32 size; 209 207 208 + /* In ipa_cmd_hdr_init_local_add() we record the offset and size 209 + * of the header table memory area. Make sure the offset and size 210 + * fit in the fields that need to hold them, and that the entire 211 + * range is within the overall IPA memory range. 212 + */ 210 213 offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK); 211 214 if (mem->offset > offset_max || 212 215 ipa->mem_offset > offset_max - mem->offset) { 213 - dev_err(dev, "header table region offset too large " 214 - "(0x%04x + 0x%04x > 0x%04x)\n", 215 - ipa->mem_offset + mem->offset, offset_max); 216 + dev_err(dev, "header table region offset too large\n"); 217 + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", 218 + ipa->mem_offset, mem->offset, offset_max); 219 + 216 220 return false; 217 221 } 218 222 219 223 size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK); 220 224 size = ipa->mem[IPA_MEM_MODEM_HEADER].size; 221 225 size += ipa->mem[IPA_MEM_AP_HEADER].size; 222 - if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) { 223 - dev_err(dev, "header table region out of range " 224 - "(0x%04x + 0x%04x > 0x%04x)\n", 225 - mem->offset, size, ipa->mem_size); 226 + 227 + if (size > size_max) { 228 + dev_err(dev, "header table region size too large\n"); 229 + dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max); 230 + 231 + return false; 232 + } 233 + if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) { 234 + dev_err(dev, "header table region out of range\n"); 235 + dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", 236 + mem->offset, size, ipa->mem_size); 237 + 226 238 return false; 227 239 } 228 240
+2
drivers/net/ipa/ipa_qmi.c
··· 249 249 .decoded_size = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ, 250 250 .fn = ipa_server_driver_init_complete, 251 251 }, 252 + { }, 252 253 }; 253 254 254 255 /* Handle an INIT_DRIVER response message from the modem. */ ··· 270 269 .decoded_size = IPA_QMI_INIT_DRIVER_RSP_SZ, 271 270 .fn = ipa_client_init_driver, 272 271 }, 272 + { }, 273 273 }; 274 274 275 275 /* Return a pointer to an init modem driver request structure, which contains
+9
drivers/net/phy/broadcom.c
··· 342 342 bcm54xx_adjust_rxrefclk(phydev); 343 343 344 344 switch (BRCM_PHY_MODEL(phydev)) { 345 + case PHY_ID_BCM50610: 346 + case PHY_ID_BCM50610M: 347 + err = bcm54xx_config_clock_delay(phydev); 348 + break; 345 349 case PHY_ID_BCM54210E: 346 350 err = bcm54210e_config_init(phydev); 347 351 break; ··· 402 398 ret = genphy_resume(phydev); 403 399 if (ret < 0) 404 400 return ret; 401 + 402 + /* Upon exiting power down, the PHY remains in an internal reset state 403 + * for 40us 404 + */ 405 + fsleep(40); 405 406 406 407 return bcm54xx_config_init(phydev); 407 408 }
+1 -1
drivers/net/phy/phylink.c
··· 476 476 err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode, 477 477 state->interface); 478 478 if (err < 0) 479 - phylink_err(pl, "mac_prepare failed: %pe\n", 479 + phylink_err(pl, "mac_finish failed: %pe\n", 480 480 ERR_PTR(err)); 481 481 } 482 482 }
+2
drivers/net/usb/cdc-phonet.c
··· 387 387 388 388 err = register_netdev(dev); 389 389 if (err) { 390 + /* Set disconnected flag so that disconnect() returns early. */ 391 + pnd->disconnected = 1; 390 392 usb_driver_release_interface(&usbpn_driver, data_intf); 391 393 goto out; 392 394 }
+4 -1
drivers/net/usb/r8152.c
··· 6553 6553 ops->in_nway = rtl8153_in_nway; 6554 6554 ops->hw_phy_cfg = r8153_hw_phy_cfg; 6555 6555 ops->autosuspend_en = rtl8153_runtime_enable; 6556 - tp->rx_buf_sz = 32 * 1024; 6556 + if (tp->udev->speed < USB_SPEED_SUPER) 6557 + tp->rx_buf_sz = 16 * 1024; 6558 + else 6559 + tp->rx_buf_sz = 32 * 1024; 6557 6560 tp->eee_en = true; 6558 6561 tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX; 6559 6562 break;
+1 -2
drivers/net/veth.c
··· 302 302 if (rxq < rcv->real_num_rx_queues) { 303 303 rq = &rcv_priv->rq[rxq]; 304 304 rcv_xdp = rcu_access_pointer(rq->xdp_prog); 305 - if (rcv_xdp) 306 - skb_record_rx_queue(skb, rxq); 305 + skb_record_rx_queue(skb, rxq); 307 306 } 308 307 309 308 skb_tx_timestamp(skb);
+42 -2
drivers/net/wan/hdlc_x25.c
··· 23 23 24 24 struct x25_state { 25 25 x25_hdlc_proto settings; 26 + bool up; 27 + spinlock_t up_lock; /* Protects "up" */ 26 28 }; 27 29 28 30 static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); ··· 106 104 107 105 static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) 108 106 { 107 + hdlc_device *hdlc = dev_to_hdlc(dev); 108 + struct x25_state *x25st = state(hdlc); 109 109 int result; 110 110 111 111 /* There should be a pseudo header of 1 byte added by upper layers. ··· 118 114 return NETDEV_TX_OK; 119 115 } 120 116 117 + spin_lock_bh(&x25st->up_lock); 118 + if (!x25st->up) { 119 + spin_unlock_bh(&x25st->up_lock); 120 + kfree_skb(skb); 121 + return NETDEV_TX_OK; 122 + } 123 + 121 124 switch (skb->data[0]) { 122 125 case X25_IFACE_DATA: /* Data to be transmitted */ 123 126 skb_pull(skb, 1); 124 127 if ((result = lapb_data_request(dev, skb)) != LAPB_OK) 125 128 dev_kfree_skb(skb); 129 + spin_unlock_bh(&x25st->up_lock); 126 130 return NETDEV_TX_OK; 127 131 128 132 case X25_IFACE_CONNECT: ··· 159 147 break; 160 148 } 161 149 150 + spin_unlock_bh(&x25st->up_lock); 162 151 dev_kfree_skb(skb); 163 152 return NETDEV_TX_OK; 164 153 } ··· 177 164 .data_transmit = x25_data_transmit, 178 165 }; 179 166 hdlc_device *hdlc = dev_to_hdlc(dev); 167 + struct x25_state *x25st = state(hdlc); 180 168 struct lapb_parms_struct params; 181 169 int result; 182 170 ··· 204 190 if (result != LAPB_OK) 205 191 return -EINVAL; 206 192 193 + spin_lock_bh(&x25st->up_lock); 194 + x25st->up = true; 195 + spin_unlock_bh(&x25st->up_lock); 196 + 207 197 return 0; 208 198 } 209 199 ··· 215 197 216 198 static void x25_close(struct net_device *dev) 217 199 { 200 + hdlc_device *hdlc = dev_to_hdlc(dev); 201 + struct x25_state *x25st = state(hdlc); 202 + 203 + spin_lock_bh(&x25st->up_lock); 204 + x25st->up = false; 205 + spin_unlock_bh(&x25st->up_lock); 206 + 218 207 lapb_unregister(dev); 219 208 } 220 209 ··· 230 205 static int x25_rx(struct sk_buff *skb) 231 206 { 232 207 struct net_device *dev = skb->dev; 208 + hdlc_device *hdlc = dev_to_hdlc(dev); 209 + struct x25_state *x25st = state(hdlc); 233 210 234 211 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 235 212 dev->stats.rx_dropped++; 236 213 return NET_RX_DROP; 237 214 } 238 215 239 - if (lapb_data_received(dev, skb) == LAPB_OK) 240 - return NET_RX_SUCCESS; 216 + spin_lock_bh(&x25st->up_lock); 217 + if (!x25st->up) { 218 + spin_unlock_bh(&x25st->up_lock); 219 + kfree_skb(skb); 220 + dev->stats.rx_dropped++; 221 + return NET_RX_DROP; 222 + } 241 223 224 + if (lapb_data_received(dev, skb) == LAPB_OK) { 225 + spin_unlock_bh(&x25st->up_lock); 226 + return NET_RX_SUCCESS; 227 + } 228 + 229 + spin_unlock_bh(&x25st->up_lock); 242 230 dev->stats.rx_errors++; 243 231 dev_kfree_skb_any(skb); 244 232 return NET_RX_DROP; ··· 336 298 return result; 337 299 338 300 memcpy(&state(hdlc)->settings, &new_settings, size); 301 + state(hdlc)->up = false; 302 + spin_lock_init(&state(hdlc)->up_lock); 339 303 340 304 /* There's no header_ops so hard_header_len should be 0. */ 341 305 dev->hard_header_len = 0;
+8 -1
drivers/pinctrl/intel/pinctrl-intel.c
··· 1357 1357 gpps[i].gpio_base = 0; 1358 1358 break; 1359 1359 case INTEL_GPIO_BASE_NOMAP: 1360 + break; 1360 1361 default: 1361 1362 break; 1362 1363 } ··· 1394 1393 gpps[i].size = min(gpp_size, npins); 1395 1394 npins -= gpps[i].size; 1396 1395 1396 + gpps[i].gpio_base = gpps[i].base; 1397 1397 gpps[i].padown_num = padown_num; 1398 1398 1399 1399 /* ··· 1493 1491 if (IS_ERR(regs)) 1494 1492 return PTR_ERR(regs); 1495 1493 1496 - /* Determine community features based on the revision */ 1494 + /* 1495 + * Determine community features based on the revision. 1496 + * A value of all ones means the device is not present. 1497 + */ 1497 1498 value = readl(regs + REVID); 1499 + if (value == ~0u) 1500 + return -ENODEV; 1498 1501 if (((value & REVID_MASK) >> REVID_SHIFT) >= 0x94) { 1499 1502 community->features |= PINCTRL_FEATURE_DEBOUNCE; 1500 1503 community->features |= PINCTRL_FEATURE_1K_PD;
+1 -1
drivers/pinctrl/pinctrl-microchip-sgpio.c
··· 572 572 /* Type value spread over 2 registers sets: low, high bit */ 573 573 sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, addr.bit, 574 574 BIT(addr.port), (!!(type & 0x1)) << addr.port); 575 - sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER + SGPIO_MAX_BITS, addr.bit, 575 + sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, SGPIO_MAX_BITS + addr.bit, 576 576 BIT(addr.port), (!!(type & 0x2)) << addr.port); 577 577 578 578 if (type == SGPIO_INT_TRG_LEVEL)
+8 -5
drivers/pinctrl/pinctrl-rockchip.c
··· 3727 3727 static int __maybe_unused rockchip_pinctrl_resume(struct device *dev) 3728 3728 { 3729 3729 struct rockchip_pinctrl *info = dev_get_drvdata(dev); 3730 - int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, 3731 - rk3288_grf_gpio6c_iomux | 3732 - GPIO6C6_SEL_WRITE_ENABLE); 3730 + int ret; 3733 3731 3734 - if (ret) 3735 - return ret; 3732 + if (info->ctrl->type == RK3288) { 3733 + ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX, 3734 + rk3288_grf_gpio6c_iomux | 3735 + GPIO6C6_SEL_WRITE_ENABLE); 3736 + if (ret) 3737 + return ret; 3738 + } 3736 3739 3737 3740 return pinctrl_force_default(info->pctl_dev); 3738 3741 }
+1 -1
drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
··· 392 392 unsigned long *configs, unsigned int nconfs) 393 393 { 394 394 struct lpi_pinctrl *pctrl = dev_get_drvdata(pctldev->dev); 395 - unsigned int param, arg, pullup, strength; 395 + unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2; 396 396 bool value, output_enabled = false; 397 397 const struct lpi_pingroup *g; 398 398 unsigned long sval;
+8 -8
drivers/pinctrl/qcom/pinctrl-sc7280.c
··· 1439 1439 [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _), 1440 1440 [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _), 1441 1441 [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _), 1442 - [175] = UFS_RESET(ufs_reset, 0x1be000), 1443 - [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1b3000, 15, 0), 1444 - [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1b3000, 13, 6), 1445 - [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1b3000, 11, 3), 1446 - [179] = SDC_QDSD_PINGROUP(sdc1_data, 0x1b3000, 9, 0), 1447 - [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1b4000, 14, 6), 1448 - [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1b4000, 11, 3), 1449 - [182] = SDC_QDSD_PINGROUP(sdc2_data, 0x1b4000, 9, 0), 1442 + [175] = UFS_RESET(ufs_reset, 0xbe000), 1443 + [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6), 1444 + [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6), 1445 + [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xb3000, 11, 3), 1446 + [179] = SDC_QDSD_PINGROUP(sdc1_data, 0xb3000, 9, 0), 1447 + [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0xb4000, 14, 6), 1448 + [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xb4000, 11, 3), 1449 + [182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0), 1450 1450 }; 1451 1451 1452 1452 static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
+1 -1
drivers/pinctrl/qcom/pinctrl-sdx55.c
··· 423 423 424 424 static const char * const qdss_stm_groups[] = { 425 425 "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13", 426 - "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19" "gpio20", "gpio21", "gpio22", 426 + "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", 427 427 "gpio23", "gpio44", "gpio45", "gpio52", "gpio53", "gpio56", "gpio57", "gpio61", "gpio62", 428 428 "gpio63", "gpio64", "gpio65", "gpio66", 429 429 };
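The sdx55 fix is a single comma, but it changes the contents of the table: C concatenates adjacent string literals, so "gpio19" "gpio20" had silently become the single group name "gpio19gpio20" and the array was one entry short. A small standalone demonstration (not driver code):

#include <stdio.h>

int main(void)
{
    /* missing comma: two entries collapse into one */
    const char * const bad[]  = { "gpio19" "gpio20", "gpio21" };
    const char * const good[] = { "gpio19", "gpio20", "gpio21" };

    printf("bad:  %zu entries, first is \"%s\"\n",
           sizeof(bad) / sizeof(bad[0]), bad[0]);
    printf("good: %zu entries, first is \"%s\"\n",
           sizeof(good) / sizeof(good[0]), good[0]);
    return 0;
}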
+8 -3
drivers/platform/x86/Kconfig
··· 1173 1173 depends on PCI 1174 1174 help 1175 1175 The Intel Platform Controller Hub for Intel Core SoCs provides access 1176 - to Power Management Controller registers via a PCI interface. This 1176 + to Power Management Controller registers via various interfaces. This 1177 1177 driver can utilize debugging capabilities and supported features as 1178 - exposed by the Power Management Controller. 1178 + exposed by the Power Management Controller. It also may perform some 1179 + tasks in the PMC in order to enable transition into the SLPS0 state. 1180 + It should be selected on all Intel platforms supported by the driver. 1179 1181 1180 1182 Supported features: 1181 1183 - SLP_S0_RESIDENCY counter 1182 1184 - PCH IP Power Gating status 1183 - - LTR Ignore 1185 + - LTR Ignore / LTR Show 1184 1186 - MPHY/PLL gating status (Sunrisepoint PCH only) 1187 + - SLPS0 Debug registers (Cannonlake/Icelake PCH) 1188 + - Low Power Mode registers (Tigerlake and beyond) 1189 + - PMC quirks as needed to enable SLPS0/S0ix 1185 1190 1186 1191 config INTEL_PMT_CLASS 1187 1192 tristate
+3
drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
··· 185 185 sysfs_remove_group(wmi_priv.enumeration_data[instance_id].attr_name_kobj, 186 186 &enumeration_attr_group); 187 187 } 188 + wmi_priv.enumeration_instances_count = 0; 189 + 188 190 kfree(wmi_priv.enumeration_data); 191 + wmi_priv.enumeration_data = NULL; 189 192 }
+3
drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
··· 175 175 sysfs_remove_group(wmi_priv.integer_data[instance_id].attr_name_kobj, 176 176 &integer_attr_group); 177 177 } 178 + wmi_priv.integer_instances_count = 0; 179 + 178 180 kfree(wmi_priv.integer_data); 181 + wmi_priv.integer_data = NULL; 179 182 }
+3
drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
··· 183 183 sysfs_remove_group(wmi_priv.po_data[instance_id].attr_name_kobj, 184 184 &po_attr_group); 185 185 } 186 + wmi_priv.po_instances_count = 0; 187 + 186 188 kfree(wmi_priv.po_data); 189 + wmi_priv.po_data = NULL; 187 190 }
+3
drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
··· 155 155 sysfs_remove_group(wmi_priv.str_data[instance_id].attr_name_kobj, 156 156 &str_attr_group); 157 157 } 158 + wmi_priv.str_instances_count = 0; 159 + 158 160 kfree(wmi_priv.str_data); 161 + wmi_priv.str_data = NULL; 159 162 }
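The four dell-wmi-sysman hunks above share one idea: after freeing a per-attribute array, reset its instance counter and NULL the pointer so that a second pass through the cleanup path (for instance an error path followed by module removal) cannot loop over a stale count or free the same buffer twice. A generic userspace sketch of that pattern, with a hypothetical structure and free() standing in for kfree() (not the driver's actual types):

#include <stdlib.h>

struct attr_priv {
    int instances_count;
    int *data;
};

static void exit_attributes(struct attr_priv *p)
{
    for (int i = 0; i < p->instances_count; i++)
        ;                       /* per-instance sysfs teardown would go here */
    p->instances_count = 0;     /* later calls iterate zero times */

    free(p->data);              /* kfree() in the driver */
    p->data = NULL;             /* freeing NULL is a no-op, so no double free */
}

int main(void)
{
    struct attr_priv p = { 3, malloc(3 * sizeof(int)) };

    exit_attributes(&p);
    exit_attributes(&p);        /* safe to call again */
    return 0;
}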
+32 -52
drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
··· 210 210 */ 211 211 static int create_attributes_level_sysfs_files(void) 212 212 { 213 - int ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); 213 + int ret; 214 214 215 - if (ret) { 216 - pr_debug("could not create reset_bios file\n"); 215 + ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); 216 + if (ret) 217 217 return ret; 218 - } 219 218 220 219 ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); 221 - if (ret) { 222 - pr_debug("could not create changing_pending_reboot file\n"); 223 - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); 224 - } 225 - return ret; 226 - } 220 + if (ret) 221 + return ret; 227 222 228 - static void release_reset_bios_data(void) 229 - { 230 - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); 231 - sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); 223 + return 0; 232 224 } 233 225 234 226 static ssize_t wmi_sysman_attr_show(struct kobject *kobj, struct attribute *attr, ··· 365 373 */ 366 374 static void release_attributes_data(void) 367 375 { 368 - release_reset_bios_data(); 369 - 370 376 mutex_lock(&wmi_priv.mutex); 371 377 exit_enum_attributes(); 372 378 exit_int_attributes(); ··· 376 386 wmi_priv.authentication_dir_kset = NULL; 377 387 } 378 388 if (wmi_priv.main_dir_kset) { 389 + sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr); 390 + sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr); 379 391 destroy_attribute_objs(wmi_priv.main_dir_kset); 380 392 kset_unregister(wmi_priv.main_dir_kset); 393 + wmi_priv.main_dir_kset = NULL; 381 394 } 382 395 mutex_unlock(&wmi_priv.mutex); 383 - 384 396 } 385 397 386 398 /** ··· 489 497 490 498 err_attr_init: 491 499 mutex_unlock(&wmi_priv.mutex); 492 - release_attributes_data(); 493 500 kfree(obj); 494 501 return retval; 495 502 } ··· 504 513 } 505 514 506 515 ret = init_bios_attr_set_interface(); 507 - if (ret || !wmi_priv.bios_attr_wdev) { 508 - pr_debug("failed to initialize set interface\n"); 509 - goto fail_set_interface; 510 - } 516 + if (ret) 517 + return ret; 511 518 512 519 ret = init_bios_attr_pass_interface(); 513 - if (ret || !wmi_priv.password_attr_wdev) { 514 - pr_debug("failed to initialize pass interface\n"); 515 - goto fail_pass_interface; 520 + if (ret) 521 + goto err_exit_bios_attr_set_interface; 522 + 523 + if (!wmi_priv.bios_attr_wdev || !wmi_priv.password_attr_wdev) { 524 + pr_debug("failed to find set or pass interface\n"); 525 + ret = -ENODEV; 526 + goto err_exit_bios_attr_pass_interface; 516 527 } 517 528 518 529 ret = class_register(&firmware_attributes_class); 519 530 if (ret) 520 - goto fail_class; 531 + goto err_exit_bios_attr_pass_interface; 521 532 522 533 wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), 523 534 NULL, "%s", DRIVER_NAME); 524 535 if (IS_ERR(wmi_priv.class_dev)) { 525 536 ret = PTR_ERR(wmi_priv.class_dev); 526 - goto fail_classdev; 537 + goto err_unregister_class; 527 538 } 528 539 529 540 wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL, 530 541 &wmi_priv.class_dev->kobj); 531 542 if (!wmi_priv.main_dir_kset) { 532 543 ret = -ENOMEM; 533 - goto fail_main_kset; 544 + goto err_destroy_classdev; 534 545 } 535 546 536 547 wmi_priv.authentication_dir_kset = kset_create_and_add("authentication", NULL, 537 548 &wmi_priv.class_dev->kobj); 538 549 if (!wmi_priv.authentication_dir_kset) { 539 550 ret = -ENOMEM; 540 - goto fail_authentication_kset; 551 + goto 
err_release_attributes_data; 541 552 } 542 553 543 554 ret = create_attributes_level_sysfs_files(); 544 555 if (ret) { 545 556 pr_debug("could not create reset BIOS attribute\n"); 546 - goto fail_reset_bios; 557 + goto err_release_attributes_data; 547 558 } 548 559 549 560 ret = init_bios_attributes(ENUM, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID); 550 561 if (ret) { 551 562 pr_debug("failed to populate enumeration type attributes\n"); 552 - goto fail_create_group; 563 + goto err_release_attributes_data; 553 564 } 554 565 555 566 ret = init_bios_attributes(INT, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID); 556 567 if (ret) { 557 568 pr_debug("failed to populate integer type attributes\n"); 558 - goto fail_create_group; 569 + goto err_release_attributes_data; 559 570 } 560 571 561 572 ret = init_bios_attributes(STR, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID); 562 573 if (ret) { 563 574 pr_debug("failed to populate string type attributes\n"); 564 - goto fail_create_group; 575 + goto err_release_attributes_data; 565 576 } 566 577 567 578 ret = init_bios_attributes(PO, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID); 568 579 if (ret) { 569 580 pr_debug("failed to populate pass object type attributes\n"); 570 - goto fail_create_group; 581 + goto err_release_attributes_data; 571 582 } 572 583 573 584 return 0; 574 585 575 - fail_create_group: 586 + err_release_attributes_data: 576 587 release_attributes_data(); 577 588 578 - fail_reset_bios: 579 - if (wmi_priv.authentication_dir_kset) { 580 - kset_unregister(wmi_priv.authentication_dir_kset); 581 - wmi_priv.authentication_dir_kset = NULL; 582 - } 583 - 584 - fail_authentication_kset: 585 - if (wmi_priv.main_dir_kset) { 586 - kset_unregister(wmi_priv.main_dir_kset); 587 - wmi_priv.main_dir_kset = NULL; 588 - } 589 - 590 - fail_main_kset: 589 + err_destroy_classdev: 591 590 device_destroy(&firmware_attributes_class, MKDEV(0, 0)); 592 591 593 - fail_classdev: 592 + err_unregister_class: 594 593 class_unregister(&firmware_attributes_class); 595 594 596 - fail_class: 595 + err_exit_bios_attr_pass_interface: 597 596 exit_bios_attr_pass_interface(); 598 597 599 - fail_pass_interface: 598 + err_exit_bios_attr_set_interface: 600 599 exit_bios_attr_set_interface(); 601 600 602 - fail_set_interface: 603 601 return ret; 604 602 } 605 603
+7
drivers/platform/x86/intel-hid.c
··· 90 90 DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"), 91 91 }, 92 92 }, 93 + { 94 + .ident = "Lenovo ThinkPad X1 Tablet Gen 2", 95 + .matches = { 96 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 97 + DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"), 98 + }, 99 + }, 93 100 { } 94 101 }; 95 102
+10 -2
drivers/platform/x86/intel-vbtn.c
··· 48 48 }; 49 49 50 50 static const struct key_entry intel_vbtn_switchmap[] = { 51 - { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ 52 - { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ 51 + /* 52 + * SW_DOCK should only be reported for docking stations, but DSDTs using the 53 + * intel-vbtn code, always seem to use this for 2-in-1s / convertibles and set 54 + * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0). 55 + * This causes userspace to think the laptop is docked to a port-replicator 56 + * and to disable suspend-on-lid-close, which is undesirable. 57 + * Map the dock events to KEY_IGNORE to avoid this broken SW_DOCK reporting. 58 + */ 59 + { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ 60 + { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ 53 61 { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */ 54 62 { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */ 55 63 { KE_END }
+35 -15
drivers/platform/x86/intel_pmc_core.c
··· 863 863 } 864 864 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll); 865 865 866 - static ssize_t pmc_core_ltr_ignore_write(struct file *file, 867 - const char __user *userbuf, 868 - size_t count, loff_t *ppos) 866 + static int pmc_core_send_ltr_ignore(u32 value) 869 867 { 870 868 struct pmc_dev *pmcdev = &pmc; 871 869 const struct pmc_reg_map *map = pmcdev->map; 872 - u32 val, buf_size, fd; 873 - int err; 874 - 875 - buf_size = count < 64 ? count : 64; 876 - 877 - err = kstrtou32_from_user(userbuf, buf_size, 10, &val); 878 - if (err) 879 - return err; 870 + u32 reg; 871 + int err = 0; 880 872 881 873 mutex_lock(&pmcdev->lock); 882 874 883 - if (val > map->ltr_ignore_max) { 875 + if (value > map->ltr_ignore_max) { 884 876 err = -EINVAL; 885 877 goto out_unlock; 886 878 } 887 879 888 - fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); 889 - fd |= (1U << val); 890 - pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd); 880 + reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); 881 + reg |= BIT(value); 882 + pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg); 891 883 892 884 out_unlock: 893 885 mutex_unlock(&pmcdev->lock); 886 + 887 + return err; 888 + } 889 + 890 + static ssize_t pmc_core_ltr_ignore_write(struct file *file, 891 + const char __user *userbuf, 892 + size_t count, loff_t *ppos) 893 + { 894 + u32 buf_size, value; 895 + int err; 896 + 897 + buf_size = min_t(u32, count, 64); 898 + 899 + err = kstrtou32_from_user(userbuf, buf_size, 10, &value); 900 + if (err) 901 + return err; 902 + 903 + err = pmc_core_send_ltr_ignore(value); 904 + 894 905 return err == 0 ? count : err; 895 906 } 896 907 ··· 1254 1243 platform_set_drvdata(pdev, pmcdev); 1255 1244 pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(); 1256 1245 dmi_check_system(pmc_core_dmi_table); 1246 + 1247 + /* 1248 + * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when 1249 + * a cable is attached. Tell the PMC to ignore it. 1250 + */ 1251 + if (pmcdev->map == &tgl_reg_map) { 1252 + dev_dbg(&pdev->dev, "ignoring GBE LTR\n"); 1253 + pmc_core_send_ltr_ignore(3); 1254 + } 1257 1255 1258 1256 pmc_core_dbgfs_register(pmcdev); 1259 1257
+1 -1
drivers/platform/x86/intel_pmt_class.c
··· 173 173 struct intel_pmt_namespace *ns, 174 174 struct device *parent) 175 175 { 176 - struct resource res; 176 + struct resource res = {0}; 177 177 struct device *dev; 178 178 int ret; 179 179
+6 -7
drivers/platform/x86/intel_pmt_crashlog.c
··· 23 23 #define CRASH_TYPE_OOBMSM 1 24 24 25 25 /* Control Flags */ 26 - #define CRASHLOG_FLAG_DISABLE BIT(27) 26 + #define CRASHLOG_FLAG_DISABLE BIT(28) 27 27 28 28 /* 29 - * Bits 28 and 29 control the state of bit 31. 29 + * Bits 29 and 30 control the state of bit 31. 30 30 * 31 - * Bit 28 will clear bit 31, if set, allowing a new crashlog to be captured. 32 - * Bit 29 will immediately trigger a crashlog to be generated, setting bit 31. 33 - * Bit 30 is read-only and reserved as 0. 31 + * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured. 32 + * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31. 34 33 * Bit 31 is the read-only status with a 1 indicating log is complete. 35 34 */ 36 - #define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(28) 37 - #define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(29) 35 + #define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(29) 36 + #define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(30) 38 37 #define CRASHLOG_FLAG_TRIGGER_COMPLETE BIT(31) 39 38 #define CRASHLOG_FLAG_TRIGGER_MASK GENMASK(31, 28) 40 39
+79 -29
drivers/platform/x86/thinkpad_acpi.c
··· 4081 4081 4082 4082 case TP_HKEY_EV_KEY_NUMLOCK: 4083 4083 case TP_HKEY_EV_KEY_FN: 4084 - case TP_HKEY_EV_KEY_FN_ESC: 4085 4084 /* key press events, we just ignore them as long as the EC 4086 4085 * is still reporting them in the normal keyboard stream */ 4086 + *send_acpi_ev = false; 4087 + *ignore_acpi_ev = true; 4088 + return true; 4089 + 4090 + case TP_HKEY_EV_KEY_FN_ESC: 4091 + /* Get the media key status to foce the status LED to update */ 4092 + acpi_evalf(hkey_handle, NULL, "GMKS", "v"); 4087 4093 *send_acpi_ev = false; 4088 4094 *ignore_acpi_ev = true; 4089 4095 return true; ··· 9851 9845 * Thinkpad sensor interfaces 9852 9846 */ 9853 9847 9848 + #define DYTC_CMD_QUERY 0 /* To get DYTC status - enable/revision */ 9849 + #define DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */ 9850 + #define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */ 9851 + #define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */ 9852 + 9854 9853 #define DYTC_CMD_GET 2 /* To get current IC function and mode */ 9855 9854 #define DYTC_GET_LAPMODE_BIT 17 /* Set when in lapmode */ 9856 9855 ··· 9866 9855 static bool has_lapsensor; 9867 9856 static bool palm_state; 9868 9857 static bool lap_state; 9858 + static int dytc_version; 9869 9859 9870 9860 static int dytc_command(int command, int *output) 9871 9861 { ··· 9878 9866 } 9879 9867 if (!acpi_evalf(dytc_handle, output, NULL, "dd", command)) 9880 9868 return -EIO; 9869 + return 0; 9870 + } 9871 + 9872 + static int dytc_get_version(void) 9873 + { 9874 + int err, output; 9875 + 9876 + /* Check if we've been called before - and just return cached value */ 9877 + if (dytc_version) 9878 + return dytc_version; 9879 + 9880 + /* Otherwise query DYTC and extract version information */ 9881 + err = dytc_command(DYTC_CMD_QUERY, &output); 9882 + /* 9883 + * If support isn't available (ENODEV) then don't return an error 9884 + * and don't create the sysfs group 9885 + */ 9886 + if (err == -ENODEV) 9887 + return 0; 9888 + /* For all other errors we can flag the failure */ 9889 + if (err) 9890 + return err; 9891 + 9892 + /* Check DYTC is enabled and supports mode setting */ 9893 + if (output & BIT(DYTC_QUERY_ENABLE_BIT)) 9894 + dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF; 9895 + 9881 9896 return 0; 9882 9897 } 9883 9898 ··· 10013 9974 if (err) 10014 9975 return err; 10015 9976 } 10016 - if (has_lapsensor) { 9977 + 9978 + /* Check if we know the DYTC version, if we don't then get it */ 9979 + if (!dytc_version) { 9980 + err = dytc_get_version(); 9981 + if (err) 9982 + return err; 9983 + } 9984 + /* 9985 + * Platforms before DYTC version 5 claim to have a lap sensor, but it doesn't work, so we 9986 + * ignore them 9987 + */ 9988 + if (has_lapsensor && (dytc_version >= 5)) { 10017 9989 err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_dytc_lapmode.attr); 10018 9990 if (err) 10019 9991 return err; ··· 10049 9999 * DYTC Platform Profile interface 10050 10000 */ 10051 10001 10052 - #define DYTC_CMD_QUERY 0 /* To get DYTC status - enable/revision */ 10053 10002 #define DYTC_CMD_SET 1 /* To enable/disable IC function mode */ 10054 10003 #define DYTC_CMD_RESET 0x1ff /* To reset back to default */ 10055 - 10056 - #define DYTC_QUERY_ENABLE_BIT 8 /* Bit 8 - 0 = disabled, 1 = enabled */ 10057 - #define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */ 10058 - #define DYTC_QUERY_REV_BIT 28 /* Bits 28 - 31 - revision */ 10059 10004 10060 10005 #define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */ 10061 10006 #define 
DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */ ··· 10187 10142 return err; 10188 10143 10189 10144 if (profile == PLATFORM_PROFILE_BALANCED) { 10190 - /* To get back to balanced mode we just issue a reset command */ 10191 - err = dytc_command(DYTC_CMD_RESET, &output); 10145 + /* 10146 + * To get back to balanced mode we need to issue a reset command. 10147 + * Note we still need to disable CQL mode before hand and re-enable 10148 + * it afterwards, otherwise dytc_lapmode gets reset to 0 and stays 10149 + * stuck at 0 for aprox. 30 minutes. 10150 + */ 10151 + err = dytc_cql_command(DYTC_CMD_RESET, &output); 10192 10152 if (err) 10193 10153 goto unlock; 10194 10154 } else { ··· 10261 10211 if (err) 10262 10212 return err; 10263 10213 10214 + /* Check if we know the DYTC version, if we don't then get it */ 10215 + if (!dytc_version) { 10216 + err = dytc_get_version(); 10217 + if (err) 10218 + return err; 10219 + } 10264 10220 /* Check DYTC is enabled and supports mode setting */ 10265 - if (output & BIT(DYTC_QUERY_ENABLE_BIT)) { 10266 - /* Only DYTC v5.0 and later has this feature. */ 10267 - int dytc_version; 10221 + if (dytc_version >= 5) { 10222 + dbg_printk(TPACPI_DBG_INIT, 10223 + "DYTC version %d: thermal mode available\n", dytc_version); 10224 + /* Create platform_profile structure and register */ 10225 + err = platform_profile_register(&dytc_profile); 10226 + /* 10227 + * If for some reason platform_profiles aren't enabled 10228 + * don't quit terminally. 10229 + */ 10230 + if (err) 10231 + return 0; 10268 10232 10269 - dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF; 10270 - if (dytc_version >= 5) { 10271 - dbg_printk(TPACPI_DBG_INIT, 10272 - "DYTC version %d: thermal mode available\n", dytc_version); 10273 - /* Create platform_profile structure and register */ 10274 - err = platform_profile_register(&dytc_profile); 10275 - /* 10276 - * If for some reason platform_profiles aren't enabled 10277 - * don't quit terminally. 10278 - */ 10279 - if (err) 10280 - return 0; 10281 - 10282 - dytc_profile_available = true; 10283 - /* Ensure initial values are correct */ 10284 - dytc_profile_refresh(); 10285 - } 10233 + dytc_profile_available = true; 10234 + /* Ensure initial values are correct */ 10235 + dytc_profile_refresh(); 10286 10236 } 10287 10237 return 0; 10288 10238 }
+7 -6
drivers/ptp/ptp_qoriq.c
··· 189 189 tmr_add = ptp_qoriq->tmr_add; 190 190 adj = tmr_add; 191 191 192 - /* calculate diff as adj*(scaled_ppm/65536)/1000000 193 - * and round() to the nearest integer 192 + /* 193 + * Calculate diff and round() to the nearest integer 194 + * 195 + * diff = adj * (ppb / 1000000000) 196 + * = adj * scaled_ppm / 65536000000 194 197 */ 195 - adj *= scaled_ppm; 196 - diff = div_u64(adj, 8000000); 197 - diff = (diff >> 13) + ((diff >> 12) & 1); 198 + diff = mul_u64_u64_div_u64(adj, scaled_ppm, 32768000000); 199 + diff = DIV64_U64_ROUND_UP(diff, 2); 198 200 199 201 tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff; 200 - 201 202 ptp_qoriq->write(&regs->ctrl_regs->tmr_add, tmr_add); 202 203 203 204 return 0;
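For the ptp_qoriq hunk, the rewritten comment gives the target formula: diff = adj * scaled_ppm / 65536000000, since scaled_ppm is parts-per-million in 16.16 fixed point (ppb = scaled_ppm * 1000 / 65536). The patch divides by half of that constant and then halves the result with round-up, which amounts to rounding the final value to the nearest integer, and it uses mul_u64_u64_div_u64() so the 64x64-bit product cannot overflow. A userspace sketch with made-up numbers, where a __int128 multiply (gcc/clang extension) stands in for the kernel helper:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* hypothetical nominal tmr_add and a +10 ppm adjustment */
    uint64_t tmr_add    = 0x80000000ull;
    uint64_t scaled_ppm = 10 * 65536;

    /* diff = tmr_add * scaled_ppm / 65536000000, rounded to nearest:
     * divide by half the constant, then halve with round-up */
    unsigned __int128 prod = (unsigned __int128)tmr_add * scaled_ppm;
    uint64_t twice = (uint64_t)(prod / 32768000000ull);
    uint64_t diff  = (twice + 1) / 2;   /* DIV64_U64_ROUND_UP(twice, 2) */

    /* 0x80000000 * 10e-6 = 21474.83648, so this prints 21475 */
    printf("diff = %llu\n", (unsigned long long)diff);
    return 0;
}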
+54 -13
drivers/scsi/ibmvscsi/ibmvfc.c
··· 2372 2372 } 2373 2373 2374 2374 /** 2375 + * ibmvfc_event_is_free - Check if event is free or not 2376 + * @evt: ibmvfc event struct 2377 + * 2378 + * Returns: 2379 + * true / false 2380 + **/ 2381 + static bool ibmvfc_event_is_free(struct ibmvfc_event *evt) 2382 + { 2383 + struct ibmvfc_event *loop_evt; 2384 + 2385 + list_for_each_entry(loop_evt, &evt->queue->free, queue_list) 2386 + if (loop_evt == evt) 2387 + return true; 2388 + 2389 + return false; 2390 + } 2391 + 2392 + /** 2375 2393 * ibmvfc_wait_for_ops - Wait for ops to complete 2376 2394 * @vhost: ibmvfc host struct 2377 2395 * @device: device to match (starget or sdev) ··· 2403 2385 { 2404 2386 struct ibmvfc_event *evt; 2405 2387 DECLARE_COMPLETION_ONSTACK(comp); 2406 - int wait; 2388 + int wait, i, q_index, q_size; 2407 2389 unsigned long flags; 2408 2390 signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ; 2391 + struct ibmvfc_queue *queues; 2409 2392 2410 2393 ENTER; 2394 + if (vhost->mq_enabled && vhost->using_channels) { 2395 + queues = vhost->scsi_scrqs.scrqs; 2396 + q_size = vhost->scsi_scrqs.active_queues; 2397 + } else { 2398 + queues = &vhost->crq; 2399 + q_size = 1; 2400 + } 2401 + 2411 2402 do { 2412 2403 wait = 0; 2413 - spin_lock_irqsave(&vhost->crq.l_lock, flags); 2414 - list_for_each_entry(evt, &vhost->crq.sent, queue_list) { 2415 - if (match(evt, device)) { 2416 - evt->eh_comp = &comp; 2417 - wait++; 2404 + spin_lock_irqsave(vhost->host->host_lock, flags); 2405 + for (q_index = 0; q_index < q_size; q_index++) { 2406 + spin_lock(&queues[q_index].l_lock); 2407 + for (i = 0; i < queues[q_index].evt_pool.size; i++) { 2408 + evt = &queues[q_index].evt_pool.events[i]; 2409 + if (!ibmvfc_event_is_free(evt)) { 2410 + if (match(evt, device)) { 2411 + evt->eh_comp = &comp; 2412 + wait++; 2413 + } 2414 + } 2418 2415 } 2416 + spin_unlock(&queues[q_index].l_lock); 2419 2417 } 2420 - spin_unlock_irqrestore(&vhost->crq.l_lock, flags); 2418 + spin_unlock_irqrestore(vhost->host->host_lock, flags); 2421 2419 2422 2420 if (wait) { 2423 2421 timeout = wait_for_completion_timeout(&comp, timeout); 2424 2422 2425 2423 if (!timeout) { 2426 2424 wait = 0; 2427 - spin_lock_irqsave(&vhost->crq.l_lock, flags); 2428 - list_for_each_entry(evt, &vhost->crq.sent, queue_list) { 2429 - if (match(evt, device)) { 2430 - evt->eh_comp = NULL; 2431 - wait++; 2425 + spin_lock_irqsave(vhost->host->host_lock, flags); 2426 + for (q_index = 0; q_index < q_size; q_index++) { 2427 + spin_lock(&queues[q_index].l_lock); 2428 + for (i = 0; i < queues[q_index].evt_pool.size; i++) { 2429 + evt = &queues[q_index].evt_pool.events[i]; 2430 + if (!ibmvfc_event_is_free(evt)) { 2431 + if (match(evt, device)) { 2432 + evt->eh_comp = NULL; 2433 + wait++; 2434 + } 2435 + } 2432 2436 } 2437 + spin_unlock(&queues[q_index].l_lock); 2433 2438 } 2434 - spin_unlock_irqrestore(&vhost->crq.l_lock, flags); 2439 + spin_unlock_irqrestore(vhost->host->host_lock, flags); 2435 2440 if (wait) 2436 2441 dev_err(vhost->dev, "Timed out waiting for aborted commands\n"); 2437 2442 LEAVE;
+6 -2
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 7806 7806 ioc->pend_os_device_add_sz++; 7807 7807 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, 7808 7808 GFP_KERNEL); 7809 - if (!ioc->pend_os_device_add) 7809 + if (!ioc->pend_os_device_add) { 7810 + r = -ENOMEM; 7810 7811 goto out_free_resources; 7812 + } 7811 7813 7812 7814 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; 7813 7815 ioc->device_remove_in_progress = 7814 7816 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); 7815 - if (!ioc->device_remove_in_progress) 7817 + if (!ioc->device_remove_in_progress) { 7818 + r = -ENOMEM; 7816 7819 goto out_free_resources; 7820 + } 7817 7821 7818 7822 ioc->fwfault_debug = mpt3sas_fwfault_debug; 7819 7823
+1
drivers/scsi/qedi/qedi_main.c
··· 1675 1675 if (!qedi->global_queues[i]) { 1676 1676 QEDI_ERR(&qedi->dbg_ctx, 1677 1677 "Unable to allocation global queue %d.\n", i); 1678 + status = -ENOMEM; 1678 1679 goto mem_alloc_failure; 1679 1680 } 1680 1681
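This qedi hunk and the mpt3sas one above fix the same pattern: an allocation-failure branch jumped to the cleanup label without setting an error code, so the function could return the stale 0 left over from an earlier successful step and report success for a failed setup. A tiny standalone sketch of how that happens (hypothetical names, free() in place of the driver cleanup):

#include <stdio.h>
#include <stdlib.h>

#define ENOMEM 12

static int setup_example(int fail_second_alloc)
{
    void *a, *b;
    int err = 0;                /* left at 0 after the first step succeeds */

    a = malloc(64);
    if (!a) {
        err = -ENOMEM;
        goto out;
    }

    b = fail_second_alloc ? NULL : malloc(64);
    if (!b) {
        err = -ENOMEM;          /* without this line, the goto returns 0 */
        goto out_free_a;
    }

    free(b);                    /* a real driver would keep the resources */
    free(a);
    return 0;

out_free_a:
    free(a);
out:
    return err;
}

int main(void)
{
    printf("setup -> %d\n", setup_example(1));  /* prints -12, not 0 */
    return 0;
}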
+5 -8
drivers/scsi/qla2xxx/qla_target.c
··· 3222 3222 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || 3223 3223 (cmd->sess && cmd->sess->deleted)) { 3224 3224 cmd->state = QLA_TGT_STATE_PROCESSED; 3225 - res = 0; 3226 - goto free; 3225 + return 0; 3227 3226 } 3228 3227 3229 3228 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, ··· 3233 3234 3234 3235 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 3235 3236 &full_req_cnt); 3236 - if (unlikely(res != 0)) 3237 - goto free; 3237 + if (unlikely(res != 0)) { 3238 + return res; 3239 + } 3238 3240 3239 3241 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3240 3242 ··· 3255 3255 vha->flags.online, qla2x00_reset_active(vha), 3256 3256 cmd->reset_count, qpair->chip_reset); 3257 3257 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3258 - res = 0; 3259 - goto free; 3258 + return 0; 3260 3259 } 3261 3260 3262 3261 /* Does F/W have an IOCBs for this request */ ··· 3358 3359 qlt_unmap_sg(vha, cmd); 3359 3360 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3360 3361 3361 - free: 3362 - vha->hw->tgt.tgt_ops->free_cmd(cmd); 3363 3362 return res; 3364 3363 } 3365 3364 EXPORT_SYMBOL(qlt_xmit_response);
-4
drivers/scsi/qla2xxx/tcm_qla2xxx.c
··· 644 644 { 645 645 struct qla_tgt_cmd *cmd = container_of(se_cmd, 646 646 struct qla_tgt_cmd, se_cmd); 647 - struct scsi_qla_host *vha = cmd->vha; 648 647 649 648 if (cmd->aborted) { 650 649 /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task ··· 656 657 cmd->se_cmd.transport_state, 657 658 cmd->se_cmd.t_state, 658 659 cmd->se_cmd.se_cmd_flags); 659 - vha->hw->tgt.tgt_ops->free_cmd(cmd); 660 660 return 0; 661 661 } 662 662 ··· 683 685 { 684 686 struct qla_tgt_cmd *cmd = container_of(se_cmd, 685 687 struct qla_tgt_cmd, se_cmd); 686 - struct scsi_qla_host *vha = cmd->vha; 687 688 int xmit_type = QLA_TGT_XMIT_STATUS; 688 689 689 690 if (cmd->aborted) { ··· 696 699 cmd, kref_read(&cmd->se_cmd.cmd_kref), 697 700 cmd->se_cmd.transport_state, cmd->se_cmd.t_state, 698 701 cmd->se_cmd.se_cmd_flags); 699 - vha->hw->tgt.tgt_ops->free_cmd(cmd); 700 702 return 0; 701 703 } 702 704 cmd->bufflen = se_cmd->data_length;
+13 -1
drivers/scsi/scsi_transport_iscsi.c
··· 2475 2475 */ 2476 2476 mutex_lock(&conn_mutex); 2477 2477 conn->transport->stop_conn(conn, flag); 2478 + conn->state = ISCSI_CONN_DOWN; 2478 2479 mutex_unlock(&conn_mutex); 2479 2480 2480 2481 } ··· 2902 2901 default: 2903 2902 err = transport->set_param(conn, ev->u.set_param.param, 2904 2903 data, ev->u.set_param.len); 2904 + if ((conn->state == ISCSI_CONN_BOUND) || 2905 + (conn->state == ISCSI_CONN_UP)) { 2906 + err = transport->set_param(conn, ev->u.set_param.param, 2907 + data, ev->u.set_param.len); 2908 + } else { 2909 + return -ENOTCONN; 2910 + } 2905 2911 } 2906 2912 2907 2913 return err; ··· 2968 2960 mutex_lock(&conn->ep_mutex); 2969 2961 conn->ep = NULL; 2970 2962 mutex_unlock(&conn->ep_mutex); 2963 + conn->state = ISCSI_CONN_DOWN; 2971 2964 } 2972 2965 2973 2966 transport->ep_disconnect(ep); ··· 3736 3727 ev->r.retcode = transport->bind_conn(session, conn, 3737 3728 ev->u.b_conn.transport_eph, 3738 3729 ev->u.b_conn.is_leading); 3730 + if (!ev->r.retcode) 3731 + conn->state = ISCSI_CONN_BOUND; 3739 3732 mutex_unlock(&conn_mutex); 3740 3733 3741 3734 if (ev->r.retcode || !transport->ep_connect) ··· 3977 3966 static const char *const connection_state_names[] = { 3978 3967 [ISCSI_CONN_UP] = "up", 3979 3968 [ISCSI_CONN_DOWN] = "down", 3980 - [ISCSI_CONN_FAILED] = "failed" 3969 + [ISCSI_CONN_FAILED] = "failed", 3970 + [ISCSI_CONN_BOUND] = "bound" 3981 3971 }; 3982 3972 3983 3973 static ssize_t show_conn_state(struct device *dev,
-1
drivers/soc/litex/litex_soc_ctrl.c
··· 13 13 #include <linux/platform_device.h> 14 14 #include <linux/printk.h> 15 15 #include <linux/module.h> 16 - #include <linux/errno.h> 17 16 #include <linux/io.h> 18 17 #include <linux/reboot.h> 19 18
-74
drivers/soc/qcom/qcom-geni-se.c
··· 3 3 4 4 #include <linux/acpi.h> 5 5 #include <linux/clk.h> 6 - #include <linux/console.h> 7 6 #include <linux/slab.h> 8 7 #include <linux/dma-mapping.h> 9 8 #include <linux/io.h> ··· 91 92 struct device *dev; 92 93 void __iomem *base; 93 94 struct clk_bulk_data ahb_clks[NUM_AHB_CLKS]; 94 - struct geni_icc_path to_core; 95 95 }; 96 96 97 97 static const char * const icc_path_names[] = {"qup-core", "qup-config", 98 98 "qup-memory"}; 99 - 100 - static struct geni_wrapper *earlycon_wrapper; 101 99 102 100 #define QUP_HW_VER_REG 0x4 103 101 ··· 839 843 } 840 844 EXPORT_SYMBOL(geni_icc_disable); 841 845 842 - void geni_remove_earlycon_icc_vote(void) 843 - { 844 - struct platform_device *pdev; 845 - struct geni_wrapper *wrapper; 846 - struct device_node *parent; 847 - struct device_node *child; 848 - 849 - if (!earlycon_wrapper) 850 - return; 851 - 852 - wrapper = earlycon_wrapper; 853 - parent = of_get_next_parent(wrapper->dev->of_node); 854 - for_each_child_of_node(parent, child) { 855 - if (!of_device_is_compatible(child, "qcom,geni-se-qup")) 856 - continue; 857 - 858 - pdev = of_find_device_by_node(child); 859 - if (!pdev) 860 - continue; 861 - 862 - wrapper = platform_get_drvdata(pdev); 863 - icc_put(wrapper->to_core.path); 864 - wrapper->to_core.path = NULL; 865 - 866 - } 867 - of_node_put(parent); 868 - 869 - earlycon_wrapper = NULL; 870 - } 871 - EXPORT_SYMBOL(geni_remove_earlycon_icc_vote); 872 - 873 846 static int geni_se_probe(struct platform_device *pdev) 874 847 { 875 848 struct device *dev = &pdev->dev; 876 849 struct resource *res; 877 850 struct geni_wrapper *wrapper; 878 - struct console __maybe_unused *bcon; 879 - bool __maybe_unused has_earlycon = false; 880 851 int ret; 881 852 882 853 wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL); ··· 866 903 } 867 904 } 868 905 869 - #ifdef CONFIG_SERIAL_EARLYCON 870 - for_each_console(bcon) { 871 - if (!strcmp(bcon->name, "qcom_geni")) { 872 - has_earlycon = true; 873 - break; 874 - } 875 - } 876 - if (!has_earlycon) 877 - goto exit; 878 - 879 - wrapper->to_core.path = devm_of_icc_get(dev, "qup-core"); 880 - if (IS_ERR(wrapper->to_core.path)) 881 - return PTR_ERR(wrapper->to_core.path); 882 - /* 883 - * Put minmal BW request on core clocks on behalf of early console. 884 - * The vote will be removed earlycon exit function. 885 - * 886 - * Note: We are putting vote on each QUP wrapper instead only to which 887 - * earlycon is connected because QUP core clock of different wrapper 888 - * share same voltage domain. If core1 is put to 0, then core2 will 889 - * also run at 0, if not voted. Default ICC vote will be removed ASA 890 - * we touch any of the core clock. 891 - * core1 = core2 = max(core1, core2) 892 - */ 893 - ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW, 894 - GENI_DEFAULT_BW); 895 - if (ret) { 896 - dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n", 897 - __func__, ret); 898 - return ret; 899 - } 900 - 901 - if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart")) 902 - earlycon_wrapper = wrapper; 903 - of_node_put(pdev->dev.of_node); 904 - exit: 905 - #endif 906 906 dev_set_drvdata(dev, wrapper); 907 907 dev_dbg(dev, "GENI SE Driver probed\n"); 908 908 return devm_of_platform_populate(dev);
+6 -2
drivers/soc/ti/omap_prm.c
··· 332 332 { 333 333 .name = "l3init", .base = 0x4ae07300, 334 334 .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon, 335 - .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012, 335 + .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01, 336 336 .clkdm_name = "pcie" 337 337 }, 338 338 { ··· 830 830 reset->prm->data->name, id); 831 831 832 832 exit: 833 - if (reset->clkdm) 833 + if (reset->clkdm) { 834 + /* At least dra7 iva needs a delay before clkdm idle */ 835 + if (has_rstst) 836 + udelay(1); 834 837 pdata->clkdm_allow_idle(reset->clkdm); 838 + } 835 839 836 840 return ret; 837 841 }
+1 -1
drivers/staging/rtl8192e/rtllib.h
··· 1105 1105 bool bWithAironetIE; 1106 1106 bool bCkipSupported; 1107 1107 bool bCcxRmEnable; 1108 - u16 CcxRmState[2]; 1108 + u8 CcxRmState[2]; 1109 1109 bool bMBssidValid; 1110 1110 u8 MBssidMask; 1111 1111 u8 MBssid[ETH_ALEN];
+1 -1
drivers/staging/rtl8192e/rtllib_rx.c
··· 1967 1967 info_element->data[2] == 0x96 && 1968 1968 info_element->data[3] == 0x01) { 1969 1969 if (info_element->len == 6) { 1970 - memcpy(network->CcxRmState, &info_element[4], 2); 1970 + memcpy(network->CcxRmState, &info_element->data[4], 2); 1971 1971 if (network->CcxRmState[0] != 0) 1972 1972 network->bCcxRmEnable = true; 1973 1973 else
+8 -1
drivers/target/target_core_pscsi.c
··· 882 882 if (!bio) { 883 883 new_bio: 884 884 nr_vecs = bio_max_segs(nr_pages); 885 - nr_pages -= nr_vecs; 886 885 /* 887 886 * Calls bio_kmalloc() and sets bio->bi_end_io() 888 887 */ ··· 938 939 939 940 return 0; 940 941 fail: 942 + if (bio) 943 + bio_put(bio); 944 + while (req->bio) { 945 + bio = req->bio; 946 + req->bio = bio->bi_next; 947 + bio_put(bio); 948 + } 949 + req->biotail = NULL; 941 950 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 942 951 } 943 952
-7
drivers/tty/serial/qcom_geni_serial.c
··· 1177 1177 struct console *con) { } 1178 1178 #endif 1179 1179 1180 - static int qcom_geni_serial_earlycon_exit(struct console *con) 1181 - { 1182 - geni_remove_earlycon_icc_vote(); 1183 - return 0; 1184 - } 1185 - 1186 1180 static struct qcom_geni_private_data earlycon_private_data; 1187 1181 1188 1182 static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev, ··· 1227 1233 writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN); 1228 1234 1229 1235 dev->con->write = qcom_geni_serial_earlycon_write; 1230 - dev->con->exit = qcom_geni_serial_earlycon_exit; 1231 1236 dev->con->setup = NULL; 1232 1237 qcom_geni_serial_enable_early_read(&se, dev->con); 1233 1238
+70 -50
drivers/usb/class/cdc-acm.c
··· 147 147 #define acm_send_break(acm, ms) \ 148 148 acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0) 149 149 150 - static void acm_kill_urbs(struct acm *acm) 150 + static void acm_poison_urbs(struct acm *acm) 151 151 { 152 152 int i; 153 153 154 - usb_kill_urb(acm->ctrlurb); 154 + usb_poison_urb(acm->ctrlurb); 155 155 for (i = 0; i < ACM_NW; i++) 156 - usb_kill_urb(acm->wb[i].urb); 156 + usb_poison_urb(acm->wb[i].urb); 157 157 for (i = 0; i < acm->rx_buflimit; i++) 158 - usb_kill_urb(acm->read_urbs[i]); 158 + usb_poison_urb(acm->read_urbs[i]); 159 159 } 160 + 161 + static void acm_unpoison_urbs(struct acm *acm) 162 + { 163 + int i; 164 + 165 + for (i = 0; i < acm->rx_buflimit; i++) 166 + usb_unpoison_urb(acm->read_urbs[i]); 167 + for (i = 0; i < ACM_NW; i++) 168 + usb_unpoison_urb(acm->wb[i].urb); 169 + usb_unpoison_urb(acm->ctrlurb); 170 + } 171 + 160 172 161 173 /* 162 174 * Write buffer management. ··· 238 226 239 227 rc = usb_submit_urb(wb->urb, GFP_ATOMIC); 240 228 if (rc < 0) { 241 - dev_err(&acm->data->dev, 242 - "%s - usb_submit_urb(write bulk) failed: %d\n", 243 - __func__, rc); 229 + if (rc != -EPERM) 230 + dev_err(&acm->data->dev, 231 + "%s - usb_submit_urb(write bulk) failed: %d\n", 232 + __func__, rc); 244 233 acm_write_done(acm, wb); 245 234 } 246 235 return rc; ··· 326 313 acm->iocount.dsr++; 327 314 if (difference & ACM_CTRL_DCD) 328 315 acm->iocount.dcd++; 329 - if (newctrl & ACM_CTRL_BRK) 316 + if (newctrl & ACM_CTRL_BRK) { 330 317 acm->iocount.brk++; 318 + tty_insert_flip_char(&acm->port, 0, TTY_BREAK); 319 + } 331 320 if (newctrl & ACM_CTRL_RI) 332 321 acm->iocount.rng++; 333 322 if (newctrl & ACM_CTRL_FRAMING) ··· 495 480 dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", 496 481 rb->index, urb->actual_length, status); 497 482 498 - if (!acm->dev) { 499 - dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__); 500 - return; 501 - } 502 - 503 483 switch (status) { 504 484 case 0: 505 485 usb_mark_last_busy(acm->dev); ··· 659 649 660 650 res = acm_set_control(acm, val); 661 651 if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE)) 662 - dev_err(&acm->control->dev, "failed to set dtr/rts\n"); 652 + /* This is broken in too many devices to spam the logs */ 653 + dev_dbg(&acm->control->dev, "failed to set dtr/rts\n"); 663 654 } 664 655 665 656 static int acm_port_activate(struct tty_port *port, struct tty_struct *tty) ··· 742 731 * Need to grab write_lock to prevent race with resume, but no need to 743 732 * hold it due to the tty-port initialised flag. 
744 733 */ 734 + acm_poison_urbs(acm); 745 735 spin_lock_irq(&acm->write_lock); 746 736 spin_unlock_irq(&acm->write_lock); 747 737 ··· 759 747 usb_autopm_put_interface_async(acm->control); 760 748 } 761 749 762 - acm_kill_urbs(acm); 750 + acm_unpoison_urbs(acm); 751 + 763 752 } 764 753 765 754 static void acm_tty_cleanup(struct tty_struct *tty) ··· 1309 1296 if (!combined_interfaces && intf != control_interface) 1310 1297 return -ENODEV; 1311 1298 1312 - if (!combined_interfaces && usb_interface_claimed(data_interface)) { 1313 - /* valid in this context */ 1314 - dev_dbg(&intf->dev, "The data interface isn't available\n"); 1315 - return -EBUSY; 1316 - } 1317 - 1318 - 1319 1299 if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 || 1320 1300 control_interface->cur_altsetting->desc.bNumEndpoints == 0) 1321 1301 return -EINVAL; ··· 1329 1323 dev_dbg(&intf->dev, "interfaces are valid\n"); 1330 1324 1331 1325 acm = kzalloc(sizeof(struct acm), GFP_KERNEL); 1332 - if (acm == NULL) 1333 - goto alloc_fail; 1326 + if (!acm) 1327 + return -ENOMEM; 1334 1328 1335 1329 tty_port_init(&acm->port); 1336 1330 acm->port.ops = &acm_port_ops; ··· 1347 1341 1348 1342 minor = acm_alloc_minor(acm); 1349 1343 if (minor < 0) 1350 - goto alloc_fail1; 1344 + goto err_put_port; 1351 1345 1352 1346 acm->minor = minor; 1353 1347 acm->dev = usb_dev; ··· 1378 1372 1379 1373 buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); 1380 1374 if (!buf) 1381 - goto alloc_fail1; 1375 + goto err_put_port; 1382 1376 acm->ctrl_buffer = buf; 1383 1377 1384 1378 if (acm_write_buffers_alloc(acm) < 0) 1385 - goto alloc_fail2; 1379 + goto err_free_ctrl_buffer; 1386 1380 1387 1381 acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL); 1388 1382 if (!acm->ctrlurb) 1389 - goto alloc_fail3; 1383 + goto err_free_write_buffers; 1390 1384 1391 1385 for (i = 0; i < num_rx_buf; i++) { 1392 1386 struct acm_rb *rb = &(acm->read_buffers[i]); ··· 1395 1389 rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL, 1396 1390 &rb->dma); 1397 1391 if (!rb->base) 1398 - goto alloc_fail4; 1392 + goto err_free_read_urbs; 1399 1393 rb->index = i; 1400 1394 rb->instance = acm; 1401 1395 1402 1396 urb = usb_alloc_urb(0, GFP_KERNEL); 1403 1397 if (!urb) 1404 - goto alloc_fail4; 1398 + goto err_free_read_urbs; 1405 1399 1406 1400 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 1407 1401 urb->transfer_dma = rb->dma; ··· 1422 1416 struct acm_wb *snd = &(acm->wb[i]); 1423 1417 1424 1418 snd->urb = usb_alloc_urb(0, GFP_KERNEL); 1425 - if (snd->urb == NULL) 1426 - goto alloc_fail5; 1419 + if (!snd->urb) 1420 + goto err_free_write_urbs; 1427 1421 1428 1422 if (usb_endpoint_xfer_int(epwrite)) 1429 1423 usb_fill_int_urb(snd->urb, usb_dev, acm->out, ··· 1441 1435 1442 1436 i = device_create_file(&intf->dev, &dev_attr_bmCapabilities); 1443 1437 if (i < 0) 1444 - goto alloc_fail5; 1438 + goto err_free_write_urbs; 1445 1439 1446 1440 if (h.usb_cdc_country_functional_desc) { /* export the country data */ 1447 1441 struct usb_cdc_country_functional_desc * cfd = ··· 1486 1480 acm->nb_index = 0; 1487 1481 acm->nb_size = 0; 1488 1482 1489 - dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor); 1490 - 1491 1483 acm->line.dwDTERate = cpu_to_le32(9600); 1492 1484 acm->line.bDataBits = 8; 1493 1485 acm_set_line(acm, &acm->line); 1494 1486 1495 - usb_driver_claim_interface(&acm_driver, data_interface, acm); 1496 - usb_set_intfdata(data_interface, acm); 1487 + if (!acm->combined_interfaces) { 1488 + rv = usb_driver_claim_interface(&acm_driver, data_interface, 
acm); 1489 + if (rv) 1490 + goto err_remove_files; 1491 + } 1497 1492 1498 1493 tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, 1499 1494 &control_interface->dev); 1500 1495 if (IS_ERR(tty_dev)) { 1501 1496 rv = PTR_ERR(tty_dev); 1502 - goto alloc_fail6; 1497 + goto err_release_data_interface; 1503 1498 } 1504 1499 1505 1500 if (quirks & CLEAR_HALT_CONDITIONS) { ··· 1508 1501 usb_clear_halt(usb_dev, acm->out); 1509 1502 } 1510 1503 1504 + dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor); 1505 + 1511 1506 return 0; 1512 - alloc_fail6: 1507 + 1508 + err_release_data_interface: 1509 + if (!acm->combined_interfaces) { 1510 + /* Clear driver data so that disconnect() returns early. */ 1511 + usb_set_intfdata(data_interface, NULL); 1512 + usb_driver_release_interface(&acm_driver, data_interface); 1513 + } 1514 + err_remove_files: 1513 1515 if (acm->country_codes) { 1514 1516 device_remove_file(&acm->control->dev, 1515 1517 &dev_attr_wCountryCodes); 1516 1518 device_remove_file(&acm->control->dev, 1517 1519 &dev_attr_iCountryCodeRelDate); 1518 - kfree(acm->country_codes); 1519 1520 } 1520 1521 device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); 1521 - alloc_fail5: 1522 - usb_set_intfdata(intf, NULL); 1522 + err_free_write_urbs: 1523 1523 for (i = 0; i < ACM_NW; i++) 1524 1524 usb_free_urb(acm->wb[i].urb); 1525 - alloc_fail4: 1525 + err_free_read_urbs: 1526 1526 for (i = 0; i < num_rx_buf; i++) 1527 1527 usb_free_urb(acm->read_urbs[i]); 1528 1528 acm_read_buffers_free(acm); 1529 1529 usb_free_urb(acm->ctrlurb); 1530 - alloc_fail3: 1530 + err_free_write_buffers: 1531 1531 acm_write_buffers_free(acm); 1532 - alloc_fail2: 1532 + err_free_ctrl_buffer: 1533 1533 usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma); 1534 - alloc_fail1: 1534 + err_put_port: 1535 1535 tty_port_put(&acm->port); 1536 - alloc_fail: 1536 + 1537 1537 return rv; 1538 1538 } 1539 1539 ··· 1554 1540 if (!acm) 1555 1541 return; 1556 1542 1557 - mutex_lock(&acm->mutex); 1558 1543 acm->disconnected = true; 1544 + /* 1545 + * there is a circular dependency. acm_softint() can resubmit 1546 + * the URBs in error handling so we need to block any 1547 + * submission right away 1548 + */ 1549 + acm_poison_urbs(acm); 1550 + mutex_lock(&acm->mutex); 1559 1551 if (acm->country_codes) { 1560 1552 device_remove_file(&acm->control->dev, 1561 1553 &dev_attr_wCountryCodes); ··· 1580 1560 tty_kref_put(tty); 1581 1561 } 1582 1562 1583 - acm_kill_urbs(acm); 1584 1563 cancel_delayed_work_sync(&acm->dwork); 1585 1564 1586 1565 tty_unregister_device(acm_tty_driver, acm->minor); ··· 1621 1602 if (cnt) 1622 1603 return 0; 1623 1604 1624 - acm_kill_urbs(acm); 1605 + acm_poison_urbs(acm); 1625 1606 cancel_delayed_work_sync(&acm->dwork); 1626 1607 acm->urbs_in_error_delay = 0; 1627 1608 ··· 1634 1615 struct urb *urb; 1635 1616 int rv = 0; 1636 1617 1618 + acm_unpoison_urbs(acm); 1637 1619 spin_lock_irq(&acm->write_lock); 1638 1620 1639 1621 if (--acm->susp_count)
+4
drivers/usb/core/quirks.c
··· 498 498 /* DJI CineSSD */ 499 499 { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, 500 500 501 + /* Fibocom L850-GL LTE Modem */ 502 + { USB_DEVICE(0x2cb7, 0x0007), .driver_info = 503 + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, 504 + 501 505 /* INTEL VALUE SSD */ 502 506 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, 503 507
+3 -2
drivers/usb/dwc2/hcd.c
··· 4322 4322 if (hsotg->op_state == OTG_STATE_B_PERIPHERAL) 4323 4323 goto unlock; 4324 4324 4325 - if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL) 4325 + if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL || 4326 + hsotg->flags.b.port_connect_status == 0) 4326 4327 goto skip_power_saving; 4327 4328 4328 4329 /* ··· 5399 5398 dwc2_writel(hsotg, hprt0, HPRT0); 5400 5399 5401 5400 /* Wait for the HPRT0.PrtSusp register field to be set */ 5402 - if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000)) 5401 + if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000)) 5403 5402 dev_warn(hsotg->dev, "Suspend wasn't generated\n"); 5404 5403 5405 5404 /*
+2
drivers/usb/dwc3/dwc3-pci.c
··· 120 120 static const struct property_entry dwc3_pci_mrfld_properties[] = { 121 121 PROPERTY_ENTRY_STRING("dr_mode", "otg"), 122 122 PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), 123 + PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"), 124 + PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), 123 125 PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), 124 126 {} 125 127 };
+3
drivers/usb/dwc3/dwc3-qcom.c
··· 244 244 struct device *dev = qcom->dev; 245 245 int ret; 246 246 247 + if (has_acpi_companion(dev)) 248 + return 0; 249 + 247 250 qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr"); 248 251 if (IS_ERR(qcom->icc_path_ddr)) { 249 252 dev_err(dev, "failed to get usb-ddr path: %ld\n",
+6 -5
drivers/usb/dwc3/gadget.c
··· 791 791 reg &= ~DWC3_DALEPENA_EP(dep->number); 792 792 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); 793 793 794 - dep->stream_capable = false; 795 - dep->type = 0; 796 - dep->flags = 0; 797 - 798 794 /* Clear out the ep descriptors for non-ep0 */ 799 795 if (dep->number > 1) { 800 796 dep->endpoint.comp_desc = NULL; ··· 798 802 } 799 803 800 804 dwc3_remove_requests(dwc, dep); 805 + 806 + dep->stream_capable = false; 807 + dep->type = 0; 808 + dep->flags = 0; 801 809 802 810 return 0; 803 811 } ··· 2083 2083 u32 reg; 2084 2084 2085 2085 speed = dwc->gadget_max_speed; 2086 - if (speed > dwc->maximum_speed) 2086 + if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed) 2087 2087 speed = dwc->maximum_speed; 2088 2088 2089 2089 if (speed == USB_SPEED_SUPER_PLUS && ··· 2523 2523 unsigned long flags; 2524 2524 2525 2525 spin_lock_irqsave(&dwc->lock, flags); 2526 + dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS; 2526 2527 dwc->gadget_ssp_rate = rate; 2527 2528 spin_unlock_irqrestore(&dwc->lock, flags); 2528 2529 }
+5 -5
drivers/usb/gadget/udc/amd5536udc_pci.c
··· 153 153 pci_set_master(pdev); 154 154 pci_try_set_mwi(pdev); 155 155 156 + dev->phys_addr = resource; 157 + dev->irq = pdev->irq; 158 + dev->pdev = pdev; 159 + dev->dev = &pdev->dev; 160 + 156 161 /* init dma pools */ 157 162 if (use_dma) { 158 163 retval = init_dma_pools(dev); 159 164 if (retval != 0) 160 165 goto err_dma; 161 166 } 162 - 163 - dev->phys_addr = resource; 164 - dev->irq = pdev->irq; 165 - dev->pdev = pdev; 166 - dev->dev = &pdev->dev; 167 167 168 168 /* general probing */ 169 169 if (udc_probe(dev)) {
+9 -1
drivers/usb/host/xhci-mtk.c
··· 397 397 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; 398 398 if (mtk->lpm_support) 399 399 xhci->quirks |= XHCI_LPM_SUPPORT; 400 + 401 + /* 402 + * MTK xHCI 0.96: PSA is 1 by default even if it doesn't support streams, 403 + * and it's 3 when it does support them. 404 + */ 405 + if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4) 406 + xhci->quirks |= XHCI_BROKEN_STREAMS; 400 407 } 401 408 402 409 /* called during probe() after chip reset completes */ ··· 555 548 if (ret) 556 549 goto put_usb3_hcd; 557 550 558 - if (HCC_MAX_PSA(xhci->hcc_params) >= 4) 551 + if (HCC_MAX_PSA(xhci->hcc_params) >= 4 && 552 + !(xhci->quirks & XHCI_BROKEN_STREAMS)) 559 553 xhci->shared_hcd->can_do_streams = 1; 560 554 561 555 ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+8 -4
drivers/usb/musb/musb_core.c
··· 2004 2004 MUSB_DEVCTL_HR; 2005 2005 switch (devctl & ~s) { 2006 2006 case MUSB_QUIRK_B_DISCONNECT_99: 2007 - musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); 2008 - schedule_delayed_work(&musb->irq_work, 2009 - msecs_to_jiffies(1000)); 2010 - break; 2007 + if (musb->quirk_retries && !musb->flush_irq_work) { 2008 + musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n"); 2009 + schedule_delayed_work(&musb->irq_work, 2010 + msecs_to_jiffies(1000)); 2011 + musb->quirk_retries--; 2012 + break; 2013 + } 2014 + fallthrough; 2011 2015 case MUSB_QUIRK_B_INVALID_VBUS_91: 2012 2016 if (musb->quirk_retries && !musb->flush_irq_work) { 2013 2017 musb_dbg(musb,
+2
drivers/usb/usbip/vhci_hcd.c
··· 594 594 pr_err("invalid port number %d\n", wIndex); 595 595 goto error; 596 596 } 597 + if (wValue >= 32) 598 + goto error; 597 599 if (hcd->speed == HCD_USB3) { 598 600 if ((vhci_hcd->port_status[rhport] & 599 601 USB_SS_PORT_STAT_POWER) != 0) {
+1 -1
drivers/vfio/pci/Kconfig
··· 42 42 43 43 config VFIO_PCI_NVLINK2 44 44 def_bool y 45 - depends on VFIO_PCI && PPC_POWERNV 45 + depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU 46 46 help 47 47 VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
+6
drivers/vfio/vfio_iommu_type1.c
··· 739 739 ret = vfio_lock_acct(dma, lock_acct, false); 740 740 741 741 unpin_out: 742 + if (batch->size == 1 && !batch->offset) { 743 + /* May be a VM_PFNMAP pfn, which the batch can't remember. */ 744 + put_pfn(pfn, dma->prot); 745 + batch->size = 0; 746 + } 747 + 742 748 if (ret < 0) { 743 749 if (pinned && !rsvd) { 744 750 for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
+3
drivers/video/fbdev/core/fbcon.c
··· 1333 1333 1334 1334 ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1; 1335 1335 1336 + if (!ops->cursor) 1337 + return; 1338 + 1336 1339 ops->cursor(vc, info, mode, get_color(vc, info, c, 1), 1337 1340 get_color(vc, info, c, 0)); 1338 1341 }
-3
drivers/video/fbdev/hyperv_fb.c
··· 1031 1031 PCI_DEVICE_ID_HYPERV_VIDEO, NULL); 1032 1032 if (!pdev) { 1033 1033 pr_err("Unable to find PCI Hyper-V video\n"); 1034 - kfree(info->apertures); 1035 1034 return -ENODEV; 1036 1035 } 1037 1036 ··· 1128 1129 } else { 1129 1130 pci_dev_put(pdev); 1130 1131 } 1131 - kfree(info->apertures); 1132 1132 1133 1133 return 0; 1134 1134 ··· 1139 1141 err1: 1140 1142 if (!gen2vm) 1141 1143 pci_dev_put(pdev); 1142 - kfree(info->apertures); 1143 1144 1144 1145 return -ENOMEM; 1145 1146 }
+2 -2
drivers/xen/Kconfig
··· 50 50 51 51 SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'" 52 52 53 - config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT 53 + config XEN_MEMORY_HOTPLUG_LIMIT 54 54 int "Hotplugged memory limit (in GiB) for a PV guest" 55 55 default 512 56 56 depends on XEN_HAVE_PVMMU 57 - depends on XEN_BALLOON_MEMORY_HOTPLUG 57 + depends on MEMORY_HOTPLUG 58 58 help 59 59 Maxmium amount of memory (in GiB) that a PV guest can be 60 60 expanded to when using memory hotplug.
+1 -2
fs/afs/write.c
··· 851 851 fscache_wait_on_page_write(vnode->cache, vmf->page); 852 852 #endif 853 853 854 - if (PageWriteback(vmf->page) && 855 - wait_on_page_bit_killable(vmf->page, PG_writeback) < 0) 854 + if (wait_on_page_writeback_killable(vmf->page)) 856 855 return VM_FAULT_RETRY; 857 856 858 857 if (lock_page_killable(vmf->page) < 0)
+6 -2
fs/block_dev.c
··· 275 275 bio.bi_opf = dio_bio_write_op(iocb); 276 276 task_io_account_write(ret); 277 277 } 278 + if (iocb->ki_flags & IOCB_NOWAIT) 279 + bio.bi_opf |= REQ_NOWAIT; 278 280 if (iocb->ki_flags & IOCB_HIPRI) 279 281 bio_set_polled(&bio, iocb); 280 282 ··· 430 428 bio->bi_opf = dio_bio_write_op(iocb); 431 429 task_io_account_write(bio->bi_iter.bi_size); 432 430 } 431 + if (iocb->ki_flags & IOCB_NOWAIT) 432 + bio->bi_opf |= REQ_NOWAIT; 433 433 434 434 dio->size += bio->bi_iter.bi_size; 435 435 pos += bio->bi_iter.bi_size; ··· 1244 1240 1245 1241 lockdep_assert_held(&bdev->bd_mutex); 1246 1242 1247 - clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state); 1248 - 1249 1243 rescan: 1250 1244 ret = blk_drop_partitions(bdev); 1251 1245 if (ret) 1252 1246 return ret; 1247 + 1248 + clear_bit(GD_NEED_PART_SCAN, &disk->state); 1253 1249 1254 1250 /* 1255 1251 * Historically we only set the capacity to zero for devices that
+6 -4
fs/btrfs/Makefile
··· 7 7 subdir-ccflags-y += -Wmissing-prototypes 8 8 subdir-ccflags-y += -Wold-style-definition 9 9 subdir-ccflags-y += -Wmissing-include-dirs 10 - subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable) 11 - subdir-ccflags-y += $(call cc-option, -Wunused-const-variable) 12 - subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned) 13 - subdir-ccflags-y += $(call cc-option, -Wstringop-truncation) 10 + condflags := \ 11 + $(call cc-option, -Wunused-but-set-variable) \ 12 + $(call cc-option, -Wunused-const-variable) \ 13 + $(call cc-option, -Wpacked-not-aligned) \ 14 + $(call cc-option, -Wstringop-truncation) 15 + subdir-ccflags-y += $(condflags) 14 16 # The following turn off the warnings enabled by -Wextra 15 17 subdir-ccflags-y += -Wno-missing-field-initializers 16 18 subdir-ccflags-y += -Wno-sign-compare
+3
fs/btrfs/dev-replace.c
··· 81 81 struct btrfs_dev_replace_item *ptr; 82 82 u64 src_devid; 83 83 84 + if (!dev_root) 85 + return 0; 86 + 84 87 path = btrfs_alloc_path(); 85 88 if (!path) { 86 89 ret = -ENOMEM;
+17 -2
fs/btrfs/disk-io.c
··· 2387 2387 } else { 2388 2388 set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); 2389 2389 fs_info->dev_root = root; 2390 - btrfs_init_devices_late(fs_info); 2391 2390 } 2391 + /* Initialize fs_info for all devices in any case */ 2392 + btrfs_init_devices_late(fs_info); 2392 2393 2393 2394 /* If IGNOREDATACSUMS is set don't bother reading the csum root. */ 2394 2395 if (!btrfs_test_opt(fs_info, IGNOREDATACSUMS)) { ··· 3010 3009 } 3011 3010 } 3012 3011 3012 + /* 3013 + * btrfs_find_orphan_roots() is responsible for finding all the dead 3014 + * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load 3015 + * them into the fs_info->fs_roots_radix tree. This must be done before 3016 + * calling btrfs_orphan_cleanup() on the tree root. If we don't do it 3017 + * first, then btrfs_orphan_cleanup() will delete a dead root's orphan 3018 + * item before the root's tree is deleted - this means that if we unmount 3019 + * or crash before the deletion completes, on the next mount we will not 3020 + * delete what remains of the tree because the orphan item does not 3021 + * exist anymore, which is what tells us we have a pending deletion. 3022 + */ 3023 + ret = btrfs_find_orphan_roots(fs_info); 3024 + if (ret) 3025 + goto out; 3026 + 3013 3027 ret = btrfs_cleanup_fs_roots(fs_info); 3014 3028 if (ret) 3015 3029 goto out; ··· 3084 3068 } 3085 3069 } 3086 3070 3087 - ret = btrfs_find_orphan_roots(fs_info); 3088 3071 out: 3089 3072 return ret; 3090 3073 }
+9 -9
fs/btrfs/inode.c
··· 3099 3099 * @bio_offset: offset to the beginning of the bio (in bytes) 3100 3100 * @page: page where is the data to be verified 3101 3101 * @pgoff: offset inside the page 3102 + * @start: logical offset in the file 3102 3103 * 3103 3104 * The length of such check is always one sector size. 3104 3105 */ 3105 3106 static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio, 3106 - u32 bio_offset, struct page *page, u32 pgoff) 3107 + u32 bio_offset, struct page *page, u32 pgoff, 3108 + u64 start) 3107 3109 { 3108 3110 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 3109 3111 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); ··· 3132 3130 kunmap_atomic(kaddr); 3133 3131 return 0; 3134 3132 zeroit: 3135 - btrfs_print_data_csum_error(BTRFS_I(inode), page_offset(page) + pgoff, 3136 - csum, csum_expected, io_bio->mirror_num); 3133 + btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected, 3134 + io_bio->mirror_num); 3137 3135 if (io_bio->device) 3138 3136 btrfs_dev_stat_inc_and_print(io_bio->device, 3139 3137 BTRFS_DEV_STAT_CORRUPTION_ERRS); ··· 3186 3184 pg_off += sectorsize, bio_offset += sectorsize) { 3187 3185 int ret; 3188 3186 3189 - ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off); 3187 + ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off, 3188 + page_offset(page) + pg_off); 3190 3189 if (ret < 0) 3191 3190 return -EIO; 3192 3191 } ··· 7913 7910 ASSERT(pgoff < PAGE_SIZE); 7914 7911 if (uptodate && 7915 7912 (!csum || !check_data_csum(inode, io_bio, 7916 - bio_offset, bvec.bv_page, pgoff))) { 7913 + bio_offset, bvec.bv_page, 7914 + pgoff, start))) { 7917 7915 clean_io_failure(fs_info, failure_tree, io_tree, 7918 7916 start, bvec.bv_page, 7919 7917 btrfs_ino(BTRFS_I(inode)), ··· 8172 8168 bio->bi_private = dip; 8173 8169 bio->bi_end_io = btrfs_end_dio_bio; 8174 8170 btrfs_io_bio(bio)->logical = file_offset; 8175 - 8176 - WARN_ON_ONCE(write && btrfs_is_zoned(fs_info) && 8177 - fs_info->max_zone_append_size && 8178 - bio_op(bio) != REQ_OP_ZONE_APPEND); 8179 8171 8180 8172 if (bio_op(bio) == REQ_OP_ZONE_APPEND) { 8181 8173 status = extract_ordered_extent(BTRFS_I(inode), bio,
+10 -2
fs/btrfs/qgroup.c
··· 226 226 { 227 227 struct btrfs_qgroup_list *list; 228 228 229 - btrfs_sysfs_del_one_qgroup(fs_info, qgroup); 230 229 list_del(&qgroup->dirty); 231 230 while (!list_empty(&qgroup->groups)) { 232 231 list = list_first_entry(&qgroup->groups, ··· 242 243 list_del(&list->next_member); 243 244 kfree(list); 244 245 } 245 - kfree(qgroup); 246 246 } 247 247 248 248 /* must be called with qgroup_lock held */ ··· 567 569 qgroup = rb_entry(n, struct btrfs_qgroup, node); 568 570 rb_erase(n, &fs_info->qgroup_tree); 569 571 __del_qgroup_rb(fs_info, qgroup); 572 + btrfs_sysfs_del_one_qgroup(fs_info, qgroup); 573 + kfree(qgroup); 570 574 } 571 575 /* 572 576 * We call btrfs_free_qgroup_config() when unmounting ··· 1578 1578 spin_lock(&fs_info->qgroup_lock); 1579 1579 del_qgroup_rb(fs_info, qgroupid); 1580 1580 spin_unlock(&fs_info->qgroup_lock); 1581 + 1582 + /* 1583 + * Remove the qgroup from sysfs now without holding the qgroup_lock 1584 + * spinlock, since the sysfs_remove_group() function needs to take 1585 + * the mutex kernfs_mutex through kernfs_remove_by_name_ns(). 1586 + */ 1587 + btrfs_sysfs_del_one_qgroup(fs_info, qgroup); 1588 + kfree(qgroup); 1581 1589 out: 1582 1590 mutex_unlock(&fs_info->qgroup_ioctl_lock); 1583 1591 return ret;
+3
fs/btrfs/volumes.c
··· 7448 7448 int item_size; 7449 7449 int i, ret, slot; 7450 7450 7451 + if (!device->fs_info->dev_root) 7452 + return 0; 7453 + 7451 7454 key.objectid = BTRFS_DEV_STATS_OBJECTID; 7452 7455 key.type = BTRFS_PERSISTENT_ITEM_KEY; 7453 7456 key.offset = device->devid;
+6
fs/cachefiles/bind.c
··· 118 118 cache->mnt = path.mnt; 119 119 root = path.dentry; 120 120 121 + ret = -EINVAL; 122 + if (mnt_user_ns(path.mnt) != &init_user_ns) { 123 + pr_warn("File cache on idmapped mounts not supported"); 124 + goto error_unsupported; 125 + } 126 + 121 127 /* check parameters */ 122 128 ret = -EOPNOTSUPP; 123 129 if (d_is_negative(root) ||
+3 -4
fs/cachefiles/rdwr.c
··· 24 24 container_of(wait, struct cachefiles_one_read, monitor); 25 25 struct cachefiles_object *object; 26 26 struct fscache_retrieval *op = monitor->op; 27 - struct wait_bit_key *key = _key; 27 + struct wait_page_key *key = _key; 28 28 struct page *page = wait->private; 29 29 30 30 ASSERT(key); 31 31 32 32 _enter("{%lu},%u,%d,{%p,%u}", 33 33 monitor->netfs_page->index, mode, sync, 34 - key->flags, key->bit_nr); 34 + key->page, key->bit_nr); 35 35 36 - if (key->flags != &page->flags || 37 - key->bit_nr != PG_locked) 36 + if (key->page != page || key->bit_nr != PG_locked) 38 37 return 0; 39 38 40 39 _debug("--- monitor %p %lx ---", page, page->flags);
+1 -2
fs/cifs/cifsacl.c
··· 1130 1130 } 1131 1131 1132 1132 /* If it's any one of the ACE we're replacing, skip! */ 1133 - if (!mode_from_sid && 1134 - ((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) || 1133 + if (((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) || 1135 1134 (compare_sids(&pntace->sid, pownersid) == 0) || 1136 1135 (compare_sids(&pntace->sid, pgrpsid) == 0) || 1137 1136 (compare_sids(&pntace->sid, &sid_everyone) == 0) ||
+2 -2
fs/cifs/cifsglob.h
··· 919 919 bool binding:1; /* are we binding the session? */ 920 920 __u16 session_flags; 921 921 __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE]; 922 - __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE]; 923 - __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE]; 922 + __u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE]; 923 + __u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE]; 924 924 __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE]; 925 925 926 926 __u8 binding_preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+5
fs/cifs/cifspdu.h
··· 147 147 */ 148 148 #define SMB3_SIGN_KEY_SIZE (16) 149 149 150 + /* 151 + * Size of the smb3 encryption/decryption keys 152 + */ 153 + #define SMB3_ENC_DEC_KEY_SIZE (32) 154 + 150 155 #define CIFS_CLIENT_CHALLENGE_SIZE (8) 151 156 #define CIFS_SERVER_CHALLENGE_SIZE (8) 152 157 #define CIFS_HMAC_MD5_HASH_SIZE (16)
+1
fs/cifs/file.c
··· 165 165 goto posix_open_ret; 166 166 } 167 167 } else { 168 + cifs_revalidate_mapping(*pinode); 168 169 cifs_fattr_to_inode(*pinode, &fattr); 169 170 } 170 171
+1
fs/cifs/smb2glob.h
··· 58 58 #define SMB2_HMACSHA256_SIZE (32) 59 59 #define SMB2_CMACAES_SIZE (16) 60 60 #define SMB3_SIGNKEY_SIZE (16) 61 + #define SMB3_GCM128_CRYPTKEY_SIZE (16) 61 62 #define SMB3_GCM256_CRYPTKEY_SIZE (32) 62 63 63 64 /* Maximum buffer size value we can send with 1 credit */
+2 -2
fs/cifs/smb2misc.c
··· 754 754 } 755 755 } 756 756 spin_unlock(&cifs_tcp_ses_lock); 757 - cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); 758 - return false; 757 + cifs_dbg(FYI, "No file id matched, oplock break ignored\n"); 758 + return true; 759 759 } 760 760 761 761 void
+20 -7
fs/cifs/smb2ops.c
··· 2038 2038 { 2039 2039 int rc; 2040 2040 unsigned int ret_data_len; 2041 + struct inode *inode; 2041 2042 struct duplicate_extents_to_file dup_ext_buf; 2042 2043 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink); 2043 2044 ··· 2055 2054 cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n", 2056 2055 src_off, dest_off, len); 2057 2056 2058 - rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false); 2059 - if (rc) 2060 - goto duplicate_extents_out; 2057 + inode = d_inode(trgtfile->dentry); 2058 + if (inode->i_size < dest_off + len) { 2059 + rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false); 2060 + if (rc) 2061 + goto duplicate_extents_out; 2061 2062 2063 + /* 2064 + * Although also could set plausible allocation size (i_blocks) 2065 + * here in addition to setting the file size, in reflink 2066 + * it is likely that the target file is sparse. Its allocation 2067 + * size will be queried on next revalidate, but it is important 2068 + * to make sure that file's cached size is updated immediately 2069 + */ 2070 + cifs_setsize(inode, dest_off + len); 2071 + } 2062 2072 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, 2063 2073 trgtfile->fid.volatile_fid, 2064 2074 FSCTL_DUPLICATE_EXTENTS_TO_FILE, ··· 4170 4158 if (ses->Suid == ses_id) { 4171 4159 ses_enc_key = enc ? ses->smb3encryptionkey : 4172 4160 ses->smb3decryptionkey; 4173 - memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE); 4161 + memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); 4174 4162 spin_unlock(&cifs_tcp_ses_lock); 4175 4163 return 0; 4176 4164 } ··· 4197 4185 int rc = 0; 4198 4186 struct scatterlist *sg; 4199 4187 u8 sign[SMB2_SIGNATURE_SIZE] = {}; 4200 - u8 key[SMB3_SIGN_KEY_SIZE]; 4188 + u8 key[SMB3_ENC_DEC_KEY_SIZE]; 4201 4189 struct aead_request *req; 4202 4190 char *iv; 4203 4191 unsigned int iv_len; ··· 4221 4209 tfm = enc ? server->secmech.ccmaesencrypt : 4222 4210 server->secmech.ccmaesdecrypt; 4223 4211 4224 - if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM) 4212 + if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) || 4213 + (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) 4225 4214 rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE); 4226 4215 else 4227 - rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE); 4216 + rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE); 4228 4217 4229 4218 if (rc) { 4230 4219 cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
+28 -9
fs/cifs/smb2transport.c
··· 298 298 { 299 299 unsigned char zero = 0x0; 300 300 __u8 i[4] = {0, 0, 0, 1}; 301 - __u8 L[4] = {0, 0, 0, 128}; 301 + __u8 L128[4] = {0, 0, 0, 128}; 302 + __u8 L256[4] = {0, 0, 1, 0}; 302 303 int rc = 0; 303 304 unsigned char prfhash[SMB2_HMACSHA256_SIZE]; 304 305 unsigned char *hashptr = prfhash; ··· 355 354 goto smb3signkey_ret; 356 355 } 357 356 358 - rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash, 359 - L, 4); 357 + if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) || 358 + (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) { 359 + rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash, 360 + L256, 4); 361 + } else { 362 + rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash, 363 + L128, 4); 364 + } 360 365 if (rc) { 361 366 cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__); 362 367 goto smb3signkey_ret; ··· 397 390 const struct derivation_triplet *ptriplet) 398 391 { 399 392 int rc; 393 + #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS 394 + struct TCP_Server_Info *server = ses->server; 395 + #endif 400 396 401 397 /* 402 398 * All channels use the same encryption/decryption keys but ··· 432 422 rc = generate_key(ses, ptriplet->encryption.label, 433 423 ptriplet->encryption.context, 434 424 ses->smb3encryptionkey, 435 - SMB3_SIGN_KEY_SIZE); 425 + SMB3_ENC_DEC_KEY_SIZE); 436 426 rc = generate_key(ses, ptriplet->decryption.label, 437 427 ptriplet->decryption.context, 438 428 ses->smb3decryptionkey, 439 - SMB3_SIGN_KEY_SIZE); 429 + SMB3_ENC_DEC_KEY_SIZE); 440 430 if (rc) 441 431 return rc; 442 432 } ··· 452 442 */ 453 443 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid), 454 444 &ses->Suid); 445 + cifs_dbg(VFS, "Cipher type %d\n", server->cipher_type); 455 446 cifs_dbg(VFS, "Session Key %*ph\n", 456 447 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response); 457 448 cifs_dbg(VFS, "Signing Key %*ph\n", 458 449 SMB3_SIGN_KEY_SIZE, ses->smb3signingkey); 459 - cifs_dbg(VFS, "ServerIn Key %*ph\n", 460 - SMB3_SIGN_KEY_SIZE, ses->smb3encryptionkey); 461 - cifs_dbg(VFS, "ServerOut Key %*ph\n", 462 - SMB3_SIGN_KEY_SIZE, ses->smb3decryptionkey); 450 + if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) || 451 + (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) { 452 + cifs_dbg(VFS, "ServerIn Key %*ph\n", 453 + SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3encryptionkey); 454 + cifs_dbg(VFS, "ServerOut Key %*ph\n", 455 + SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3decryptionkey); 456 + } else { 457 + cifs_dbg(VFS, "ServerIn Key %*ph\n", 458 + SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3encryptionkey); 459 + cifs_dbg(VFS, "ServerOut Key %*ph\n", 460 + SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3decryptionkey); 461 + } 463 462 #endif 464 463 return rc; 465 464 }
+9 -5
fs/gfs2/super.c
··· 162 162 int error; 163 163 164 164 error = init_threads(sdp); 165 - if (error) 165 + if (error) { 166 + gfs2_withdraw_delayed(sdp); 166 167 return error; 168 + } 167 169 168 170 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); 169 171 if (gfs2_withdrawn(sdp)) { ··· 752 750 static int gfs2_freeze(struct super_block *sb) 753 751 { 754 752 struct gfs2_sbd *sdp = sb->s_fs_info; 755 - int error = 0; 753 + int error; 756 754 757 755 mutex_lock(&sdp->sd_freeze_mutex); 758 - if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) 756 + if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) { 757 + error = -EBUSY; 759 758 goto out; 759 + } 760 760 761 761 for (;;) { 762 762 if (gfs2_withdrawn(sdp)) { ··· 799 795 struct gfs2_sbd *sdp = sb->s_fs_info; 800 796 801 797 mutex_lock(&sdp->sd_freeze_mutex); 802 - if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN || 798 + if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN || 803 799 !gfs2_holder_initialized(&sdp->sd_freeze_gh)) { 804 800 mutex_unlock(&sdp->sd_freeze_mutex); 805 - return 0; 801 + return -EINVAL; 806 802 } 807 803 808 804 gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+23 -13
fs/io-wq.c
··· 16 16 #include <linux/rculist_nulls.h> 17 17 #include <linux/cpu.h> 18 18 #include <linux/tracehook.h> 19 - #include <linux/freezer.h> 20 19 21 20 #include "../kernel/sched/sched.h" 22 21 #include "io-wq.h" ··· 387 388 388 389 static bool io_flush_signals(void) 389 390 { 390 - if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) { 391 + if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) { 391 392 __set_current_state(TASK_RUNNING); 392 - if (current->task_works) 393 - task_work_run(); 394 - clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL); 393 + tracehook_notify_signal(); 395 394 return true; 396 395 } 397 396 return false; ··· 484 487 worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING); 485 488 io_wqe_inc_running(worker); 486 489 487 - sprintf(buf, "iou-wrk-%d", wq->task_pid); 490 + snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task_pid); 488 491 set_task_comm(current, buf); 489 492 490 493 while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) { ··· 502 505 if (io_flush_signals()) 503 506 continue; 504 507 ret = schedule_timeout(WORKER_IDLE_TIMEOUT); 505 - if (try_to_freeze() || ret) 506 - continue; 507 - if (fatal_signal_pending(current)) 508 + if (signal_pending(current)) { 509 + struct ksignal ksig; 510 + 511 + if (!get_signal(&ksig)) 512 + continue; 508 513 break; 514 + } 515 + if (ret) 516 + continue; 509 517 /* timed out, exit unless we're the fixed worker */ 510 518 if (test_bit(IO_WQ_BIT_EXIT, &wq->state) || 511 519 !(worker->flags & IO_WORKER_F_FIXED)) ··· 711 709 char buf[TASK_COMM_LEN]; 712 710 int node; 713 711 714 - sprintf(buf, "iou-mgr-%d", wq->task_pid); 712 + snprintf(buf, sizeof(buf), "iou-mgr-%d", wq->task_pid); 715 713 set_task_comm(current, buf); 716 714 717 715 do { 718 716 set_current_state(TASK_INTERRUPTIBLE); 719 717 io_wq_check_workers(wq); 720 718 schedule_timeout(HZ); 721 - try_to_freeze(); 722 - if (fatal_signal_pending(current)) 719 + if (signal_pending(current)) { 720 + struct ksignal ksig; 721 + 722 + if (!get_signal(&ksig)) 723 + continue; 723 724 set_bit(IO_WQ_BIT_EXIT, &wq->state); 725 + } 724 726 } while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)); 725 727 726 728 io_wq_check_workers(wq); ··· 1071 1065 1072 1066 for_each_node(node) { 1073 1067 struct io_wqe *wqe = wq->wqes[node]; 1074 - WARN_ON_ONCE(!wq_list_empty(&wqe->work_list)); 1068 + struct io_cb_cancel_data match = { 1069 + .fn = io_wq_work_match_all, 1070 + .cancel_all = true, 1071 + }; 1072 + io_wqe_cancel_pending_work(wqe, &match); 1075 1073 kfree(wqe); 1076 1074 } 1077 1075 io_wq_put_hash(wq->hash);
+79 -60
fs/io_uring.c
··· 78 78 #include <linux/task_work.h> 79 79 #include <linux/pagemap.h> 80 80 #include <linux/io_uring.h> 81 - #include <linux/freezer.h> 82 81 83 82 #define CREATE_TRACE_POINTS 84 83 #include <trace/events/io_uring.h> ··· 697 698 REQ_F_NO_FILE_TABLE_BIT, 698 699 REQ_F_LTIMEOUT_ACTIVE_BIT, 699 700 REQ_F_COMPLETE_INLINE_BIT, 701 + REQ_F_REISSUE_BIT, 700 702 701 703 /* not a real bit, just to check we're not overflowing the space */ 702 704 __REQ_F_LAST_BIT, ··· 741 741 REQ_F_LTIMEOUT_ACTIVE = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT), 742 742 /* completion is deferred through io_comp_state */ 743 743 REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT), 744 + /* caller should reissue async */ 745 + REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), 744 746 }; 745 747 746 748 struct async_poll { ··· 1097 1095 io_for_each_link(req, head) { 1098 1096 if (req->flags & REQ_F_INFLIGHT) 1099 1097 return true; 1100 - if (req->task->files == files) 1101 - return true; 1102 1098 } 1103 1099 return false; 1104 1100 } ··· 1216 1216 if (req->flags & REQ_F_ISREG) { 1217 1217 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL)) 1218 1218 io_wq_hash_work(&req->work, file_inode(req->file)); 1219 - } else { 1219 + } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) { 1220 1220 if (def->unbound_nonreg_file) 1221 1221 req->work.flags |= IO_WQ_WORK_UNBOUND; 1222 1222 } ··· 1239 1239 BUG_ON(!tctx); 1240 1240 BUG_ON(!tctx->io_wq); 1241 1241 1242 - trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, 1243 - &req->work, req->flags); 1244 1242 /* init ->work of the whole link before punting */ 1245 1243 io_prep_async_link(req); 1244 + trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, 1245 + &req->work, req->flags); 1246 1246 io_wq_enqueue(tctx->io_wq, &req->work); 1247 1247 if (link) 1248 1248 io_queue_linked_timeout(link); 1249 1249 } 1250 1250 1251 - static void io_kill_timeout(struct io_kiocb *req) 1251 + static void io_kill_timeout(struct io_kiocb *req, int status) 1252 1252 { 1253 1253 struct io_timeout_data *io = req->async_data; 1254 1254 int ret; ··· 1258 1258 atomic_set(&req->ctx->cq_timeouts, 1259 1259 atomic_read(&req->ctx->cq_timeouts) + 1); 1260 1260 list_del_init(&req->timeout.list); 1261 - io_cqring_fill_event(req, 0); 1261 + io_cqring_fill_event(req, status); 1262 1262 io_put_req_deferred(req, 1); 1263 1263 } 1264 - } 1265 - 1266 - /* 1267 - * Returns true if we found and killed one or more timeouts 1268 - */ 1269 - static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, 1270 - struct files_struct *files) 1271 - { 1272 - struct io_kiocb *req, *tmp; 1273 - int canceled = 0; 1274 - 1275 - spin_lock_irq(&ctx->completion_lock); 1276 - list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { 1277 - if (io_match_task(req, tsk, files)) { 1278 - io_kill_timeout(req); 1279 - canceled++; 1280 - } 1281 - } 1282 - spin_unlock_irq(&ctx->completion_lock); 1283 - return canceled != 0; 1284 1264 } 1285 1265 1286 1266 static void __io_queue_deferred(struct io_ring_ctx *ctx) ··· 1307 1327 break; 1308 1328 1309 1329 list_del_init(&req->timeout.list); 1310 - io_kill_timeout(req); 1330 + io_kill_timeout(req, 0); 1311 1331 } while (!list_empty(&ctx->timeout_list)); 1312 1332 1313 1333 ctx->cq_last_tm_flush = seq; ··· 2479 2499 return false; 2480 2500 return true; 2481 2501 } 2502 + #else 2503 + static bool io_rw_should_reissue(struct io_kiocb *req) 2504 + { 2505 + return false; 2506 + } 2482 2507 #endif 2483 2508 2484 2509 static bool 
io_rw_reissue(struct io_kiocb *req) ··· 2509 2524 { 2510 2525 int cflags = 0; 2511 2526 2512 - if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req)) 2513 - return; 2514 - if (res != req->result) 2515 - req_set_fail_links(req); 2516 - 2517 2527 if (req->rw.kiocb.ki_flags & IOCB_WRITE) 2518 2528 kiocb_end_write(req); 2529 + if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) { 2530 + req->flags |= REQ_F_REISSUE; 2531 + return; 2532 + } 2533 + if (res != req->result) 2534 + req_set_fail_links(req); 2519 2535 if (req->flags & REQ_F_BUFFER_SELECTED) 2520 2536 cflags = io_put_rw_kbuf(req); 2521 2537 __io_req_complete(req, issue_flags, res, cflags); ··· 3293 3307 3294 3308 ret = io_iter_do_read(req, iter); 3295 3309 3296 - if (ret == -EIOCBQUEUED) { 3297 - if (req->async_data) 3298 - iov_iter_revert(iter, io_size - iov_iter_count(iter)); 3299 - goto out_free; 3300 - } else if (ret == -EAGAIN) { 3310 + if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { 3301 3311 /* IOPOLL retry should happen for io-wq threads */ 3302 3312 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) 3303 3313 goto done; ··· 3303 3321 /* some cases will consume bytes even on error returns */ 3304 3322 iov_iter_revert(iter, io_size - iov_iter_count(iter)); 3305 3323 ret = 0; 3324 + } else if (ret == -EIOCBQUEUED) { 3325 + goto out_free; 3306 3326 } else if (ret <= 0 || ret == io_size || !force_nonblock || 3307 3327 (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) { 3308 3328 /* read all, failed, already did sync or don't want to retry */ ··· 3417 3433 else 3418 3434 ret2 = -EINVAL; 3419 3435 3436 + if (req->flags & REQ_F_REISSUE) 3437 + ret2 = -EAGAIN; 3438 + 3420 3439 /* 3421 3440 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just 3422 3441 * retry them without IOCB_NOWAIT. 
··· 3429 3442 /* no retry on NONBLOCK nor RWF_NOWAIT */ 3430 3443 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) 3431 3444 goto done; 3432 - if (ret2 == -EIOCBQUEUED && req->async_data) 3433 - iov_iter_revert(iter, io_size - iov_iter_count(iter)); 3434 3445 if (!force_nonblock || ret2 != -EAGAIN) { 3435 3446 /* IOPOLL retry should happen for io-wq threads */ 3436 3447 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) ··· 3963 3978 static int io_provide_buffers_prep(struct io_kiocb *req, 3964 3979 const struct io_uring_sqe *sqe) 3965 3980 { 3981 + unsigned long size; 3966 3982 struct io_provide_buf *p = &req->pbuf; 3967 3983 u64 tmp; 3968 3984 ··· 3977 3991 p->addr = READ_ONCE(sqe->addr); 3978 3992 p->len = READ_ONCE(sqe->len); 3979 3993 3980 - if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs))) 3994 + size = (unsigned long)p->len * p->nbufs; 3995 + if (!access_ok(u64_to_user_ptr(p->addr), size)) 3981 3996 return -EFAULT; 3982 3997 3983 3998 p->bgid = READ_ONCE(sqe->buf_group); ··· 4807 4820 ret = -ENOMEM; 4808 4821 goto out; 4809 4822 } 4810 - io = req->async_data; 4811 4823 memcpy(req->async_data, &__io, sizeof(__io)); 4812 4824 return -EAGAIN; 4813 4825 } ··· 5569 5583 5570 5584 data->mode = io_translate_timeout_mode(flags); 5571 5585 hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode); 5572 - io_req_track_inflight(req); 5586 + if (is_timeout_link) 5587 + io_req_track_inflight(req); 5573 5588 return 0; 5574 5589 } 5575 5590 ··· 6173 6186 ret = -ECANCELED; 6174 6187 6175 6188 if (!ret) { 6189 + req->flags &= ~REQ_F_REISSUE; 6176 6190 do { 6177 6191 ret = io_issue_sqe(req, 0); 6178 6192 /* ··· 6467 6479 ret = io_init_req(ctx, req, sqe); 6468 6480 if (unlikely(ret)) { 6469 6481 fail_req: 6470 - io_put_req(req); 6471 - io_req_complete(req, ret); 6472 6482 if (link->head) { 6473 6483 /* fail even hard links since we don't submit */ 6474 6484 link->head->flags |= REQ_F_FAIL_LINK; ··· 6474 6488 io_req_complete(link->head, -ECANCELED); 6475 6489 link->head = NULL; 6476 6490 } 6491 + io_put_req(req); 6492 + io_req_complete(req, ret); 6477 6493 return ret; 6478 6494 } 6479 6495 ret = io_req_prep(req, sqe); ··· 6728 6740 char buf[TASK_COMM_LEN]; 6729 6741 DEFINE_WAIT(wait); 6730 6742 6731 - sprintf(buf, "iou-sqp-%d", sqd->task_pid); 6743 + snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid); 6732 6744 set_task_comm(current, buf); 6733 6745 current->pf_io_worker = NULL; 6734 6746 ··· 6743 6755 int ret; 6744 6756 bool cap_entries, sqt_spin, needs_sched; 6745 6757 6746 - if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) { 6758 + if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || 6759 + signal_pending(current)) { 6760 + bool did_sig = false; 6761 + 6747 6762 mutex_unlock(&sqd->lock); 6763 + if (signal_pending(current)) { 6764 + struct ksignal ksig; 6765 + 6766 + did_sig = get_signal(&ksig); 6767 + } 6748 6768 cond_resched(); 6749 6769 mutex_lock(&sqd->lock); 6770 + if (did_sig) 6771 + break; 6750 6772 io_run_task_work(); 6751 6773 io_run_task_work_head(&sqd->park_task_work); 6752 6774 timeout = jiffies + sqd->sq_thread_idle; 6753 6775 continue; 6754 6776 } 6755 - if (fatal_signal_pending(current)) 6756 - break; 6757 6777 sqt_spin = false; 6758 6778 cap_entries = !list_is_singular(&sqd->ctx_list); 6759 6779 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { ··· 6804 6808 6805 6809 mutex_unlock(&sqd->lock); 6806 6810 schedule(); 6807 - try_to_freeze(); 6808 6811 mutex_lock(&sqd->lock); 6809 6812 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) 6810 6813 
io_ring_clear_wakeup_flag(ctx); ··· 6868 6873 return 1; 6869 6874 if (!signal_pending(current)) 6870 6875 return 0; 6871 - if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL)) 6876 + if (test_thread_flag(TIF_NOTIFY_SIGNAL)) 6872 6877 return -ERESTARTSYS; 6873 6878 return -EINTR; 6874 6879 } ··· 8558 8563 struct io_tctx_node *node; 8559 8564 int ret; 8560 8565 8566 + /* prevent SQPOLL from submitting new requests */ 8567 + if (ctx->sq_data) { 8568 + io_sq_thread_park(ctx->sq_data); 8569 + list_del_init(&ctx->sqd_list); 8570 + io_sqd_update_thread_idle(ctx->sq_data); 8571 + io_sq_thread_unpark(ctx->sq_data); 8572 + } 8573 + 8561 8574 /* 8562 8575 * If we're doing polled IO and end up having requests being 8563 8576 * submitted async (out-of-line), then completions can come in while ··· 8602 8599 io_ring_ctx_free(ctx); 8603 8600 } 8604 8601 8602 + /* Returns true if we found and killed one or more timeouts */ 8603 + static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, 8604 + struct files_struct *files) 8605 + { 8606 + struct io_kiocb *req, *tmp; 8607 + int canceled = 0; 8608 + 8609 + spin_lock_irq(&ctx->completion_lock); 8610 + list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { 8611 + if (io_match_task(req, tsk, files)) { 8612 + io_kill_timeout(req, -ECANCELED); 8613 + canceled++; 8614 + } 8615 + } 8616 + if (canceled != 0) 8617 + io_commit_cqring(ctx); 8618 + spin_unlock_irq(&ctx->completion_lock); 8619 + if (canceled != 0) 8620 + io_cqring_ev_posted(ctx); 8621 + return canceled != 0; 8622 + } 8623 + 8605 8624 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) 8606 8625 { 8607 8626 unsigned long index; ··· 8638 8613 xa_for_each(&ctx->personalities, index, creds) 8639 8614 io_unregister_personality(ctx, index); 8640 8615 mutex_unlock(&ctx->uring_lock); 8641 - 8642 - /* prevent SQPOLL from submitting new requests */ 8643 - if (ctx->sq_data) { 8644 - io_sq_thread_park(ctx->sq_data); 8645 - list_del_init(&ctx->sqd_list); 8646 - io_sqd_update_thread_idle(ctx->sq_data); 8647 - io_sq_thread_unpark(ctx->sq_data); 8648 - } 8649 8616 8650 8617 io_kill_timeouts(ctx, NULL, NULL); 8651 8618 io_poll_remove_all(ctx, NULL, NULL); ··· 9015 8998 9016 8999 /* make sure overflow events are dropped */ 9017 9000 atomic_inc(&tctx->in_idle); 9001 + __io_uring_files_cancel(NULL); 9002 + 9018 9003 do { 9019 9004 /* read completions before cancelations */ 9020 9005 inflight = tctx_inflight(tctx);
+1 -1
fs/reiserfs/xattr.h
··· 44 44 45 45 static inline int reiserfs_xattrs_initialized(struct super_block *sb) 46 46 { 47 - return REISERFS_SB(sb)->priv_root != NULL; 47 + return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root; 48 48 } 49 49 50 50 #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
+6 -2
fs/squashfs/export.c
··· 152 152 start = le64_to_cpu(table[n]); 153 153 end = le64_to_cpu(table[n + 1]); 154 154 155 - if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { 155 + if (start >= end 156 + || (end - start) > 157 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { 156 158 kfree(table); 157 159 return ERR_PTR(-EINVAL); 158 160 } 159 161 } 160 162 161 163 start = le64_to_cpu(table[indexes - 1]); 162 - if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) { 164 + if (start >= lookup_table_start || 165 + (lookup_table_start - start) > 166 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { 163 167 kfree(table); 164 168 return ERR_PTR(-EINVAL); 165 169 }
+4 -2
fs/squashfs/id.c
··· 97 97 start = le64_to_cpu(table[n]); 98 98 end = le64_to_cpu(table[n + 1]); 99 99 100 - if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { 100 + if (start >= end || (end - start) > 101 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { 101 102 kfree(table); 102 103 return ERR_PTR(-EINVAL); 103 104 } 104 105 } 105 106 106 107 start = le64_to_cpu(table[indexes - 1]); 107 - if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) { 108 + if (start >= id_table_start || (id_table_start - start) > 109 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { 108 110 kfree(table); 109 111 return ERR_PTR(-EINVAL); 110 112 }
+1
fs/squashfs/squashfs_fs.h
··· 17 17 18 18 /* size of metadata (inode and directory) blocks */ 19 19 #define SQUASHFS_METADATA_SIZE 8192 20 + #define SQUASHFS_BLOCK_OFFSET 2 20 21 21 22 /* default size of block device I/O */ 22 23 #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
+4 -2
fs/squashfs/xattr_id.c
··· 109 109 start = le64_to_cpu(table[n]); 110 110 end = le64_to_cpu(table[n + 1]); 111 111 112 - if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) { 112 + if (start >= end || (end - start) > 113 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { 113 114 kfree(table); 114 115 return ERR_PTR(-EINVAL); 115 116 } 116 117 } 117 118 118 119 start = le64_to_cpu(table[indexes - 1]); 119 - if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) { 120 + if (start >= table_start || (table_start - start) > 121 + (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) { 120 122 kfree(table); 121 123 return ERR_PTR(-EINVAL); 122 124 }
+1
include/acpi/acpi_bus.h
··· 233 233 234 234 struct acpi_device_pnp { 235 235 acpi_bus_id bus_id; /* Object name */ 236 + int instance_no; /* Instance number of this object */ 236 237 struct acpi_pnp_type type; /* ID type */ 237 238 acpi_bus_address bus_address; /* _ADR */ 238 239 char *unique_id; /* _UID */
+8 -1
include/linux/acpi.h
··· 222 222 void __acpi_unmap_table(void __iomem *map, unsigned long size); 223 223 int early_acpi_boot_init(void); 224 224 int acpi_boot_init (void); 225 + void acpi_boot_table_prepare (void); 225 226 void acpi_boot_table_init (void); 226 227 int acpi_mps_check (void); 227 228 int acpi_numa_init (void); 228 229 230 + int acpi_locate_initial_tables (void); 231 + void acpi_reserve_initial_tables (void); 232 + void acpi_table_init_complete (void); 229 233 int acpi_table_init (void); 230 234 int acpi_table_parse(char *id, acpi_tbl_table_handler handler); 231 235 int __init acpi_table_parse_entries(char *id, unsigned long table_size, ··· 818 814 return 0; 819 815 } 820 816 817 + static inline void acpi_boot_table_prepare(void) 818 + { 819 + } 820 + 821 821 static inline void acpi_boot_table_init(void) 822 822 { 823 - return; 824 823 } 825 824 826 825 static inline int acpi_mps_check(void)
-2
include/linux/blkdev.h
··· 85 85 #define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) 86 86 /* account into disk and partition IO statistics */ 87 87 #define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) 88 - /* request came from our alloc pool */ 89 - #define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) 90 88 /* runtime pm request */ 91 89 #define RQF_PM ((__force req_flags_t)(1 << 15)) 92 90 /* on IO scheduler merge hash */
+25 -8
include/linux/bpf.h
··· 21 21 #include <linux/capability.h> 22 22 #include <linux/sched/mm.h> 23 23 #include <linux/slab.h> 24 + #include <linux/percpu-refcount.h> 24 25 25 26 struct bpf_verifier_env; 26 27 struct bpf_verifier_log; ··· 557 556 * fentry = a set of program to run before calling original function 558 557 * fexit = a set of program to run after original function 559 558 */ 560 - int arch_prepare_bpf_trampoline(void *image, void *image_end, 559 + struct bpf_tramp_image; 560 + int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end, 561 561 const struct btf_func_model *m, u32 flags, 562 562 struct bpf_tramp_progs *tprogs, 563 563 void *orig_call); ··· 567 565 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start); 568 566 u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog); 569 567 void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start); 568 + void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr); 569 + void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr); 570 570 571 571 struct bpf_ksym { 572 572 unsigned long start; ··· 585 581 BPF_TRAMP_MODIFY_RETURN, 586 582 BPF_TRAMP_MAX, 587 583 BPF_TRAMP_REPLACE, /* more than MAX */ 584 + }; 585 + 586 + struct bpf_tramp_image { 587 + void *image; 588 + struct bpf_ksym ksym; 589 + struct percpu_ref pcref; 590 + void *ip_after_call; 591 + void *ip_epilogue; 592 + union { 593 + struct rcu_head rcu; 594 + struct work_struct work; 595 + }; 588 596 }; 589 597 590 598 struct bpf_trampoline { ··· 621 605 /* Number of attached programs. A counter per kind. */ 622 606 int progs_cnt[BPF_TRAMP_MAX]; 623 607 /* Executable image of trampoline */ 624 - void *image; 608 + struct bpf_tramp_image *cur_image; 625 609 u64 selector; 626 - struct bpf_ksym ksym; 627 610 }; 628 611 629 612 struct bpf_attach_target_info { ··· 706 691 void bpf_image_ksym_del(struct bpf_ksym *ksym); 707 692 void bpf_ksym_add(struct bpf_ksym *ksym); 708 693 void bpf_ksym_del(struct bpf_ksym *ksym); 694 + int bpf_jit_charge_modmem(u32 pages); 695 + void bpf_jit_uncharge_modmem(u32 pages); 709 696 #else 710 697 static inline int bpf_trampoline_link_prog(struct bpf_prog *prog, 711 698 struct bpf_trampoline *tr) ··· 804 787 bool func_proto_unreliable; 805 788 bool sleepable; 806 789 bool tail_call_reachable; 807 - enum bpf_tramp_prog_type trampoline_prog_type; 808 790 struct hlist_node tramp_hlist; 809 791 /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ 810 792 const struct btf_type *attach_func_proto; ··· 1109 1093 _ret; \ 1110 1094 }) 1111 1095 1112 - #define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \ 1096 + #define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \ 1113 1097 ({ \ 1114 1098 struct bpf_prog_array_item *_item; \ 1115 1099 struct bpf_prog *_prog; \ ··· 1122 1106 goto _out; \ 1123 1107 _item = &_array->items[0]; \ 1124 1108 while ((_prog = READ_ONCE(_item->prog))) { \ 1125 - bpf_cgroup_storage_set(_item->cgroup_storage); \ 1109 + if (set_cg_storage) \ 1110 + bpf_cgroup_storage_set(_item->cgroup_storage); \ 1126 1111 _ret &= func(_prog, ctx); \ 1127 1112 _item++; \ 1128 1113 } \ ··· 1170 1153 }) 1171 1154 1172 1155 #define BPF_PROG_RUN_ARRAY(array, ctx, func) \ 1173 - __BPF_PROG_RUN_ARRAY(array, ctx, func, false) 1156 + __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true) 1174 1157 1175 1158 #define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \ 1176 - __BPF_PROG_RUN_ARRAY(array, ctx, func, true) 1159 + __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false) 1177 1160 1178 1161 #ifdef 
CONFIG_BPF_SYSCALL 1179 1162 DECLARE_PER_CPU(int, bpf_prog_active);
+14 -1
include/linux/device-mapper.h
··· 253 253 #define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY) 254 254 255 255 /* 256 - * Indicates that a target supports host-managed zoned block devices. 256 + * Indicates support for zoned block devices: 257 + * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned 258 + * block devices but does not support combining different zoned models. 259 + * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple 260 + * devices with different zoned models. 257 261 */ 258 262 #ifdef CONFIG_BLK_DEV_ZONED 259 263 #define DM_TARGET_ZONED_HM 0x00000040 ··· 278 274 */ 279 275 #define DM_TARGET_PASSES_CRYPTO 0x00000100 280 276 #define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO) 277 + 278 + #ifdef CONFIG_BLK_DEV_ZONED 279 + #define DM_TARGET_MIXED_ZONED_MODEL 0x00000200 280 + #define dm_target_supports_mixed_zoned_model(type) \ 281 + ((type)->features & DM_TARGET_MIXED_ZONED_MODEL) 282 + #else 283 + #define DM_TARGET_MIXED_ZONED_MODEL 0x00000000 284 + #define dm_target_supports_mixed_zoned_model(type) (false) 285 + #endif 281 286 282 287 struct dm_target { 283 288 struct dm_table *table;
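As a rough illustration of how a target would advertise the new flag, here is an entirely hypothetical target type; a real target would also provide dtr, iterate_devices, io_hints and a map function that actually remaps the bio:

#include <linux/device-mapper.h>
#include <linux/module.h>

static int mix_example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	return 0;	/* no per-target state in this sketch */
}

static int mix_example_map(struct dm_target *ti, struct bio *bio)
{
	return DM_MAPIO_KILL;	/* placeholder: a real target remaps the bio */
}

static struct target_type mix_example_target = {
	.name	  = "mix-example",
	.version  = {1, 0, 0},
	/* the table may combine zoned and non-zoned devices under this target */
	.features = DM_TARGET_MIXED_ZONED_MODEL,
	.module	  = THIS_MODULE,
	.ctr	  = mix_example_ctr,
	.map	  = mix_example_map,
};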
+23
include/linux/extcon.h
··· 271 271 struct extcon_dev *edev, unsigned int id, 272 272 struct notifier_block *nb) { } 273 273 274 + static inline int extcon_register_notifier_all(struct extcon_dev *edev, 275 + struct notifier_block *nb) 276 + { 277 + return 0; 278 + } 279 + 280 + static inline int extcon_unregister_notifier_all(struct extcon_dev *edev, 281 + struct notifier_block *nb) 282 + { 283 + return 0; 284 + } 285 + 286 + static inline int devm_extcon_register_notifier_all(struct device *dev, 287 + struct extcon_dev *edev, 288 + struct notifier_block *nb) 289 + { 290 + return 0; 291 + } 292 + 293 + static inline void devm_extcon_unregister_notifier_all(struct device *dev, 294 + struct extcon_dev *edev, 295 + struct notifier_block *nb) { } 296 + 274 297 static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name) 275 298 { 276 299 return ERR_PTR(-ENODEV);
+1 -1
include/linux/firmware/intel/stratix10-svc-client.h
··· 56 56 * COMMAND_RECONFIG_FLAG_PARTIAL: 57 57 * Set to FPGA configuration type (full or partial). 58 58 */ 59 - #define COMMAND_RECONFIG_FLAG_PARTIAL 1 59 + #define COMMAND_RECONFIG_FLAG_PARTIAL 0 60 60 61 61 /* 62 62 * Timeout settings for service clients:
+8 -1
include/linux/host1x.h
··· 320 320 int host1x_device_init(struct host1x_device *device); 321 321 int host1x_device_exit(struct host1x_device *device); 322 322 323 - int host1x_client_register(struct host1x_client *client); 323 + int __host1x_client_register(struct host1x_client *client, 324 + struct lock_class_key *key); 325 + #define host1x_client_register(class) \ 326 + ({ \ 327 + static struct lock_class_key __key; \ 328 + __host1x_client_register(class, &__key); \ 329 + }) 330 + 324 331 int host1x_client_unregister(struct host1x_client *client); 325 332 326 333 int host1x_client_suspend(struct host1x_client *client);
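With the wrapper above, every call site gets its own static lock_class_key, so lockdep can tell apart the mutexes of clients registered from different drivers. A hedged usage sketch (the driver function and naming are made up):

#include <linux/device.h>
#include <linux/host1x.h>

static int example_client_init(struct device *dev, struct host1x_client *client)
{
	int err;

	client->dev = dev;

	/* expands to __host1x_client_register(client, &__key) with a
	 * lock_class_key unique to this call site
	 */
	err = host1x_client_register(client);
	if (err < 0)
		dev_err(dev, "failed to register host1x client: %d\n", err);

	return err;
}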
+13 -2
include/linux/hugetlb_cgroup.h
··· 113 113 return !cgroup_subsys_enabled(hugetlb_cgrp_subsys); 114 114 } 115 115 116 + static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg) 117 + { 118 + css_put(&h_cg->css); 119 + } 120 + 116 121 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, 117 122 struct hugetlb_cgroup **ptr); 118 123 extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages, ··· 143 138 144 139 extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, 145 140 struct file_region *rg, 146 - unsigned long nr_pages); 141 + unsigned long nr_pages, 142 + bool region_del); 147 143 148 144 extern void hugetlb_cgroup_file_init(void) __init; 149 145 extern void hugetlb_cgroup_migrate(struct page *oldhpage, ··· 153 147 #else 154 148 static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, 155 149 struct file_region *rg, 156 - unsigned long nr_pages) 150 + unsigned long nr_pages, 151 + bool region_del) 157 152 { 158 153 } 159 154 ··· 190 183 static inline bool hugetlb_cgroup_disabled(void) 191 184 { 192 185 return true; 186 + } 187 + 188 + static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg) 189 + { 193 190 } 194 191 195 192 static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+2 -1
include/linux/if_macvlan.h
··· 43 43 if (likely(success)) { 44 44 struct vlan_pcpu_stats *pcpu_stats; 45 45 46 - pcpu_stats = this_cpu_ptr(vlan->pcpu_stats); 46 + pcpu_stats = get_cpu_ptr(vlan->pcpu_stats); 47 47 u64_stats_update_begin(&pcpu_stats->syncp); 48 48 pcpu_stats->rx_packets++; 49 49 pcpu_stats->rx_bytes += len; 50 50 if (multicast) 51 51 pcpu_stats->rx_multicast++; 52 52 u64_stats_update_end(&pcpu_stats->syncp); 53 + put_cpu_ptr(vlan->pcpu_stats); 53 54 } else { 54 55 this_cpu_inc(vlan->pcpu_stats->rx_errors); 55 56 }
+2 -2
include/linux/memblock.h
··· 460 460 /* 461 461 * Set the allocation direction to bottom-up or top-down. 462 462 */ 463 - static inline __init void memblock_set_bottom_up(bool enable) 463 + static inline __init_memblock void memblock_set_bottom_up(bool enable) 464 464 { 465 465 memblock.bottom_up = enable; 466 466 } ··· 470 470 * if this is true, that said, memblock will allocate memory 471 471 * in bottom-up direction. 472 472 */ 473 - static inline __init bool memblock_bottom_up(void) 473 + static inline __init_memblock bool memblock_bottom_up(void) 474 474 { 475 475 return memblock.bottom_up; 476 476 }
+7
include/linux/mlx5/qp.h
··· 547 547 } 548 548 } 549 549 550 + static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev) 551 + { 552 + return !MLX5_CAP_ROCE(dev, qp_ts_format) ? 553 + MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING : 554 + MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT; 555 + } 556 + 550 557 #endif /* MLX5_QP_H */
+15 -3
include/linux/mm.h
··· 1461 1461 1462 1462 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) 1463 1463 1464 + /* 1465 + * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid 1466 + * setting tags for all pages to native kernel tag value 0xff, as the default 1467 + * value 0x00 maps to 0xff. 1468 + */ 1469 + 1464 1470 static inline u8 page_kasan_tag(const struct page *page) 1465 1471 { 1466 - if (kasan_enabled()) 1467 - return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; 1468 - return 0xff; 1472 + u8 tag = 0xff; 1473 + 1474 + if (kasan_enabled()) { 1475 + tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; 1476 + tag ^= 0xff; 1477 + } 1478 + 1479 + return tag; 1469 1480 } 1470 1481 1471 1482 static inline void page_kasan_tag_set(struct page *page, u8 tag) 1472 1483 { 1473 1484 if (kasan_enabled()) { 1485 + tag ^= 0xff; 1474 1486 page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); 1475 1487 page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; 1476 1488 }
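A short worked example of the xor convention described in the new comment (values are illustrative):

/*
 * Freshly allocated page->flags keep the tag bits at 0x00, so
 * page_kasan_tag() reports 0x00 ^ 0xff = 0xff, the native kernel tag,
 * without any extra write to the flags.
 *
 * page_kasan_tag_set(page, 0x2a) stores 0x2a ^ 0xff = 0xd5 in the flags;
 * reading it back gives 0xd5 ^ 0xff = 0x2a again.
 */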
+5 -5
include/linux/mmu_notifier.h
··· 169 169 * the last refcount is dropped. 170 170 * 171 171 * If blockable argument is set to false then the callback cannot 172 - * sleep and has to return with -EAGAIN. 0 should be returned 173 - * otherwise. Please note that if invalidate_range_start approves 174 - * a non-blocking behavior then the same applies to 175 - * invalidate_range_end. 176 - * 172 + * sleep and has to return with -EAGAIN if sleeping would be required. 173 + * 0 should be returned otherwise. Please note that notifiers that can 174 + * fail invalidate_range_start are not allowed to implement 175 + * invalidate_range_end, as there is no mechanism for informing the 176 + * notifier that its start failed. 177 177 */ 178 178 int (*invalidate_range_start)(struct mmu_notifier *subscription, 179 179 const struct mmu_notifier_range *range);
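A sketch of what the reworded rule means for an implementation (the notifier below is hypothetical): a notifier that can fail a non-blockable start must not provide invalidate_range_end, since nothing tells it that its start failed.

#include <linux/mmu_notifier.h>

static int example_invalidate_range_start(struct mmu_notifier *sub,
					  const struct mmu_notifier_range *range)
{
	if (!mmu_notifier_range_blockable(range))
		return -EAGAIN;	/* would have to sleep; caller retries */

	/* sleepable invalidation work would go here */
	return 0;
}

static const struct mmu_notifier_ops example_mmu_notifier_ops = {
	.invalidate_range_start	= example_invalidate_range_start,
	/* no .invalidate_range_end: a start that can fail must not pair with it */
};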
+1 -1
include/linux/mutex.h
··· 185 185 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) 186 186 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) 187 187 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) 188 - # define mutex_lock_io_nested(lock, subclass) mutex_lock(lock) 188 + # define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock) 189 189 #endif 190 190 191 191 /*
+2
include/linux/netdevice.h
··· 360 360 NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */ 361 361 NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/ 362 362 NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ 363 + NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */ 363 364 }; 364 365 365 366 enum { ··· 373 372 NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), 374 373 NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL), 375 374 NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), 375 + NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED), 376 376 }; 377 377 378 378 enum gro_result {
+2 -5
include/linux/netfilter/x_tables.h
··· 227 227 unsigned int valid_hooks; 228 228 229 229 /* Man behind the curtain... */ 230 - struct xt_table_info __rcu *private; 230 + struct xt_table_info *private; 231 231 232 232 /* Set this to THIS_MODULE if you are a module, otherwise NULL */ 233 233 struct module *me; ··· 376 376 * since addend is most likely 1 377 377 */ 378 378 __this_cpu_add(xt_recseq.sequence, addend); 379 - smp_wmb(); 379 + smp_mb(); 380 380 381 381 return addend; 382 382 } ··· 447 447 } 448 448 449 449 struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); 450 - 451 - struct xt_table_info 452 - *xt_table_get_private_protected(const struct xt_table *table); 453 450 454 451 #ifdef CONFIG_COMPAT 455 452 #include <net/compat.h>
+1 -1
include/linux/pagemap.h
··· 559 559 return pgoff; 560 560 } 561 561 562 - /* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */ 563 562 struct wait_page_key { 564 563 struct page *page; 565 564 int bit_nr; ··· 682 683 683 684 int put_and_wait_on_page_locked(struct page *page, int state); 684 685 void wait_on_page_writeback(struct page *page); 686 + int wait_on_page_writeback_killable(struct page *page); 685 687 extern void end_page_writeback(struct page *page); 686 688 void wait_for_stable_page(struct page *page); 687 689
-2
include/linux/qcom-geni-se.h
··· 460 460 int geni_icc_enable(struct geni_se *se); 461 461 462 462 int geni_icc_disable(struct geni_se *se); 463 - 464 - void geni_remove_earlycon_icc_vote(void); 465 463 #endif 466 464 #endif
+1
include/linux/skbuff.h
··· 285 285 struct tc_skb_ext { 286 286 __u32 chain; 287 287 __u16 mru; 288 + bool post_ct; 288 289 }; 289 290 #endif 290 291
+1
include/linux/usermode_driver.h
··· 14 14 int umd_load_blob(struct umd_info *info, const void *data, size_t len); 15 15 int umd_unload_blob(struct umd_info *info); 16 16 int fork_usermode_driver(struct umd_info *info); 17 + void umd_cleanup_helper(struct umd_info *info); 17 18 18 19 #endif /* __LINUX_USERMODE_DRIVER_H__ */
+3 -1
include/linux/xarray.h
··· 229 229 * 230 230 * This structure is used either directly or via the XA_LIMIT() macro 231 231 * to communicate the range of IDs that are valid for allocation. 232 - * Two common ranges are predefined for you: 232 + * Three common ranges are predefined for you: 233 233 * * xa_limit_32b - [0 - UINT_MAX] 234 234 * * xa_limit_31b - [0 - INT_MAX] 235 + * * xa_limit_16b - [0 - USHRT_MAX] 235 236 */ 236 237 struct xa_limit { 237 238 u32 max; ··· 243 242 244 243 #define xa_limit_32b XA_LIMIT(0, UINT_MAX) 245 244 #define xa_limit_31b XA_LIMIT(0, INT_MAX) 245 + #define xa_limit_16b XA_LIMIT(0, USHRT_MAX) 246 246 247 247 typedef unsigned __bitwise xa_mark_t; 248 248 #define XA_MARK_0 ((__force xa_mark_t)0U)
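For reference, a minimal sketch of the new limit in use (array and function names are illustrative):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_ids);

static int example_alloc_id(void *entry)
{
	u32 id;
	int err;

	/* allocate the lowest free index in [0, USHRT_MAX] for @entry */
	err = xa_alloc(&example_ids, &id, entry, xa_limit_16b, GFP_KERNEL);
	if (err)
		return err;

	return id;
}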
+11
include/net/dst.h
··· 550 550 dst->ops->update_pmtu(dst, NULL, skb, mtu, false); 551 551 } 552 552 553 + struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie); 554 + void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, 555 + struct sk_buff *skb, u32 mtu, bool confirm_neigh); 556 + void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk, 557 + struct sk_buff *skb); 558 + u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old); 559 + struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst, 560 + struct sk_buff *skb, 561 + const void *daddr); 562 + unsigned int dst_blackhole_mtu(const struct dst_entry *dst); 563 + 553 564 #endif /* _NET_DST_H */
+1 -1
include/net/inet_connection_sock.h
··· 282 282 return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog; 283 283 } 284 284 285 - void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); 285 + bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); 286 286 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req); 287 287 288 288 static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
+3
include/net/netfilter/nf_tables.h
··· 1536 1536 struct nft_flowtable *flowtable; 1537 1537 bool update; 1538 1538 struct list_head hook_list; 1539 + u32 flags; 1539 1540 }; 1540 1541 1541 1542 #define nft_trans_flowtable(trans) \ ··· 1545 1544 (((struct nft_trans_flowtable *)trans->data)->update) 1546 1545 #define nft_trans_flowtable_hooks(trans) \ 1547 1546 (((struct nft_trans_flowtable *)trans->data)->hook_list) 1547 + #define nft_trans_flowtable_flags(trans) \ 1548 + (((struct nft_trans_flowtable *)trans->data)->flags) 1548 1549 1549 1550 int __init nft_chain_filter_init(void); 1550 1551 void nft_chain_filter_fini(void);
+24
include/net/nexthop.h
··· 410 410 int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg, 411 411 struct netlink_ext_ack *extack); 412 412 413 + /* Caller should either hold rcu_read_lock(), or RTNL. */ 413 414 static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh) 414 415 { 415 416 struct nh_info *nhi; ··· 425 424 } 426 425 427 426 nhi = rcu_dereference_rtnl(nh->nh_info); 427 + if (nhi->family == AF_INET6) 428 + return &nhi->fib6_nh; 429 + 430 + return NULL; 431 + } 432 + 433 + /* Variant of nexthop_fib6_nh(). 434 + * Caller should either hold rcu_read_lock_bh(), or RTNL. 435 + */ 436 + static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh) 437 + { 438 + struct nh_info *nhi; 439 + 440 + if (nh->is_group) { 441 + struct nh_group *nh_grp; 442 + 443 + nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp); 444 + nh = nexthop_mpath_select(nh_grp, 0); 445 + if (!nh) 446 + return NULL; 447 + } 448 + 449 + nhi = rcu_dereference_bh_rtnl(nh->nh_info); 428 450 if (nhi->family == AF_INET6) 429 451 return &nhi->fib6_nh; 430 452
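A hedged sketch of where the _bh variant fits (the function is illustrative): callers already running under rcu_read_lock_bh(), e.g. on the transmit path, use it in place of nexthop_fib6_nh().

#include <linux/netdevice.h>
#include <net/nexthop.h>

static int example_nh_oif(struct nexthop *nh)
{
	struct fib6_nh *fib6_nh;
	int ifindex = 0;

	rcu_read_lock_bh();
	fib6_nh = nexthop_fib6_nh_bh(nh);
	if (fib6_nh && fib6_nh->fib_nh_dev)
		ifindex = fib6_nh->fib_nh_dev->ifindex;
	rcu_read_unlock_bh();

	return ifindex;
}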
+10 -2
include/net/red.h
··· 168 168 v->qcount = -1; 169 169 } 170 170 171 - static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log) 171 + static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, 172 + u8 Scell_log, u8 *stab) 172 173 { 173 174 if (fls(qth_min) + Wlog > 32) 174 175 return false; ··· 179 178 return false; 180 179 if (qth_max < qth_min) 181 180 return false; 181 + if (stab) { 182 + int i; 183 + 184 + for (i = 0; i < RED_STAB_SIZE; i++) 185 + if (stab[i] >= 32) 186 + return false; 187 + } 182 188 return true; 183 189 } 184 190 ··· 295 287 int shift; 296 288 297 289 /* 298 - * The problem: ideally, average length queue recalcultion should 290 + * The problem: ideally, average length queue recalculation should 299 291 * be done over constant clock intervals. This is too expensive, so 300 292 * that the calculation is driven by outgoing packets. 301 293 * When the queue is idle we have to model this clock by hand.
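A rough sketch of a caller passing the (optional) Scell table through the extended check, along the lines of what the RED-family qdiscs now do; the wrapper is illustrative:

#include <linux/errno.h>
#include <net/red.h>

static int example_red_validate(u32 qth_min, u32 qth_max, u8 Wlog,
				u8 Scell_log, u8 *stab)
{
	/* stab may be NULL; when supplied, every entry must be a shift < 32 */
	if (!red_check_params(qth_min, qth_max, Wlog, Scell_log, stab))
		return -EINVAL;

	return 0;
}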
+2
include/net/rtnetlink.h
··· 33 33 * 34 34 * @list: Used internally 35 35 * @kind: Identifier 36 + * @netns_refund: Physical device, move to init_net on netns exit 36 37 * @maxtype: Highest device specific netlink attribute number 37 38 * @policy: Netlink policy for device specific attribute validation 38 39 * @validate: Optional validation function for netlink/changelink parameters ··· 65 64 size_t priv_size; 66 65 void (*setup)(struct net_device *dev); 67 66 67 + bool netns_refund; 68 68 unsigned int maxtype; 69 69 const struct nla_policy *policy; 70 70 int (*validate)(struct nlattr *tb[],
+1 -1
include/net/sock.h
··· 936 936 937 937 static inline bool sk_acceptq_is_full(const struct sock *sk) 938 938 { 939 - return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog); 939 + return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog); 940 940 } 941 941 942 942 /*
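A worked example of the off-by-one this closes (numbers illustrative):

/*
 * With sk_max_ack_backlog == 1 and one request already queued:
 *   old test: 1 > 1  -> false, so a second request could still be queued
 *   new test: 1 >= 1 -> true, the queue is reported full at the
 *             configured backlog
 */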
+1
include/scsi/scsi_transport_iscsi.h
··· 193 193 ISCSI_CONN_UP = 0, 194 194 ISCSI_CONN_DOWN, 195 195 ISCSI_CONN_FAILED, 196 + ISCSI_CONN_BOUND, 196 197 }; 197 198 198 199 struct iscsi_cls_conn {
+2 -26
include/uapi/linux/blkpg.h
··· 2 2 #ifndef _UAPI__LINUX_BLKPG_H 3 3 #define _UAPI__LINUX_BLKPG_H 4 4 5 - /* 6 - * Partition table and disk geometry handling 7 - * 8 - * A single ioctl with lots of subfunctions: 9 - * 10 - * Device number stuff: 11 - * get_whole_disk() (given the device number of a partition, 12 - * find the device number of the encompassing disk) 13 - * get_all_partitions() (given the device number of a disk, return the 14 - * device numbers of all its known partitions) 15 - * 16 - * Partition stuff: 17 - * add_partition() 18 - * delete_partition() 19 - * test_partition_in_use() (also for test_disk_in_use) 20 - * 21 - * Geometry stuff: 22 - * get_geometry() 23 - * set_geometry() 24 - * get_bios_drivedata() 25 - * 26 - * For today, only the partition stuff - aeb, 990515 27 - */ 28 5 #include <linux/compiler.h> 29 6 #include <linux/ioctl.h> 30 7 ··· 29 52 long long start; /* starting offset in bytes */ 30 53 long long length; /* length in bytes */ 31 54 int pno; /* partition number */ 32 - char devname[BLKPG_DEVNAMELTH]; /* partition name, like sda5 or c0d1p2, 33 - to be used in kernel messages */ 34 - char volname[BLKPG_VOLNAMELTH]; /* volume label */ 55 + char devname[BLKPG_DEVNAMELTH]; /* unused / ignored */ 56 + char volname[BLKPG_VOLNAMELTH]; /* unused / ignored */ 35 57 }; 36 58 37 59 #endif /* _UAPI__LINUX_BLKPG_H */
+11 -5
include/uapi/linux/bpf.h
··· 3850 3850 * 3851 3851 * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags) 3852 3852 * Description 3853 - * Check ctx packet size against exceeding MTU of net device (based 3853 + * Check packet size against exceeding MTU of net device (based 3854 3854 * on *ifindex*). This helper will likely be used in combination 3855 3855 * with helpers that adjust/change the packet size. 3856 3856 * ··· 3866 3866 * Specifying *ifindex* zero means the MTU check is performed 3867 3867 * against the current net device. This is practical if this isn't 3868 3868 * used prior to redirect. 3869 + * 3870 + * On input *mtu_len* must be a valid pointer, else verifier will 3871 + * reject BPF program. If the value *mtu_len* is initialized to 3872 + * zero then the ctx packet size is use. When value *mtu_len* is 3873 + * provided as input this specify the L3 length that the MTU check 3874 + * is done against. Remember XDP and TC length operate at L2, but 3875 + * this value is L3 as this correlate to MTU and IP-header tot_len 3876 + * values which are L3 (similar behavior as bpf_fib_lookup). 3869 3877 * 3870 3878 * The Linux kernel route table can configure MTUs on a more 3871 3879 * specific per route level, which is not provided by this helper. ··· 3899 3891 * 3900 3892 * On return *mtu_len* pointer contains the MTU value of the net 3901 3893 * device. Remember the net device configured MTU is the L3 size, 3902 - * which is returned here and XDP and TX length operate at L2. 3894 + * which is returned here and XDP and TC length operate at L2. 3903 3895 * Helper take this into account for you, but remember when using 3904 - * MTU value in your BPF-code. On input *mtu_len* must be a valid 3905 - * pointer and be initialized (to zero), else verifier will reject 3906 - * BPF program. 3896 + * MTU value in your BPF-code. 3907 3897 * 3908 3898 * Return 3909 3899 * * 0 on success, and populate MTU value in *mtu_len* pointer.
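A hedged sketch of the documented calling convention from a TC program (section name, return handling and values are illustrative, built with clang/libbpf):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int mtu_check_example(struct __sk_buff *ctx)
{
	__u32 mtu_len = 0;	/* 0: check the ctx packet size itself */
	int ret;

	/* ifindex 0 checks against the current net device; on return
	 * mtu_len holds that device's (L3) MTU
	 */
	ret = bpf_check_mtu(ctx, 0, &mtu_len, 0, 0);
	if (ret)
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";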
+1 -4
include/uapi/linux/psample.h
··· 3 3 #define __UAPI_PSAMPLE_H 4 4 5 5 enum { 6 - /* sampled packet metadata */ 7 6 PSAMPLE_ATTR_IIFINDEX, 8 7 PSAMPLE_ATTR_OIFINDEX, 9 8 PSAMPLE_ATTR_ORIGSIZE, ··· 10 11 PSAMPLE_ATTR_GROUP_SEQ, 11 12 PSAMPLE_ATTR_SAMPLE_RATE, 12 13 PSAMPLE_ATTR_DATA, 13 - PSAMPLE_ATTR_TUNNEL, 14 - 15 - /* commands attributes */ 16 14 PSAMPLE_ATTR_GROUP_REFCOUNT, 15 + PSAMPLE_ATTR_TUNNEL, 17 16 18 17 __PSAMPLE_ATTR_MAX 19 18 };
+1 -1
kernel/bpf/bpf_inode_storage.c
··· 109 109 fd = *(int *)key; 110 110 f = fget_raw(fd); 111 111 if (!f) 112 - return NULL; 112 + return ERR_PTR(-EBADF); 113 113 114 114 sdata = inode_storage_lookup(f->f_inode, map, true); 115 115 fput(f);
+1 -1
kernel/bpf/bpf_struct_ops.c
··· 430 430 431 431 tprogs[BPF_TRAMP_FENTRY].progs[0] = prog; 432 432 tprogs[BPF_TRAMP_FENTRY].nr_progs = 1; 433 - err = arch_prepare_bpf_trampoline(image, 433 + err = arch_prepare_bpf_trampoline(NULL, image, 434 434 st_map->image + PAGE_SIZE, 435 435 &st_ops->func_models[i], 0, 436 436 tprogs, NULL);
+2 -2
kernel/bpf/core.c
··· 827 827 } 828 828 pure_initcall(bpf_jit_charge_init); 829 829 830 - static int bpf_jit_charge_modmem(u32 pages) 830 + int bpf_jit_charge_modmem(u32 pages) 831 831 { 832 832 if (atomic_long_add_return(pages, &bpf_jit_current) > 833 833 (bpf_jit_limit >> PAGE_SHIFT)) { ··· 840 840 return 0; 841 841 } 842 842 843 - static void bpf_jit_uncharge_modmem(u32 pages) 843 + void bpf_jit_uncharge_modmem(u32 pages) 844 844 { 845 845 atomic_long_sub(pages, &bpf_jit_current); 846 846 }
+15 -4
kernel/bpf/preload/bpf_preload_kern.c
··· 60 60 &magic, sizeof(magic), &pos); 61 61 if (n != sizeof(magic)) 62 62 return -EPIPE; 63 + 63 64 tgid = umd_ops.info.tgid; 64 - wait_event(tgid->wait_pidfd, thread_group_exited(tgid)); 65 - umd_ops.info.tgid = NULL; 65 + if (tgid) { 66 + wait_event(tgid->wait_pidfd, thread_group_exited(tgid)); 67 + umd_cleanup_helper(&umd_ops.info); 68 + } 66 69 return 0; 67 70 } 68 71 ··· 83 80 84 81 static void __exit fini_umd(void) 85 82 { 83 + struct pid *tgid; 84 + 86 85 bpf_preload_ops = NULL; 86 + 87 87 /* kill UMD in case it's still there due to earlier error */ 88 - kill_pid(umd_ops.info.tgid, SIGKILL, 1); 89 - umd_ops.info.tgid = NULL; 88 + tgid = umd_ops.info.tgid; 89 + if (tgid) { 90 + kill_pid(tgid, SIGKILL, 1); 91 + 92 + wait_event(tgid->wait_pidfd, thread_group_exited(tgid)); 93 + umd_cleanup_helper(&umd_ops.info); 94 + } 90 95 umd_unload_blob(&umd_ops.info); 91 96 } 92 97 late_initcall(load_umd);
+5
kernel/bpf/syscall.c
··· 854 854 err = PTR_ERR(btf); 855 855 goto free_map; 856 856 } 857 + if (btf_is_kernel(btf)) { 858 + btf_put(btf); 859 + err = -EACCES; 860 + goto free_map; 861 + } 857 862 map->btf = btf; 858 863 859 864 if (attr->btf_value_type_id) {
+168 -50
kernel/bpf/trampoline.c
··· 57 57 PAGE_SIZE, true, ksym->name); 58 58 } 59 59 60 - static void bpf_trampoline_ksym_add(struct bpf_trampoline *tr) 61 - { 62 - struct bpf_ksym *ksym = &tr->ksym; 63 - 64 - snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", tr->key); 65 - bpf_image_ksym_add(tr->image, ksym); 66 - } 67 - 68 60 static struct bpf_trampoline *bpf_trampoline_lookup(u64 key) 69 61 { 70 62 struct bpf_trampoline *tr; 71 63 struct hlist_head *head; 72 - void *image; 73 64 int i; 74 65 75 66 mutex_lock(&trampoline_mutex); ··· 75 84 if (!tr) 76 85 goto out; 77 86 78 - /* is_root was checked earlier. No need for bpf_jit_charge_modmem() */ 79 - image = bpf_jit_alloc_exec_page(); 80 - if (!image) { 81 - kfree(tr); 82 - tr = NULL; 83 - goto out; 84 - } 85 - 86 87 tr->key = key; 87 88 INIT_HLIST_NODE(&tr->hlist); 88 89 hlist_add_head(&tr->hlist, head); ··· 82 99 mutex_init(&tr->mutex); 83 100 for (i = 0; i < BPF_TRAMP_MAX; i++) 84 101 INIT_HLIST_HEAD(&tr->progs_hlist[i]); 85 - tr->image = image; 86 - INIT_LIST_HEAD_RCU(&tr->ksym.lnode); 87 - bpf_trampoline_ksym_add(tr); 88 102 out: 89 103 mutex_unlock(&trampoline_mutex); 90 104 return tr; ··· 165 185 return tprogs; 166 186 } 167 187 188 + static void __bpf_tramp_image_put_deferred(struct work_struct *work) 189 + { 190 + struct bpf_tramp_image *im; 191 + 192 + im = container_of(work, struct bpf_tramp_image, work); 193 + bpf_image_ksym_del(&im->ksym); 194 + bpf_jit_free_exec(im->image); 195 + bpf_jit_uncharge_modmem(1); 196 + percpu_ref_exit(&im->pcref); 197 + kfree_rcu(im, rcu); 198 + } 199 + 200 + /* callback, fexit step 3 or fentry step 2 */ 201 + static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu) 202 + { 203 + struct bpf_tramp_image *im; 204 + 205 + im = container_of(rcu, struct bpf_tramp_image, rcu); 206 + INIT_WORK(&im->work, __bpf_tramp_image_put_deferred); 207 + schedule_work(&im->work); 208 + } 209 + 210 + /* callback, fexit step 2. Called after percpu_ref_kill confirms. */ 211 + static void __bpf_tramp_image_release(struct percpu_ref *pcref) 212 + { 213 + struct bpf_tramp_image *im; 214 + 215 + im = container_of(pcref, struct bpf_tramp_image, pcref); 216 + call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu); 217 + } 218 + 219 + /* callback, fexit or fentry step 1 */ 220 + static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu) 221 + { 222 + struct bpf_tramp_image *im; 223 + 224 + im = container_of(rcu, struct bpf_tramp_image, rcu); 225 + if (im->ip_after_call) 226 + /* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */ 227 + percpu_ref_kill(&im->pcref); 228 + else 229 + /* the case of fentry trampoline */ 230 + call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu); 231 + } 232 + 233 + static void bpf_tramp_image_put(struct bpf_tramp_image *im) 234 + { 235 + /* The trampoline image that calls original function is using: 236 + * rcu_read_lock_trace to protect sleepable bpf progs 237 + * rcu_read_lock to protect normal bpf progs 238 + * percpu_ref to protect trampoline itself 239 + * rcu tasks to protect trampoline asm not covered by percpu_ref 240 + * (which are few asm insns before __bpf_tramp_enter and 241 + * after __bpf_tramp_exit) 242 + * 243 + * The trampoline is unreachable before bpf_tramp_image_put(). 244 + * 245 + * First, patch the trampoline to avoid calling into fexit progs. 246 + * The progs will be freed even if the original function is still 247 + * executing or sleeping. 
248 + * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on 249 + * first few asm instructions to execute and call into 250 + * __bpf_tramp_enter->percpu_ref_get. 251 + * Then use percpu_ref_kill to wait for the trampoline and the original 252 + * function to finish. 253 + * Then use call_rcu_tasks() to make sure few asm insns in 254 + * the trampoline epilogue are done as well. 255 + * 256 + * In !PREEMPT case the task that got interrupted in the first asm 257 + * insns won't go through an RCU quiescent state which the 258 + * percpu_ref_kill will be waiting for. Hence the first 259 + * call_rcu_tasks() is not necessary. 260 + */ 261 + if (im->ip_after_call) { 262 + int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP, 263 + NULL, im->ip_epilogue); 264 + WARN_ON(err); 265 + if (IS_ENABLED(CONFIG_PREEMPTION)) 266 + call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks); 267 + else 268 + percpu_ref_kill(&im->pcref); 269 + return; 270 + } 271 + 272 + /* The trampoline without fexit and fmod_ret progs doesn't call original 273 + * function and doesn't use percpu_ref. 274 + * Use call_rcu_tasks_trace() to wait for sleepable progs to finish. 275 + * Then use call_rcu_tasks() to wait for the rest of trampoline asm 276 + * and normal progs. 277 + */ 278 + call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks); 279 + } 280 + 281 + static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx) 282 + { 283 + struct bpf_tramp_image *im; 284 + struct bpf_ksym *ksym; 285 + void *image; 286 + int err = -ENOMEM; 287 + 288 + im = kzalloc(sizeof(*im), GFP_KERNEL); 289 + if (!im) 290 + goto out; 291 + 292 + err = bpf_jit_charge_modmem(1); 293 + if (err) 294 + goto out_free_im; 295 + 296 + err = -ENOMEM; 297 + im->image = image = bpf_jit_alloc_exec_page(); 298 + if (!image) 299 + goto out_uncharge; 300 + 301 + err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL); 302 + if (err) 303 + goto out_free_image; 304 + 305 + ksym = &im->ksym; 306 + INIT_LIST_HEAD_RCU(&ksym->lnode); 307 + snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx); 308 + bpf_image_ksym_add(image, ksym); 309 + return im; 310 + 311 + out_free_image: 312 + bpf_jit_free_exec(im->image); 313 + out_uncharge: 314 + bpf_jit_uncharge_modmem(1); 315 + out_free_im: 316 + kfree(im); 317 + out: 318 + return ERR_PTR(err); 319 + } 320 + 168 321 static int bpf_trampoline_update(struct bpf_trampoline *tr) 169 322 { 170 - void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2; 171 - void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2; 323 + struct bpf_tramp_image *im; 172 324 struct bpf_tramp_progs *tprogs; 173 325 u32 flags = BPF_TRAMP_F_RESTORE_REGS; 174 326 int err, total; ··· 310 198 return PTR_ERR(tprogs); 311 199 312 200 if (total == 0) { 313 - err = unregister_fentry(tr, old_image); 201 + err = unregister_fentry(tr, tr->cur_image->image); 202 + bpf_tramp_image_put(tr->cur_image); 203 + tr->cur_image = NULL; 314 204 tr->selector = 0; 205 + goto out; 206 + } 207 + 208 + im = bpf_tramp_image_alloc(tr->key, tr->selector); 209 + if (IS_ERR(im)) { 210 + err = PTR_ERR(im); 315 211 goto out; 316 212 } 317 213 ··· 327 207 tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs) 328 208 flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME; 329 209 330 - /* Though the second half of trampoline page is unused a task could be 331 - * preempted in the middle of the first half of trampoline and two 332 - * updates to trampoline would change the code from underneath the 333 - * 
preempted task. Hence wait for tasks to voluntarily schedule or go 334 - * to userspace. 335 - * The same trampoline can hold both sleepable and non-sleepable progs. 336 - * synchronize_rcu_tasks_trace() is needed to make sure all sleepable 337 - * programs finish executing. 338 - * Wait for these two grace periods together. 339 - */ 340 - synchronize_rcu_mult(call_rcu_tasks, call_rcu_tasks_trace); 341 - 342 - err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2, 210 + err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE, 343 211 &tr->func.model, flags, tprogs, 344 212 tr->func.addr); 345 213 if (err < 0) 346 214 goto out; 347 215 348 - if (tr->selector) 216 + WARN_ON(tr->cur_image && tr->selector == 0); 217 + WARN_ON(!tr->cur_image && tr->selector); 218 + if (tr->cur_image) 349 219 /* progs already running at this address */ 350 - err = modify_fentry(tr, old_image, new_image); 220 + err = modify_fentry(tr, tr->cur_image->image, im->image); 351 221 else 352 222 /* first time registering */ 353 - err = register_fentry(tr, new_image); 223 + err = register_fentry(tr, im->image); 354 224 if (err) 355 225 goto out; 226 + if (tr->cur_image) 227 + bpf_tramp_image_put(tr->cur_image); 228 + tr->cur_image = im; 356 229 tr->selector++; 357 230 out: 358 231 kfree(tprogs); ··· 477 364 goto out; 478 365 if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT]))) 479 366 goto out; 480 - bpf_image_ksym_del(&tr->ksym); 481 - /* This code will be executed when all bpf progs (both sleepable and 482 - * non-sleepable) went through 483 - * bpf_prog_put()->call_rcu[_tasks_trace]()->bpf_prog_free_deferred(). 484 - * Hence no need for another synchronize_rcu_tasks_trace() here, 485 - * but synchronize_rcu_tasks() is still needed, since trampoline 486 - * may not have had any sleepable programs and we need to wait 487 - * for tasks to get out of trampoline code before freeing it. 367 + /* This code will be executed even when the last bpf_tramp_image 368 + * is alive. All progs are detached from the trampoline and the 369 + * trampoline image is patched with jmp into epilogue to skip 370 + * fexit progs. The fentry-only trampoline will be freed via 371 + * multiple rcu callbacks. 488 372 */ 489 - synchronize_rcu_tasks(); 490 - bpf_jit_free_exec(tr->image); 491 373 hlist_del(&tr->hlist); 492 374 kfree(tr); 493 375 out: ··· 586 478 rcu_read_unlock_trace(); 587 479 } 588 480 481 + void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr) 482 + { 483 + percpu_ref_get(&tr->pcref); 484 + } 485 + 486 + void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr) 487 + { 488 + percpu_ref_put(&tr->pcref); 489 + } 490 + 589 491 int __weak 590 - arch_prepare_bpf_trampoline(void *image, void *image_end, 492 + arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end, 591 493 const struct btf_func_model *m, u32 flags, 592 494 struct bpf_tramp_progs *tprogs, 593 495 void *orig_call)
+25 -12
kernel/bpf/verifier.c
··· 5861 5861 { 5862 5862 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || 5863 5863 (opcode == BPF_SUB && !off_is_neg); 5864 - u32 off; 5864 + u32 off, max; 5865 5865 5866 5866 switch (ptr_reg->type) { 5867 5867 case PTR_TO_STACK: 5868 + /* Offset 0 is out-of-bounds, but acceptable start for the 5869 + * left direction, see BPF_REG_FP. 5870 + */ 5871 + max = MAX_BPF_STACK + mask_to_left; 5868 5872 /* Indirect variable offset stack access is prohibited in 5869 5873 * unprivileged mode so it's not handled here. 5870 5874 */ ··· 5876 5872 if (mask_to_left) 5877 5873 *ptr_limit = MAX_BPF_STACK + off; 5878 5874 else 5879 - *ptr_limit = -off; 5880 - return 0; 5875 + *ptr_limit = -off - 1; 5876 + return *ptr_limit >= max ? -ERANGE : 0; 5881 5877 case PTR_TO_MAP_VALUE: 5878 + max = ptr_reg->map_ptr->value_size; 5882 5879 if (mask_to_left) { 5883 5880 *ptr_limit = ptr_reg->umax_value + ptr_reg->off; 5884 5881 } else { 5885 5882 off = ptr_reg->smin_value + ptr_reg->off; 5886 - *ptr_limit = ptr_reg->map_ptr->value_size - off; 5883 + *ptr_limit = ptr_reg->map_ptr->value_size - off - 1; 5887 5884 } 5888 - return 0; 5885 + return *ptr_limit >= max ? -ERANGE : 0; 5889 5886 default: 5890 5887 return -EINVAL; 5891 5888 } ··· 5939 5934 u32 alu_state, alu_limit; 5940 5935 struct bpf_reg_state tmp; 5941 5936 bool ret; 5937 + int err; 5942 5938 5943 5939 if (can_skip_alu_sanitation(env, insn)) 5944 5940 return 0; ··· 5955 5949 alu_state |= ptr_is_dst_reg ? 5956 5950 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; 5957 5951 5958 - if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) 5959 - return 0; 5960 - if (update_alu_sanitation_state(aux, alu_state, alu_limit)) 5961 - return -EACCES; 5952 + err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg); 5953 + if (err < 0) 5954 + return err; 5955 + 5956 + err = update_alu_sanitation_state(aux, alu_state, alu_limit); 5957 + if (err < 0) 5958 + return err; 5962 5959 do_sim: 5963 5960 /* Simulate and find potential out-of-bounds access under 5964 5961 * speculative execution from truncation as a result of ··· 6112 6103 case BPF_ADD: 6113 6104 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); 6114 6105 if (ret < 0) { 6115 - verbose(env, "R%d tried to add from different maps or paths\n", dst); 6106 + verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst); 6116 6107 return ret; 6117 6108 } 6118 6109 /* We can take a fixed offset as long as it doesn't overflow ··· 6167 6158 case BPF_SUB: 6168 6159 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); 6169 6160 if (ret < 0) { 6170 - verbose(env, "R%d tried to sub from different maps or paths\n", dst); 6161 + verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst); 6171 6162 return ret; 6172 6163 } 6173 6164 if (dst_reg == off_reg) { ··· 9065 9056 btf = btf_get_by_fd(attr->prog_btf_fd); 9066 9057 if (IS_ERR(btf)) 9067 9058 return PTR_ERR(btf); 9059 + if (btf_is_kernel(btf)) { 9060 + btf_put(btf); 9061 + return -EACCES; 9062 + } 9068 9063 env->prog->aux->btf = btf; 9069 9064 9070 9065 err = check_btf_func(env, attr, uattr); ··· 11673 11660 off_reg = issrc ? 
insn->src_reg : insn->dst_reg; 11674 11661 if (isneg) 11675 11662 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 11676 - *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); 11663 + *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 11677 11664 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); 11678 11665 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); 11679 11666 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+8 -8
kernel/fork.c
··· 1948 1948 p = dup_task_struct(current, node); 1949 1949 if (!p) 1950 1950 goto fork_out; 1951 - if (args->io_thread) 1951 + if (args->io_thread) { 1952 + /* 1953 + * Mark us an IO worker, and block any signal that isn't 1954 + * fatal or STOP 1955 + */ 1952 1956 p->flags |= PF_IO_WORKER; 1957 + siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP)); 1958 + } 1953 1959 1954 1960 /* 1955 1961 * This _must_ happen before we call free_task(), i.e. before we jump ··· 2444 2438 .stack_size = (unsigned long)arg, 2445 2439 .io_thread = 1, 2446 2440 }; 2447 - struct task_struct *tsk; 2448 2441 2449 - tsk = copy_process(NULL, 0, node, &args); 2450 - if (!IS_ERR(tsk)) { 2451 - sigfillset(&tsk->blocked); 2452 - sigdelsetmask(&tsk->blocked, sigmask(SIGKILL)); 2453 - } 2454 - return tsk; 2442 + return copy_process(NULL, 0, node, &args); 2455 2443 } 2456 2444 2457 2445 /*
+1 -1
kernel/freezer.c
··· 134 134 return false; 135 135 } 136 136 137 - if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) 137 + if (!(p->flags & PF_KTHREAD)) 138 138 fake_signal_wake_up(p); 139 139 else 140 140 wake_up_state(p, TASK_INTERRUPTIBLE);
+69
kernel/gcov/clang.c
··· 75 75 76 76 u32 num_counters; 77 77 u64 *counters; 78 + #if CONFIG_CLANG_VERSION < 110000 78 79 const char *function_name; 80 + #endif 79 81 }; 80 82 81 83 static struct gcov_info *current_info; ··· 107 105 } 108 106 EXPORT_SYMBOL(llvm_gcov_init); 109 107 108 + #if CONFIG_CLANG_VERSION < 110000 110 109 void llvm_gcda_start_file(const char *orig_filename, const char version[4], 111 110 u32 checksum) 112 111 { ··· 116 113 current_info->checksum = checksum; 117 114 } 118 115 EXPORT_SYMBOL(llvm_gcda_start_file); 116 + #else 117 + void llvm_gcda_start_file(const char *orig_filename, u32 version, u32 checksum) 118 + { 119 + current_info->filename = orig_filename; 120 + current_info->version = version; 121 + current_info->checksum = checksum; 122 + } 123 + EXPORT_SYMBOL(llvm_gcda_start_file); 124 + #endif 119 125 126 + #if CONFIG_CLANG_VERSION < 110000 120 127 void llvm_gcda_emit_function(u32 ident, const char *function_name, 121 128 u32 func_checksum, u8 use_extra_checksum, u32 cfg_checksum) 122 129 { ··· 146 133 list_add_tail(&info->head, &current_info->functions); 147 134 } 148 135 EXPORT_SYMBOL(llvm_gcda_emit_function); 136 + #else 137 + void llvm_gcda_emit_function(u32 ident, u32 func_checksum, 138 + u8 use_extra_checksum, u32 cfg_checksum) 139 + { 140 + struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL); 141 + 142 + if (!info) 143 + return; 144 + 145 + INIT_LIST_HEAD(&info->head); 146 + info->ident = ident; 147 + info->checksum = func_checksum; 148 + info->use_extra_checksum = use_extra_checksum; 149 + info->cfg_checksum = cfg_checksum; 150 + list_add_tail(&info->head, &current_info->functions); 151 + } 152 + EXPORT_SYMBOL(llvm_gcda_emit_function); 153 + #endif 149 154 150 155 void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters) 151 156 { ··· 326 295 } 327 296 } 328 297 298 + #if CONFIG_CLANG_VERSION < 110000 329 299 static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn) 330 300 { 331 301 size_t cv_size; /* counter values size */ ··· 354 322 kfree(fn_dup); 355 323 return NULL; 356 324 } 325 + #else 326 + static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn) 327 + { 328 + size_t cv_size; /* counter values size */ 329 + struct gcov_fn_info *fn_dup = kmemdup(fn, sizeof(*fn), 330 + GFP_KERNEL); 331 + if (!fn_dup) 332 + return NULL; 333 + INIT_LIST_HEAD(&fn_dup->head); 334 + 335 + cv_size = fn->num_counters * sizeof(fn->counters[0]); 336 + fn_dup->counters = vmalloc(cv_size); 337 + if (!fn_dup->counters) { 338 + kfree(fn_dup); 339 + return NULL; 340 + } 341 + 342 + memcpy(fn_dup->counters, fn->counters, cv_size); 343 + 344 + return fn_dup; 345 + } 346 + #endif 357 347 358 348 /** 359 349 * gcov_info_dup - duplicate profiling data set ··· 416 362 * gcov_info_free - release memory for profiling data set duplicate 417 363 * @info: profiling data set duplicate to free 418 364 */ 365 + #if CONFIG_CLANG_VERSION < 110000 419 366 void gcov_info_free(struct gcov_info *info) 420 367 { 421 368 struct gcov_fn_info *fn, *tmp; ··· 430 375 kfree(info->filename); 431 376 kfree(info); 432 377 } 378 + #else 379 + void gcov_info_free(struct gcov_info *info) 380 + { 381 + struct gcov_fn_info *fn, *tmp; 382 + 383 + list_for_each_entry_safe(fn, tmp, &info->functions, head) { 384 + vfree(fn->counters); 385 + list_del(&fn->head); 386 + kfree(fn); 387 + } 388 + kfree(info->filename); 389 + kfree(info); 390 + } 391 + #endif 433 392 434 393 #define ITER_STRIDE PAGE_SIZE 435 394
+1 -1
kernel/power/energy_model.c
··· 98 98 99 99 return 0; 100 100 } 101 - core_initcall(em_debug_init); 101 + fs_initcall(em_debug_init); 102 102 #else /* CONFIG_DEBUG_FS */ 103 103 static void em_debug_create_pd(struct device *dev) {} 104 104 static void em_debug_remove_pd(struct device *dev) {}
+1 -1
kernel/ptrace.c
··· 375 375 audit_ptrace(task); 376 376 377 377 retval = -EPERM; 378 - if (unlikely(task->flags & (PF_KTHREAD | PF_IO_WORKER))) 378 + if (unlikely(task->flags & PF_KTHREAD)) 379 379 goto out; 380 380 if (same_thread_group(task, current)) 381 381 goto out;
+12 -8
kernel/signal.c
··· 91 91 return true; 92 92 93 93 /* Only allow kernel generated signals to this kthread */ 94 - if (unlikely((t->flags & (PF_KTHREAD | PF_IO_WORKER)) && 94 + if (unlikely((t->flags & PF_KTHREAD) && 95 95 (handler == SIG_KTHREAD_KERNEL) && !force)) 96 96 return true; 97 97 ··· 288 288 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING)); 289 289 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK)); 290 290 291 - if (unlikely(fatal_signal_pending(task) || 292 - (task->flags & (PF_EXITING | PF_IO_WORKER)))) 291 + if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING))) 293 292 return false; 294 293 295 294 if (mask & JOBCTL_STOP_SIGMASK) ··· 833 834 834 835 if (!valid_signal(sig)) 835 836 return -EINVAL; 836 - /* PF_IO_WORKER threads don't take any signals */ 837 - if (t->flags & PF_IO_WORKER) 838 - return -ESRCH; 839 837 840 838 if (!si_fromuser(info)) 841 839 return 0; ··· 1096 1100 /* 1097 1101 * Skip useless siginfo allocation for SIGKILL and kernel threads. 1098 1102 */ 1099 - if ((sig == SIGKILL) || (t->flags & (PF_KTHREAD | PF_IO_WORKER))) 1103 + if ((sig == SIGKILL) || (t->flags & PF_KTHREAD)) 1100 1104 goto out_set; 1101 1105 1102 1106 /* ··· 2768 2772 } 2769 2773 2770 2774 /* 2775 + * PF_IO_WORKER threads will catch and exit on fatal signals 2776 + * themselves. They have cleanup that must be performed, so 2777 + * we cannot call do_exit() on their behalf. 2778 + */ 2779 + if (current->flags & PF_IO_WORKER) 2780 + goto out; 2781 + 2782 + /* 2771 2783 * Death signals, no core dump. 2772 2784 */ 2773 2785 do_group_exit(ksig->info.si_signo); 2774 2786 /* NOTREACHED */ 2775 2787 } 2776 2788 spin_unlock_irq(&sighand->siglock); 2777 - 2789 + out: 2778 2790 ksig->sig = signr; 2779 2791 2780 2792 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
+44 -8
kernel/trace/ftrace.c
··· 3231 3231 pg = start_pg; 3232 3232 while (pg) { 3233 3233 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 3234 - free_pages((unsigned long)pg->records, order); 3234 + if (order >= 0) 3235 + free_pages((unsigned long)pg->records, order); 3235 3236 start_pg = pg->next; 3236 3237 kfree(pg); 3237 3238 pg = start_pg; ··· 5046 5045 return NULL; 5047 5046 } 5048 5047 5048 + static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr) 5049 + { 5050 + struct ftrace_direct_func *direct; 5051 + 5052 + direct = kmalloc(sizeof(*direct), GFP_KERNEL); 5053 + if (!direct) 5054 + return NULL; 5055 + direct->addr = addr; 5056 + direct->count = 0; 5057 + list_add_rcu(&direct->next, &ftrace_direct_funcs); 5058 + ftrace_direct_func_count++; 5059 + return direct; 5060 + } 5061 + 5049 5062 /** 5050 5063 * register_ftrace_direct - Call a custom trampoline directly 5051 5064 * @ip: The address of the nop at the beginning of a function ··· 5135 5120 5136 5121 direct = ftrace_find_direct_func(addr); 5137 5122 if (!direct) { 5138 - direct = kmalloc(sizeof(*direct), GFP_KERNEL); 5123 + direct = ftrace_alloc_direct_func(addr); 5139 5124 if (!direct) { 5140 5125 kfree(entry); 5141 5126 goto out_unlock; 5142 5127 } 5143 - direct->addr = addr; 5144 - direct->count = 0; 5145 - list_add_rcu(&direct->next, &ftrace_direct_funcs); 5146 - ftrace_direct_func_count++; 5147 5128 } 5148 5129 5149 5130 entry->ip = ip; ··· 5340 5329 int modify_ftrace_direct(unsigned long ip, 5341 5330 unsigned long old_addr, unsigned long new_addr) 5342 5331 { 5332 + struct ftrace_direct_func *direct, *new_direct = NULL; 5343 5333 struct ftrace_func_entry *entry; 5344 5334 struct dyn_ftrace *rec; 5345 5335 int ret = -ENODEV; ··· 5356 5344 if (entry->direct != old_addr) 5357 5345 goto out_unlock; 5358 5346 5347 + direct = ftrace_find_direct_func(old_addr); 5348 + if (WARN_ON(!direct)) 5349 + goto out_unlock; 5350 + if (direct->count > 1) { 5351 + ret = -ENOMEM; 5352 + new_direct = ftrace_alloc_direct_func(new_addr); 5353 + if (!new_direct) 5354 + goto out_unlock; 5355 + direct->count--; 5356 + new_direct->count++; 5357 + } else { 5358 + direct->addr = new_addr; 5359 + } 5360 + 5359 5361 /* 5360 5362 * If there's no other ftrace callback on the rec->ip location, 5361 5363 * then it can be changed directly by the architecture. ··· 5381 5355 } else { 5382 5356 entry->direct = new_addr; 5383 5357 ret = 0; 5358 + } 5359 + 5360 + if (unlikely(ret && new_direct)) { 5361 + direct->count++; 5362 + list_del_rcu(&new_direct->next); 5363 + synchronize_rcu_tasks(); 5364 + kfree(new_direct); 5365 + ftrace_direct_func_count--; 5384 5366 } 5385 5367 5386 5368 out_unlock: ··· 6452 6418 clear_mod_from_hashes(pg); 6453 6419 6454 6420 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 6455 - free_pages((unsigned long)pg->records, order); 6421 + if (order >= 0) 6422 + free_pages((unsigned long)pg->records, order); 6456 6423 tmp_page = pg->next; 6457 6424 kfree(pg); 6458 6425 ftrace_number_of_pages -= 1 << order; ··· 6813 6778 if (!pg->index) { 6814 6779 *last_pg = pg->next; 6815 6780 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 6816 - free_pages((unsigned long)pg->records, order); 6781 + if (order >= 0) 6782 + free_pages((unsigned long)pg->records, order); 6817 6783 ftrace_number_of_pages -= 1 << order; 6818 6784 ftrace_number_of_groups--; 6819 6785 kfree(pg);
+2 -1
kernel/trace/trace.c
··· 2984 2984 2985 2985 size = nr_entries * sizeof(unsigned long); 2986 2986 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, 2987 - sizeof(*entry) + size, trace_ctx); 2987 + (sizeof(*entry) - sizeof(entry->caller)) + size, 2988 + trace_ctx); 2988 2989 if (!event) 2989 2990 goto out; 2990 2991 entry = ring_buffer_event_data(event);
+15 -6
kernel/usermode_driver.c
··· 139 139 struct umd_info *umd_info = info->data; 140 140 141 141 /* cleanup if umh_setup() was successful but exec failed */ 142 - if (info->retval) { 143 - fput(umd_info->pipe_to_umh); 144 - fput(umd_info->pipe_from_umh); 145 - put_pid(umd_info->tgid); 146 - umd_info->tgid = NULL; 147 - } 142 + if (info->retval) 143 + umd_cleanup_helper(umd_info); 148 144 } 145 + 146 + /** 147 + * umd_cleanup_helper - release the resources which were allocated in umd_setup 148 + * @info: information about usermode driver 149 + */ 150 + void umd_cleanup_helper(struct umd_info *info) 151 + { 152 + fput(info->pipe_to_umh); 153 + fput(info->pipe_from_umh); 154 + put_pid(info->tgid); 155 + info->tgid = NULL; 156 + } 157 + EXPORT_SYMBOL_GPL(umd_cleanup_helper); 149 158 150 159 /** 151 160 * fork_usermode_driver - fork a usermode driver
+1
lib/math/div64.c
··· 232 232 233 233 return res + div64_u64(a * b, c); 234 234 } 235 + EXPORT_SYMBOL(mul_u64_u64_div_u64); 235 236 #endif
+14 -12
lib/test_xarray.c
··· 1530 1530 1531 1531 #ifdef CONFIG_XARRAY_MULTI 1532 1532 static void check_split_1(struct xarray *xa, unsigned long index, 1533 - unsigned int order) 1533 + unsigned int order, unsigned int new_order) 1534 1534 { 1535 - XA_STATE(xas, xa, index); 1536 - void *entry; 1537 - unsigned int i = 0; 1535 + XA_STATE_ORDER(xas, xa, index, new_order); 1536 + unsigned int i; 1538 1537 1539 1538 xa_store_order(xa, index, order, xa, GFP_KERNEL); 1540 1539 1541 1540 xas_split_alloc(&xas, xa, order, GFP_KERNEL); 1542 1541 xas_lock(&xas); 1543 1542 xas_split(&xas, xa, order); 1543 + for (i = 0; i < (1 << order); i += (1 << new_order)) 1544 + __xa_store(xa, index + i, xa_mk_index(index + i), 0); 1544 1545 xas_unlock(&xas); 1545 1546 1546 - xa_for_each(xa, index, entry) { 1547 - XA_BUG_ON(xa, entry != xa); 1548 - i++; 1547 + for (i = 0; i < (1 << order); i++) { 1548 + unsigned int val = index + (i & ~((1 << new_order) - 1)); 1549 + XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val)); 1549 1550 } 1550 - XA_BUG_ON(xa, i != 1 << order); 1551 1551 1552 1552 xa_set_mark(xa, index, XA_MARK_0); 1553 1553 XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); ··· 1557 1557 1558 1558 static noinline void check_split(struct xarray *xa) 1559 1559 { 1560 - unsigned int order; 1560 + unsigned int order, new_order; 1561 1561 1562 1562 XA_BUG_ON(xa, !xa_empty(xa)); 1563 1563 1564 1564 for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) { 1565 - check_split_1(xa, 0, order); 1566 - check_split_1(xa, 1UL << order, order); 1567 - check_split_1(xa, 3UL << order, order); 1565 + for (new_order = 0; new_order < order; new_order++) { 1566 + check_split_1(xa, 0, order, new_order); 1567 + check_split_1(xa, 1UL << order, order, new_order); 1568 + check_split_1(xa, 3UL << order, order, new_order); 1569 + } 1568 1570 } 1569 1571 } 1570 1572 #else
+6 -5
lib/xarray.c
··· 987 987 * xas_split_alloc() - Allocate memory for splitting an entry. 988 988 * @xas: XArray operation state. 989 989 * @entry: New entry which will be stored in the array. 990 - * @order: New entry order. 990 + * @order: Current entry order. 991 991 * @gfp: Memory allocation flags. 992 992 * 993 993 * This function should be called before calling xas_split(). ··· 1011 1011 1012 1012 do { 1013 1013 unsigned int i; 1014 - void *sibling; 1014 + void *sibling = NULL; 1015 1015 struct xa_node *node; 1016 1016 1017 1017 node = kmem_cache_alloc(radix_tree_node_cachep, gfp); ··· 1021 1021 for (i = 0; i < XA_CHUNK_SIZE; i++) { 1022 1022 if ((i & mask) == 0) { 1023 1023 RCU_INIT_POINTER(node->slots[i], entry); 1024 - sibling = xa_mk_sibling(0); 1024 + sibling = xa_mk_sibling(i); 1025 1025 } else { 1026 1026 RCU_INIT_POINTER(node->slots[i], sibling); 1027 1027 } ··· 1041 1041 * xas_split() - Split a multi-index entry into smaller entries. 1042 1042 * @xas: XArray operation state. 1043 1043 * @entry: New entry to store in the array. 1044 - * @order: New entry order. 1044 + * @order: Current entry order. 1045 1045 * 1046 - * The value in the entry is copied to all the replacement entries. 1046 + * The size of the new entries is set in @xas. The value in @entry is 1047 + * copied to all the replacement entries. 1047 1048 * 1048 1049 * Context: Any context. The caller should hold the xa_lock. 1049 1050 */
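With the corrected kernel-doc in mind (order is the current order of the entry; the new order comes from the xa_state), a usage sketch along the lines of the test above (function name is illustrative):

#include <linux/xarray.h>

/* Sketch: split one order-9 entry at @index into order-0 entries. */
static int example_split(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE_ORDER(xas, xa, index, 0);	/* 0 = order of the new entries */

	xas_split_alloc(&xas, entry, 9, GFP_KERNEL);	/* 9 = current order */
	if (xas_error(&xas))
		return xas_error(&xas);

	xas_lock(&xas);
	xas_split(&xas, entry, 9);
	xas_unlock(&xas);

	return 0;
}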
+2 -2
mm/highmem.c
··· 618 618 int idx; 619 619 620 620 /* With debug all even slots are unmapped and act as guard */ 621 - if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) { 621 + if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) { 622 622 WARN_ON_ONCE(!pte_none(pteval)); 623 623 continue; 624 624 } ··· 654 654 int idx; 655 655 656 656 /* With debug all even slots are unmapped and act as guard */ 657 - if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) { 657 + if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) { 658 658 WARN_ON_ONCE(!pte_none(pteval)); 659 659 continue; 660 660 }
+37 -4
mm/hugetlb.c
··· 280 280 nrg->reservation_counter = 281 281 &h_cg->rsvd_hugepage[hstate_index(h)]; 282 282 nrg->css = &h_cg->css; 283 + /* 284 + * The caller will hold exactly one h_cg->css reference for the 285 + * whole contiguous reservation region. But this area might be 286 + * scattered when there are already some file_regions reside in 287 + * it. As a result, many file_regions may share only one css 288 + * reference. In order to ensure that one file_region must hold 289 + * exactly one h_cg->css reference, we should do css_get for 290 + * each file_region and leave the reference held by caller 291 + * untouched. 292 + */ 293 + css_get(&h_cg->css); 283 294 if (!resv->pages_per_hpage) 284 295 resv->pages_per_hpage = pages_per_huge_page(h); 285 296 /* pages_per_hpage should be the same for all entries in ··· 301 290 nrg->reservation_counter = NULL; 302 291 nrg->css = NULL; 303 292 } 293 + #endif 294 + } 295 + 296 + static void put_uncharge_info(struct file_region *rg) 297 + { 298 + #ifdef CONFIG_CGROUP_HUGETLB 299 + if (rg->css) 300 + css_put(rg->css); 304 301 #endif 305 302 } 306 303 ··· 335 316 prg->to = rg->to; 336 317 337 318 list_del(&rg->link); 319 + put_uncharge_info(rg); 338 320 kfree(rg); 339 321 340 322 rg = prg; ··· 347 327 nrg->from = rg->from; 348 328 349 329 list_del(&rg->link); 330 + put_uncharge_info(rg); 350 331 kfree(rg); 351 332 } 352 333 } ··· 683 662 684 663 del += t - f; 685 664 hugetlb_cgroup_uncharge_file_region( 686 - resv, rg, t - f); 665 + resv, rg, t - f, false); 687 666 688 667 /* New entry for end of split region */ 689 668 nrg->from = t; ··· 704 683 if (f <= rg->from && t >= rg->to) { /* Remove entire region */ 705 684 del += rg->to - rg->from; 706 685 hugetlb_cgroup_uncharge_file_region(resv, rg, 707 - rg->to - rg->from); 686 + rg->to - rg->from, true); 708 687 list_del(&rg->link); 709 688 kfree(rg); 710 689 continue; ··· 712 691 713 692 if (f <= rg->from) { /* Trim beginning of region */ 714 693 hugetlb_cgroup_uncharge_file_region(resv, rg, 715 - t - rg->from); 694 + t - rg->from, false); 716 695 717 696 del += t - rg->from; 718 697 rg->from = t; 719 698 } else { /* Trim end of region */ 720 699 hugetlb_cgroup_uncharge_file_region(resv, rg, 721 - rg->to - f); 700 + rg->to - f, false); 722 701 723 702 del += rg->to - f; 724 703 rg->to = f; ··· 5208 5187 */ 5209 5188 long rsv_adjust; 5210 5189 5190 + /* 5191 + * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the 5192 + * reference to h_cg->css. See comment below for detail. 5193 + */ 5211 5194 hugetlb_cgroup_uncharge_cgroup_rsvd( 5212 5195 hstate_index(h), 5213 5196 (chg - add) * pages_per_huge_page(h), h_cg); ··· 5219 5194 rsv_adjust = hugepage_subpool_put_pages(spool, 5220 5195 chg - add); 5221 5196 hugetlb_acct_memory(h, -rsv_adjust); 5197 + } else if (h_cg) { 5198 + /* 5199 + * The file_regions will hold their own reference to 5200 + * h_cg->css. So we should release the reference held 5201 + * via hugetlb_cgroup_charge_cgroup_rsvd() when we are 5202 + * done. 5203 + */ 5204 + hugetlb_cgroup_put_rsvd_cgroup(h_cg); 5222 5205 } 5223 5206 } 5224 5207 return true;
+8 -2
mm/hugetlb_cgroup.c
··· 391 391 392 392 void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, 393 393 struct file_region *rg, 394 - unsigned long nr_pages) 394 + unsigned long nr_pages, 395 + bool region_del) 395 396 { 396 397 if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages) 397 398 return; ··· 401 400 !resv->reservation_counter) { 402 401 page_counter_uncharge(rg->reservation_counter, 403 402 nr_pages * resv->pages_per_hpage); 404 - css_put(rg->css); 403 + /* 404 + * Only do css_put(rg->css) when we delete the entire region 405 + * because one file_region must hold exactly one css reference. 406 + */ 407 + if (region_del) 408 + css_put(rg->css); 405 409 } 406 410 } 407 411
+9
mm/kfence/core.c
··· 12 12 #include <linux/debugfs.h> 13 13 #include <linux/kcsan-checks.h> 14 14 #include <linux/kfence.h> 15 + #include <linux/kmemleak.h> 15 16 #include <linux/list.h> 16 17 #include <linux/lockdep.h> 17 18 #include <linux/memblock.h> ··· 480 479 481 480 addr += 2 * PAGE_SIZE; 482 481 } 482 + 483 + /* 484 + * The pool is live and will never be deallocated from this point on. 485 + * Remove the pool object from the kmemleak object tree, as it would 486 + * otherwise overlap with allocations returned by kfence_alloc(), which 487 + * are registered with kmemleak through the slab post-alloc hook. 488 + */ 489 + kmemleak_free(__kfence_pool); 483 490 484 491 return true; 485 492
+2 -1
mm/kmemleak.c
··· 97 97 #include <linux/atomic.h> 98 98 99 99 #include <linux/kasan.h> 100 + #include <linux/kfence.h> 100 101 #include <linux/kmemleak.h> 101 102 #include <linux/memory_hotplug.h> 102 103 ··· 590 589 atomic_set(&object->use_count, 1); 591 590 object->flags = OBJECT_ALLOCATED; 592 591 object->pointer = ptr; 593 - object->size = size; 592 + object->size = kfence_ksize((void *)ptr) ?: size; 594 593 object->excess_ref = 0; 595 594 object->min_count = min_count; 596 595 object->count = 0; /* white color initially */
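The kmemleak hunk relies on the GNU `?:` (elvis) extension: `kfence_ksize((void *)ptr) ?: size` yields the left operand when it is non-zero and otherwise falls back to the right one, without evaluating the left side twice. A tiny standalone illustration of the operator (a gcc/clang extension, not standard C; the helper below is hypothetical):

#include <stdio.h>

/* Hypothetical helper standing in for kfence_ksize(). */
static size_t maybe_size(size_t s)
{
	return s;
}

int main(void)
{
	size_t fallback = 4096;

	/* Left operand non-zero: used as-is. */
	printf("%zu\n", maybe_size(128) ?: fallback);	/* 128 */
	/* Left operand zero: fall back to the right operand. */
	printf("%zu\n", maybe_size(0) ?: fallback);	/* 4096 */
	return 0;
}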
+1 -1
mm/memory.c
··· 166 166 zero_pfn = page_to_pfn(ZERO_PAGE(0)); 167 167 return 0; 168 168 } 169 - core_initcall(init_zero_pfn); 169 + early_initcall(init_zero_pfn); 170 170 171 171 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count) 172 172 {
+23
mm/mmu_notifier.c
··· 501 501 ""); 502 502 WARN_ON(mmu_notifier_range_blockable(range) || 503 503 _ret != -EAGAIN); 504 + /* 505 + * We call all the notifiers on any EAGAIN, 506 + * there is no way for a notifier to know if 507 + * its start method failed, thus a start that 508 + * does EAGAIN can't also do end. 509 + */ 510 + WARN_ON(ops->invalidate_range_end); 504 511 ret = _ret; 505 512 } 513 + } 514 + } 515 + 516 + if (ret) { 517 + /* 518 + * Must be non-blocking to get here. If there are multiple 519 + * notifiers and one or more failed start, any that succeeded 520 + * start are expecting their end to be called. Do so now. 521 + */ 522 + hlist_for_each_entry_rcu(subscription, &subscriptions->list, 523 + hlist, srcu_read_lock_held(&srcu)) { 524 + if (!subscription->ops->invalidate_range_end) 525 + continue; 526 + 527 + subscription->ops->invalidate_range_end(subscription, 528 + range); 506 529 } 507 530 } 508 531 srcu_read_unlock(&srcu, id);
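The mmu_notifier.c change handles a partial failure of the start callbacks: when one subscriber returns -EAGAIN, the subscribers whose start already succeeded still expect their invalidate_range_end() to run. A reduced userspace sketch of that "unwind the survivors" pattern over an array of ops follows; the struct layout and names are illustrative, not the kernel's.

#include <stdio.h>

struct ops {
	const char *name;
	int  (*start)(void);
	void (*end)(void);
};

static int ok_start(void)  { return 0; }
static int bad_start(void) { return -11; /* think -EAGAIN */ }
static void note_end(void) { /* per-subscriber cleanup would go here */ }

int main(void)
{
	/* The failing subscriber has no end callback, mirroring the
	 * invariant the patch WARNs about. */
	struct ops subs[] = {
		{ "a", ok_start,  note_end },
		{ "b", bad_start, NULL     },
		{ "c", ok_start,  note_end },
	};
	int n = sizeof(subs) / sizeof(subs[0]), ret = 0;

	/* Call every start, remembering whether any of them failed. */
	for (int i = 0; i < n; i++) {
		int r = subs[i].start();

		if (r)
			ret = r;
	}

	/* On failure, subscribers whose start succeeded still expect
	 * their end; call it for everyone that provides one. */
	if (ret) {
		for (int i = 0; i < n; i++) {
			if (!subs[i].end)
				continue;
			subs[i].end();
			printf("end called for %s\n", subs[i].name);
		}
	}

	printf("ret=%d\n", ret);
	return 0;
}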
+16
mm/page-writeback.c
··· 2833 2833 } 2834 2834 EXPORT_SYMBOL_GPL(wait_on_page_writeback); 2835 2835 2836 + /* 2837 + * Wait for a page to complete writeback. Returns -EINTR if we get a 2838 + * fatal signal while waiting. 2839 + */ 2840 + int wait_on_page_writeback_killable(struct page *page) 2841 + { 2842 + while (PageWriteback(page)) { 2843 + trace_wait_on_page_writeback(page, page_mapping(page)); 2844 + if (wait_on_page_bit_killable(page, PG_writeback)) 2845 + return -EINTR; 2846 + } 2847 + 2848 + return 0; 2849 + } 2850 + EXPORT_SYMBOL_GPL(wait_on_page_writeback_killable); 2851 + 2836 2852 /** 2837 2853 * wait_for_stable_page() - wait for writeback to finish, if necessary. 2838 2854 * @page: The page to wait on.
+15 -1
mm/z3fold.c
··· 1346 1346 page = list_entry(pos, struct page, lru); 1347 1347 1348 1348 zhdr = page_address(page); 1349 - if (test_bit(PAGE_HEADLESS, &page->private)) 1349 + if (test_bit(PAGE_HEADLESS, &page->private)) { 1350 + /* 1351 + * For non-headless pages, we wait to do this 1352 + * until we have the page lock to avoid racing 1353 + * with __z3fold_alloc(). Headless pages don't 1354 + * have a lock (and __z3fold_alloc() will never 1355 + * see them), but we still need to test and set 1356 + * PAGE_CLAIMED to avoid racing with 1357 + * z3fold_free(), so just do it now before 1358 + * leaving the loop. 1359 + */ 1360 + if (test_and_set_bit(PAGE_CLAIMED, &page->private)) 1361 + continue; 1362 + 1350 1363 break; 1364 + } 1351 1365 1352 1366 if (kref_get_unless_zero(&zhdr->refcount) == 0) { 1353 1367 zhdr = NULL;
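The z3fold hunk claims headless pages with test_and_set_bit(PAGE_CLAIMED, ...) so that a racing z3fold_free() cannot take the same page. The same "first caller wins" idiom expressed with portable C11 atomics (a sketch; the helper below is not the kernel's test_and_set_bit(), and the bit number is chosen for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_CLAIMED_BIT 0	/* bit number used for illustration */

/* Returns the previous value of the bit, like test_and_set_bit(). */
static bool claim_bit(int nr, _Atomic unsigned long *word)
{
	unsigned long mask = 1UL << nr;

	return atomic_fetch_or(word, mask) & mask;
}

int main(void)
{
	_Atomic unsigned long page_private = 0;

	if (!claim_bit(PAGE_CLAIMED_BIT, &page_private))
		puts("first caller : claimed the page, safe to reclaim it");

	if (claim_bit(PAGE_CLAIMED_BIT, &page_private))
		puts("second caller: already claimed, skip this page");

	return 0;
}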
+2
net/bridge/br_switchdev.c
··· 128 128 { 129 129 if (!fdb->dst) 130 130 return; 131 + if (test_bit(BR_FDB_LOCAL, &fdb->flags)) 132 + return; 131 133 132 134 switch (type) { 133 135 case RTM_DELNEIGH:
+8 -10
net/can/isotp.c
··· 196 196 nskb->dev = dev; 197 197 can_skb_set_owner(nskb, sk); 198 198 ncf = (struct canfd_frame *)nskb->data; 199 - skb_put(nskb, so->ll.mtu); 199 + skb_put_zero(nskb, so->ll.mtu); 200 200 201 201 /* create & send flow control reply */ 202 202 ncf->can_id = so->txid; ··· 215 215 if (ae) 216 216 ncf->data[0] = so->opt.ext_address; 217 217 218 - if (so->ll.mtu == CANFD_MTU) 219 - ncf->flags = so->ll.tx_flags; 218 + ncf->flags = so->ll.tx_flags; 220 219 221 220 can_send_ret = can_send(nskb, 1); 222 221 if (can_send_ret) ··· 779 780 can_skb_prv(skb)->skbcnt = 0; 780 781 781 782 cf = (struct canfd_frame *)skb->data; 782 - skb_put(skb, so->ll.mtu); 783 + skb_put_zero(skb, so->ll.mtu); 783 784 784 785 /* create consecutive frame */ 785 786 isotp_fill_dataframe(cf, so, ae, 0); ··· 789 790 so->tx.sn %= 16; 790 791 so->tx.bs++; 791 792 792 - if (so->ll.mtu == CANFD_MTU) 793 - cf->flags = so->ll.tx_flags; 793 + cf->flags = so->ll.tx_flags; 794 794 795 795 skb->dev = dev; 796 796 can_skb_set_owner(skb, sk); ··· 895 897 so->tx.idx = 0; 896 898 897 899 cf = (struct canfd_frame *)skb->data; 898 - skb_put(skb, so->ll.mtu); 900 + skb_put_zero(skb, so->ll.mtu); 899 901 900 902 /* check for single frame transmission depending on TX_DL */ 901 903 if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) { ··· 937 939 } 938 940 939 941 /* send the first or only CAN frame */ 940 - if (so->ll.mtu == CANFD_MTU) 941 - cf->flags = so->ll.tx_flags; 942 + cf->flags = so->ll.tx_flags; 942 943 943 944 skb->dev = dev; 944 945 skb->sk = sk; ··· 1225 1228 if (ll.mtu != CAN_MTU && ll.mtu != CANFD_MTU) 1226 1229 return -EINVAL; 1227 1230 1228 - if (ll.mtu == CAN_MTU && ll.tx_dl > CAN_MAX_DLEN) 1231 + if (ll.mtu == CAN_MTU && 1232 + (ll.tx_dl > CAN_MAX_DLEN || ll.tx_flags != 0)) 1229 1233 return -EINVAL; 1230 1234 1231 1235 memcpy(&so->ll, &ll, sizeof(ll));
+31 -2
net/core/dev.c
··· 1184 1184 return -ENOMEM; 1185 1185 1186 1186 for_each_netdev(net, d) { 1187 + struct netdev_name_node *name_node; 1188 + list_for_each_entry(name_node, &d->name_node->list, list) { 1189 + if (!sscanf(name_node->name, name, &i)) 1190 + continue; 1191 + if (i < 0 || i >= max_netdevices) 1192 + continue; 1193 + 1194 + /* avoid cases where sscanf is not exact inverse of printf */ 1195 + snprintf(buf, IFNAMSIZ, name, i); 1196 + if (!strncmp(buf, name_node->name, IFNAMSIZ)) 1197 + set_bit(i, inuse); 1198 + } 1187 1199 if (!sscanf(d->name, name, &i)) 1188 1200 continue; 1189 1201 if (i < 0 || i >= max_netdevices) ··· 4306 4294 */ 4307 4295 thread = READ_ONCE(napi->thread); 4308 4296 if (thread) { 4297 + /* Avoid doing set_bit() if the thread is in 4298 + * INTERRUPTIBLE state, cause napi_thread_wait() 4299 + * makes sure to proceed with napi polling 4300 + * if the thread is explicitly woken from here. 4301 + */ 4302 + if (READ_ONCE(thread->state) != TASK_INTERRUPTIBLE) 4303 + set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); 4309 4304 wake_up_process(thread); 4310 4305 return; 4311 4306 } ··· 6505 6486 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); 6506 6487 6507 6488 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | 6489 + NAPIF_STATE_SCHED_THREADED | 6508 6490 NAPIF_STATE_PREFER_BUSY_POLL); 6509 6491 6510 6492 /* If STATE_MISSED was set, leave STATE_SCHED set, ··· 6988 6968 6989 6969 static int napi_thread_wait(struct napi_struct *napi) 6990 6970 { 6971 + bool woken = false; 6972 + 6991 6973 set_current_state(TASK_INTERRUPTIBLE); 6992 6974 6993 6975 while (!kthread_should_stop() && !napi_disable_pending(napi)) { 6994 - if (test_bit(NAPI_STATE_SCHED, &napi->state)) { 6976 + /* Testing SCHED_THREADED bit here to make sure the current 6977 + * kthread owns this napi and could poll on this napi. 6978 + * Testing SCHED bit is not enough because SCHED bit might be 6979 + * set by some other busy poll thread or by napi_disable(). 6980 + */ 6981 + if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) { 6995 6982 WARN_ON(!list_empty(&napi->poll_list)); 6996 6983 __set_current_state(TASK_RUNNING); 6997 6984 return 0; 6998 6985 } 6999 6986 7000 6987 schedule(); 6988 + /* woken being true indicates this thread owns this napi. */ 6989 + woken = true; 7001 6990 set_current_state(TASK_INTERRUPTIBLE); 7002 6991 } 7003 6992 __set_current_state(TASK_RUNNING); ··· 11375 11346 continue; 11376 11347 11377 11348 /* Leave virtual devices for the generic cleanup */ 11378 - if (dev->rtnl_link_ops) 11349 + if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund) 11379 11350 continue; 11380 11351 11381 11352 /* Push remaining network devices to init_net */
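The first net/core/dev.c hunk extends the interface-name allocator's in-use scan to alternative names, reusing the existing trick: parse the candidate against the template ("eth%d" and friends) with sscanf(), then print the index back through the template and only trust it if the round trip reproduces the name exactly. A self-contained sketch of that check (the template and names below are example values):

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

/* Return the parsed index, or -1 if name does not match the template. */
static int template_index(const char *template, const char *name)
{
	char buf[IFNAMSIZ];
	int i;

	if (sscanf(name, template, &i) != 1 || i < 0)
		return -1;

	/* sscanf is not an exact inverse of printf, so verify the round trip. */
	snprintf(buf, IFNAMSIZ, template, i);
	if (strncmp(buf, name, IFNAMSIZ))
		return -1;

	return i;
}

int main(void)
{
	printf("%d\n", template_index("eth%d", "eth3"));	/*  3 */
	printf("%d\n", template_index("eth%d", "eth03"));	/* -1: "eth3" != "eth03" */
	printf("%d\n", template_index("eth%d", "veth0"));	/* -1: no match */
	return 0;
}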
+23
net/core/drop_monitor.c
··· 1053 1053 return 0; 1054 1054 1055 1055 err_module_put: 1056 + for_each_possible_cpu(cpu) { 1057 + struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); 1058 + struct sk_buff *skb; 1059 + 1060 + del_timer_sync(&hw_data->send_timer); 1061 + cancel_work_sync(&hw_data->dm_alert_work); 1062 + while ((skb = __skb_dequeue(&hw_data->drop_queue))) { 1063 + struct devlink_trap_metadata *hw_metadata; 1064 + 1065 + hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata; 1066 + net_dm_hw_metadata_free(hw_metadata); 1067 + consume_skb(skb); 1068 + } 1069 + } 1056 1070 module_put(THIS_MODULE); 1057 1071 return rc; 1058 1072 } ··· 1148 1134 err_unregister_trace: 1149 1135 unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL); 1150 1136 err_module_put: 1137 + for_each_possible_cpu(cpu) { 1138 + struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu); 1139 + struct sk_buff *skb; 1140 + 1141 + del_timer_sync(&data->send_timer); 1142 + cancel_work_sync(&data->dm_alert_work); 1143 + while ((skb = __skb_dequeue(&data->drop_queue))) 1144 + consume_skb(skb); 1145 + } 1151 1146 module_put(THIS_MODULE); 1152 1147 return rc; 1153 1148 }
+47 -22
net/core/dst.c
··· 237 237 } 238 238 EXPORT_SYMBOL(__dst_destroy_metrics_generic); 239 239 240 - static struct dst_ops md_dst_ops = { 241 - .family = AF_UNSPEC, 240 + struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie) 241 + { 242 + return NULL; 243 + } 244 + 245 + u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old) 246 + { 247 + return NULL; 248 + } 249 + 250 + struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst, 251 + struct sk_buff *skb, 252 + const void *daddr) 253 + { 254 + return NULL; 255 + } 256 + 257 + void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, 258 + struct sk_buff *skb, u32 mtu, 259 + bool confirm_neigh) 260 + { 261 + } 262 + EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu); 263 + 264 + void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk, 265 + struct sk_buff *skb) 266 + { 267 + } 268 + EXPORT_SYMBOL_GPL(dst_blackhole_redirect); 269 + 270 + unsigned int dst_blackhole_mtu(const struct dst_entry *dst) 271 + { 272 + unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 273 + 274 + return mtu ? : dst->dev->mtu; 275 + } 276 + EXPORT_SYMBOL_GPL(dst_blackhole_mtu); 277 + 278 + static struct dst_ops dst_blackhole_ops = { 279 + .family = AF_UNSPEC, 280 + .neigh_lookup = dst_blackhole_neigh_lookup, 281 + .check = dst_blackhole_check, 282 + .cow_metrics = dst_blackhole_cow_metrics, 283 + .update_pmtu = dst_blackhole_update_pmtu, 284 + .redirect = dst_blackhole_redirect, 285 + .mtu = dst_blackhole_mtu, 242 286 }; 243 - 244 - static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) 245 - { 246 - WARN_ONCE(1, "Attempting to call output on metadata dst\n"); 247 - kfree_skb(skb); 248 - return 0; 249 - } 250 - 251 - static int dst_md_discard(struct sk_buff *skb) 252 - { 253 - WARN_ONCE(1, "Attempting to call input on metadata dst\n"); 254 - kfree_skb(skb); 255 - return 0; 256 - } 257 287 258 288 static void __metadata_dst_init(struct metadata_dst *md_dst, 259 289 enum metadata_type type, u8 optslen) 260 - 261 290 { 262 291 struct dst_entry *dst; 263 292 264 293 dst = &md_dst->dst; 265 - dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE, 294 + dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 266 295 DST_METADATA | DST_NOCOUNT); 267 - 268 - dst->input = dst_md_discard; 269 - dst->output = dst_md_discard_out; 270 - 271 296 memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst)); 272 297 md_dst->type = type; 273 298 }
+10 -2
net/core/filter.c
··· 5658 5658 if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) 5659 5659 return -EINVAL; 5660 5660 5661 - if (unlikely(flags & BPF_MTU_CHK_SEGS && len_diff)) 5661 + if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) 5662 5662 return -EINVAL; 5663 5663 5664 5664 dev = __dev_via_ifindex(dev, ifindex); ··· 5668 5668 mtu = READ_ONCE(dev->mtu); 5669 5669 5670 5670 dev_len = mtu + dev->hard_header_len; 5671 - skb_len = skb->len + len_diff; /* minus result pass check */ 5671 + 5672 + /* If set use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ 5673 + skb_len = *mtu_len ? *mtu_len + dev->hard_header_len : skb->len; 5674 + 5675 + skb_len += len_diff; /* minus result pass check */ 5672 5676 if (skb_len <= dev_len) { 5673 5677 ret = BPF_MTU_CHK_RET_SUCCESS; 5674 5678 goto out; ··· 5716 5712 5717 5713 /* Add L2-header as dev MTU is L3 size */ 5718 5714 dev_len = mtu + dev->hard_header_len; 5715 + 5716 + /* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */ 5717 + if (*mtu_len) 5718 + xdp_len = *mtu_len + dev->hard_header_len; 5719 5719 5720 5720 xdp_len += len_diff; /* minus result pass check */ 5721 5721 if (xdp_len > dev_len)
+1 -1
net/core/flow_dissector.c
··· 176 176 * avoid confusion with packets without such field 177 177 */ 178 178 if (icmp_has_id(ih->type)) 179 - key_icmp->id = ih->un.echo.id ? : 1; 179 + key_icmp->id = ih->un.echo.id ? ntohs(ih->un.echo.id) : 1; 180 180 else 181 181 key_icmp->id = 0; 182 182 }
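The flow_dissector fix converts the ICMP echo identifier from network to host byte order before using it as a flow key. A two-line reminder of what ntohs() does on a little-endian host (standard arpa/inet.h, nothing kernel-specific):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t wire_id = htons(0x1234);	/* as the field sits in the packet */

	printf("raw field: 0x%04x\n", wire_id);		/* 0x3412 on little-endian */
	printf("ntohs()  : 0x%04x\n", ntohs(wire_id));	/* 0x1234 everywhere */
	return 0;
}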
+28 -16
net/core/sock.c
··· 3440 3440 twsk_prot->twsk_slab = NULL; 3441 3441 } 3442 3442 3443 + static int tw_prot_init(const struct proto *prot) 3444 + { 3445 + struct timewait_sock_ops *twsk_prot = prot->twsk_prot; 3446 + 3447 + if (!twsk_prot) 3448 + return 0; 3449 + 3450 + twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", 3451 + prot->name); 3452 + if (!twsk_prot->twsk_slab_name) 3453 + return -ENOMEM; 3454 + 3455 + twsk_prot->twsk_slab = 3456 + kmem_cache_create(twsk_prot->twsk_slab_name, 3457 + twsk_prot->twsk_obj_size, 0, 3458 + SLAB_ACCOUNT | prot->slab_flags, 3459 + NULL); 3460 + if (!twsk_prot->twsk_slab) { 3461 + pr_crit("%s: Can't create timewait sock SLAB cache!\n", 3462 + prot->name); 3463 + return -ENOMEM; 3464 + } 3465 + 3466 + return 0; 3467 + } 3468 + 3443 3469 static void req_prot_cleanup(struct request_sock_ops *rsk_prot) 3444 3470 { 3445 3471 if (!rsk_prot) ··· 3522 3496 if (req_prot_init(prot)) 3523 3497 goto out_free_request_sock_slab; 3524 3498 3525 - if (prot->twsk_prot != NULL) { 3526 - prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name); 3527 - 3528 - if (prot->twsk_prot->twsk_slab_name == NULL) 3529 - goto out_free_request_sock_slab; 3530 - 3531 - prot->twsk_prot->twsk_slab = 3532 - kmem_cache_create(prot->twsk_prot->twsk_slab_name, 3533 - prot->twsk_prot->twsk_obj_size, 3534 - 0, 3535 - SLAB_ACCOUNT | 3536 - prot->slab_flags, 3537 - NULL); 3538 - if (prot->twsk_prot->twsk_slab == NULL) 3539 - goto out_free_timewait_sock_slab; 3540 - } 3499 + if (tw_prot_init(prot)) 3500 + goto out_free_timewait_sock_slab; 3541 3501 } 3542 3502 3543 3503 mutex_lock(&proto_list_mutex);
+5
net/dccp/ipv6.c
··· 319 319 if (!ipv6_unicast_destination(skb)) 320 320 return 0; /* discard, don't send a reset here */ 321 321 322 + if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { 323 + __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); 324 + return 0; 325 + } 326 + 322 327 if (dccp_bad_service_code(sk, service)) { 323 328 dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; 324 329 goto drop;
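This dccp hunk, together with the matching tcp_ipv6.c and mptcp/subflow.c hunks later in the diff, drops IPv6 connection requests whose source address is IPv4-mapped (::ffff:a.b.c.d), now that ip6_input no longer rejects such packets globally. Userspace can express the same test with the standard IN6_IS_ADDR_V4MAPPED() macro; the addresses in this sketch are examples only.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static void classify(const char *str)
{
	struct in6_addr a;

	if (inet_pton(AF_INET6, str, &a) != 1) {
		printf("%-24s invalid\n", str);
		return;
	}
	printf("%-24s %s\n", str,
	       IN6_IS_ADDR_V4MAPPED(&a) ? "v4-mapped: drop" : "native v6: accept");
}

int main(void)
{
	classify("::ffff:127.0.0.1");
	classify("2001:db8::1");
	return 0;
}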
+7 -4
net/dsa/dsa2.c
··· 1066 1066 { 1067 1067 struct dsa_switch *ds = dp->ds; 1068 1068 struct dsa_switch_tree *dst = ds->dst; 1069 + const struct dsa_device_ops *tag_ops; 1069 1070 enum dsa_tag_protocol tag_protocol; 1070 1071 1071 1072 tag_protocol = dsa_get_tag_protocol(dp, master); ··· 1081 1080 * nothing to do here. 1082 1081 */ 1083 1082 } else { 1084 - dst->tag_ops = dsa_tag_driver_get(tag_protocol); 1085 - if (IS_ERR(dst->tag_ops)) { 1086 - if (PTR_ERR(dst->tag_ops) == -ENOPROTOOPT) 1083 + tag_ops = dsa_tag_driver_get(tag_protocol); 1084 + if (IS_ERR(tag_ops)) { 1085 + if (PTR_ERR(tag_ops) == -ENOPROTOOPT) 1087 1086 return -EPROBE_DEFER; 1088 1087 dev_warn(ds->dev, "No tagger for this switch\n"); 1089 1088 dp->master = NULL; 1090 - return PTR_ERR(dst->tag_ops); 1089 + return PTR_ERR(tag_ops); 1091 1090 } 1091 + 1092 + dst->tag_ops = tag_ops; 1092 1093 } 1093 1094 1094 1095 dp->master = master;
+5 -2
net/ipv4/inet_connection_sock.c
··· 705 705 return found; 706 706 } 707 707 708 - void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) 708 + bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) 709 709 { 710 - if (reqsk_queue_unlink(req)) { 710 + bool unlinked = reqsk_queue_unlink(req); 711 + 712 + if (unlinked) { 711 713 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); 712 714 reqsk_put(req); 713 715 } 716 + return unlinked; 714 717 } 715 718 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop); 716 719
+8 -6
net/ipv4/ipconfig.c
··· 309 309 */ 310 310 static void __init ic_close_devs(void) 311 311 { 312 - struct net_device *selected_dev = ic_dev->dev; 312 + struct net_device *selected_dev = ic_dev ? ic_dev->dev : NULL; 313 313 struct ic_device *d, *next; 314 314 struct net_device *dev; 315 315 ··· 317 317 next = ic_first_dev; 318 318 while ((d = next)) { 319 319 bool bring_down = (d != ic_dev); 320 - struct net_device *lower_dev; 320 + struct net_device *lower; 321 321 struct list_head *iter; 322 322 323 323 next = d->next; 324 324 dev = d->dev; 325 325 326 - netdev_for_each_lower_dev(selected_dev, lower_dev, iter) { 327 - if (dev == lower_dev) { 328 - bring_down = false; 329 - break; 326 + if (selected_dev) { 327 + netdev_for_each_lower_dev(selected_dev, lower, iter) { 328 + if (dev == lower) { 329 + bring_down = false; 330 + break; 331 + } 330 332 } 331 333 } 332 334 if (bring_down) {
+8 -8
net/ipv4/netfilter/arp_tables.c
··· 203 203 204 204 local_bh_disable(); 205 205 addend = xt_write_recseq_begin(); 206 - private = rcu_access_pointer(table->private); 206 + private = READ_ONCE(table->private); /* Address dependency. */ 207 207 cpu = smp_processor_id(); 208 208 table_base = private->entries; 209 209 jumpstack = (struct arpt_entry **)private->jumpstack[cpu]; ··· 649 649 { 650 650 unsigned int countersize; 651 651 struct xt_counters *counters; 652 - const struct xt_table_info *private = xt_table_get_private_protected(table); 652 + const struct xt_table_info *private = table->private; 653 653 654 654 /* We need atomic snapshot of counters: rest doesn't change 655 655 * (other than comefrom, which userspace doesn't care ··· 673 673 unsigned int off, num; 674 674 const struct arpt_entry *e; 675 675 struct xt_counters *counters; 676 - struct xt_table_info *private = xt_table_get_private_protected(table); 676 + struct xt_table_info *private = table->private; 677 677 int ret = 0; 678 678 void *loc_cpu_entry; 679 679 ··· 807 807 t = xt_request_find_table_lock(net, NFPROTO_ARP, name); 808 808 if (!IS_ERR(t)) { 809 809 struct arpt_getinfo info; 810 - const struct xt_table_info *private = xt_table_get_private_protected(t); 810 + const struct xt_table_info *private = t->private; 811 811 #ifdef CONFIG_COMPAT 812 812 struct xt_table_info tmp; 813 813 ··· 860 860 861 861 t = xt_find_table_lock(net, NFPROTO_ARP, get.name); 862 862 if (!IS_ERR(t)) { 863 - const struct xt_table_info *private = xt_table_get_private_protected(t); 863 + const struct xt_table_info *private = t->private; 864 864 865 865 if (get.size == private->size) 866 866 ret = copy_entries_to_user(private->size, ··· 1017 1017 } 1018 1018 1019 1019 local_bh_disable(); 1020 - private = xt_table_get_private_protected(t); 1020 + private = t->private; 1021 1021 if (private->number != tmp.num_counters) { 1022 1022 ret = -EINVAL; 1023 1023 goto unlock_up_free; ··· 1330 1330 void __user *userptr) 1331 1331 { 1332 1332 struct xt_counters *counters; 1333 - const struct xt_table_info *private = xt_table_get_private_protected(table); 1333 + const struct xt_table_info *private = table->private; 1334 1334 void __user *pos; 1335 1335 unsigned int size; 1336 1336 int ret = 0; ··· 1379 1379 xt_compat_lock(NFPROTO_ARP); 1380 1380 t = xt_find_table_lock(net, NFPROTO_ARP, get.name); 1381 1381 if (!IS_ERR(t)) { 1382 - const struct xt_table_info *private = xt_table_get_private_protected(t); 1382 + const struct xt_table_info *private = t->private; 1383 1383 struct xt_table_info info; 1384 1384 1385 1385 ret = compat_table_info(private, &info);
+8 -8
net/ipv4/netfilter/ip_tables.c
··· 258 258 WARN_ON(!(table->valid_hooks & (1 << hook))); 259 259 local_bh_disable(); 260 260 addend = xt_write_recseq_begin(); 261 - private = rcu_access_pointer(table->private); 261 + private = READ_ONCE(table->private); /* Address dependency. */ 262 262 cpu = smp_processor_id(); 263 263 table_base = private->entries; 264 264 jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; ··· 791 791 { 792 792 unsigned int countersize; 793 793 struct xt_counters *counters; 794 - const struct xt_table_info *private = xt_table_get_private_protected(table); 794 + const struct xt_table_info *private = table->private; 795 795 796 796 /* We need atomic snapshot of counters: rest doesn't change 797 797 (other than comefrom, which userspace doesn't care ··· 815 815 unsigned int off, num; 816 816 const struct ipt_entry *e; 817 817 struct xt_counters *counters; 818 - const struct xt_table_info *private = xt_table_get_private_protected(table); 818 + const struct xt_table_info *private = table->private; 819 819 int ret = 0; 820 820 const void *loc_cpu_entry; 821 821 ··· 964 964 t = xt_request_find_table_lock(net, AF_INET, name); 965 965 if (!IS_ERR(t)) { 966 966 struct ipt_getinfo info; 967 - const struct xt_table_info *private = xt_table_get_private_protected(t); 967 + const struct xt_table_info *private = t->private; 968 968 #ifdef CONFIG_COMPAT 969 969 struct xt_table_info tmp; 970 970 ··· 1018 1018 1019 1019 t = xt_find_table_lock(net, AF_INET, get.name); 1020 1020 if (!IS_ERR(t)) { 1021 - const struct xt_table_info *private = xt_table_get_private_protected(t); 1021 + const struct xt_table_info *private = t->private; 1022 1022 if (get.size == private->size) 1023 1023 ret = copy_entries_to_user(private->size, 1024 1024 t, uptr->entrytable); ··· 1173 1173 } 1174 1174 1175 1175 local_bh_disable(); 1176 - private = xt_table_get_private_protected(t); 1176 + private = t->private; 1177 1177 if (private->number != tmp.num_counters) { 1178 1178 ret = -EINVAL; 1179 1179 goto unlock_up_free; ··· 1543 1543 void __user *userptr) 1544 1544 { 1545 1545 struct xt_counters *counters; 1546 - const struct xt_table_info *private = xt_table_get_private_protected(table); 1546 + const struct xt_table_info *private = table->private; 1547 1547 void __user *pos; 1548 1548 unsigned int size; 1549 1549 int ret = 0; ··· 1589 1589 xt_compat_lock(AF_INET); 1590 1590 t = xt_find_table_lock(net, AF_INET, get.name); 1591 1591 if (!IS_ERR(t)) { 1592 - const struct xt_table_info *private = xt_table_get_private_protected(t); 1592 + const struct xt_table_info *private = t->private; 1593 1593 struct xt_table_info info; 1594 1594 ret = compat_table_info(private, &info); 1595 1595 if (!ret && get.size == info.size)
+8 -37
net/ipv4/route.c
··· 2687 2687 return rth; 2688 2688 } 2689 2689 2690 - static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) 2691 - { 2692 - return NULL; 2693 - } 2694 - 2695 - static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst) 2696 - { 2697 - unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 2698 - 2699 - return mtu ? : dst->dev->mtu; 2700 - } 2701 - 2702 - static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, 2703 - struct sk_buff *skb, u32 mtu, 2704 - bool confirm_neigh) 2705 - { 2706 - } 2707 - 2708 - static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk, 2709 - struct sk_buff *skb) 2710 - { 2711 - } 2712 - 2713 - static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst, 2714 - unsigned long old) 2715 - { 2716 - return NULL; 2717 - } 2718 - 2719 2690 static struct dst_ops ipv4_dst_blackhole_ops = { 2720 - .family = AF_INET, 2721 - .check = ipv4_blackhole_dst_check, 2722 - .mtu = ipv4_blackhole_mtu, 2723 - .default_advmss = ipv4_default_advmss, 2724 - .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2725 - .redirect = ipv4_rt_blackhole_redirect, 2726 - .cow_metrics = ipv4_rt_blackhole_cow_metrics, 2727 - .neigh_lookup = ipv4_neigh_lookup, 2691 + .family = AF_INET, 2692 + .default_advmss = ipv4_default_advmss, 2693 + .neigh_lookup = ipv4_neigh_lookup, 2694 + .check = dst_blackhole_check, 2695 + .cow_metrics = dst_blackhole_cow_metrics, 2696 + .update_pmtu = dst_blackhole_update_pmtu, 2697 + .redirect = dst_blackhole_redirect, 2698 + .mtu = dst_blackhole_mtu, 2728 2699 }; 2729 2700 2730 2701 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
+5 -2
net/ipv4/tcp_minisocks.c
··· 804 804 tcp_reset(sk, skb); 805 805 } 806 806 if (!fastopen) { 807 - inet_csk_reqsk_queue_drop(sk, req); 808 - __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); 807 + bool unlinked = inet_csk_reqsk_queue_drop(sk, req); 808 + 809 + if (unlinked) 810 + __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); 811 + *req_stolen = !unlinked; 809 812 } 810 813 return NULL; 811 814 }
+1 -1
net/ipv6/ip6_fib.c
··· 2486 2486 const struct net_device *dev; 2487 2487 2488 2488 if (rt->nh) 2489 - fib6_nh = nexthop_fib6_nh(rt->nh); 2489 + fib6_nh = nexthop_fib6_nh_bh(rt->nh); 2490 2490 2491 2491 seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen); 2492 2492
-10
net/ipv6/ip6_input.c
··· 245 245 if (ipv6_addr_is_multicast(&hdr->saddr)) 246 246 goto err; 247 247 248 - /* While RFC4291 is not explicit about v4mapped addresses 249 - * in IPv6 headers, it seems clear linux dual-stack 250 - * model can not deal properly with these. 251 - * Security models could be fooled by ::ffff:127.0.0.1 for example. 252 - * 253 - * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02 254 - */ 255 - if (ipv6_addr_v4mapped(&hdr->saddr)) 256 - goto err; 257 - 258 248 skb->transport_header = skb->network_header + sizeof(*hdr); 259 249 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); 260 250
+8 -8
net/ipv6/netfilter/ip6_tables.c
··· 280 280 281 281 local_bh_disable(); 282 282 addend = xt_write_recseq_begin(); 283 - private = rcu_access_pointer(table->private); 283 + private = READ_ONCE(table->private); /* Address dependency. */ 284 284 cpu = smp_processor_id(); 285 285 table_base = private->entries; 286 286 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; ··· 807 807 { 808 808 unsigned int countersize; 809 809 struct xt_counters *counters; 810 - const struct xt_table_info *private = xt_table_get_private_protected(table); 810 + const struct xt_table_info *private = table->private; 811 811 812 812 /* We need atomic snapshot of counters: rest doesn't change 813 813 (other than comefrom, which userspace doesn't care ··· 831 831 unsigned int off, num; 832 832 const struct ip6t_entry *e; 833 833 struct xt_counters *counters; 834 - const struct xt_table_info *private = xt_table_get_private_protected(table); 834 + const struct xt_table_info *private = table->private; 835 835 int ret = 0; 836 836 const void *loc_cpu_entry; 837 837 ··· 980 980 t = xt_request_find_table_lock(net, AF_INET6, name); 981 981 if (!IS_ERR(t)) { 982 982 struct ip6t_getinfo info; 983 - const struct xt_table_info *private = xt_table_get_private_protected(t); 983 + const struct xt_table_info *private = t->private; 984 984 #ifdef CONFIG_COMPAT 985 985 struct xt_table_info tmp; 986 986 ··· 1035 1035 1036 1036 t = xt_find_table_lock(net, AF_INET6, get.name); 1037 1037 if (!IS_ERR(t)) { 1038 - struct xt_table_info *private = xt_table_get_private_protected(t); 1038 + struct xt_table_info *private = t->private; 1039 1039 if (get.size == private->size) 1040 1040 ret = copy_entries_to_user(private->size, 1041 1041 t, uptr->entrytable); ··· 1189 1189 } 1190 1190 1191 1191 local_bh_disable(); 1192 - private = xt_table_get_private_protected(t); 1192 + private = t->private; 1193 1193 if (private->number != tmp.num_counters) { 1194 1194 ret = -EINVAL; 1195 1195 goto unlock_up_free; ··· 1552 1552 void __user *userptr) 1553 1553 { 1554 1554 struct xt_counters *counters; 1555 - const struct xt_table_info *private = xt_table_get_private_protected(table); 1555 + const struct xt_table_info *private = table->private; 1556 1556 void __user *pos; 1557 1557 unsigned int size; 1558 1558 int ret = 0; ··· 1598 1598 xt_compat_lock(AF_INET6); 1599 1599 t = xt_find_table_lock(net, AF_INET6, get.name); 1600 1600 if (!IS_ERR(t)) { 1601 - const struct xt_table_info *private = xt_table_get_private_protected(t); 1601 + const struct xt_table_info *private = t->private; 1602 1602 struct xt_table_info info; 1603 1603 ret = compat_table_info(private, &info); 1604 1604 if (!ret && get.size == info.size)
+9 -27
net/ipv6/route.c
··· 260 260 .confirm_neigh = ip6_confirm_neigh, 261 261 }; 262 262 263 - static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst) 264 - { 265 - unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 266 - 267 - return mtu ? : dst->dev->mtu; 268 - } 269 - 270 - static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, 271 - struct sk_buff *skb, u32 mtu, 272 - bool confirm_neigh) 273 - { 274 - } 275 - 276 - static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk, 277 - struct sk_buff *skb) 278 - { 279 - } 280 - 281 263 static struct dst_ops ip6_dst_blackhole_ops = { 282 - .family = AF_INET6, 283 - .destroy = ip6_dst_destroy, 284 - .check = ip6_dst_check, 285 - .mtu = ip6_blackhole_mtu, 286 - .default_advmss = ip6_default_advmss, 287 - .update_pmtu = ip6_rt_blackhole_update_pmtu, 288 - .redirect = ip6_rt_blackhole_redirect, 289 - .cow_metrics = dst_cow_metrics_generic, 290 - .neigh_lookup = ip6_dst_neigh_lookup, 264 + .family = AF_INET6, 265 + .default_advmss = ip6_default_advmss, 266 + .neigh_lookup = ip6_dst_neigh_lookup, 267 + .check = ip6_dst_check, 268 + .destroy = ip6_dst_destroy, 269 + .cow_metrics = dst_cow_metrics_generic, 270 + .update_pmtu = dst_blackhole_update_pmtu, 271 + .redirect = dst_blackhole_redirect, 272 + .mtu = dst_blackhole_mtu, 291 273 }; 292 274 293 275 static const u32 ip6_template_metrics[RTAX_MAX] = {
+5
net/ipv6/tcp_ipv6.c
··· 1175 1175 if (!ipv6_unicast_destination(skb)) 1176 1176 goto drop; 1177 1177 1178 + if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { 1179 + __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); 1180 + return 0; 1181 + } 1182 + 1178 1183 return tcp_conn_request(&tcp6_request_sock_ops, 1179 1184 &tcp_request_sock_ipv6_ops, sk, skb); 1180 1185
+3 -2
net/mac80211/aead_api.c
··· 23 23 struct aead_request *aead_req; 24 24 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); 25 25 u8 *__aad; 26 + int ret; 26 27 27 28 aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC); 28 29 if (!aead_req) ··· 41 40 aead_request_set_crypt(aead_req, sg, sg, data_len, b_0); 42 41 aead_request_set_ad(aead_req, sg[0].length); 43 42 44 - crypto_aead_encrypt(aead_req); 43 + ret = crypto_aead_encrypt(aead_req); 45 44 kfree_sensitive(aead_req); 46 45 47 - return 0; 46 + return ret; 48 47 } 49 48 50 49 int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
+3 -2
net/mac80211/aes_gmac.c
··· 22 22 struct aead_request *aead_req; 23 23 int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm); 24 24 const __le16 *fc; 25 + int ret; 25 26 26 27 if (data_len < GMAC_MIC_LEN) 27 28 return -EINVAL; ··· 60 59 aead_request_set_crypt(aead_req, sg, sg, 0, iv); 61 60 aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len); 62 61 63 - crypto_aead_encrypt(aead_req); 62 + ret = crypto_aead_encrypt(aead_req); 64 63 kfree_sensitive(aead_req); 65 64 66 - return 0; 65 + return ret; 67 66 } 68 67 69 68 struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
+2 -2
net/mac80211/cfg.c
··· 2950 2950 continue; 2951 2951 2952 2952 for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { 2953 - if (~sdata->rc_rateidx_mcs_mask[i][j]) { 2953 + if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) { 2954 2954 sdata->rc_has_mcs_mask[i] = true; 2955 2955 break; 2956 2956 } 2957 2957 } 2958 2958 2959 2959 for (j = 0; j < NL80211_VHT_NSS_MAX; j++) { 2960 - if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) { 2960 + if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) { 2961 2961 sdata->rc_has_vht_mcs_mask[i] = true; 2962 2962 break; 2963 2963 }
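The rate-mask fix above is an integer-promotion pitfall: for a u8 value m, ~m is computed as int, so it is non-zero even when every bit of m is set, and the old `if (~mask[i][j])` test always concluded that some rate was disabled. Comparing against 0xff (or 0xffff for the VHT mask) is what the patch does; the behaviour is easy to reproduce:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t m = 0xff;	/* "all rates enabled" mask byte */

	/* ~m is computed as int: 0xffffff00, which is truthy. */
	printf("~m           -> %s (0x%x)\n", ~m ? "true" : "false", (unsigned)~m);

	/* What the fix checks instead. */
	printf("m != 0xff    -> %s\n", m != 0xff ? "true" : "false");

	/* Casting the complement back to u8 would also behave. */
	printf("(uint8_t)~m  -> %s\n", (uint8_t)~m ? "true" : "false");
	return 0;
}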
+2
net/mac80211/ibss.c
··· 1874 1874 1875 1875 /* remove beacon */ 1876 1876 kfree(sdata->u.ibss.ie); 1877 + sdata->u.ibss.ie = NULL; 1878 + sdata->u.ibss.ie_len = 0; 1877 1879 1878 1880 /* on the next join, re-program HT parameters */ 1879 1881 memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
+12 -1
net/mac80211/main.c
··· 973 973 continue; 974 974 975 975 if (!dflt_chandef.chan) { 976 + /* 977 + * Assign the first enabled channel to dflt_chandef 978 + * from the list of channels 979 + */ 980 + for (i = 0; i < sband->n_channels; i++) 981 + if (!(sband->channels[i].flags & 982 + IEEE80211_CHAN_DISABLED)) 983 + break; 984 + /* if none found then use the first anyway */ 985 + if (i == sband->n_channels) 986 + i = 0; 976 987 cfg80211_chandef_create(&dflt_chandef, 977 - &sband->channels[0], 988 + &sband->channels[i], 978 989 NL80211_CHAN_NO_HT); 979 990 /* init channel we're on */ 980 991 if (!local->use_chanctx && !local->_oper_chandef.chan) {
+1 -1
net/mac80211/mlme.c
··· 5071 5071 he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, 5072 5072 ies->data, ies->len); 5073 5073 if (he_oper_ie && 5074 - he_oper_ie[1] == ieee80211_he_oper_size(&he_oper_ie[3])) 5074 + he_oper_ie[1] >= ieee80211_he_oper_size(&he_oper_ie[3])) 5075 5075 he_oper = (void *)(he_oper_ie + 3); 5076 5076 else 5077 5077 he_oper = NULL;
-2
net/mac80211/rc80211_minstrel_ht.c
··· 805 805 static u16 806 806 minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur) 807 807 { 808 - struct minstrel_mcs_group_data *mg; 809 808 u8 type = MINSTREL_SAMPLE_TYPE_INC; 810 809 int i, index = 0; 811 810 u8 group; ··· 812 813 group = mi->sample[type].sample_group; 813 814 for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) { 814 815 group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups); 815 - mg = &mi->groups[group]; 816 816 817 817 index = minstrel_ht_group_min_rate_offset(mi, group, 818 818 fast_rate_dur);
+1 -1
net/mac80211/util.c
··· 968 968 break; 969 969 case WLAN_EID_EXT_HE_OPERATION: 970 970 if (len >= sizeof(*elems->he_operation) && 971 - len == ieee80211_he_oper_size(data) - 1) { 971 + len >= ieee80211_he_oper_size(data) - 1) { 972 972 if (crc) 973 973 *crc = crc32_be(*crc, (void *)elem, 974 974 elem->datalen + 2);
+14 -10
net/mptcp/options.c
··· 567 567 } 568 568 569 569 static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id, 570 - struct in_addr *addr) 570 + struct in_addr *addr, u16 port) 571 571 { 572 572 u8 hmac[SHA256_DIGEST_SIZE]; 573 573 u8 msg[7]; 574 574 575 575 msg[0] = addr_id; 576 576 memcpy(&msg[1], &addr->s_addr, 4); 577 - msg[5] = 0; 578 - msg[6] = 0; 577 + msg[5] = port >> 8; 578 + msg[6] = port & 0xFF; 579 579 580 580 mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac); 581 581 ··· 584 584 585 585 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 586 586 static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id, 587 - struct in6_addr *addr) 587 + struct in6_addr *addr, u16 port) 588 588 { 589 589 u8 hmac[SHA256_DIGEST_SIZE]; 590 590 u8 msg[19]; 591 591 592 592 msg[0] = addr_id; 593 593 memcpy(&msg[1], &addr->s6_addr, 16); 594 - msg[17] = 0; 595 - msg[18] = 0; 594 + msg[17] = port >> 8; 595 + msg[18] = port & 0xFF; 596 596 597 597 mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac); 598 598 ··· 646 646 opts->ahmac = add_addr_generate_hmac(msk->local_key, 647 647 msk->remote_key, 648 648 opts->addr_id, 649 - &opts->addr); 649 + &opts->addr, 650 + opts->port); 650 651 } 651 652 } 652 653 #if IS_ENABLED(CONFIG_MPTCP_IPV6) ··· 658 657 opts->ahmac = add_addr6_generate_hmac(msk->local_key, 659 658 msk->remote_key, 660 659 opts->addr_id, 661 - &opts->addr6); 660 + &opts->addr6, 661 + opts->port); 662 662 } 663 663 } 664 664 #endif ··· 964 962 if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) 965 963 hmac = add_addr_generate_hmac(msk->remote_key, 966 964 msk->local_key, 967 - mp_opt->addr_id, &mp_opt->addr); 965 + mp_opt->addr_id, &mp_opt->addr, 966 + mp_opt->port); 968 967 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 969 968 else 970 969 hmac = add_addr6_generate_hmac(msk->remote_key, 971 970 msk->local_key, 972 - mp_opt->addr_id, &mp_opt->addr6); 971 + mp_opt->addr_id, &mp_opt->addr6, 972 + mp_opt->port); 973 973 #endif 974 974 975 975 pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
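The ADD_ADDR change feeds the advertised port into the HMAC instead of two hard-coded zero bytes, placing it big-endian right after the address. The sketch below shows only the message layout for the IPv4 case; the HMAC computation itself is left out and the address and port values are examples.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t  msg[7];
	uint8_t  addr_id = 1;
	uint8_t  addr[4] = { 192, 0, 2, 1 };	/* 192.0.2.1, already big-endian */
	uint16_t port = 8080;			/* 0x1f90 */

	msg[0] = addr_id;
	memcpy(&msg[1], addr, sizeof(addr));
	msg[5] = port >> 8;			/* high byte first: big-endian */
	msg[6] = port & 0xff;

	for (size_t i = 0; i < sizeof(msg); i++)
		printf("%02x ", msg[i]);
	printf("\n");				/* 01 c0 00 02 01 1f 90 */
	return 0;
}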
+2 -2
net/mptcp/protocol.c
··· 2968 2968 for (;;) { 2969 2969 flags = 0; 2970 2970 if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) 2971 - flags |= MPTCP_PUSH_PENDING; 2971 + flags |= BIT(MPTCP_PUSH_PENDING); 2972 2972 if (!flags) 2973 2973 break; 2974 2974 ··· 2981 2981 */ 2982 2982 2983 2983 spin_unlock_bh(&sk->sk_lock.slock); 2984 - if (flags & MPTCP_PUSH_PENDING) 2984 + if (flags & BIT(MPTCP_PUSH_PENDING)) 2985 2985 __mptcp_push_pending(sk, 0); 2986 2986 2987 2987 cond_resched();
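The mptcp/protocol.c fix is a flag-versus-bit-number bug: MPTCP_PUSH_PENDING is a bit index, so it must go through BIT() before being OR-ed into, or tested against, the local flags word. A standalone demonstration (BIT() defined locally as in the kernel's bits.h; the bit number used here is illustrative, not necessarily the kernel's value):

#include <stdio.h>

#define BIT(nr)		(1UL << (nr))
#define PUSH_PENDING	3	/* a bit number; the value is illustrative */

int main(void)
{
	unsigned long flags = 0;

	/* Buggy: ORs in the number 3, i.e. bits 0 and 1, not bit 3. */
	unsigned long buggy = flags | PUSH_PENDING;

	/* Fixed: ORs in bit 3. */
	unsigned long fixed = flags | BIT(PUSH_PENDING);

	printf("buggy=0x%lx fixed=0x%lx\n", buggy, fixed);
	printf("buggy & BIT(PUSH_PENDING) -> %lu\n", buggy & BIT(PUSH_PENDING));	/* 0 */
	printf("fixed & BIT(PUSH_PENDING) -> %lu\n", fixed & BIT(PUSH_PENDING));	/* 8 */
	return 0;
}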
+5
net/mptcp/subflow.c
··· 477 477 if (!ipv6_unicast_destination(skb)) 478 478 goto drop; 479 479 480 + if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { 481 + __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); 482 + return 0; 483 + } 484 + 480 485 return tcp_conn_request(&mptcp_subflow_request_sock_ops, 481 486 &subflow_request_sock_ipv6_ops, sk, skb); 482 487
+1
net/netfilter/nf_conntrack_netlink.c
··· 2962 2962 memset(&m, 0xFF, sizeof(m)); 2963 2963 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3)); 2964 2964 m.src.u.all = mask->src.u.all; 2965 + m.src.l3num = tuple->src.l3num; 2965 2966 m.dst.protonum = tuple->dst.protonum; 2966 2967 2967 2968 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
-3
net/netfilter/nf_conntrack_proto_gre.c
··· 218 218 enum ip_conntrack_info ctinfo, 219 219 const struct nf_hook_state *state) 220 220 { 221 - if (state->pf != NFPROTO_IPV4) 222 - return -NF_ACCEPT; 223 - 224 221 if (!nf_ct_is_confirmed(ct)) { 225 222 unsigned int *timeouts = nf_ct_timeout_lookup(ct); 226 223
+1 -1
net/netfilter/nf_flow_table_core.c
··· 506 506 { 507 507 int err; 508 508 509 - INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc); 509 + INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc); 510 510 flow_block_init(&flowtable->flow_block); 511 511 init_rwsem(&flowtable->flow_block_lock); 512 512
+21 -1
net/netfilter/nf_tables_api.c
··· 6783 6783 6784 6784 list_for_each_entry(hook, hook_list, list) { 6785 6785 list_for_each_entry(ft, &table->flowtables, list) { 6786 + if (!nft_is_active_next(net, ft)) 6787 + continue; 6788 + 6786 6789 list_for_each_entry(hook2, &ft->hook_list, list) { 6787 6790 if (hook->ops.dev == hook2->ops.dev && 6788 6791 hook->ops.pf == hook2->ops.pf) { ··· 6845 6842 struct nft_hook *hook, *next; 6846 6843 struct nft_trans *trans; 6847 6844 bool unregister = false; 6845 + u32 flags; 6848 6846 int err; 6849 6847 6850 6848 err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK], ··· 6858 6854 list_del(&hook->list); 6859 6855 kfree(hook); 6860 6856 } 6857 + } 6858 + 6859 + if (nla[NFTA_FLOWTABLE_FLAGS]) { 6860 + flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS])); 6861 + if (flags & ~NFT_FLOWTABLE_MASK) 6862 + return -EOPNOTSUPP; 6863 + if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^ 6864 + (flags & NFT_FLOWTABLE_HW_OFFLOAD)) 6865 + return -EOPNOTSUPP; 6866 + } else { 6867 + flags = flowtable->data.flags; 6861 6868 } 6862 6869 6863 6870 err = nft_register_flowtable_net_hooks(ctx->net, ctx->table, ··· 6884 6869 goto err_flowtable_update_hook; 6885 6870 } 6886 6871 6872 + nft_trans_flowtable_flags(trans) = flags; 6887 6873 nft_trans_flowtable(trans) = flowtable; 6888 6874 nft_trans_flowtable_update(trans) = true; 6889 6875 INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans)); ··· 6979 6963 if (nla[NFTA_FLOWTABLE_FLAGS]) { 6980 6964 flowtable->data.flags = 6981 6965 ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS])); 6982 - if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK) 6966 + if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK) { 6967 + err = -EOPNOTSUPP; 6983 6968 goto err3; 6969 + } 6984 6970 } 6985 6971 6986 6972 write_pnet(&flowtable->data.net, net); ··· 8194 8176 break; 8195 8177 case NFT_MSG_NEWFLOWTABLE: 8196 8178 if (nft_trans_flowtable_update(trans)) { 8179 + nft_trans_flowtable(trans)->data.flags = 8180 + nft_trans_flowtable_flags(trans); 8197 8181 nf_tables_flowtable_notify(&trans->ctx, 8198 8182 nft_trans_flowtable(trans), 8199 8183 &nft_trans_flowtable_hooks(trans),
+34 -15
net/netfilter/x_tables.c
··· 1351 1351 } 1352 1352 EXPORT_SYMBOL(xt_counters_alloc); 1353 1353 1354 - struct xt_table_info 1355 - *xt_table_get_private_protected(const struct xt_table *table) 1356 - { 1357 - return rcu_dereference_protected(table->private, 1358 - mutex_is_locked(&xt[table->af].mutex)); 1359 - } 1360 - EXPORT_SYMBOL(xt_table_get_private_protected); 1361 - 1362 1354 struct xt_table_info * 1363 1355 xt_replace_table(struct xt_table *table, 1364 1356 unsigned int num_counters, ··· 1358 1366 int *error) 1359 1367 { 1360 1368 struct xt_table_info *private; 1369 + unsigned int cpu; 1361 1370 int ret; 1362 1371 1363 1372 ret = xt_jumpstack_alloc(newinfo); ··· 1368 1375 } 1369 1376 1370 1377 /* Do the substitution. */ 1371 - private = xt_table_get_private_protected(table); 1378 + local_bh_disable(); 1379 + private = table->private; 1372 1380 1373 1381 /* Check inside lock: is the old number correct? */ 1374 1382 if (num_counters != private->number) { 1375 1383 pr_debug("num_counters != table->private->number (%u/%u)\n", 1376 1384 num_counters, private->number); 1385 + local_bh_enable(); 1377 1386 *error = -EAGAIN; 1378 1387 return NULL; 1379 1388 } 1380 1389 1381 1390 newinfo->initial_entries = private->initial_entries; 1391 + /* 1392 + * Ensure contents of newinfo are visible before assigning to 1393 + * private. 1394 + */ 1395 + smp_wmb(); 1396 + table->private = newinfo; 1382 1397 1383 - rcu_assign_pointer(table->private, newinfo); 1384 - synchronize_rcu(); 1398 + /* make sure all cpus see new ->private value */ 1399 + smp_mb(); 1400 + 1401 + /* 1402 + * Even though table entries have now been swapped, other CPU's 1403 + * may still be using the old entries... 1404 + */ 1405 + local_bh_enable(); 1406 + 1407 + /* ... so wait for even xt_recseq on all cpus */ 1408 + for_each_possible_cpu(cpu) { 1409 + seqcount_t *s = &per_cpu(xt_recseq, cpu); 1410 + u32 seq = raw_read_seqcount(s); 1411 + 1412 + if (seq & 1) { 1413 + do { 1414 + cond_resched(); 1415 + cpu_relax(); 1416 + } while (seq == raw_read_seqcount(s)); 1417 + } 1418 + } 1385 1419 1386 1420 audit_log_nfcfg(table->name, table->af, private->number, 1387 1421 !private->number ? AUDIT_XT_OP_REGISTER : ··· 1444 1424 } 1445 1425 1446 1426 /* Simplifies replace_table code. */ 1447 - rcu_assign_pointer(table->private, bootstrap); 1427 + table->private = bootstrap; 1448 1428 1449 1429 if (!xt_replace_table(table, 0, newinfo, &ret)) 1450 1430 goto unlock; 1451 1431 1452 - private = xt_table_get_private_protected(table); 1432 + private = table->private; 1453 1433 pr_debug("table->private->number = %u\n", private->number); 1454 1434 1455 1435 /* save number of initial entries */ ··· 1472 1452 struct xt_table_info *private; 1473 1453 1474 1454 mutex_lock(&xt[table->af].mutex); 1475 - private = xt_table_get_private_protected(table); 1476 - RCU_INIT_POINTER(table->private, NULL); 1455 + private = table->private; 1477 1456 list_del(&table->list); 1478 1457 mutex_unlock(&xt[table->af].mutex); 1479 1458 audit_log_nfcfg(table->name, table->af, private->number,
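xt_replace_table() now publishes the new table pointer and then waits until every CPU's xt_recseq is observed even, or has changed, before treating the old table as unused. The sketch below only demonstrates that wait predicate on sample counter values; the real code runs per CPU and relies on the barriers shown in the hunk, which are omitted here.

#include <stdbool.h>
#include <stdio.h>

/*
 * True if a reader sampled at seq_then may still be using the old table
 * now that its counter reads seq_now: odd means "inside a packet-
 * processing section", and an unchanged odd value means it has not left
 * that section since the swap.
 */
static bool reader_may_hold_old(unsigned int seq_then, unsigned int seq_now)
{
	return (seq_then & 1) && seq_then == seq_now;
}

int main(void)
{
	printf("%d\n", reader_may_hold_old(4, 4));	/* even: idle, safe      -> 0 */
	printf("%d\n", reader_may_hold_old(5, 5));	/* odd, unchanged: wait  -> 1 */
	printf("%d\n", reader_may_hold_old(5, 6));	/* odd, but moved on     -> 0 */
	return 0;
}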
+5 -3
net/openvswitch/conntrack.c
··· 271 271 /* This is called to initialize CT key fields possibly coming in from the local 272 272 * stack. 273 273 */ 274 - void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) 274 + void ovs_ct_fill_key(const struct sk_buff *skb, 275 + struct sw_flow_key *key, 276 + bool post_ct) 275 277 { 276 - ovs_ct_update_key(skb, NULL, key, false, false); 278 + ovs_ct_update_key(skb, NULL, key, post_ct, false); 277 279 } 278 280 279 281 int ovs_ct_put_key(const struct sw_flow_key *swkey, ··· 1334 1332 if (skb_nfct(skb)) { 1335 1333 nf_conntrack_put(skb_nfct(skb)); 1336 1334 nf_ct_set(skb, NULL, IP_CT_UNTRACKED); 1337 - ovs_ct_fill_key(skb, key); 1335 + ovs_ct_fill_key(skb, key, false); 1338 1336 } 1339 1337 1340 1338 return 0;
+4 -2
net/openvswitch/conntrack.h
··· 25 25 const struct ovs_conntrack_info *); 26 26 int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key); 27 27 28 - void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key); 28 + void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key, 29 + bool post_ct); 29 30 int ovs_ct_put_key(const struct sw_flow_key *swkey, 30 31 const struct sw_flow_key *output, struct sk_buff *skb); 31 32 void ovs_ct_free_action(const struct nlattr *a); ··· 75 74 } 76 75 77 76 static inline void ovs_ct_fill_key(const struct sk_buff *skb, 78 - struct sw_flow_key *key) 77 + struct sw_flow_key *key, 78 + bool post_ct) 79 79 { 80 80 key->ct_state = 0; 81 81 key->ct_zone = 0;
+3 -1
net/openvswitch/flow.c
··· 857 857 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 858 858 struct tc_skb_ext *tc_ext; 859 859 #endif 860 + bool post_ct = false; 860 861 int res, err; 861 862 862 863 /* Extract metadata from packet. */ ··· 896 895 tc_ext = skb_ext_find(skb, TC_SKB_EXT); 897 896 key->recirc_id = tc_ext ? tc_ext->chain : 0; 898 897 OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0; 898 + post_ct = tc_ext ? tc_ext->post_ct : false; 899 899 } else { 900 900 key->recirc_id = 0; 901 901 } ··· 906 904 907 905 err = key_extract(skb, key); 908 906 if (!err) 909 - ovs_ct_fill_key(skb, key); /* Must be after key_extract(). */ 907 + ovs_ct_fill_key(skb, key, post_ct); /* Must be after key_extract(). */ 910 908 return err; 911 909 } 912 910
+5
net/qrtr/qrtr.c
··· 1058 1058 rc = copied; 1059 1059 1060 1060 if (addr) { 1061 + /* There is an anonymous 2-byte hole after sq_family, 1062 + * make sure to clear it. 1063 + */ 1064 + memset(addr, 0, sizeof(*addr)); 1065 + 1061 1066 addr->sq_family = AF_QIPCRTR; 1062 1067 addr->sq_node = cb->src_node; 1063 1068 addr->sq_port = cb->src_port;
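The qrtr fix clears the whole sockaddr before filling it because sockaddr_qrtr has a two-byte padding hole after sq_family, and copying the struct out to userspace without zeroing it would leak stale kernel bytes. A lookalike struct shows the hole on a typical LP64 ABI (field types below are assumed for the example, not taken from the UAPI header):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Shaped like sockaddr_qrtr: a 16-bit family, then two 32-bit fields. */
struct sockaddr_like {
	unsigned short sq_family;
	unsigned int   sq_node;
	unsigned int   sq_port;
};

int main(void)
{
	struct sockaddr_like a;
	size_t members = sizeof(a.sq_family) + sizeof(a.sq_node) + sizeof(a.sq_port);

	printf("sizeof=%zu, members=%zu -> %zu padding byte(s)\n",
	       sizeof(a), members, sizeof(a) - members);

	/* The fix: clear everything (padding included) before assigning. */
	memset(&a, 0, sizeof(a));
	a.sq_family = 42;
	a.sq_node = 1;
	a.sq_port = 2;
	return 0;
}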
+4 -2
net/sched/act_ct.c
··· 945 945 tcf_lastuse_update(&c->tcf_tm); 946 946 947 947 if (clear) { 948 + qdisc_skb_cb(skb)->post_ct = false; 948 949 ct = nf_ct_get(skb, &ctinfo); 949 950 if (ct) { 950 951 nf_conntrack_put(&ct->ct_general); 951 952 nf_ct_set(skb, NULL, IP_CT_UNTRACKED); 952 953 } 953 954 954 - goto out; 955 + goto out_clear; 955 956 } 956 957 957 958 family = tcf_ct_skb_nf_family(skb); ··· 1031 1030 skb_push_rcsum(skb, nh_ofs); 1032 1031 1033 1032 out: 1034 - tcf_action_update_bstats(&c->common, skb); 1035 1033 qdisc_skb_cb(skb)->post_ct = true; 1034 + out_clear: 1035 + tcf_action_update_bstats(&c->common, skb); 1036 1036 if (defrag) 1037 1037 qdisc_skb_cb(skb)->pkt_len = skb->len; 1038 1038 return retval;
+1
net/sched/cls_api.c
··· 1629 1629 return TC_ACT_SHOT; 1630 1630 ext->chain = last_executed_chain; 1631 1631 ext->mru = qdisc_skb_cb(skb)->mru; 1632 + ext->post_ct = qdisc_skb_cb(skb)->post_ct; 1632 1633 } 1633 1634 1634 1635 return ret;
+1 -1
net/sched/cls_flower.c
··· 1451 1451 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 1452 1452 sizeof(key->ct_state)); 1453 1453 1454 - err = fl_validate_ct_state(mask->ct_state, 1454 + err = fl_validate_ct_state(key->ct_state & mask->ct_state, 1455 1455 tb[TCA_FLOWER_KEY_CT_STATE_MASK], 1456 1456 extack); 1457 1457 if (err)
+4 -3
net/sched/sch_choke.c
··· 345 345 struct sk_buff **old = NULL; 346 346 unsigned int mask; 347 347 u32 max_P; 348 + u8 *stab; 348 349 349 350 if (opt == NULL) 350 351 return -EINVAL; ··· 362 361 max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0; 363 362 364 363 ctl = nla_data(tb[TCA_CHOKE_PARMS]); 365 - 366 - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) 364 + stab = nla_data(tb[TCA_CHOKE_STAB]); 365 + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) 367 366 return -EINVAL; 368 367 369 368 if (ctl->limit > CHOKE_MAX_QUEUE) ··· 413 412 414 413 red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, 415 414 ctl->Plog, ctl->Scell_log, 416 - nla_data(tb[TCA_CHOKE_STAB]), 415 + stab, 417 416 max_P); 418 417 red_set_vars(&q->vars); 419 418
+1 -1
net/sched/sch_gred.c
··· 480 480 struct gred_sched *table = qdisc_priv(sch); 481 481 struct gred_sched_data *q = table->tab[dp]; 482 482 483 - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) { 483 + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) { 484 484 NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters"); 485 485 return -EINVAL; 486 486 }
+13 -6
net/sched/sch_htb.c
··· 1020 1020 struct nlattr *tb[TCA_HTB_MAX + 1]; 1021 1021 struct tc_htb_glob *gopt; 1022 1022 unsigned int ntx; 1023 + bool offload; 1023 1024 int err; 1024 1025 1025 1026 qdisc_watchdog_init(&q->watchdog, sch); ··· 1045 1044 if (gopt->version != HTB_VER >> 16) 1046 1045 return -EINVAL; 1047 1046 1048 - q->offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]); 1047 + offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]); 1049 1048 1050 - if (q->offload) { 1049 + if (offload) { 1051 1050 if (sch->parent != TC_H_ROOT) 1052 1051 return -EOPNOTSUPP; 1053 1052 ··· 1077 1076 q->rate2quantum = 1; 1078 1077 q->defcls = gopt->defcls; 1079 1078 1080 - if (!q->offload) 1079 + if (!offload) 1081 1080 return 0; 1082 1081 1083 1082 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) { ··· 1108 1107 if (err) 1109 1108 goto err_free_qdiscs; 1110 1109 1110 + /* Defer this assignment, so that htb_destroy skips offload-related 1111 + * parts (especially calling ndo_setup_tc) on errors. 1112 + */ 1113 + q->offload = true; 1114 + 1111 1115 return 0; 1112 1116 1113 1117 err_free_qdiscs: 1114 - /* TC_HTB_CREATE call failed, avoid any further calls to the driver. */ 1115 - q->offload = false; 1116 - 1117 1118 for (ntx = 0; ntx < q->num_direct_qdiscs && q->direct_qdiscs[ntx]; 1118 1119 ntx++) 1119 1120 qdisc_put(q->direct_qdiscs[ntx]); ··· 1343 1340 { 1344 1341 struct net_device *dev = qdisc_dev(sch); 1345 1342 struct tc_htb_qopt_offload offload_opt; 1343 + struct htb_sched *q = qdisc_priv(sch); 1346 1344 int err; 1345 + 1346 + if (!q->offload) 1347 + return sch->dev_queue; 1347 1348 1348 1349 offload_opt = (struct tc_htb_qopt_offload) { 1349 1350 .command = TC_HTB_LEAF_QUERY_QUEUE,
+5 -2
net/sched/sch_red.c
··· 242 242 unsigned char flags; 243 243 int err; 244 244 u32 max_P; 245 + u8 *stab; 245 246 246 247 if (tb[TCA_RED_PARMS] == NULL || 247 248 tb[TCA_RED_STAB] == NULL) ··· 251 250 max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0; 252 251 253 252 ctl = nla_data(tb[TCA_RED_PARMS]); 254 - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) 253 + stab = nla_data(tb[TCA_RED_STAB]); 254 + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, 255 + ctl->Scell_log, stab)) 255 256 return -EINVAL; 256 257 257 258 err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS, ··· 291 288 red_set_parms(&q->parms, 292 289 ctl->qth_min, ctl->qth_max, ctl->Wlog, 293 290 ctl->Plog, ctl->Scell_log, 294 - nla_data(tb[TCA_RED_STAB]), 291 + stab, 295 292 max_P); 296 293 red_set_vars(&q->vars); 297 294
+1 -1
net/sched/sch_sfq.c
··· 647 647 } 648 648 649 649 if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, 650 - ctl_v1->Wlog, ctl_v1->Scell_log)) 650 + ctl_v1->Wlog, ctl_v1->Scell_log, NULL)) 651 651 return -EINVAL; 652 652 if (ctl_v1 && ctl_v1->qth_min) { 653 653 p = kmalloc(sizeof(*p), GFP_KERNEL);
-7
net/sctp/output.c
··· 584 584 goto out; 585 585 } 586 586 587 - rcu_read_lock(); 588 - if (__sk_dst_get(sk) != tp->dst) { 589 - dst_hold(tp->dst); 590 - sk_setup_caps(sk, tp->dst); 591 - } 592 - rcu_read_unlock(); 593 - 594 587 /* pack up chunks */ 595 588 pkt_count = sctp_packet_pack(packet, head, gso, gfp); 596 589 if (!pkt_count) {
+7
net/sctp/outqueue.c
··· 1135 1135 1136 1136 static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx) 1137 1137 { 1138 + struct sock *sk = ctx->asoc->base.sk; 1138 1139 struct list_head *ltransport; 1139 1140 struct sctp_packet *packet; 1140 1141 struct sctp_transport *t; ··· 1145 1144 t = list_entry(ltransport, struct sctp_transport, send_ready); 1146 1145 packet = &t->packet; 1147 1146 if (!sctp_packet_empty(packet)) { 1147 + rcu_read_lock(); 1148 + if (t->dst && __sk_dst_get(sk) != t->dst) { 1149 + dst_hold(t->dst); 1150 + sk_setup_caps(sk, t->dst); 1151 + } 1152 + rcu_read_unlock(); 1148 1153 error = sctp_packet_transmit(packet, ctx->gfp); 1149 1154 if (error < 0) 1150 1155 ctx->q->asoc->base.sk->sk_err = -error;
+8 -3
net/tipc/node.c
··· 2895 2895 2896 2896 #ifdef CONFIG_TIPC_CRYPTO 2897 2897 static int tipc_nl_retrieve_key(struct nlattr **attrs, 2898 - struct tipc_aead_key **key) 2898 + struct tipc_aead_key **pkey) 2899 2899 { 2900 2900 struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY]; 2901 + struct tipc_aead_key *key; 2901 2902 2902 2903 if (!attr) 2903 2904 return -ENODATA; 2904 2905 2905 - *key = (struct tipc_aead_key *)nla_data(attr); 2906 - if (nla_len(attr) < tipc_aead_key_size(*key)) 2906 + if (nla_len(attr) < sizeof(*key)) 2907 + return -EINVAL; 2908 + key = (struct tipc_aead_key *)nla_data(attr); 2909 + if (key->keylen > TIPC_AEAD_KEYLEN_MAX || 2910 + nla_len(attr) < tipc_aead_key_size(key)) 2907 2911 return -EINVAL; 2908 2912 2913 + *pkey = key; 2909 2914 return 0; 2910 2915 } 2911 2916
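The TIPC key-retrieval fix validates the attribute in two stages: first make sure the fixed struct tipc_aead_key header fits before reading keylen from it, then bound keylen and check that the full key material is actually present. A generic sketch of that header-then-payload length validation for a variable-length record (the struct, limit, and names are invented for the example and are not the TIPC code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEYLEN_MAX 64

struct key_hdr {
	char     alg_name[16];
	uint32_t keylen;
	/* keylen bytes of key material follow */
};

/* Return 0 if buf of len bytes holds a well-formed record, -1 otherwise. */
static int validate_key(const void *buf, size_t len)
{
	struct key_hdr hdr;

	if (len < sizeof(hdr))			/* 1: header must fit before use */
		return -1;
	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.keylen > KEYLEN_MAX)		/* 2: declared length is sane */
		return -1;
	if (len < sizeof(hdr) + hdr.keylen)	/* 3: payload really present */
		return -1;
	return 0;
}

int main(void)
{
	unsigned char rec[sizeof(struct key_hdr) + 16] = { 0 };
	struct key_hdr hdr = { .alg_name = "gcm(aes)", .keylen = 16 };

	memcpy(rec, &hdr, sizeof(hdr));
	printf("full record : %d\n", validate_key(rec, sizeof(rec)));		/*  0 */
	printf("truncated   : %d\n", validate_key(rec, sizeof(hdr) - 1));	/* -1 */

	hdr.keylen = 1u << 20;
	memcpy(rec, &hdr, sizeof(hdr));
	printf("huge keylen : %d\n", validate_key(rec, sizeof(rec)));		/* -1 */
	return 0;
}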
+1
net/vmw_vsock/af_vsock.c
··· 755 755 vsk->buffer_size = psk->buffer_size; 756 756 vsk->buffer_min_size = psk->buffer_min_size; 757 757 vsk->buffer_max_size = psk->buffer_max_size; 758 + security_sk_clone(parent, sk); 758 759 } else { 759 760 vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN); 760 761 vsk->owner = get_current_cred();
+8 -4
net/wireless/nl80211.c
··· 70 70 struct wireless_dev *result = NULL; 71 71 bool have_ifidx = attrs[NL80211_ATTR_IFINDEX]; 72 72 bool have_wdev_id = attrs[NL80211_ATTR_WDEV]; 73 - u64 wdev_id; 73 + u64 wdev_id = 0; 74 74 int wiphy_idx = -1; 75 75 int ifidx = -1; 76 76 ··· 14789 14789 #define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\ 14790 14790 NL80211_FLAG_CHECK_NETDEV_UP) 14791 14791 #define NL80211_FLAG_CLEAR_SKB 0x20 14792 + #define NL80211_FLAG_NO_WIPHY_MTX 0x40 14792 14793 14793 14794 static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, 14794 14795 struct genl_info *info) ··· 14841 14840 info->user_ptr[0] = rdev; 14842 14841 } 14843 14842 14844 - if (rdev) { 14843 + if (rdev && !(ops->internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) { 14845 14844 wiphy_lock(&rdev->wiphy); 14846 14845 /* we keep the mutex locked until post_doit */ 14847 14846 __release(&rdev->wiphy.mtx); ··· 14866 14865 } 14867 14866 } 14868 14867 14869 - if (info->user_ptr[0]) { 14868 + if (info->user_ptr[0] && 14869 + !(ops->internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) { 14870 14870 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 14871 14871 14872 14872 /* we kept the mutex locked since pre_doit */ ··· 15331 15329 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 15332 15330 .doit = nl80211_wiphy_netns, 15333 15331 .flags = GENL_UNS_ADMIN_PERM, 15334 - .internal_flags = NL80211_FLAG_NEED_WIPHY, 15332 + .internal_flags = NL80211_FLAG_NEED_WIPHY | 15333 + NL80211_FLAG_NEED_RTNL | 15334 + NL80211_FLAG_NO_WIPHY_MTX, 15335 15335 }, 15336 15336 { 15337 15337 .cmd = NL80211_CMD_GET_SURVEY,
+2
scripts/module.lds.S
··· 20 20 21 21 __patchable_function_entries : { *(__patchable_function_entries) } 22 22 23 + #ifdef CONFIG_LTO_CLANG 23 24 /* 24 25 * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and 25 26 * -ffunction-sections, which increases the size of the final module. ··· 42 41 } 43 42 44 43 .text : { *(.text .text.[0-9a-zA-Z_]*) } 44 + #endif 45 45 } 46 46 47 47 /* bring in arch-specific sections */
+8
security/integrity/iint.c
··· 98 98 struct rb_node *node, *parent = NULL; 99 99 struct integrity_iint_cache *iint, *test_iint; 100 100 101 + /* 102 + * The integrity's "iint_cache" is initialized at security_init(), 103 + * unless it is not included in the ordered list of LSMs enabled 104 + * on the boot command line. 105 + */ 106 + if (!iint_cache) 107 + panic("%s: lsm=integrity required.\n", __func__); 108 + 101 109 iint = integrity_iint_find(inode); 102 110 if (iint) 103 111 return iint;
+11 -4
security/selinux/include/security.h
··· 219 219 return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]); 220 220 } 221 221 222 + struct selinux_policy_convert_data; 223 + 224 + struct selinux_load_state { 225 + struct selinux_policy *policy; 226 + struct selinux_policy_convert_data *convert_data; 227 + }; 228 + 222 229 int security_mls_enabled(struct selinux_state *state); 223 230 int security_load_policy(struct selinux_state *state, 224 - void *data, size_t len, 225 - struct selinux_policy **newpolicyp); 231 + void *data, size_t len, 232 + struct selinux_load_state *load_state); 226 233 void selinux_policy_commit(struct selinux_state *state, 227 - struct selinux_policy *newpolicy); 234 + struct selinux_load_state *load_state); 228 235 void selinux_policy_cancel(struct selinux_state *state, 229 - struct selinux_policy *policy); 236 + struct selinux_load_state *load_state); 230 237 int security_read_policy(struct selinux_state *state, 231 238 void **data, size_t *len); 232 239 int security_read_state_kernel(struct selinux_state *state,
+9 -13
security/selinux/selinuxfs.c
··· 563 563 564 564 ret = sel_make_bools(newpolicy, tmp_bool_dir, &tmp_bool_num, 565 565 &tmp_bool_names, &tmp_bool_values); 566 - if (ret) { 567 - pr_err("SELinux: failed to load policy booleans\n"); 566 + if (ret) 568 567 goto out; 569 - } 570 568 571 569 ret = sel_make_classes(newpolicy, tmp_class_dir, 572 570 &fsi->last_class_ino); 573 - if (ret) { 574 - pr_err("SELinux: failed to load policy classes\n"); 571 + if (ret) 575 572 goto out; 576 - } 577 573 578 574 /* booleans */ 579 575 old_dentry = fsi->bool_dir; ··· 612 616 613 617 { 614 618 struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info; 615 - struct selinux_policy *newpolicy; 619 + struct selinux_load_state load_state; 616 620 ssize_t length; 617 621 void *data = NULL; 618 622 ··· 638 642 if (copy_from_user(data, buf, count) != 0) 639 643 goto out; 640 644 641 - length = security_load_policy(fsi->state, data, count, &newpolicy); 645 + length = security_load_policy(fsi->state, data, count, &load_state); 642 646 if (length) { 643 647 pr_warn_ratelimited("SELinux: failed to load policy\n"); 644 648 goto out; 645 649 } 646 650 647 - length = sel_make_policy_nodes(fsi, newpolicy); 651 + length = sel_make_policy_nodes(fsi, load_state.policy); 648 652 if (length) { 649 - selinux_policy_cancel(fsi->state, newpolicy); 650 - goto out1; 653 + pr_warn_ratelimited("SELinux: failed to initialize selinuxfs\n"); 654 + selinux_policy_cancel(fsi->state, &load_state); 655 + goto out; 651 656 } 652 657 653 - selinux_policy_commit(fsi->state, newpolicy); 658 + selinux_policy_commit(fsi->state, &load_state); 654 659 655 660 length = count; 656 661 657 - out1: 658 662 audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_POLICY_LOAD, 659 663 "auid=%u ses=%u lsm=selinux res=1", 660 664 from_kuid(&init_user_ns, audit_get_loginuid(current)),
+39 -24
security/selinux/ss/services.c
··· 67 67 #include "policycap_names.h" 68 68 #include "ima.h" 69 69 70 + struct convert_context_args { 71 + struct selinux_state *state; 72 + struct policydb *oldp; 73 + struct policydb *newp; 74 + }; 75 + 76 + struct selinux_policy_convert_data { 77 + struct convert_context_args args; 78 + struct sidtab_convert_params sidtab_params; 79 + }; 80 + 70 81 /* Forward declaration. */ 71 82 static int context_struct_to_string(struct policydb *policydb, 72 83 struct context *context, ··· 1985 1974 return 0; 1986 1975 } 1987 1976 1988 - struct convert_context_args { 1989 - struct selinux_state *state; 1990 - struct policydb *oldp; 1991 - struct policydb *newp; 1992 - }; 1993 - 1994 1977 /* 1995 1978 * Convert the values in the security context 1996 1979 * structure `oldc' from the values specified ··· 2164 2159 } 2165 2160 2166 2161 void selinux_policy_cancel(struct selinux_state *state, 2167 - struct selinux_policy *policy) 2162 + struct selinux_load_state *load_state) 2168 2163 { 2169 2164 struct selinux_policy *oldpolicy; 2170 2165 ··· 2172 2167 lockdep_is_held(&state->policy_mutex)); 2173 2168 2174 2169 sidtab_cancel_convert(oldpolicy->sidtab); 2175 - selinux_policy_free(policy); 2170 + selinux_policy_free(load_state->policy); 2171 + kfree(load_state->convert_data); 2176 2172 } 2177 2173 2178 2174 static void selinux_notify_policy_change(struct selinux_state *state, ··· 2189 2183 } 2190 2184 2191 2185 void selinux_policy_commit(struct selinux_state *state, 2192 - struct selinux_policy *newpolicy) 2186 + struct selinux_load_state *load_state) 2193 2187 { 2194 - struct selinux_policy *oldpolicy; 2188 + struct selinux_policy *oldpolicy, *newpolicy = load_state->policy; 2195 2189 u32 seqno; 2196 2190 2197 2191 oldpolicy = rcu_dereference_protected(state->policy, ··· 2231 2225 /* Free the old policy */ 2232 2226 synchronize_rcu(); 2233 2227 selinux_policy_free(oldpolicy); 2228 + kfree(load_state->convert_data); 2234 2229 2235 2230 /* Notify others of the policy change */ 2236 2231 selinux_notify_policy_change(state, seqno); ··· 2248 2241 * loading the new policy. 2249 2242 */ 2250 2243 int security_load_policy(struct selinux_state *state, void *data, size_t len, 2251 - struct selinux_policy **newpolicyp) 2244 + struct selinux_load_state *load_state) 2252 2245 { 2253 2246 struct selinux_policy *newpolicy, *oldpolicy; 2254 - struct sidtab_convert_params convert_params; 2255 - struct convert_context_args args; 2247 + struct selinux_policy_convert_data *convert_data; 2256 2248 int rc = 0; 2257 2249 struct policy_file file = { data, len }, *fp = &file; 2258 2250 ··· 2281 2275 goto err_mapping; 2282 2276 } 2283 2277 2284 - 2285 2278 if (!selinux_initialized(state)) { 2286 2279 /* First policy load, so no need to preserve state from old policy */ 2287 - *newpolicyp = newpolicy; 2280 + load_state->policy = newpolicy; 2281 + load_state->convert_data = NULL; 2288 2282 return 0; 2289 2283 } 2290 2284 ··· 2298 2292 goto err_free_isids; 2299 2293 } 2300 2294 2295 + convert_data = kmalloc(sizeof(*convert_data), GFP_KERNEL); 2296 + if (!convert_data) { 2297 + rc = -ENOMEM; 2298 + goto err_free_isids; 2299 + } 2300 + 2301 2301 /* 2302 2302 * Convert the internal representations of contexts 2303 2303 * in the new SID table. 
2304 2304 */ 2305 - args.state = state; 2306 - args.oldp = &oldpolicy->policydb; 2307 - args.newp = &newpolicy->policydb; 2305 + convert_data->args.state = state; 2306 + convert_data->args.oldp = &oldpolicy->policydb; 2307 + convert_data->args.newp = &newpolicy->policydb; 2308 2308 2309 - convert_params.func = convert_context; 2310 - convert_params.args = &args; 2311 - convert_params.target = newpolicy->sidtab; 2309 + convert_data->sidtab_params.func = convert_context; 2310 + convert_data->sidtab_params.args = &convert_data->args; 2311 + convert_data->sidtab_params.target = newpolicy->sidtab; 2312 2312 2313 - rc = sidtab_convert(oldpolicy->sidtab, &convert_params); 2313 + rc = sidtab_convert(oldpolicy->sidtab, &convert_data->sidtab_params); 2314 2314 if (rc) { 2315 2315 pr_err("SELinux: unable to convert the internal" 2316 2316 " representation of contexts in the new SID" 2317 2317 " table\n"); 2318 - goto err_free_isids; 2318 + goto err_free_convert_data; 2319 2319 } 2320 2320 2321 - *newpolicyp = newpolicy; 2321 + load_state->policy = newpolicy; 2322 + load_state->convert_data = convert_data; 2322 2323 return 0; 2323 2324 2325 + err_free_convert_data: 2326 + kfree(convert_data); 2324 2327 err_free_isids: 2325 2328 sidtab_destroy(newpolicy->sidtab); 2326 2329 err_mapping:
+1 -1
security/tomoyo/network.c
··· 613 613 static bool tomoyo_kernel_service(void) 614 614 { 615 615 /* Nothing to do if I am a kernel service. */ 616 - return (current->flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD; 616 + return current->flags & PF_KTHREAD; 617 617 } 618 618 619 619 /**
+8
sound/pci/hda/hda_intel.c
··· 989 989 struct snd_card *card = dev_get_drvdata(dev); 990 990 struct azx *chip; 991 991 992 + if (!azx_is_pm_ready(card)) 993 + return 0; 994 + 992 995 chip = card->private_data; 993 996 chip->pm_prepared = 1; 997 + snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); 994 998 995 999 flush_work(&azx_bus(chip)->unsol_work); 996 1000 ··· 1009 1005 struct snd_card *card = dev_get_drvdata(dev); 1010 1006 struct azx *chip; 1011 1007 1008 + if (!azx_is_pm_ready(card)) 1009 + return; 1010 + 1012 1011 chip = card->private_data; 1012 + snd_power_change_state(card, SNDRV_CTL_POWER_D0); 1013 1013 chip->pm_prepared = 0; 1014 1014 } 1015 1015
+3 -1
sound/pci/hda/patch_realtek.c
··· 5256 5256 case 0x10ec0274: 5257 5257 case 0x10ec0294: 5258 5258 alc_process_coef_fw(codec, coef0274); 5259 - msleep(80); 5259 + msleep(850); 5260 5260 val = alc_read_coef_idx(codec, 0x46); 5261 5261 is_ctia = (val & 0x00f0) == 0x00f0; 5262 5262 break; ··· 5440 5440 struct hda_jack_callback *jack) 5441 5441 { 5442 5442 snd_hda_gen_hp_automute(codec, jack); 5443 + alc_update_headset_mode(codec); 5443 5444 } 5444 5445 5445 5446 static void alc_probe_headset_mode(struct hda_codec *codec) ··· 8058 8057 ALC285_FIXUP_HP_GPIO_AMP_INIT), 8059 8058 SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED), 8060 8059 SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), 8060 + SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), 8061 8061 SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED), 8062 8062 SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED), 8063 8063 SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+1
sound/usb/quirks.c
··· 1521 1521 case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */ 1522 1522 case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */ 1523 1523 case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */ 1524 + case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */ 1524 1525 return true; 1525 1526 } 1526 1527
+13
tools/include/uapi/linux/kvm.h
··· 1154 1154 #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0) 1155 1155 #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) 1156 1156 #define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) 1157 + #define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3) 1157 1158 1158 1159 struct kvm_xen_hvm_config { 1159 1160 __u32 flags; ··· 1622 1621 union { 1623 1622 __u64 gpa; 1624 1623 __u64 pad[8]; 1624 + struct { 1625 + __u64 state; 1626 + __u64 state_entry_time; 1627 + __u64 time_running; 1628 + __u64 time_runnable; 1629 + __u64 time_blocked; 1630 + __u64 time_offline; 1631 + } runstate; 1625 1632 } u; 1626 1633 }; 1627 1634 1628 1635 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ 1629 1636 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0 1630 1637 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1 1638 + #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2 1639 + #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3 1640 + #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4 1641 + #define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5 1631 1642 1632 1643 /* Secure Encrypted Virtualization command */ 1633 1644 enum sev_cmd_id {
+1
tools/kvm/kvm_stat/kvm_stat.service
··· 9 9 ExecStart=/usr/bin/kvm_stat -dtcz -s 10 -L /var/log/kvm_stat.csv 10 10 ExecReload=/bin/kill -HUP $MAINPID 11 11 Restart=always 12 + RestartSec=60s 12 13 SyslogIdentifier=kvm_stat 13 14 SyslogLevel=debug 14 15
+1 -1
tools/lib/bpf/Makefile
··· 215 215 if [ ! -d '$(DESTDIR_SQ)$2' ]; then \ 216 216 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \ 217 217 fi; \ 218 - $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2' 218 + $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2' 219 219 endef 220 220 221 221 install_lib: all_cmd
+1 -1
tools/lib/bpf/btf_dump.c
··· 462 462 return err; 463 463 464 464 case BTF_KIND_ARRAY: 465 - return btf_dump_order_type(d, btf_array(t)->type, through_ptr); 465 + return btf_dump_order_type(d, btf_array(t)->type, false); 466 466 467 467 case BTF_KIND_STRUCT: 468 468 case BTF_KIND_UNION: {
+2 -1
tools/lib/bpf/libbpf.c
··· 1181 1181 if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) { 1182 1182 pr_warn("elf: failed to get section names strings from %s: %s\n", 1183 1183 obj->path, elf_errmsg(-1)); 1184 - return -LIBBPF_ERRNO__FORMAT; 1184 + err = -LIBBPF_ERRNO__FORMAT; 1185 + goto errout; 1185 1186 } 1186 1187 1187 1188 /* Old LLVM set e_machine to EM_NONE */
+1 -1
tools/lib/bpf/netlink.c
··· 40 40 memset(&sa, 0, sizeof(sa)); 41 41 sa.nl_family = AF_NETLINK; 42 42 43 - sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); 43 + sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE); 44 44 if (sock < 0) 45 45 return -errno; 46 46
+34 -25
tools/perf/builtin-daemon.c
··· 402 402 int status; 403 403 pid_t pid; 404 404 405 + /* 406 + * Take signal fd data as pure signal notification and check all 407 + * the sessions state. The reason is that multiple signals can get 408 + * coalesced in kernel and we can receive only single signal even 409 + * if multiple SIGCHLD were generated. 410 + */ 405 411 err = read(daemon->signal_fd, &si, sizeof(struct signalfd_siginfo)); 406 - if (err != sizeof(struct signalfd_siginfo)) 412 + if (err != sizeof(struct signalfd_siginfo)) { 413 + pr_err("failed to read signal fd\n"); 407 414 return -1; 415 + } 408 416 409 417 list_for_each_entry(session, &daemon->sessions, list) { 410 - 411 - if (session->pid != (int) si.ssi_pid) 418 + if (session->pid == -1) 412 419 continue; 413 420 414 - pid = waitpid(session->pid, &status, 0); 415 - if (pid == session->pid) { 416 - if (WIFEXITED(status)) { 417 - pr_info("session '%s' exited, status=%d\n", 418 - session->name, WEXITSTATUS(status)); 419 - } else if (WIFSIGNALED(status)) { 420 - pr_info("session '%s' killed (signal %d)\n", 421 - session->name, WTERMSIG(status)); 422 - } else if (WIFSTOPPED(status)) { 423 - pr_info("session '%s' stopped (signal %d)\n", 424 - session->name, WSTOPSIG(status)); 425 - } else { 426 - pr_info("session '%s' Unexpected status (0x%x)\n", 427 - session->name, status); 428 - } 421 + pid = waitpid(session->pid, &status, WNOHANG); 422 + if (pid <= 0) 423 + continue; 424 + 425 + if (WIFEXITED(status)) { 426 + pr_info("session '%s' exited, status=%d\n", 427 + session->name, WEXITSTATUS(status)); 428 + } else if (WIFSIGNALED(status)) { 429 + pr_info("session '%s' killed (signal %d)\n", 430 + session->name, WTERMSIG(status)); 431 + } else if (WIFSTOPPED(status)) { 432 + pr_info("session '%s' stopped (signal %d)\n", 433 + session->name, WSTOPSIG(status)); 434 + } else { 435 + pr_info("session '%s' Unexpected status (0x%x)\n", 436 + session->name, status); 429 437 } 430 438 431 439 session->state = KILL; 432 440 session->pid = -1; 433 - return pid; 434 441 } 435 442 436 443 return 0; ··· 450 443 .fd = daemon->signal_fd, 451 444 .events = POLLIN, 452 445 }; 453 - pid_t wpid = 0, pid = session->pid; 454 446 time_t start; 455 447 456 448 start = time(NULL); ··· 458 452 int err = poll(&pollfd, 1, 1000); 459 453 460 454 if (err > 0) { 461 - wpid = handle_signalfd(daemon); 455 + handle_signalfd(daemon); 462 456 } else if (err < 0) { 463 457 perror("failed: poll\n"); 464 458 return -1; ··· 466 460 467 461 if (start + secs < time(NULL)) 468 462 return -1; 469 - } while (wpid != pid); 463 + } while (session->pid != -1); 470 464 471 465 return 0; 472 466 } ··· 908 902 daemon_session__signal(session, SIGKILL); 909 903 break; 910 904 default: 911 - break; 905 + pr_err("failed to wait for session %s\n", 906 + session->name); 907 + return; 912 908 } 913 909 how++; 914 910 ··· 963 955 daemon__signal(daemon, SIGKILL); 964 956 break; 965 957 default: 966 - break; 958 + pr_err("failed to wait for sessions\n"); 959 + return; 967 960 } 968 961 how++; 969 962 ··· 1353 1344 close(sock_fd); 1354 1345 if (conf_fd != -1) 1355 1346 close(conf_fd); 1356 - if (conf_fd != -1) 1347 + if (signal_fd != -1) 1357 1348 close(signal_fd); 1358 1349 1359 1350 pr_info("daemon exited\n");
+1 -8
tools/perf/tests/bpf.c
··· 86 86 .msg_load_fail = "check your vmlinux setting?", 87 87 .target_func = &epoll_pwait_loop, 88 88 .expect_result = (NR_ITERS + 1) / 2, 89 - .pin = true, 89 + .pin = true, 90 90 }, 91 91 #ifdef HAVE_BPF_PROLOGUE 92 92 { ··· 99 99 .expect_result = (NR_ITERS + 1) / 4, 100 100 }, 101 101 #endif 102 - { 103 - .prog_id = LLVM_TESTCASE_BPF_RELOCATION, 104 - .desc = "BPF relocation checker", 105 - .name = "[bpf_relocation_test]", 106 - .msg_compile_fail = "fix 'perf test LLVM' first", 107 - .msg_load_fail = "libbpf error when dealing with relocation", 108 - }, 109 102 }; 110 103 111 104 static int do_test(struct bpf_object *obj, int (*func)(void),
+1 -1
tools/perf/tests/shell/daemon.sh
··· 1 - #!/bin/sh 1 + #!/bin/bash 2 2 # daemon operations 3 3 # SPDX-License-Identifier: GPL-2.0 4 4
-4
tools/perf/util/auxtrace.c
··· 298 298 queue->set = true; 299 299 queue->tid = buffer->tid; 300 300 queue->cpu = buffer->cpu; 301 - } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) { 302 - pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n", 303 - queue->cpu, queue->tid, buffer->cpu, buffer->tid); 304 - return -EINVAL; 305 301 } 306 302 307 303 buffer->buffer_nr = queues->next_buffer_nr++;
+10 -3
tools/perf/util/bpf-event.c
··· 196 196 } 197 197 198 198 if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) { 199 + free(info_linear); 199 200 pr_debug("%s: the kernel is too old, aborting\n", __func__); 200 201 return -2; 201 202 } 202 203 203 204 info = &info_linear->info; 205 + if (!info->jited_ksyms) { 206 + free(info_linear); 207 + return -1; 208 + } 204 209 205 210 /* number of ksyms, func_lengths, and tags should match */ 206 211 sub_prog_cnt = info->nr_jited_ksyms; 207 212 if (sub_prog_cnt != info->nr_prog_tags || 208 - sub_prog_cnt != info->nr_jited_func_lens) 213 + sub_prog_cnt != info->nr_jited_func_lens) { 214 + free(info_linear); 209 215 return -1; 216 + } 210 217 211 218 /* check BTF func info support */ 212 219 if (info->btf_id && info->nr_func_info && info->func_info_rec_size) { 213 220 /* btf func info number should be same as sub_prog_cnt */ 214 221 if (sub_prog_cnt != info->nr_func_info) { 215 222 pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__); 216 - err = -1; 217 - goto out; 223 + free(info_linear); 224 + return -1; 218 225 } 219 226 if (btf__get_from_id(info->btf_id, &btf)) { 220 227 pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
+3
tools/perf/util/parse-events.c
··· 356 356 struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) : 357 357 cpu_list ? perf_cpu_map__new(cpu_list) : NULL; 358 358 359 + if (pmu && attr->type == PERF_TYPE_RAW) 360 + perf_pmu__warn_invalid_config(pmu, attr->config, name); 361 + 359 362 if (init_attr) 360 363 event_attr_init(attr); 361 364
+33
tools/perf/util/pmu.c
··· 1812 1812 1813 1813 return nr_caps; 1814 1814 } 1815 + 1816 + void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config, 1817 + char *name) 1818 + { 1819 + struct perf_pmu_format *format; 1820 + __u64 masks = 0, bits; 1821 + char buf[100]; 1822 + unsigned int i; 1823 + 1824 + list_for_each_entry(format, &pmu->format, list) { 1825 + if (format->value != PERF_PMU_FORMAT_VALUE_CONFIG) 1826 + continue; 1827 + 1828 + for_each_set_bit(i, format->bits, PERF_PMU_FORMAT_BITS) 1829 + masks |= 1ULL << i; 1830 + } 1831 + 1832 + /* 1833 + * Kernel doesn't export any valid format bits. 1834 + */ 1835 + if (masks == 0) 1836 + return; 1837 + 1838 + bits = config & ~masks; 1839 + if (bits == 0) 1840 + return; 1841 + 1842 + bitmap_scnprintf((unsigned long *)&bits, sizeof(bits) * 8, buf, sizeof(buf)); 1843 + 1844 + pr_warning("WARNING: event '%s' not valid (bits %s of config " 1845 + "'%llx' not supported by kernel)!\n", 1846 + name ?: "N/A", buf, config); 1847 + }
+3
tools/perf/util/pmu.h
··· 123 123 124 124 int perf_pmu__caps_parse(struct perf_pmu *pmu); 125 125 126 + void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config, 127 + char *name); 128 + 126 129 #endif /* __PMU_H */
+6 -5
tools/perf/util/synthetic-events.c
··· 424 424 425 425 while (!io.eof) { 426 426 static const char anonstr[] = "//anon"; 427 - size_t size; 427 + size_t size, aligned_size; 428 428 429 429 /* ensure null termination since stack will be reused. */ 430 430 event->mmap2.filename[0] = '\0'; ··· 484 484 } 485 485 486 486 size = strlen(event->mmap2.filename) + 1; 487 - size = PERF_ALIGN(size, sizeof(u64)); 487 + aligned_size = PERF_ALIGN(size, sizeof(u64)); 488 488 event->mmap2.len -= event->mmap.start; 489 489 event->mmap2.header.size = (sizeof(event->mmap2) - 490 - (sizeof(event->mmap2.filename) - size)); 491 - memset(event->mmap2.filename + size, 0, machine->id_hdr_size); 490 + (sizeof(event->mmap2.filename) - aligned_size)); 491 + memset(event->mmap2.filename + size, 0, machine->id_hdr_size + 492 + (aligned_size - size)); 492 493 event->mmap2.header.size += machine->id_hdr_size; 493 494 event->mmap2.pid = tgid; 494 495 event->mmap2.tid = pid; ··· 759 758 for (i = 0; i < n; i++) { 760 759 char *end; 761 760 pid_t _pid; 762 - bool kernel_thread; 761 + bool kernel_thread = false; 763 762 764 763 _pid = strtol(dirent[i]->d_name, &end, 10); 765 764 if (*end)
+2
tools/perf/util/vdso.c
··· 133 133 if (dso != NULL) { 134 134 __dsos__add(&machine->dsos, dso); 135 135 dso__set_long_name(dso, long_name, false); 136 + /* Put dso here because __dsos_add already got it */ 137 + dso__put(dso); 136 138 } 137 139 138 140 return dso;
+2
tools/testing/kunit/configs/broken_on_uml.config
··· 40 40 # CONFIG_RESET_BRCMSTB_RESCAL is not set 41 41 # CONFIG_RESET_INTEL_GW is not set 42 42 # CONFIG_ADI_AXI_ADC is not set 43 + # CONFIG_DEBUG_PAGEALLOC is not set 44 + # CONFIG_PAGE_POISONING is not set
+1 -1
tools/testing/kunit/kunit_config.py
··· 13 13 CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$' 14 14 CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$' 15 15 16 - KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value']) 16 + KconfigEntryBase = collections.namedtuple('KconfigEntryBase', ['name', 'value']) 17 17 18 18 class KconfigEntry(KconfigEntryBase): 19 19
+18 -3
tools/testing/radix-tree/idr-test.c
··· 296 296 return NULL; 297 297 } 298 298 299 + /* 300 + * There are always either 1 or 2 objects in the IDR. If we find nothing, 301 + * or we find something at an ID we didn't expect, that's a bug. 302 + */ 299 303 void idr_find_test_1(int anchor_id, int throbber_id) 300 304 { 301 305 pthread_t throbber; 302 306 time_t start = time(NULL); 303 307 304 - pthread_create(&throbber, NULL, idr_throbber, &throbber_id); 305 - 306 308 BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id, 307 309 anchor_id + 1, GFP_KERNEL) != anchor_id); 308 310 311 + pthread_create(&throbber, NULL, idr_throbber, &throbber_id); 312 + 313 + rcu_read_lock(); 309 314 do { 310 315 int id = 0; 311 316 void *entry = idr_get_next(&find_idr, &id); 312 - BUG_ON(entry != xa_mk_value(id)); 317 + rcu_read_unlock(); 318 + if ((id != anchor_id && id != throbber_id) || 319 + entry != xa_mk_value(id)) { 320 + printf("%s(%d, %d): %p at %d\n", __func__, anchor_id, 321 + throbber_id, entry, id); 322 + abort(); 323 + } 324 + rcu_read_lock(); 313 325 } while (time(NULL) < start + 11); 326 + rcu_read_unlock(); 314 327 315 328 pthread_join(throbber, NULL); 316 329 ··· 590 577 591 578 int __weak main(void) 592 579 { 580 + rcu_register_thread(); 593 581 radix_tree_init(); 594 582 idr_checks(); 595 583 ida_tests(); ··· 598 584 rcu_barrier(); 599 585 if (nr_allocated) 600 586 printf("nr_allocated = %d\n", nr_allocated); 587 + rcu_unregister_thread(); 601 588 return 0; 602 589 }
tools/testing/radix-tree/linux/compiler_types.h
+2
tools/testing/radix-tree/multiorder.c
··· 224 224 225 225 int __weak main(void) 226 226 { 227 + rcu_register_thread(); 227 228 radix_tree_init(); 228 229 multiorder_checks(); 230 + rcu_unregister_thread(); 229 231 return 0; 230 232 }
+2
tools/testing/radix-tree/xarray.c
··· 25 25 26 26 int __weak main(void) 27 27 { 28 + rcu_register_thread(); 28 29 radix_tree_init(); 29 30 xarray_tests(); 30 31 radix_tree_cpu_dead(1); 31 32 rcu_barrier(); 32 33 if (nr_allocated) 33 34 printf("nr_allocated = %d\n", nr_allocated); 35 + rcu_unregister_thread(); 34 36 return 0; 35 37 }
+17 -5
tools/testing/selftests/arm64/fp/sve-test.S
··· 284 284 // Set up test pattern in the FFR 285 285 // x0: pid 286 286 // x2: generation 287 + // 288 + // We need to generate a canonical FFR value, which consists of a number of 289 + // low "1" bits, followed by a number of zeros. This gives us 17 unique values 290 + // per 16 bits of FFR, so we create a 4 bit signature out of the PID and 291 + // generation, and use that as the initial number of ones in the pattern. 292 + // We fill the upper lanes of FFR with zeros. 287 293 // Beware: corrupts P0. 288 294 function setup_ffr 289 295 mov x4, x30 290 296 291 - bl pattern 297 + and w0, w0, #0x3 298 + bfi w0, w2, #2, #2 299 + mov w1, #1 300 + lsl w1, w1, w0 301 + sub w1, w1, #1 302 + 292 303 ldr x0, =ffrref 293 - ldr x1, =scratch 294 - rdvl x2, #1 295 - lsr x2, x2, #3 296 - bl memcpy 304 + strh w1, [x0], 2 305 + rdvl x1, #1 306 + lsr x1, x1, #3 307 + sub x1, x1, #2 308 + bl memclr 297 309 298 310 mov x0, #0 299 311 ldr x1, =ffrref
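The FFR setup above is easier to follow in C; here is a minimal sketch of the value the and/bfi/lsl/sub sequence computes (the function and parameter names are illustrative, not part of the test):

#include <stdint.h>

/* Canonical FFR pattern: a run of low "1" bits whose length is a 4-bit
 * signature built from the PID and the generation counter. */
static uint16_t ffr_pattern(uint32_t pid, uint32_t generation)
{
	uint32_t sig = (pid & 0x3) | ((generation & 0x3) << 2);	/* 0..15 ones */

	return (uint16_t)((1u << sig) - 1);
}

The remaining lanes of the ffrref buffer are then zero-filled, which the patch does with memclr instead of copying a full generated pattern.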
+4
tools/testing/selftests/bpf/prog_tests/check_mtu.c
··· 128 128 test_check_mtu_run_xdp(skel, skel->progs.xdp_use_helper, mtu); 129 129 test_check_mtu_run_xdp(skel, skel->progs.xdp_exceed_mtu, mtu); 130 130 test_check_mtu_run_xdp(skel, skel->progs.xdp_minus_delta, mtu); 131 + test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len, mtu); 132 + test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len_exceed, mtu); 131 133 132 134 cleanup: 133 135 test_check_mtu__destroy(skel); ··· 189 187 test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu, mtu); 190 188 test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu_da, mtu); 191 189 test_check_mtu_run_tc(skel, skel->progs.tc_minus_delta, mtu); 190 + test_check_mtu_run_tc(skel, skel->progs.tc_input_len, mtu); 191 + test_check_mtu_run_tc(skel, skel->progs.tc_input_len_exceed, mtu); 192 192 cleanup: 193 193 test_check_mtu__destroy(skel); 194 194 }
+82
tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2021 Facebook */ 3 + #define _GNU_SOURCE 4 + #include <sched.h> 5 + #include <test_progs.h> 6 + #include <time.h> 7 + #include <sys/mman.h> 8 + #include <sys/syscall.h> 9 + #include "fexit_sleep.skel.h" 10 + 11 + static int do_sleep(void *skel) 12 + { 13 + struct fexit_sleep *fexit_skel = skel; 14 + struct timespec ts1 = { .tv_nsec = 1 }; 15 + struct timespec ts2 = { .tv_sec = 10 }; 16 + 17 + fexit_skel->bss->pid = getpid(); 18 + (void)syscall(__NR_nanosleep, &ts1, NULL); 19 + (void)syscall(__NR_nanosleep, &ts2, NULL); 20 + return 0; 21 + } 22 + 23 + #define STACK_SIZE (1024 * 1024) 24 + static char child_stack[STACK_SIZE]; 25 + 26 + void test_fexit_sleep(void) 27 + { 28 + struct fexit_sleep *fexit_skel = NULL; 29 + int wstatus, duration = 0; 30 + pid_t cpid; 31 + int err, fexit_cnt; 32 + 33 + fexit_skel = fexit_sleep__open_and_load(); 34 + if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n")) 35 + goto cleanup; 36 + 37 + err = fexit_sleep__attach(fexit_skel); 38 + if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) 39 + goto cleanup; 40 + 41 + cpid = clone(do_sleep, child_stack + STACK_SIZE, CLONE_FILES | SIGCHLD, fexit_skel); 42 + if (CHECK(cpid == -1, "clone", strerror(errno))) 43 + goto cleanup; 44 + 45 + /* wait until first sys_nanosleep ends and second sys_nanosleep starts */ 46 + while (READ_ONCE(fexit_skel->bss->fentry_cnt) != 2); 47 + fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt); 48 + if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt)) 49 + goto cleanup; 50 + 51 + /* close progs and detach them. That will trigger two nop5->jmp5 rewrites 52 + * in the trampolines to skip nanosleep_fexit prog. 53 + * The nanosleep_fentry prog will get detached first. 54 + * The nanosleep_fexit prog will get detached second. 55 + * Detaching will trigger freeing of both progs JITed images. 56 + * There will be two dying bpf_tramp_image-s, but only the initial 57 + * bpf_tramp_image (with both _fentry and _fexit progs will be stuck 58 + * waiting for percpu_ref_kill to confirm). The other one 59 + * will be freed quickly. 60 + */ 61 + close(bpf_program__fd(fexit_skel->progs.nanosleep_fentry)); 62 + close(bpf_program__fd(fexit_skel->progs.nanosleep_fexit)); 63 + fexit_sleep__detach(fexit_skel); 64 + 65 + /* kill the thread to unwind sys_nanosleep stack through the trampoline */ 66 + kill(cpid, 9); 67 + 68 + if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", strerror(errno))) 69 + goto cleanup; 70 + if (CHECK(WEXITSTATUS(wstatus) != 0, "exitstatus", "failed")) 71 + goto cleanup; 72 + 73 + /* The bypassed nanosleep_fexit prog shouldn't have executed. 74 + * Unlike progs the maps were not freed and directly accessible. 75 + */ 76 + fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt); 77 + if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt)) 78 + goto cleanup; 79 + 80 + cleanup: 81 + fexit_sleep__destroy(fexit_skel); 82 + }
+8
tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
··· 174 174 }; 175 175 }; 176 176 177 + struct struct_in_array {}; 178 + 179 + struct struct_in_array_typed {}; 180 + 181 + typedef struct struct_in_array_typed struct_in_array_t[2]; 182 + 177 183 struct struct_with_embedded_stuff { 178 184 int a; 179 185 struct { ··· 209 203 } r[5]; 210 204 struct struct_in_struct s[10]; 211 205 int t[11]; 206 + struct struct_in_array (*u)[2]; 207 + struct_in_array_t *v; 212 208 }; 213 209 214 210 struct root_struct {
+31
tools/testing/selftests/bpf/progs/fexit_sleep.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2021 Facebook */ 3 + #include "vmlinux.h" 4 + #include <bpf/bpf_helpers.h> 5 + #include <bpf/bpf_tracing.h> 6 + 7 + char LICENSE[] SEC("license") = "GPL"; 8 + 9 + int pid = 0; 10 + int fentry_cnt = 0; 11 + int fexit_cnt = 0; 12 + 13 + SEC("fentry/__x64_sys_nanosleep") 14 + int BPF_PROG(nanosleep_fentry, const struct pt_regs *regs) 15 + { 16 + if ((int)bpf_get_current_pid_tgid() != pid) 17 + return 0; 18 + 19 + fentry_cnt++; 20 + return 0; 21 + } 22 + 23 + SEC("fexit/__x64_sys_nanosleep") 24 + int BPF_PROG(nanosleep_fexit, const struct pt_regs *regs, int ret) 25 + { 26 + if ((int)bpf_get_current_pid_tgid() != pid) 27 + return 0; 28 + 29 + fexit_cnt++; 30 + return 0; 31 + }
+92
tools/testing/selftests/bpf/progs/test_check_mtu.c
··· 105 105 return retval; 106 106 } 107 107 108 + SEC("xdp") 109 + int xdp_input_len(struct xdp_md *ctx) 110 + { 111 + int retval = XDP_PASS; /* Expected retval on successful test */ 112 + void *data_end = (void *)(long)ctx->data_end; 113 + void *data = (void *)(long)ctx->data; 114 + __u32 ifindex = GLOBAL_USER_IFINDEX; 115 + __u32 data_len = data_end - data; 116 + 117 + /* API allow user give length to check as input via mtu_len param, 118 + * resulting MTU value is still output in mtu_len param after call. 119 + * 120 + * Input len is L3, like MTU and iph->tot_len. 121 + * Remember XDP data_len is L2. 122 + */ 123 + __u32 mtu_len = data_len - ETH_HLEN; 124 + 125 + if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0)) 126 + retval = XDP_ABORTED; 127 + 128 + global_bpf_mtu_xdp = mtu_len; 129 + return retval; 130 + } 131 + 132 + SEC("xdp") 133 + int xdp_input_len_exceed(struct xdp_md *ctx) 134 + { 135 + int retval = XDP_ABORTED; /* Fail */ 136 + __u32 ifindex = GLOBAL_USER_IFINDEX; 137 + int err; 138 + 139 + /* API allow user give length to check as input via mtu_len param, 140 + * resulting MTU value is still output in mtu_len param after call. 141 + * 142 + * Input length value is L3 size like MTU. 143 + */ 144 + __u32 mtu_len = GLOBAL_USER_MTU; 145 + 146 + mtu_len += 1; /* Exceed with 1 */ 147 + 148 + err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0); 149 + if (err == BPF_MTU_CHK_RET_FRAG_NEEDED) 150 + retval = XDP_PASS ; /* Success in exceeding MTU check */ 151 + 152 + global_bpf_mtu_xdp = mtu_len; 153 + return retval; 154 + } 155 + 108 156 SEC("classifier") 109 157 int tc_use_helper(struct __sk_buff *ctx) 110 158 { ··· 240 192 */ 241 193 if (bpf_check_mtu(ctx, ifindex, &mtu_len, delta, 0)) 242 194 retval = BPF_DROP; 195 + 196 + global_bpf_mtu_xdp = mtu_len; 197 + return retval; 198 + } 199 + 200 + SEC("classifier") 201 + int tc_input_len(struct __sk_buff *ctx) 202 + { 203 + int retval = BPF_OK; /* Expected retval on successful test */ 204 + __u32 ifindex = GLOBAL_USER_IFINDEX; 205 + 206 + /* API allow user give length to check as input via mtu_len param, 207 + * resulting MTU value is still output in mtu_len param after call. 208 + * 209 + * Input length value is L3 size. 210 + */ 211 + __u32 mtu_len = GLOBAL_USER_MTU; 212 + 213 + if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0)) 214 + retval = BPF_DROP; 215 + 216 + global_bpf_mtu_xdp = mtu_len; 217 + return retval; 218 + } 219 + 220 + SEC("classifier") 221 + int tc_input_len_exceed(struct __sk_buff *ctx) 222 + { 223 + int retval = BPF_DROP; /* Fail */ 224 + __u32 ifindex = GLOBAL_USER_IFINDEX; 225 + int err; 226 + 227 + /* API allow user give length to check as input via mtu_len param, 228 + * resulting MTU value is still output in mtu_len param after call. 229 + * 230 + * Input length value is L3 size like MTU. 231 + */ 232 + __u32 mtu_len = GLOBAL_USER_MTU; 233 + 234 + mtu_len += 1; /* Exceed with 1 */ 235 + 236 + err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0); 237 + if (err == BPF_MTU_CHK_RET_FRAG_NEEDED) 238 + retval = BPF_OK; /* Success in exceeding MTU check */ 243 239 244 240 global_bpf_mtu_xdp = mtu_len; 245 241 return retval;
+2 -4
tools/testing/selftests/bpf/progs/test_tunnel_kern.c
··· 508 508 } 509 509 510 510 ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt)); 511 - if (ret < 0) { 512 - ERROR(ret); 513 - return TC_ACT_SHOT; 514 - } 511 + if (ret < 0) 512 + gopt.opt_class = 0; 515 513 516 514 bpf_trace_printk(fmt, sizeof(fmt), 517 515 key.tunnel_id, key.remote_ipv4, gopt.opt_class);
+19 -8
tools/testing/selftests/bpf/verifier/bounds_deduction.c
··· 6 6 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 7 7 BPF_EXIT_INSN(), 8 8 }, 9 - .result = REJECT, 9 + .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", 10 10 .errstr = "R0 tried to subtract pointer from scalar", 11 + .result = REJECT, 11 12 }, 12 13 { 13 14 "check deducing bounds from const, 2", ··· 21 20 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), 22 21 BPF_EXIT_INSN(), 23 22 }, 23 + .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", 24 + .result_unpriv = REJECT, 24 25 .result = ACCEPT, 25 26 .retval = 1, 26 27 }, ··· 34 31 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 35 32 BPF_EXIT_INSN(), 36 33 }, 37 - .result = REJECT, 34 + .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", 38 35 .errstr = "R0 tried to subtract pointer from scalar", 36 + .result = REJECT, 39 37 }, 40 38 { 41 39 "check deducing bounds from const, 4", ··· 49 45 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), 50 46 BPF_EXIT_INSN(), 51 47 }, 48 + .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", 49 + .result_unpriv = REJECT, 52 50 .result = ACCEPT, 53 51 }, 54 52 { ··· 61 55 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 62 56 BPF_EXIT_INSN(), 63 57 }, 64 - .result = REJECT, 58 + .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", 65 59 .errstr = "R0 tried to subtract pointer from scalar", 60 + .result = REJECT, 66 61 }, 67 62 { 68 63 "check deducing bounds from const, 6", ··· 74 67 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 75 68 BPF_EXIT_INSN(), 76 69 }, 77 - .result = REJECT, 70 + .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", 78 71 .errstr = "R0 tried to subtract pointer from scalar", 72 + .result = REJECT, 79 73 }, 80 74 { 81 75 "check deducing bounds from const, 7", ··· 88 80 offsetof(struct __sk_buff, mark)), 89 81 BPF_EXIT_INSN(), 90 82 }, 91 - .result = REJECT, 83 + .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", 92 84 .errstr = "dereference of modified ctx ptr", 85 + .result = REJECT, 93 86 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 94 87 }, 95 88 { ··· 103 94 offsetof(struct __sk_buff, mark)), 104 95 BPF_EXIT_INSN(), 105 96 }, 106 - .result = REJECT, 97 + .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", 107 98 .errstr = "dereference of modified ctx ptr", 99 + .result = REJECT, 108 100 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 109 101 }, 110 102 { ··· 116 106 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 117 107 BPF_EXIT_INSN(), 118 108 }, 119 - .result = REJECT, 109 + .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", 120 110 .errstr = "R0 tried to subtract pointer from scalar", 111 + .result = REJECT, 121 112 }, 122 113 { 123 114 "check deducing bounds from const, 10", ··· 130 119 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 131 120 BPF_EXIT_INSN(), 132 121 }, 133 - .result = REJECT, 134 122 .errstr = "math between ctx pointer and register with unbounded min value is not allowed", 123 + .result = REJECT, 135 124 },
+4
tools/testing/selftests/bpf/verifier/map_ptr.c
··· 75 75 BPF_EXIT_INSN(), 76 76 }, 77 77 .fixup_map_hash_16b = { 4 }, 78 + .result_unpriv = REJECT, 79 + .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", 78 80 .result = ACCEPT, 79 81 }, 80 82 { ··· 93 91 BPF_EXIT_INSN(), 94 92 }, 95 93 .fixup_map_hash_16b = { 4 }, 94 + .result_unpriv = REJECT, 95 + .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", 96 96 .result = ACCEPT, 97 97 },
+14 -1
tools/testing/selftests/bpf/verifier/unpriv.c
··· 497 497 .result = ACCEPT, 498 498 }, 499 499 { 500 - "unpriv: adding of fp", 500 + "unpriv: adding of fp, reg", 501 501 .insns = { 502 502 BPF_MOV64_IMM(BPF_REG_0, 0), 503 503 BPF_MOV64_IMM(BPF_REG_1, 0), 504 504 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), 505 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), 506 + BPF_EXIT_INSN(), 507 + }, 508 + .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", 509 + .result_unpriv = REJECT, 510 + .result = ACCEPT, 511 + }, 512 + { 513 + "unpriv: adding of fp, imm", 514 + .insns = { 515 + BPF_MOV64_IMM(BPF_REG_0, 0), 516 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 517 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), 505 518 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), 506 519 BPF_EXIT_INSN(), 507 520 },
+22 -1
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
··· 169 169 .fixup_map_array_48b = { 1 }, 170 170 .result = ACCEPT, 171 171 .result_unpriv = REJECT, 172 - .errstr_unpriv = "R2 tried to add from different maps or paths", 172 + .errstr_unpriv = "R2 tried to add from different maps, paths, or prohibited types", 173 173 .retval = 0, 174 174 }, 175 175 { ··· 515 515 .fixup_map_array_48b = { 3 }, 516 516 .result = ACCEPT, 517 517 .retval = 0xabcdef12, 518 + }, 519 + { 520 + "map access: value_ptr += N, value_ptr -= N known scalar", 521 + .insns = { 522 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 523 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 524 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 525 + BPF_LD_MAP_FD(BPF_REG_1, 0), 526 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 527 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 528 + BPF_MOV32_IMM(BPF_REG_1, 0x12345678), 529 + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), 530 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), 531 + BPF_MOV64_IMM(BPF_REG_1, 2), 532 + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), 533 + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), 534 + BPF_EXIT_INSN(), 535 + }, 536 + .fixup_map_array_48b = { 3 }, 537 + .result = ACCEPT, 538 + .retval = 0x12345678, 518 539 }, 519 540 { 520 541 "map access: unknown scalar += value_ptr, 1",
+5 -5
tools/testing/selftests/kvm/hardware_disable_test.c
··· 108 108 kvm_vm_elf_load(vm, program_invocation_name, 0, 0); 109 109 vm_create_irqchip(vm); 110 110 111 - fprintf(stderr, "%s: [%d] start vcpus\n", __func__, run); 111 + pr_debug("%s: [%d] start vcpus\n", __func__, run); 112 112 for (i = 0; i < VCPU_NUM; ++i) { 113 113 vm_vcpu_add_default(vm, i, guest_code); 114 114 payloads[i].vm = vm; ··· 124 124 check_set_affinity(throw_away, &cpu_set); 125 125 } 126 126 } 127 - fprintf(stderr, "%s: [%d] all threads launched\n", __func__, run); 127 + pr_debug("%s: [%d] all threads launched\n", __func__, run); 128 128 sem_post(sem); 129 129 for (i = 0; i < VCPU_NUM; ++i) 130 130 check_join(threads[i], &b); ··· 147 147 if (pid == 0) 148 148 run_test(i); /* This function always exits */ 149 149 150 - fprintf(stderr, "%s: [%d] waiting semaphore\n", __func__, i); 150 + pr_debug("%s: [%d] waiting semaphore\n", __func__, i); 151 151 sem_wait(sem); 152 152 r = (rand() % DELAY_US_MAX) + 1; 153 - fprintf(stderr, "%s: [%d] waiting %dus\n", __func__, i, r); 153 + pr_debug("%s: [%d] waiting %dus\n", __func__, i, r); 154 154 usleep(r); 155 155 r = waitpid(pid, &s, WNOHANG); 156 156 TEST_ASSERT(r != pid, 157 157 "%s: [%d] child exited unexpectedly status: [%d]", 158 158 __func__, i, s); 159 - fprintf(stderr, "%s: [%d] killing child\n", __func__, i); 159 + pr_debug("%s: [%d] killing child\n", __func__, i); 160 160 kill(pid, SIGKILL); 161 161 } 162 162
+11 -2
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
··· 80 80 GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100); 81 81 } 82 82 83 + static inline u64 get_tscpage_ts(struct ms_hyperv_tsc_page *tsc_page) 84 + { 85 + return mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset; 86 + } 87 + 83 88 static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page) 84 89 { 85 90 u64 r1, r2, t1, t2; 86 91 87 92 /* Compare TSC page clocksource with HV_X64_MSR_TIME_REF_COUNT */ 88 - t1 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset; 93 + t1 = get_tscpage_ts(tsc_page); 89 94 r1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT); 90 95 91 96 /* 10 ms tolerance */ 92 97 GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000); 93 98 nop_loop(); 94 99 95 - t2 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset; 100 + t2 = get_tscpage_ts(tsc_page); 96 101 r2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT); 97 102 GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000); 98 103 } ··· 135 130 136 131 tsc_offset = tsc_page->tsc_offset; 137 132 /* Call KVM_SET_CLOCK from userspace, check that TSC page was updated */ 133 + 138 134 GUEST_SYNC(7); 135 + /* Sanity check TSC page timestamp, it should be close to 0 */ 136 + GUEST_ASSERT(get_tscpage_ts(tsc_page) < 100000); 137 + 139 138 GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset); 140 139 141 140 nop_loop();
+1 -1
tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
··· 658 658 # In accordance with INET_ECN_decapsulate() 659 659 __test_ecn_decap 00 00 0x00 660 660 __test_ecn_decap 01 01 0x01 661 - __test_ecn_decap 02 01 0x02 661 + __test_ecn_decap 02 01 0x01 662 662 __test_ecn_decap 01 03 0x03 663 663 __test_ecn_decap 02 03 0x03 664 664 test_ecn_decap_error
+20 -10
tools/testing/selftests/net/mptcp/mptcp_join.sh
··· 11 11 timeout=30 12 12 mptcp_connect="" 13 13 capture=0 14 + do_all_tests=1 14 15 15 16 TEST_COUNT=0 16 17 ··· 121 120 "$CBPF_MPTCP_SUBOPTION_ADD_ADDR" \ 122 121 -j DROP 123 122 } 124 - 125 - for arg in "$@"; do 126 - if [ "$arg" = "-c" ]; then 127 - capture=1 128 - fi 129 - done 130 123 131 124 ip -Version > /dev/null 2>&1 132 125 if [ $? -ne 0 ];then ··· 1216 1221 echo " -4 v4mapped_tests" 1217 1222 echo " -b backup_tests" 1218 1223 echo " -p add_addr_ports_tests" 1219 - echo " -c syncookies_tests" 1224 + echo " -k syncookies_tests" 1225 + echo " -c capture pcap files" 1220 1226 echo " -h help" 1221 1227 } 1222 1228 ··· 1231 1235 make_file "$sin" "server" 1 1232 1236 trap cleanup EXIT 1233 1237 1234 - if [ -z $1 ]; then 1238 + for arg in "$@"; do 1239 + # check for "capture" arg before launching tests 1240 + if [[ "${arg}" =~ ^"-"[0-9a-zA-Z]*"c"[0-9a-zA-Z]*$ ]]; then 1241 + capture=1 1242 + fi 1243 + 1244 + # exception for the capture option, the rest means: a part of the tests 1245 + if [ "${arg}" != "-c" ]; then 1246 + do_all_tests=0 1247 + fi 1248 + done 1249 + 1250 + if [ $do_all_tests -eq 1 ]; then 1235 1251 all_tests 1236 1252 exit $ret 1237 1253 fi 1238 1254 1239 - while getopts 'fsltra64bpch' opt; do 1255 + while getopts 'fsltra64bpkch' opt; do 1240 1256 case $opt in 1241 1257 f) 1242 1258 subflows_tests ··· 1280 1272 p) 1281 1273 add_addr_ports_tests 1282 1274 ;; 1283 - c) 1275 + k) 1284 1276 syncookies_tests 1277 + ;; 1278 + c) 1285 1279 ;; 1286 1280 h | *) 1287 1281 usage
+16 -16
tools/testing/selftests/net/reuseaddr_ports_exhausted.c
··· 30 30 }; 31 31 32 32 struct reuse_opts unreusable_opts[12] = { 33 - {0, 0, 0, 0}, 34 - {0, 0, 0, 1}, 35 - {0, 0, 1, 0}, 36 - {0, 0, 1, 1}, 37 - {0, 1, 0, 0}, 38 - {0, 1, 0, 1}, 39 - {0, 1, 1, 0}, 40 - {0, 1, 1, 1}, 41 - {1, 0, 0, 0}, 42 - {1, 0, 0, 1}, 43 - {1, 0, 1, 0}, 44 - {1, 0, 1, 1}, 33 + {{0, 0}, {0, 0}}, 34 + {{0, 0}, {0, 1}}, 35 + {{0, 0}, {1, 0}}, 36 + {{0, 0}, {1, 1}}, 37 + {{0, 1}, {0, 0}}, 38 + {{0, 1}, {0, 1}}, 39 + {{0, 1}, {1, 0}}, 40 + {{0, 1}, {1, 1}}, 41 + {{1, 0}, {0, 0}}, 42 + {{1, 0}, {0, 1}}, 43 + {{1, 0}, {1, 0}}, 44 + {{1, 0}, {1, 1}}, 45 45 }; 46 46 47 47 struct reuse_opts reusable_opts[4] = { 48 - {1, 1, 0, 0}, 49 - {1, 1, 0, 1}, 50 - {1, 1, 1, 0}, 51 - {1, 1, 1, 1}, 48 + {{1, 1}, {0, 0}}, 49 + {{1, 1}, {0, 1}}, 50 + {{1, 1}, {1, 0}}, 51 + {{1, 1}, {1, 1}}, 52 52 }; 53 53 54 54 int bind_port(struct __test_metadata *_metadata, int reuseaddr, int reuseport)
+2 -2
tools/testing/selftests/vm/Makefile
··· 101 101 ifeq ($(CAN_BUILD_I386),1) 102 102 $(BINARIES_32): CFLAGS += -m32 103 103 $(BINARIES_32): LDLIBS += -lrt -ldl -lm 104 - $(BINARIES_32): %_32: %.c 104 + $(BINARIES_32): $(OUTPUT)/%_32: %.c 105 105 $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@ 106 106 $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t)))) 107 107 endif ··· 109 109 ifeq ($(CAN_BUILD_X86_64),1) 110 110 $(BINARIES_64): CFLAGS += -m64 111 111 $(BINARIES_64): LDLIBS += -lrt -ldl 112 - $(BINARIES_64): %_64: %.c 112 + $(BINARIES_64): $(OUTPUT)/%_64: %.c 113 113 $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@ 114 114 $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t)))) 115 115 endif