Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-7.0-rc8).

Conflicts:

net/ipv6/seg6_iptunnel.c
c3812651b522f ("seg6: separate dst_cache for input and output paths in seg6 lwtunnel")
78723a62b969a ("seg6: add per-route tunnel source address")
https://lore.kernel.org/adZhwtOYfo-0ImSa@sirena.org.uk

net/ipv4/icmp.c
fde29fd934932 ("ipv4: icmp: fix null-ptr-deref in icmp_build_probe()")
d98adfbdd5c01 ("ipv4: drop ipv6_stub usage and use direct function calls")
https://lore.kernel.org/adO3dccqnr6j-BL9@sirena.org.uk

Adjacent changes:

drivers/net/ethernet/stmicro/stmmac/chain_mode.c
51f4e090b9f8 ("net: stmmac: fix integer underflow in chain mode")
6b4286e05508 ("net: stmmac: rename STMMAC_GET_ENTRY() -> STMMAC_NEXT_ENTRY()")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+5337 -1964
+1
.get_maintainer.ignore
···
 Alan Cox <alan@lxorguk.ukuu.org.uk>
 Alan Cox <root@hraefn.swansea.linux.org.uk>
 Alyssa Rosenzweig <alyssa@rosenzweig.io>
+Askar Safin <safinaskar@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 Marc Gonzalez <marc.w.gonzalez@free.fr>
+1
Documentation/devicetree/bindings/connector/usb-connector.yaml
···
     maxItems: 4
 
 dependencies:
+  pd-disable: [typec-power-opmode]
   sink-vdos-v1: [ sink-vdos ]
   sink-vdos: [ sink-vdos-v1 ]
 
+2 -3
Documentation/devicetree/bindings/display/msm/qcom,qcm2290-mdss.yaml
···
       - const: core
 
   iommus:
-    maxItems: 2
+    maxItems: 1
 
   interconnects:
     items:
···
         interconnect-names = "mdp0-mem",
                              "cpu-cfg";
 
-        iommus = <&apps_smmu 0x420 0x2>,
-                 <&apps_smmu 0x421 0x0>;
+        iommus = <&apps_smmu 0x420 0x2>;
         ranges;
 
         display-controller@5e01000 {
+2 -2
Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml
···
     const: 2
 
   "#interrupt-cells":
-    const: 1
+    const: 2
 
   ngpios:
     description:
···
         gpio-controller;
         #gpio-cells = <2>;
         interrupt-controller;
-        #interrupt-cells = <1>;
+        #interrupt-cells = <2>;
         interrupts = <53>, <53>, <53>, <53>,
                      <53>, <53>, <53>, <53>,
                      <53>, <53>, <53>, <53>,
+2 -5
Documentation/devicetree/bindings/media/qcom,qcm2290-venus.yaml
···
       - const: vcodec0_bus
 
   iommus:
-    maxItems: 5
+    maxItems: 2
 
   interconnects:
     maxItems: 2
···
         memory-region = <&pil_video_mem>;
 
         iommus = <&apps_smmu 0x860 0x0>,
-                 <&apps_smmu 0x880 0x0>,
-                 <&apps_smmu 0x861 0x04>,
-                 <&apps_smmu 0x863 0x0>,
-                 <&apps_smmu 0x804 0xe0>;
+                 <&apps_smmu 0x880 0x0>;
 
         interconnects = <&mmnrt_virt MASTER_VIDEO_P0 RPM_ALWAYS_TAG
                          &bimc SLAVE_EBI1 RPM_ALWAYS_TAG>,
+2 -2
Documentation/devicetree/bindings/net/nvidia,tegra234-mgbe.yaml
···
       - const: mgbe
       - const: mac
       - const: mac-divider
-      - const: ptp-ref
+      - const: ptp_ref
       - const: rx-input-m
       - const: rx-input
       - const: tx
···
                  <&bpmp TEGRA234_CLK_MGBE0_RX_PCS_M>,
                  <&bpmp TEGRA234_CLK_MGBE0_RX_PCS>,
                  <&bpmp TEGRA234_CLK_MGBE0_TX_PCS>;
-        clock-names = "mgbe", "mac", "mac-divider", "ptp-ref", "rx-input-m",
+        clock-names = "mgbe", "mac", "mac-divider", "ptp_ref", "rx-input-m",
                       "rx-input", "tx", "eee-pcs", "rx-pcs-input", "rx-pcs-m",
                       "rx-pcs", "tx-pcs";
         resets = <&bpmp TEGRA234_RESET_MGBE0_MAC>,
+139 -15
Documentation/process/security-bugs.rst
···
 
 Linux kernel developers take security very seriously. As such, we'd
 like to know when a security bug is found so that it can be fixed and
-disclosed as quickly as possible. Please report security bugs to the
-Linux kernel security team.
+disclosed as quickly as possible.
+
+Preparing your report
+---------------------
+
+Like with any bug report, a security bug report requires a lot of analysis work
+from the developers, so the more information you can share about the issue, the
+better. Please review the procedure outlined in
+Documentation/admin-guide/reporting-issues.rst if you are unclear about what
+information is helpful. The following information are absolutely necessary in
+**any** security bug report:
+
+* **affected kernel version range**: with no version indication, your report
+  will not be processed. A significant part of reports are for bugs that
+  have already been fixed, so it is extremely important that vulnerabilities
+  are verified on recent versions (development tree or latest stable
+  version), at least by verifying that the code has not changed since the
+  version where it was detected.
+
+* **description of the problem**: a detailed description of the problem, with
+  traces showing its manifestation, and why you consider that the observed
+  behavior as a problem in the kernel, is necessary.
+
+* **reproducer**: developers will need to be able to reproduce the problem to
+  consider a fix as effective. This includes both a way to trigger the issue
+  and a way to confirm it happens. A reproducer with low complexity
+  dependencies will be needed (source code, shell script, sequence of
+  instructions, file-system image etc). Binary-only executables are not
+  accepted. Working exploits are extremely helpful and will not be released
+  without consent from the reporter, unless they are already public. By
+  definition if an issue cannot be reproduced, it is not exploitable, thus it
+  is not a security bug.
+
+* **conditions**: if the bug depends on certain configuration options,
+  sysctls, permissions, timing, code modifications etc, these should be
+  indicated.
+
+In addition, the following information are highly desirable:
+
+* **suspected location of the bug**: the file names and functions where the
+  bug is suspected to be present are very important, at least to help forward
+  the report to the appropriate maintainers. When not possible (for example,
+  "system freezes each time I run this command"), the security team will help
+  identify the source of the bug.
+
+* **a proposed fix**: bug reporters who have analyzed the cause of a bug in
+  the source code almost always have an accurate idea on how to fix it,
+  because they spent a long time studying it and its implications. Proposing
+  a tested fix will save maintainers a lot of time, even if the fix ends up
+  not being the right one, because it helps understand the bug. When
+  proposing a tested fix, please always format it in a way that can be
+  immediately merged (see Documentation/process/submitting-patches.rst).
+  This will save some back-and-forth exchanges if it is accepted, and you
+  will be credited for finding and fixing this issue. Note that in this case
+  only a ``Signed-off-by:`` tag is needed, without ``Reported-by:`` when the
+  reporter and author are the same.
+
+* **mitigations**: very often during a bug analysis, some ways of mitigating
+  the issue appear. It is useful to share them, as they can be helpful to
+  keep end users protected during the time it takes them to apply the fix.
+
+Identifying contacts
+--------------------
+
+The most effective way to report a security bug is to send it directly to the
+affected subsystem's maintainers and Cc: the Linux kernel security team. Do
+not send it to a public list at this stage, unless you have good reasons to
+consider the issue as being public or trivial to discover (e.g. result of a
+widely available automated vulnerability scanning tool that can be repeated by
+anyone).
+
+If you're sending a report for issues affecting multiple parts in the kernel,
+even if they're fairly similar issues, please send individual messages (think
+that maintainers will not all work on the issues at the same time). The only
+exception is when an issue concerns closely related parts maintained by the
+exact same subset of maintainers, and these parts are expected to be fixed all
+at once by the same commit, then it may be acceptable to report them at once.
+
+One difficulty for most first-time reporters is to figure the right list of
+recipients to send a report to. In the Linux kernel, all official maintainers
+are trusted, so the consequences of accidentally including the wrong maintainer
+are essentially a bit more noise for that person, i.e. nothing dramatic. As
+such, a suitable method to figure the list of maintainers (which kernel
+security officers use) is to rely on the get_maintainer.pl script, tuned to
+only report maintainers. This script, when passed a file name, will look for
+its path in the MAINTAINERS file to figure a hierarchical list of relevant
+maintainers. Calling it a first time with the finest level of filtering will
+most of the time return a short list of this specific file's maintainers::
+
+  $ ./scripts/get_maintainer.pl --no-l --no-r --pattern-depth 1 \
+        drivers/example.c
+  Developer One <dev1@example.com> (maintainer:example driver)
+  Developer Two <dev2@example.org> (maintainer:example driver)
+
+These two maintainers should then receive the message. If the command does not
+return anything, it means the affected file is part of a wider subsystem, so we
+should be less specific::
+
+  $ ./scripts/get_maintainer.pl --no-l --no-r drivers/example.c
+  Developer One <dev1@example.com> (maintainer:example subsystem)
+  Developer Two <dev2@example.org> (maintainer:example subsystem)
+  Developer Three <dev3@example.com> (maintainer:example subsystem [GENERAL])
+  Developer Four <dev4@example.org> (maintainer:example subsystem [GENERAL])
+
+Here, picking the first, most specific ones, is sufficient. When the list is
+long, it is possible to produce a comma-delimited e-mail address list on a
+single line suitable for use in the To: field of a mailer like this::
+
+  $ ./scripts/get_maintainer.pl --no-tree --no-l --no-r --no-n --m \
+        --no-git-fallback --no-substatus --no-rolestats --no-multiline \
+        --pattern-depth 1 drivers/example.c
+  dev1@example.com, dev2@example.org
+
+or this for the wider list::
+
+  $ ./scripts/get_maintainer.pl --no-tree --no-l --no-r --no-n --m \
+        --no-git-fallback --no-substatus --no-rolestats --no-multiline \
+        drivers/example.c
+  dev1@example.com, dev2@example.org, dev3@example.com, dev4@example.org
+
+If at this point you're still facing difficulties spotting the right
+maintainers, **and only in this case**, it's possible to send your report to
+the Linux kernel security team only. Your message will be triaged, and you
+will receive instructions about whom to contact, if needed. Your message may
+equally be forwarded as-is to the relevant maintainers.
+
+Sending the report
+------------------
+
+Reports are to be sent over e-mail exclusively. Please use a working e-mail
+address, preferably the same that you want to appear in ``Reported-by`` tags
+if any. If unsure, send your report to yourself first.
 
 The security team and maintainers almost always require additional
 information beyond what was initially provided in a report and rely on
···
 or cannot effectively discuss their findings may be abandoned if the
 communication does not quickly improve.
 
-As it is with any bug, the more information provided the easier it
-will be to diagnose and fix. Please review the procedure outlined in
-'Documentation/admin-guide/reporting-issues.rst' if you are unclear about what
-information is helpful. Any exploit code is very helpful and will not
-be released without consent from the reporter unless it has already been
-made public.
-
+The report must be sent to maintainers, with the security team in ``Cc:``.
 The Linux kernel security team can be contacted by email at
 <security@kernel.org>. This is a private list of security officers
-who will help verify the bug report and develop and release a fix.
-If you already have a fix, please include it with your report, as
-that can speed up the process considerably. It is possible that the
-security team will bring in extra help from area maintainers to
-understand and fix the security vulnerability.
+who will help verify the bug report and assist developers working on a fix.
+It is possible that the security team will bring in extra help from area
+maintainers to understand and fix the security vulnerability.
 
 Please send **plain text** emails without attachments where possible.
 It is much harder to have a context-quoted discussion about a complex
···
 Markdown, HTML and RST formatted reports are particularly frowned upon since
 they're quite hard to read for humans and encourage to use dedicated viewers,
 sometimes online, which by definition is not acceptable for a confidential
-security report.
+security report. Note that some mailers tend to mangle formatting of plain
+text by default, please consult Documentation/process/email-clients.rst for
+more info.
 
 Disclosure and embargoed information
 ------------------------------------
+3 -3
MAINTAINERS
···
 
 AMD XGBE DRIVER
 M:	Raju Rangoju <Raju.Rangoju@amd.com>
+M:	Prashanth Kumar K R <PrashanthKumar.K.R@amd.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi
···
 F:	net/atm/pppoatm.c
 
 PPP OVER ETHERNET
-M:	Michal Ostrowski <mostrows@earthlink.net>
-S:	Maintained
+S:	Orphan
 F:	drivers/net/ppp/pppoe.c
 F:	drivers/net/ppp/pppox.c
 
···
 F:	drivers/infiniband/sw/rdmavt
 
 RDS - RELIABLE DATAGRAM SOCKETS
-M:	Allison Henderson <allison.henderson@oracle.com>
+M:	Allison Henderson <achender@kernel.org>
 L:	netdev@vger.kernel.org
 L:	linux-rdma@vger.kernel.org
 L:	rds-devel@oss.oracle.com (moderated for non-subscribers)
+1 -1
Makefile
···
 VERSION = 7
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*
+1 -1
arch/arm/boot/dts/microchip/sam9x7.dtsi
···
 			interrupt-controller;
 			#gpio-cells = <2>;
 			gpio-controller;
-			#gpio-lines = <26>;
+			#gpio-lines = <27>;
 			clocks = <&pmc PMC_TYPE_PERIPHERAL 3>;
 		};
 
+1 -5
arch/arm/boot/dts/nxp/imx/imx6-logicpd-som.dtsi
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	status = "okay";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &i2c3 {
+1 -5
arch/arm/boot/dts/nxp/imx/imx6qdl-icore.dtsi
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	status = "okay";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &i2c1 {
+1 -5
arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	status = "okay";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &i2c1 {
+1 -5
arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	status = "disabled";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &i2c3 {
+1 -5
arch/arm/boot/dts/nxp/imx/imx6qdl-skov-cpu.dtsi
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	#address-cells = <1>;
 	#size-cells = <0>;
 	status = "okay";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &i2c3 {
+1 -5
arch/arm/boot/dts/nxp/imx/imx6qdl-tx6.dtsi
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	fsl,no-blockmark-swap;
 	status = "okay";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &i2c1 {
+1 -5
arch/arm/boot/dts/nxp/imx/imx6ul-geam.dts
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	status = "okay";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &i2c1 {
+1 -5
arch/arm/boot/dts/nxp/imx/imx6ul-isiot.dtsi
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	status = "disabled";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &i2c1 {
+1 -5
arch/arm/boot/dts/nxp/imx/imx6ul-phytec-phycore-som.dtsi
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	status = "disabled";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &i2c1 {
+1 -5
arch/arm/boot/dts/nxp/imx/imx6ul-tx6ul.dtsi
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	fsl,no-blockmark-swap;
 	status = "okay";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &i2c2 {
+4 -8
arch/arm/boot/dts/nxp/imx/imx6ull-colibri.dtsi
···
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
 	fsl,use-minimum-ecc;
+	nand-on-flash-bbt;
+	nand-ecc-mode = "hw";
+	nand-ecc-strength = <8>;
+	nand-ecc-step-size = <512>;
 	status = "okay";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-		nand-ecc-mode = "hw";
-		nand-ecc-strength = <8>;
-		nand-ecc-step-size = <512>;
-	};
 };
 
 /* I2C3_SDA/SCL on SODIMM 194/196 (e.g. RTC on carrier board) */
+4 -8
arch/arm/boot/dts/nxp/imx/imx6ull-engicam-microgea.dtsi
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-ecc-mode = "hw";
+	nand-ecc-strength = <0>;
+	nand-ecc-step-size = <0>;
+	nand-on-flash-bbt;
 	status = "okay";
-
-	nand@0 {
-		reg = <0>;
-		nand-ecc-mode = "hw";
-		nand-ecc-strength = <0>;
-		nand-ecc-step-size = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &iomuxc {
+1 -5
arch/arm/boot/dts/nxp/imx/imx6ull-myir-mys-6ulx.dtsi
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	status = "disabled";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &uart1 {
+1 -5
arch/arm/boot/dts/nxp/imx/imx6ulz-bsh-smm-m2.dts
···
 &gpmi {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
 	status = "okay";
-
-	nand@0 {
-		reg = <0>;
-		nand-on-flash-bbt;
-	};
 };
 
 &snvs_poweroff {
+2 -6
arch/arm/boot/dts/nxp/imx/imx7-colibri.dtsi
···
 /* NAND on such SKUs */
 &gpmi {
 	fsl,use-minimum-ecc;
+	nand-ecc-mode = "hw";
+	nand-on-flash-bbt;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_gpmi_nand>;
-
-	nand@0 {
-		reg = <0>;
-		nand-ecc-mode = "hw";
-		nand-on-flash-bbt;
-	};
 };
 
 /* On-module Power I2C */
+1
arch/arm64/Kconfig
···
 	select HAVE_RSEQ
 	select HAVE_RUST if RUSTC_SUPPORTS_ARM64
 	select HAVE_STACKPROTECTOR
+	select HAVE_STATIC_CALL if CFI
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
+1 -1
arch/arm64/boot/dts/allwinner/sun55i-a523.dtsi
···
 			interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&r_ccu CLK_BUS_R_SPI>, <&r_ccu CLK_R_SPI>;
 			clock-names = "ahb", "mod";
-			dmas = <&dma 53>, <&dma 53>;
+			dmas = <&mcu_dma 13>, <&mcu_dma 13>;
 			dma-names = "rx", "tx";
 			resets = <&r_ccu RST_BUS_R_SPI>;
 			status = "disabled";
+1 -1
arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
···
 
 &a53_opp_table {
 	opp-1000000000 {
-		opp-microvolt = <950000>;
+		opp-microvolt = <1000000>;
 	};
 };
 
+7 -17
arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
···
 				regulator-max-microvolt = <1300000>;
 				regulator-boot-on;
 				regulator-ramp-delay = <1250>;
-				rohm,dvs-run-voltage = <880000>;
-				rohm,dvs-idle-voltage = <820000>;
-				rohm,dvs-suspend-voltage = <810000>;
+				rohm,dvs-run-voltage = <900000>;
+				rohm,dvs-idle-voltage = <850000>;
+				rohm,dvs-suspend-voltage = <850000>;
 				regulator-always-on;
 			};
 
···
 				regulator-max-microvolt = <1300000>;
 				regulator-boot-on;
 				regulator-ramp-delay = <1250>;
-				rohm,dvs-run-voltage = <950000>;
-				rohm,dvs-idle-voltage = <850000>;
+				rohm,dvs-run-voltage = <1000000>;
+				rohm,dvs-idle-voltage = <900000>;
 				regulator-always-on;
 			};
 
···
 				regulator-min-microvolt = <700000>;
 				regulator-max-microvolt = <1300000>;
 				regulator-boot-on;
-				rohm,dvs-run-voltage = <850000>;
+				rohm,dvs-run-voltage = <900000>;
 			};
 
 			buck4_reg: BUCK4 {
 				regulator-name = "buck4";
 				regulator-min-microvolt = <700000>;
 				regulator-max-microvolt = <1300000>;
-				rohm,dvs-run-voltage = <930000>;
+				rohm,dvs-run-voltage = <1000000>;
 			};
 
 			buck5_reg: BUCK5 {
···
 	pinctrl-0 = <&pinctrl_wdog>;
 	fsl,ext-reset-output;
 	status = "okay";
-};
-
-&a53_opp_table {
-	opp-1000000000 {
-		opp-microvolt = <850000>;
-	};
-
-	opp-1500000000 {
-		opp-microvolt = <950000>;
-	};
 };
+1 -1
arch/arm64/boot/dts/freescale/imx8mq.dtsi
···
 					  <&clk IMX8MQ_GPU_PLL_OUT>,
 					  <&clk IMX8MQ_GPU_PLL>;
 			assigned-clock-rates = <800000000>, <800000000>,
-					       <800000000>, <800000000>, <0>;
+					       <800000000>, <400000000>, <0>;
 			power-domains = <&pgc_gpu>;
 		};
 
+10 -10
arch/arm64/boot/dts/freescale/imx91-tqma9131.dtsi
···
 	/* enable SION for data and cmd pad due to ERR052021 */
 	pinctrl_usdhc1: usdhc1grp {
 		fsl,pins = /* PD | FSEL 3 | DSE X5 */
-			   <MX91_PAD_SD1_CLK__USDHC1_CLK 0x5be>,
+			   <MX91_PAD_SD1_CLK__USDHC1_CLK 0x59e>,
 			   /* HYS | FSEL 0 | no drive */
 			   <MX91_PAD_SD1_STROBE__USDHC1_STROBE 0x1000>,
 			   /* HYS | FSEL 3 | X5 */
-			   <MX91_PAD_SD1_CMD__USDHC1_CMD 0x400011be>,
+			   <MX91_PAD_SD1_CMD__USDHC1_CMD 0x4000139e>,
 			   /* HYS | FSEL 3 | X4 */
-			   <MX91_PAD_SD1_DATA0__USDHC1_DATA0 0x4000119e>,
-			   <MX91_PAD_SD1_DATA1__USDHC1_DATA1 0x4000119e>,
-			   <MX91_PAD_SD1_DATA2__USDHC1_DATA2 0x4000119e>,
-			   <MX91_PAD_SD1_DATA3__USDHC1_DATA3 0x4000119e>,
-			   <MX91_PAD_SD1_DATA4__USDHC1_DATA4 0x4000119e>,
-			   <MX91_PAD_SD1_DATA5__USDHC1_DATA5 0x4000119e>,
-			   <MX91_PAD_SD1_DATA6__USDHC1_DATA6 0x4000119e>,
-			   <MX91_PAD_SD1_DATA7__USDHC1_DATA7 0x4000119e>;
+			   <MX91_PAD_SD1_DATA0__USDHC1_DATA0 0x4000139e>,
+			   <MX91_PAD_SD1_DATA1__USDHC1_DATA1 0x4000139e>,
+			   <MX91_PAD_SD1_DATA2__USDHC1_DATA2 0x4000139e>,
+			   <MX91_PAD_SD1_DATA3__USDHC1_DATA3 0x4000139e>,
+			   <MX91_PAD_SD1_DATA4__USDHC1_DATA4 0x4000139e>,
+			   <MX91_PAD_SD1_DATA5__USDHC1_DATA5 0x4000139e>,
+			   <MX91_PAD_SD1_DATA6__USDHC1_DATA6 0x4000139e>,
+			   <MX91_PAD_SD1_DATA7__USDHC1_DATA7 0x4000139e>;
 	};
 
 	pinctrl_wdog: wdoggrp {
+2
arch/arm64/boot/dts/freescale/imx93-9x9-qsb.dts
···
 	pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
 	bus-width = <8>;
 	non-removable;
+	fsl,tuning-step = <1>;
 	status = "okay";
 };
 
···
 	vmmc-supply = <&reg_usdhc2_vmmc>;
 	bus-width = <4>;
 	no-mmc;
+	fsl,tuning-step = <1>;
 	status = "okay";
 };
 
+13 -13
arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
···
 	/* enable SION for data and cmd pad due to ERR052021 */
 	pinctrl_usdhc1: usdhc1grp {
 		fsl,pins = <
-			/* PD | FSEL 3 | DSE X5 */
-			MX93_PAD_SD1_CLK__USDHC1_CLK		0x5be
+			/* PD | FSEL 3 | DSE X4 */
+			MX93_PAD_SD1_CLK__USDHC1_CLK		0x59e
 			/* HYS | FSEL 0 | no drive */
 			MX93_PAD_SD1_STROBE__USDHC1_STROBE	0x1000
-			/* HYS | FSEL 3 | X5 */
-			MX93_PAD_SD1_CMD__USDHC1_CMD		0x400011be
-			/* HYS | FSEL 3 | X4 */
-			MX93_PAD_SD1_DATA0__USDHC1_DATA0	0x4000119e
-			MX93_PAD_SD1_DATA1__USDHC1_DATA1	0x4000119e
-			MX93_PAD_SD1_DATA2__USDHC1_DATA2	0x4000119e
-			MX93_PAD_SD1_DATA3__USDHC1_DATA3	0x4000119e
-			MX93_PAD_SD1_DATA4__USDHC1_DATA4	0x4000119e
-			MX93_PAD_SD1_DATA5__USDHC1_DATA5	0x4000119e
-			MX93_PAD_SD1_DATA6__USDHC1_DATA6	0x4000119e
-			MX93_PAD_SD1_DATA7__USDHC1_DATA7	0x4000119e
+			/* HYS | PU | FSEL 3 | DSE X4 */
+			MX93_PAD_SD1_CMD__USDHC1_CMD		0x4000139e
+			/* HYS | PU | FSEL 3 | DSE X4 */
+			MX93_PAD_SD1_DATA0__USDHC1_DATA0	0x4000139e
+			MX93_PAD_SD1_DATA1__USDHC1_DATA1	0x4000139e
+			MX93_PAD_SD1_DATA2__USDHC1_DATA2	0x4000139e
+			MX93_PAD_SD1_DATA3__USDHC1_DATA3	0x4000139e
+			MX93_PAD_SD1_DATA4__USDHC1_DATA4	0x4000139e
+			MX93_PAD_SD1_DATA5__USDHC1_DATA5	0x4000139e
+			MX93_PAD_SD1_DATA6__USDHC1_DATA6	0x4000139e
+			MX93_PAD_SD1_DATA7__USDHC1_DATA7	0x4000139e
 		>;
 	};
 
+1 -1
arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts
···
 };
 
 &pcie {
-	reset-gpios = <&gpio4 4 GPIO_ACTIVE_HIGH>;
+	reset-gpios = <&gpio4 4 GPIO_ACTIVE_LOW>;
 	vpcie-supply = <&reg_pcie>;
 	status = "okay";
 };
+1
arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
···
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges = <0x0 0x0 0xf0000000 0x10000000>;
+		dma-ranges = <0x0 0x0 0x0 0x40000000>;
 
 		crg: clock-reset-controller@8a22000 {
 			compatible = "hisilicon,hi3798cv200-crg", "syscon", "simple-mfd";
+3 -8
arch/arm64/boot/dts/qcom/agatti.dtsi
···
 					 &bimc SLAVE_EBI1 RPM_ALWAYS_TAG>;
 			interconnect-names = "gfx-mem";
 
-			iommus = <&adreno_smmu 0 1>,
-				 <&adreno_smmu 2 0>;
+			iommus = <&adreno_smmu 0 1>;
 			operating-points-v2 = <&gpu_opp_table>;
 			power-domains = <&rpmpd QCM2290_VDDCX>;
 			qcom,gmu = <&gmu_wrapper>;
···
 
 			power-domains = <&dispcc MDSS_GDSC>;
 
-			iommus = <&apps_smmu 0x420 0x2>,
-				 <&apps_smmu 0x421 0x0>;
+			iommus = <&apps_smmu 0x420 0x2>;
 			interconnects = <&mmrt_virt MASTER_MDP0 RPM_ALWAYS_TAG
 					 &bimc SLAVE_EBI1 RPM_ALWAYS_TAG>,
 					<&bimc MASTER_APPSS_PROC RPM_ALWAYS_TAG
···
 
 			memory-region = <&pil_video_mem>;
 			iommus = <&apps_smmu 0x860 0x0>,
-				 <&apps_smmu 0x880 0x0>,
-				 <&apps_smmu 0x861 0x04>,
-				 <&apps_smmu 0x863 0x0>,
-				 <&apps_smmu 0x804 0xe0>;
+				 <&apps_smmu 0x880 0x0>;
 
 			interconnects = <&mmnrt_virt MASTER_VIDEO_P0 RPM_ALWAYS_TAG
 					 &bimc SLAVE_EBI1 RPM_ALWAYS_TAG>,
+1 -1
arch/arm64/boot/dts/qcom/hamoa.dtsi
···
 				idle-state-name = "ret";
 				arm,psci-suspend-param = <0x00000004>;
 				entry-latency-us = <180>;
-				exit-latency-us = <500>;
+				exit-latency-us = <320>;
 				min-residency-us = <600>;
 			};
 		};
+7 -2
arch/arm64/boot/dts/qcom/monaco.dtsi
···
 			hwlocks = <&tcsr_mutex 3>;
 		};
 
+		gunyah_md_mem: gunyah-md-region@91a80000 {
+			reg = <0x0 0x91a80000 0x0 0x80000>;
+			no-map;
+		};
+
 		lpass_machine_learning_mem: lpass-machine-learning-region@93b00000 {
 			reg = <0x0 0x93b00000 0x0 0xf00000>;
 			no-map;
···
 		};
 
 		qup_uart10_rts: qup-uart10-rts-state {
-			pins = "gpio84";
+			pins = "gpio85";
 			function = "qup1_se2";
 		};
 
 		qup_uart10_tx: qup-uart10-tx-state {
-			pins = "gpio85";
+			pins = "gpio86";
 			function = "qup1_se2";
 		};
 
+1 -1
arch/arm64/boot/dts/qcom/qcm6490-idp.dts
···
 	pinctrl-0 = <&wcd_default>;
 	pinctrl-names = "default";
 
-	reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>;
+	reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>;
 
 	vdd-buck-supply = <&vreg_l17b_1p7>;
 	vdd-rxtx-supply = <&vreg_l18b_1p8>;
+10 -6
arch/arm64/boot/dts/qcom/x1-asus-zenbook-a14.dtsi
···
 };
 
 &pcie4 {
-	perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
-
 	pinctrl-0 = <&pcie4_default>;
 	pinctrl-names = "default";
 
···
 	status = "okay";
 };
 
-&pcie6a {
-	perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+&pcie4_port0 {
+	reset-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+};
 
+&pcie6a {
 	vddpe-3v3-supply = <&vreg_nvme>;
 
 	pinctrl-0 = <&pcie6a_default>;
···
 	vdda-pll-supply = <&vreg_l2j_1p2>;
 
 	status = "okay";
+};
+
+&pcie6a_port0 {
+	reset-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
 };
 
 &pm8550_gpios {
+15 -9
arch/arm64/boot/dts/qcom/x1-crd.dtsi
···
 };
 
 &pcie4 {
-	perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
-
 	pinctrl-0 = <&pcie4_default>;
 	pinctrl-names = "default";
 
 	status = "okay";
+};
+
+&pcie4_port0 {
+	reset-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
 };
 
 &pcie4_phy {
···
 };
 
 &pcie5 {
-	perst-gpios = <&tlmm 149 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 151 GPIO_ACTIVE_LOW>;
-
 	vddpe-3v3-supply = <&vreg_wwan>;
 
 	pinctrl-0 = <&pcie5_default>;
···
 	status = "okay";
 };
 
-&pcie6a {
-	perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
+&pcie5_port0 {
+	reset-gpios = <&tlmm 149 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 151 GPIO_ACTIVE_LOW>;
+};
 
+&pcie6a {
 	vddpe-3v3-supply = <&vreg_nvme>;
 
 	pinctrl-names = "default";
···
 	vdda-pll-supply = <&vreg_l2j_1p2>;
 
 	status = "okay";
+};
+
+&pcie6a_port0 {
+	reset-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
 };
 
 &pm8550_gpios {
+8 -6
arch/arm64/boot/dts/qcom/x1-dell-thena.dtsi
···
 };
 
 &pcie4 {
-	perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
-
 	pinctrl-0 = <&pcie4_default>;
 	pinctrl-names = "default";
 
···
 };
 
 &pcie4_port0 {
+	reset-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
 	wifi@0 {
 		compatible = "pci17cb,1107";
 		reg = <0x10000 0x0 0x0 0x0 0x0>;
···
 };
 
 &pcie6a {
-	perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
-
 	vddpe-3v3-supply = <&vreg_nvme>;
 
 	pinctrl-0 = <&pcie6a_default>;
 	pinctrl-names = "default";
 
 	status = "okay";
+};
+
+&pcie6a_port0 {
+	reset-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
 };
 
 &pcie6a_phy {
+8 -6
arch/arm64/boot/dts/qcom/x1-hp-omnibook-x14.dtsi
···
 };
 
 &pcie4 {
-	perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
-
 	pinctrl-0 = <&pcie4_default>;
 	pinctrl-names = "default";
 
···
 };
 
 &pcie4_port0 {
+	reset-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
 	wifi@0 {
 		compatible = "pci17cb,1107";
 		reg = <0x10000 0x0 0x0 0x0 0x0>;
···
 };
 
 &pcie6a {
-	perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
-
 	vddpe-3v3-supply = <&vreg_nvme>;
 
 	pinctrl-0 = <&pcie6a_default>;
 	pinctrl-names = "default";
 
 	status = "okay";
+};
+
+&pcie6a_port0 {
+	reset-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
 };
 
 &pcie6a_phy {
+5 -3
arch/arm64/boot/dts/qcom/x1-microsoft-denali.dtsi
···
 };
 
 &pcie6a {
-	perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
-
 	vddpe-3v3-supply = <&vreg_nvme>;
 
 	pinctrl-0 = <&pcie6a_default>;
···
 	vdda-pll-supply = <&vreg_l2j_1p2>;
 
 	status = "okay";
+};
+
+&pcie6a_port0 {
+	reset-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
 };
 
 &pm8550_gpios {
+3 -3
arch/arm64/boot/dts/qcom/x1e80100-lenovo-yoga-slim7x.dts
···
 };
 
 &pcie4 {
-	perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
-
 	pinctrl-0 = <&pcie4_default>;
 	pinctrl-names = "default";
 
···
 };
 
 &pcie4_port0 {
+	reset-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
 	wifi@0 {
 		compatible = "pci17cb,1107";
 		reg = <0x10000 0x0 0x0 0x0 0x0>;
+8 -7
arch/arm64/boot/dts/qcom/x1e80100-medion-sprchrgd-14-s1.dts
···
 };
 
 &pcie4 {
-	perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
-
 	pinctrl-0 = <&pcie4_default>;
 	pinctrl-names = "default";
 
···
 };
 
 &pcie4_port0 {
+	reset-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
 	wifi@0 {
 		compatible = "pci17cb,1107";
 		reg = <0x10000 0x0 0x0 0x0 0x0>;
···
 };
 
 &pcie6a {
-	perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
-
-	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
-
 	vddpe-3v3-supply = <&vreg_nvme>;
 
 	pinctrl-0 = <&pcie6a_default>;
···
 	vdda-pll-supply = <&vreg_l2j_1p2>;
 
 	status = "okay";
+};
+
+&pcie6a_port0 {
+	reset-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
 };
 
 &pm8550_gpios {
+8 -6
arch/arm64/boot/dts/qcom/x1p42100-lenovo-thinkbook-16.dts
···
 };
 
 &pcie4 {
-	perst-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
-
 	pinctrl-0 = <&pcie4_default>;
 	pinctrl-names = "default";
 
···
 };
 
 &pcie4_port0 {
+	reset-gpios = <&tlmm 146 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 148 GPIO_ACTIVE_LOW>;
+
 	wifi@0 {
 		compatible = "pci17cb,1107";
 		reg = <0x10000 0x0 0x0 0x0 0x0>;
···
 };
 
 &pcie6a {
-	perst-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
-	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
-
 	vddpe-3v3-supply = <&vreg_nvme>;
 
 	pinctrl-0 = <&pcie6a_default>;
···
 	vdda-pll-supply = <&vreg_l2j_1p2>;
 
 	status = "okay";
+};
+
+&pcie6a_port0 {
+	reset-gpios = <&tlmm 152 GPIO_ACTIVE_LOW>;
+	wake-gpios = <&tlmm 154 GPIO_ACTIVE_LOW>;
 };
 
 &pm8550_pwm {
+11
arch/arm64/boot/dts/renesas/r8a779g3-sparrow-hawk.dts
···
 		reg = <0x6 0x00000000 0x1 0x00000000>;
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		tfa@40000000 {
+			reg = <0x0 0x40000000 0x0 0x8000000>;
+			no-map;
+		};
+	};
+
 	/* Page 27 / DSI to Display */
 	dp-con {
 		compatible = "dp-connector";
-18
arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
···
 		};
 	};
 
-	wifi {
-		wifi_host_wake_l: wifi-host-wake-l {
-			rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
-		};
-	};
-
 	wireless-bluetooth {
 		bt_wake_pin: bt-wake-pin {
 			rockchip,pins = <2 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
···
 	pinctrl-names = "default";
 	pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
 	sd-uhs-sdr104;
-	#address-cells = <1>;
-	#size-cells = <0>;
 	status = "okay";
-
-	brcmf: wifi@1 {
-		compatible = "brcm,bcm4329-fmac";
-		reg = <1>;
-		interrupt-parent = <&gpio0>;
-		interrupts = <RK_PA3 IRQ_TYPE_LEVEL_HIGH>;
-		interrupt-names = "host-wake";
-		pinctrl-names = "default";
-		pinctrl-0 = <&wifi_host_wake_l>;
-	};
 };
 
 &sdhci {
+31
arch/arm64/include/asm/static_call.h
···
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STATIC_CALL_H
+#define _ASM_STATIC_CALL_H
+
+#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, target)		\
+	asm("	.pushsection .static_call.text, \"ax\"	\n"	\
+	    "	.align 4				\n"	\
+	    "	.globl " name "				\n"	\
+	    name ":					\n"	\
+	    "	hint 34	/* BTI C */			\n"	\
+	    "	adrp x16, 1f				\n"	\
+	    "	ldr x16, [x16, :lo12:1f]		\n"	\
+	    "	br x16					\n"	\
+	    "	.type " name ", %function		\n"	\
+	    "	.size " name ", . - " name "		\n"	\
+	    "	.popsection				\n"	\
+	    "	.pushsection .rodata, \"a\"		\n"	\
+	    "	.align 3				\n"	\
+	    "1:	.quad " target "			\n"	\
+	    "	.popsection				\n")
+
+#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)		\
+	__ARCH_DEFINE_STATIC_CALL_TRAMP(STATIC_CALL_TRAMP_STR(name), #func)
+
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)		\
+	ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
+
+#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)		\
+	ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
+
+#endif /* _ASM_STATIC_CALL_H */
+1
arch/arm64/kernel/Makefile
···
 obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
 obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF)	+= watchdog_hld.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
+obj-$(CONFIG_HAVE_STATIC_CALL)		+= static_call.o
 obj-$(CONFIG_CPU_PM)			+= sleep.o suspend.o
 obj-$(CONFIG_KGDB)			+= kgdb.o
 obj-$(CONFIG_EFI)			+= efi.o efi-rt-wrapper.o
+23
arch/arm64/kernel/static_call.c
···
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/static_call.h>
+#include <linux/memory.h>
+#include <asm/text-patching.h>
+
+void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
+{
+	u64 literal;
+	int ret;
+
+	if (!func)
+		func = __static_call_return0;
+
+	/* decode the instructions to discover the literal address */
+	literal = ALIGN_DOWN((u64)tramp + 4, SZ_4K) +
+		  aarch64_insn_adrp_get_offset(le32_to_cpup(tramp + 4)) +
+		  8 * aarch64_insn_decode_immediate(AARCH64_INSN_IMM_12,
+						    le32_to_cpup(tramp + 8));
+
+	ret = aarch64_insn_write_literal_u64((void *)literal, (u64)func);
+	WARN_ON_ONCE(ret);
+}
+EXPORT_SYMBOL_GPL(arch_static_call_transform);
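(Aside for readers of this hunk, not part of the merge itself: the trampoline in arch/arm64/include/asm/static_call.h and the arch_static_call_transform() hook above back the generic <linux/static_call.h> API. A minimal sketch of how kernel code typically consumes that API follows; the key and handler names are made up for illustration, while DEFINE_STATIC_CALL(), static_call() and static_call_update() are the real generic interface.)

/* Illustrative sketch only; my_hook/default_handler/faster_handler are hypothetical. */
#include <linux/static_call.h>

static int default_handler(int x)
{
	return x;	/* initial target, stored in the trampoline's literal */
}

/* Emits the static-call key and, on arm64, the trampoline defined above. */
DEFINE_STATIC_CALL(my_hook, default_handler);

static int faster_handler(int x)
{
	return x + 1;
}

static void my_hook_demo(void)
{
	int r;

	r = static_call(my_hook)(1);	/* branches via the trampoline to default_handler() */
	/* Retargets the call; on arm64 this rewrites the literal via arch_static_call_transform(). */
	static_call_update(my_hook, faster_handler);
	r = static_call(my_hook)(1);	/* now reaches faster_handler() */
	(void)r;
}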
+1
arch/arm64/kernel/vmlinux.lds.S
···
 			LOCK_TEXT
 			KPROBES_TEXT
 			HYPERVISOR_TEXT
+			STATIC_CALL_TEXT
 			*(.gnu.warning)
 		}
 
-1
arch/mips/include/asm/cpu-features.h
···
 # endif
 # ifndef cpu_vmbits
 # define cpu_vmbits cpu_data[0].vmbits
-# define __NEED_VMBITS_PROBE
 # endif
 #endif
 
-2
arch/mips/include/asm/cpu-info.h
···
 	int			srsets;	/* Shadow register sets */
 	int			package;/* physical package number */
 	unsigned int		globalnumber;
-#ifdef CONFIG_64BIT
 	int			vmbits;	/* Virtual memory size in bits */
-#endif
 	void			*data;	/* Additional data */
 	unsigned int		watch_reg_count;   /* Number that exist */
 	unsigned int		watch_reg_use_cnt; /* Usable by ptrace */
+2
arch/mips/include/asm/mipsregs.h
···
 
 #define read_c0_entryhi()	__read_ulong_c0_register($10, 0)
 #define write_c0_entryhi(val)	__write_ulong_c0_register($10, 0, val)
+#define read_c0_entryhi_64()	__read_64bit_c0_register($10, 0)
+#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
 
 #define read_c0_guestctl1()	__read_32bit_c0_register($10, 4)
 #define write_c0_guestctl1(val)	__write_32bit_c0_register($10, 4, val)
+8 -5
arch/mips/kernel/cpu-probe.c
···
 
 static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
 {
-#ifdef __NEED_VMBITS_PROBE
-	write_c0_entryhi(0x3fffffffffffe000ULL);
-	back_to_back_c0_hazard();
-	c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
-#endif
+	int vmbits = 31;
+
+	if (cpu_has_64bits) {
+		write_c0_entryhi_64(0x3fffffffffffe000ULL);
+		back_to_back_c0_hazard();
+		vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
+	}
+	c->vmbits = vmbits;
 }
 
 static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+2
arch/mips/kernel/cpu-r3k-probe.c
···
 	else
 		cpu_set_nofpu_opts(c);
 
+	c->vmbits = 31;
+
 	reserve_exception_space(0, 0x400);
 }
 
+3 -3
arch/mips/lib/multi3.c
···
 #include "libgcc.h"
 
 /*
- * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
+ * GCC 9 & older can suboptimally generate __multi3 calls for mips64r6, so for
  * that specific case only we implement that intrinsic here.
  *
  * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
  */
-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 10)
 
 /* multiply 64-bit values, low 64-bits returned */
 static inline long long notrace dmulu(long long a, long long b)
···
 }
 EXPORT_SYMBOL(__multi3);
 
-#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
+#endif /* 64BIT && CPU_MIPSR6 && GCC9 */
+17 -1
arch/mips/loongson64/env.c
···
 #include <linux/dma-map-ops.h>
 #include <linux/export.h>
 #include <linux/libfdt.h>
+#include <linux/minmax.h>
 #include <linux/pci_ids.h>
+#include <linux/serial_core.h>
 #include <linux/string_choices.h>
 #include <asm/bootinfo.h>
 #include <loongson.h>
···
 
 	is_loongson64g = (read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G;
 
-	for (i = 0; i < system->nr_uarts; i++) {
+	for (i = 0; i < min(system->nr_uarts, MAX_UARTS); i++) {
 		uartdev = &system->uarts[i];
+
+		/*
+		 * Some firmware does not set nr_uarts properly and passes empty
+		 * items. Ignore them silently.
+		 */
+		if (uartdev->uart_base == 0)
+			continue;
+
+		/* Our DT only works with UPIO_MEM. */
+		if (uartdev->iotype != UPIO_MEM) {
+			pr_warn("Ignore UART 0x%llx with iotype %u passed by firmware\n",
+				uartdev->uart_base, uartdev->iotype);
+			continue;
+		}
 
 		ret = lefi_fixup_fdt_serial(fdt_buf, uartdev->uart_base,
 					    uartdev->uartclk);
+2 -1
arch/mips/mm/cache.c
···
 {
 	if (IS_ENABLED(CONFIG_CPU_R3000) && cpu_has_3k_cache)
 		r3k_cache_init();
-	if (IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) && cpu_has_4k_cache)
+	if ((IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) ||
+	     IS_ENABLED(CONFIG_CPU_SB1)) && cpu_has_4k_cache)
 		r4k_cache_init();
 
 	if (IS_ENABLED(CONFIG_CPU_CAVIUM_OCTEON) && cpu_has_octeon_cache)
+231 -56
arch/mips/mm/tlb-r4k.c
···
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/memblock.h>
+#include <linux/minmax.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/export.h>
···
 #include <asm/hazards.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
+#include <asm/tlbdebug.h>
 #include <asm/tlbex.h>
 #include <asm/tlbmisc.h>
 #include <asm/setup.h>
···
 __setup("ntlb=", set_ntlb);
 
 
-/* Comparison function for EntryHi VPN fields. */
-static int r4k_vpn_cmp(const void *a, const void *b)
+/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
+#define VPN2_SHIFT 13
+
+/* Read full EntryHi even with CONFIG_32BIT. */
+static inline unsigned long long read_c0_entryhi_native(void)
 {
-	long v = *(unsigned long *)a - *(unsigned long *)b;
-	int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
-	return s ? (v != 0) | v >> s : v;
+	return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
+}
+
+/* Write full EntryHi even with CONFIG_32BIT. */
+static inline void write_c0_entryhi_native(unsigned long long v)
+{
+	if (cpu_has_64bits)
+		write_c0_entryhi_64(v);
+	else
+		write_c0_entryhi(v);
+}
+
+/* TLB entry state for uniquification. */
+struct tlbent {
+	unsigned long long wired:1;
+	unsigned long long global:1;
+	unsigned long long asid:10;
+	unsigned long long vpn:51;
+	unsigned long long pagesz:5;
+	unsigned long long index:14;
+};
+
+/*
+ * Comparison function for TLB entry sorting.  Place wired entries first,
+ * then global entries, then order by the increasing VPN/ASID and the
+ * decreasing page size.  This lets us avoid clashes with wired entries
+ * easily and get entries for larger pages out of the way first.
+ *
+ * We could group bits so as to reduce the number of comparisons, but this
+ * is seldom executed and not performance-critical, so prefer legibility.
+ */
+static int r4k_entry_cmp(const void *a, const void *b)
+{
+	struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
+
+	if (ea.wired > eb.wired)
+		return -1;
+	else if (ea.wired < eb.wired)
+		return 1;
+	else if (ea.global > eb.global)
+		return -1;
+	else if (ea.global < eb.global)
+		return 1;
+	else if (ea.vpn < eb.vpn)
+		return -1;
+	else if (ea.vpn > eb.vpn)
+		return 1;
+	else if (ea.asid < eb.asid)
+		return -1;
+	else if (ea.asid > eb.asid)
+		return 1;
+	else if (ea.pagesz > eb.pagesz)
+		return -1;
+	else if (ea.pagesz < eb.pagesz)
+		return 1;
+	else
+		return 0;
+}
+
+/*
+ * Fetch all the TLB entries.  Mask individual VPN values retrieved with
+ * the corresponding page mask and ignoring any 1KiB extension as we'll
+ * be using 4KiB pages for uniquification.
+ */
+static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
+{
+	int start = num_wired_entries();
+	unsigned long long vpn_mask;
+	bool global;
+	int i;
+
+	vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
+	vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
+
+	for (i = 0; i < tlbsize; i++) {
+		unsigned long long entryhi, vpn, mask, asid;
+		unsigned int pagesz;
+
+		write_c0_index(i);
+		mtc0_tlbr_hazard();
+		tlb_read();
+		tlb_read_hazard();
+
+		global = !!(read_c0_entrylo0() & ENTRYLO_G);
+		entryhi = read_c0_entryhi_native();
+		mask = read_c0_pagemask();
+
+		asid = entryhi & cpu_asid_mask(&current_cpu_data);
+		vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
+		pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
+
+		tlb_vpns[i].global = global;
+		tlb_vpns[i].asid = global ? 0 : asid;
+		tlb_vpns[i].vpn = vpn;
+		tlb_vpns[i].pagesz = pagesz;
+		tlb_vpns[i].wired = i < start;
+		tlb_vpns[i].index = i;
+	}
+}
+
+/*
+ * Write unique values to all but the wired TLB entries each, using
+ * the 4KiB page size.  This size might not be supported with R6, but
+ * EHINV is mandatory for R6, so we won't ever be called in that case.
+ *
+ * A sorted table is supplied with any wired entries at the beginning,
+ * followed by any global entries, and then finally regular entries.
+ * We start at the VPN and ASID values of zero and only assign user
+ * addresses, therefore guaranteeing no clash with addresses produced
+ * by UNIQUE_ENTRYHI.  We avoid any VPN values used by wired or global
+ * entries, by increasing the VPN value beyond the span of such entry.
+ *
+ * When a VPN/ASID clash is found with a regular entry we increment the
+ * ASID instead until no VPN/ASID clash has been found or the ASID space
+ * has been exhausted, in which case we increase the VPN value beyond
+ * the span of the largest clashing entry.
+ *
+ * We do not need to be concerned about FTLB or MMID configurations as
+ * those are required to implement the EHINV feature.
+ */
+static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
+{
+	unsigned long long asid, vpn, vpn_size, pagesz;
+	int widx, gidx, idx, sidx, lidx, i;
+
+	vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
+	pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
+
+	write_c0_pagemask(PM_4K);
+	write_c0_entrylo0(0);
+	write_c0_entrylo1(0);
+
+	asid = 0;
+	vpn = 0;
+	widx = 0;
+	gidx = 0;
+	for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
+		;
+	for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
+		;
+	idx = gidx = sidx + 1;
+	for (i = sidx; i < tlbsize; i++) {
+		unsigned long long entryhi, vpn_pagesz = 0;
+
+		while (1) {
+			if (WARN_ON(vpn >= vpn_size)) {
+				dump_tlb_all();
+				/* Pray local_flush_tlb_all() will cope. */
+				return;
+			}
+
+			/* VPN must be below the next wired entry. */
+			if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
+				vpn = max(vpn,
+					  (tlb_vpns[widx].vpn +
+					   (1ULL << tlb_vpns[widx].pagesz)));
+				asid = 0;
+				widx++;
+				continue;
+			}
+			/* VPN must be below the next global entry. */
+			if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
+				vpn = max(vpn,
+					  (tlb_vpns[gidx].vpn +
+					   (1ULL << tlb_vpns[gidx].pagesz)));
+				asid = 0;
+				gidx++;
+				continue;
+			}
+			/* Try to find a free ASID so as to conserve VPNs. */
+			if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
+			    asid == tlb_vpns[idx].asid) {
+				unsigned long long idx_pagesz;
+
+				idx_pagesz = tlb_vpns[idx].pagesz;
+				vpn_pagesz = max(vpn_pagesz, idx_pagesz);
+				do
+					idx++;
+				while (idx < tlbsize &&
+				       vpn == tlb_vpns[idx].vpn &&
+				       asid == tlb_vpns[idx].asid);
+				asid++;
+				if (asid > cpu_asid_mask(&current_cpu_data)) {
+					vpn += vpn_pagesz;
+					asid = 0;
+					vpn_pagesz = 0;
+				}
+				continue;
+			}
+			/* VPN mustn't be above the next regular entry. */
+			if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
+				vpn = max(vpn,
+					  (tlb_vpns[idx].vpn +
+					   (1ULL << tlb_vpns[idx].pagesz)));
+				asid = 0;
+				idx++;
+				continue;
+			}
+			break;
+		}
+
+		entryhi = (vpn << VPN2_SHIFT) | asid;
+		write_c0_entryhi_native(entryhi);
+		write_c0_index(tlb_vpns[i].index);
+		mtc0_tlbw_hazard();
+		tlb_write_indexed();
+
+		tlb_vpns[i].asid = asid;
+		tlb_vpns[i].vpn = vpn;
+		tlb_vpns[i].pagesz = pagesz;
+
+		asid++;
+		if (asid > cpu_asid_mask(&current_cpu_data)) {
+			vpn += 1ULL << pagesz;
+			asid = 0;
+		}
+	}
 }
 
 /*
···
 {
 	int tlbsize = current_cpu_data.tlbsize;
 	bool use_slab = slab_is_available();
-	int start = num_wired_entries();
 	phys_addr_t tlb_vpn_size;
-	unsigned long *tlb_vpns;
-	unsigned long vpn_mask;
-	int cnt, ent, idx, i;
-
-	vpn_mask = GENMASK(cpu_vmbits - 1, 13);
-	vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+	struct tlbent *tlb_vpns;
 
 	tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
 	tlb_vpns = (use_slab ?
-		    kmalloc(tlb_vpn_size, GFP_KERNEL) :
+		    kmalloc(tlb_vpn_size, GFP_ATOMIC) :
 		    memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
 	if (WARN_ON(!tlb_vpns))
 		return;	/* Pray local_flush_tlb_all() is good enough. */
 
 	htw_stop();
 
-	for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
-		unsigned long vpn;
+	r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
 
-		write_c0_index(i);
-		mtc0_tlbr_hazard();
-		tlb_read();
-		tlb_read_hazard();
-		vpn = read_c0_entryhi();
-		vpn &= vpn_mask & PAGE_MASK;
-		tlb_vpns[cnt] = vpn;
-
-		/* Prevent any large pages from overlapping regular ones. */
-		write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
-		mtc0_tlbw_hazard();
-		tlb_write_indexed();
-		tlbw_use_hazard();
-	}
-
-	sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
+	sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
 
+	r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
 
 	write_c0_pagemask(PM_DEFAULT_MASK);
-	write_c0_entrylo0(0);
-	write_c0_entrylo1(0);
-
-	idx = 0;
-	ent = tlbsize;
-	for (i = start; i < tlbsize; i++)
-		while (1) {
-			unsigned long entryhi, vpn;
-
-			entryhi = UNIQUE_ENTRYHI(ent);
-			vpn = entryhi & vpn_mask & PAGE_MASK;
-
-			if (idx >= cnt || vpn < tlb_vpns[idx]) {
-				write_c0_entryhi(entryhi);
-				write_c0_index(i);
-				mtc0_tlbw_hazard();
-				tlb_write_indexed();
-				ent++;
-				break;
-			} else if (vpn == tlb_vpns[idx]) {
-				ent++;
-			} else {
-				idx++;
-			}
-		}
 
 	tlbw_use_hazard();
 	htw_start();
···
 	temp_tlb_entry = current_cpu_data.tlbsize - 1;
 
 	/* From this point on the ARC firmware is dead. */
-	r4k_tlb_uniquify();
+	if (!cpu_has_tlbinv)
+		r4k_tlb_uniquify();
 	local_flush_tlb_all();
 
 	/* Did I tell you that ARC SUCKS? */
+4 -4
arch/mips/ralink/clk.c
···
 {
 	switch (ralink_soc) {
 	case RT2880_SOC:
-		*idx = 0;
+		*idx = 1;
 		return "ralink,rt2880-sysc";
 	case RT3883_SOC:
-		*idx = 0;
+		*idx = 1;
 		return "ralink,rt3883-sysc";
 	case RT305X_SOC_RT3050:
-		*idx = 0;
+		*idx = 1;
 		return "ralink,rt3050-sysc";
 	case RT305X_SOC_RT3052:
-		*idx = 0;
+		*idx = 1;
 		return "ralink,rt3052-sysc";
 	case RT305X_SOC_RT3350:
 		*idx = 1;
+2 -2
arch/powerpc/kernel/dma-iommu.c
···
 }
 bool arch_dma_alloc_direct(struct device *dev)
 {
-	if (dev->dma_ops_bypass)
+	if (dev->dma_ops_bypass && dev->bus_dma_limit)
 		return true;
 
 	return false;
···
 
 bool arch_dma_free_direct(struct device *dev, dma_addr_t dma_handle)
 {
-	if (!dev->dma_ops_bypass)
+	if (!dev->dma_ops_bypass || !dev->bus_dma_limit)
 		return false;
 
 	return is_direct_handle(dev, dma_handle);
+4
arch/riscv/include/asm/runtime-const.h
···
 #ifndef _ASM_RISCV_RUNTIME_CONST_H
 #define _ASM_RISCV_RUNTIME_CONST_H
 
+#ifdef MODULE
+#error "Cannot use runtime-const infrastructure from modules"
+#endif
+
 #include <asm/asm.h>
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
+7 -6
arch/riscv/include/uapi/asm/ptrace.h
··· 9 9 #ifndef __ASSEMBLER__ 10 10 11 11 #include <linux/types.h> 12 + #include <linux/const.h> 12 13 13 14 #define PTRACE_GETFDPIC 33 14 15 ··· 139 138 #define PTRACE_CFI_SS_LOCK_BIT 4 140 139 #define PTRACE_CFI_SS_PTR_BIT 5 141 140 142 - #define PTRACE_CFI_LP_EN_STATE BIT(PTRACE_CFI_LP_EN_BIT) 143 - #define PTRACE_CFI_LP_LOCK_STATE BIT(PTRACE_CFI_LP_LOCK_BIT) 144 - #define PTRACE_CFI_ELP_STATE BIT(PTRACE_CFI_ELP_BIT) 145 - #define PTRACE_CFI_SS_EN_STATE BIT(PTRACE_CFI_SS_EN_BIT) 146 - #define PTRACE_CFI_SS_LOCK_STATE BIT(PTRACE_CFI_SS_LOCK_BIT) 147 - #define PTRACE_CFI_SS_PTR_STATE BIT(PTRACE_CFI_SS_PTR_BIT) 141 + #define PTRACE_CFI_LP_EN_STATE _BITUL(PTRACE_CFI_LP_EN_BIT) 142 + #define PTRACE_CFI_LP_LOCK_STATE _BITUL(PTRACE_CFI_LP_LOCK_BIT) 143 + #define PTRACE_CFI_ELP_STATE _BITUL(PTRACE_CFI_ELP_BIT) 144 + #define PTRACE_CFI_SS_EN_STATE _BITUL(PTRACE_CFI_SS_EN_BIT) 145 + #define PTRACE_CFI_SS_LOCK_STATE _BITUL(PTRACE_CFI_SS_LOCK_BIT) 146 + #define PTRACE_CFI_SS_PTR_STATE _BITUL(PTRACE_CFI_SS_PTR_BIT) 148 147 149 148 #define PRACE_CFI_STATE_INVALID_MASK ~(PTRACE_CFI_LP_EN_STATE | \ 150 149 PTRACE_CFI_LP_LOCK_STATE | \
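BIT() is a kernel-internal helper and must not appear in UAPI headers; <linux/const.h> is exported to userspace and provides _BITUL()/_BITULL() for the same purpose. A minimal sketch of the pattern, with the EXAMPLE_* names being hypothetical:

#include <linux/const.h>

/* Define the bit position, then derive the mask with _BITUL(). */
#define EXAMPLE_FEATURE_EN_BIT          0
#define EXAMPLE_FEATURE_EN_STATE        _BITUL(EXAMPLE_FEATURE_EN_BIT)  /* (_UL(1) << 0) */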
+4 -3
arch/riscv/kernel/kgdb.c
··· 175 175 {DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)}, 176 176 {DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)}, 177 177 {DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)}, 178 - {DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)}, 178 + {DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, s1)}, 179 179 {DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)}, 180 180 {DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)}, 181 181 {DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)}, ··· 244 244 gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6]; 245 245 gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7]; 246 246 gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8]; 247 - gdb_regs[DBG_REG_S9_OFF] = task->thread.s[10]; 248 - gdb_regs[DBG_REG_S10_OFF] = task->thread.s[11]; 247 + gdb_regs[DBG_REG_S9_OFF] = task->thread.s[9]; 248 + gdb_regs[DBG_REG_S10_OFF] = task->thread.s[10]; 249 + gdb_regs[DBG_REG_S11_OFF] = task->thread.s[11]; 249 250 gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra; 250 251 } 251 252
+11 -10
arch/riscv/kernel/patch.c
··· 42 42 static __always_inline void *patch_map(void *addr, const unsigned int fixmap) 43 43 { 44 44 uintptr_t uintaddr = (uintptr_t) addr; 45 - struct page *page; 45 + phys_addr_t phys; 46 46 47 - if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr)) 48 - page = phys_to_page(__pa_symbol(addr)); 49 - else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) 50 - page = vmalloc_to_page(addr); 51 - else 47 + if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr)) { 48 + phys = __pa_symbol(addr); 49 + } else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) { 50 + struct page *page = vmalloc_to_page(addr); 51 + 52 + BUG_ON(!page); 53 + phys = page_to_phys(page) + offset_in_page(addr); 54 + } else { 52 55 return addr; 56 + } 53 57 54 - BUG_ON(!page); 55 - 56 - return (void *)set_fixmap_offset(fixmap, page_to_phys(page) + 57 - offset_in_page(addr)); 58 + return (void *)set_fixmap_offset(fixmap, phys); 58 59 } 59 60 60 61 static void patch_unmap(int fixmap)
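The restructured patch_map() computes a physical address first so a single set_fixmap_offset() call serves both kernel and module text. A condensed sketch of the vmalloc branch, using only the calls visible in the hunk (kernel context assumed, roughly <linux/vmalloc.h> plus <linux/mm.h>):

/* Resolve the physical address backing a vmalloc'ed text address. */
static phys_addr_t text_phys(void *addr)
{
        struct page *page = vmalloc_to_page(addr);

        BUG_ON(!page);  /* module text must be backed by a page */
        return page_to_phys(page) + offset_in_page(addr);
}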
+3 -1
arch/riscv/kernel/process.c
··· 347 347 if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen)) 348 348 return -EINVAL; 349 349 350 - if (!(arg & PR_TAGGED_ADDR_ENABLE)) 350 + if (!(arg & PR_TAGGED_ADDR_ENABLE)) { 351 351 pmlen = PMLEN_0; 352 + pmm = ENVCFG_PMM_PMLEN_0; 353 + } 352 354 353 355 if (mmap_write_lock_killable(mm)) 354 356 return -EINTR;
+5 -1
arch/s390/kernel/perf_cpum_sf.c
··· 1168 1168 static void hw_perf_event_update(struct perf_event *event, int flush_all) 1169 1169 { 1170 1170 unsigned long long event_overflow, sampl_overflow, num_sdb; 1171 + struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1171 1172 struct hw_perf_event *hwc = &event->hw; 1172 1173 union hws_trailer_header prev, new; 1173 1174 struct hws_trailer_entry *te; ··· 1248 1247 * are dropped. 1249 1248 * Slightly increase the interval to avoid hitting this limit. 1250 1249 */ 1251 - if (event_overflow) 1250 + if (event_overflow) { 1252 1251 SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10); 1252 + if (SAMPL_RATE(hwc) > cpuhw->qsi.max_sampl_rate) 1253 + SAMPL_RATE(hwc) = cpuhw->qsi.max_sampl_rate; 1254 + } 1253 1255 } 1254 1256 1255 1257 static inline unsigned long aux_sdb_index(struct aux_buffer *aux,
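The fix keeps the roughly 10% growth of the sampling interval but now clamps it to the sampling facility's advertised maximum. The same arithmetic as a self-contained sketch, with plain C standing in for SAMPL_RATE() and DIV_ROUND_UP():

/* Grow a sampling interval by ~10%, never exceeding the hardware limit. */
static unsigned long grow_sampl_rate(unsigned long rate, unsigned long max_rate)
{
        rate += (rate + 9) / 10;        /* DIV_ROUND_UP(rate, 10) */
        return rate < max_rate ? rate : max_rate;
}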
+4 -2
arch/x86/events/intel/core.c
··· 4855 4855 intel_pmu_set_acr_caused_constr(leader, idx++, cause_mask); 4856 4856 4857 4857 if (leader->nr_siblings) { 4858 - for_each_sibling_event(sibling, leader) 4859 - intel_pmu_set_acr_caused_constr(sibling, idx++, cause_mask); 4858 + for_each_sibling_event(sibling, leader) { 4859 + if (is_x86_event(sibling)) 4860 + intel_pmu_set_acr_caused_constr(sibling, idx++, cause_mask); 4861 + } 4860 4862 } 4861 4863 4862 4864 if (leader != event)
+14
arch/x86/kernel/Makefile
··· 44 44 KCOV_INSTRUMENT_unwind_frame.o := n 45 45 KCOV_INSTRUMENT_unwind_guess.o := n 46 46 47 + # Disable KCOV to prevent crashes during kexec: load_segments() invalidates 48 + # the GS base, which KCOV relies on for per-CPU data. 49 + # 50 + # As KCOV and KEXEC compatibility should be preserved (e.g. syzkaller is 51 + # using it to collect crash dumps during kernel fuzzing), disabling 52 + # KCOV for KEXEC kernels is not an option. Selectively disabling KCOV 53 + # instrumentation for individual affected functions can be fragile, while 54 + # adding more checks to KCOV would slow it down. 55 + # 56 + # As a compromise solution, disable KCOV instrumentation for the whole 57 + # source code file. If its coverage is ever needed, other approaches 58 + # should be considered. 59 + KCOV_INSTRUMENT_machine_kexec_64.o := n 60 + 47 61 CFLAGS_head32.o := -fno-stack-protector 48 62 CFLAGS_head64.o := -fno-stack-protector 49 63 CFLAGS_irq.o := -I $(src)/../include/asm/trace
+2 -1
arch/x86/kernel/shstk.c
··· 351 351 need_to_check_vma = PAGE_ALIGN(*ssp) == *ssp; 352 352 353 353 if (need_to_check_vma) 354 - mmap_read_lock_killable(current->mm); 354 + if (mmap_read_lock_killable(current->mm)) 355 + return -EINTR; 355 356 356 357 err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp); 357 358 if (unlikely(err))
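mmap_read_lock_killable() returns nonzero without taking the lock when a fatal signal arrives, so its result must be checked before any matching unlock. A minimal sketch of the contract (the function name here is illustrative):

static int walk_user_stack(struct mm_struct *mm)
{
        if (mmap_read_lock_killable(mm))
                return -EINTR;          /* the lock was never acquired */

        /* ... inspect VMAs under the read lock ... */

        mmap_read_unlock(mm);
        return 0;
}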
+2
arch/x86/mm/Makefile
··· 4 4 KCOV_INSTRUMENT_mem_encrypt.o := n 5 5 KCOV_INSTRUMENT_mem_encrypt_amd.o := n 6 6 KCOV_INSTRUMENT_pgprot.o := n 7 + # See the "Disable KCOV" comment in arch/x86/kernel/Makefile. 8 + KCOV_INSTRUMENT_physaddr.o := n 7 9 8 10 KASAN_SANITIZE_mem_encrypt.o := n 9 11 KASAN_SANITIZE_mem_encrypt_amd.o := n
+18 -6
arch/x86/platform/geode/geode-common.c
··· 28 28 .properties = geode_gpio_keys_props, 29 29 }; 30 30 31 - static struct property_entry geode_restart_key_props[] = { 32 - { /* Placeholder for GPIO property */ }, 31 + static struct software_node_ref_args geode_restart_gpio_ref; 32 + 33 + static const struct property_entry geode_restart_key_props[] = { 34 + PROPERTY_ENTRY_REF_ARRAY_LEN("gpios", &geode_restart_gpio_ref, 1), 33 35 PROPERTY_ENTRY_U32("linux,code", KEY_RESTART), 34 36 PROPERTY_ENTRY_STRING("label", "Reset button"), 35 37 PROPERTY_ENTRY_U32("debounce-interval", 100), ··· 66 64 struct platform_device *pd; 67 65 int err; 68 66 69 - geode_restart_key_props[0] = PROPERTY_ENTRY_GPIO("gpios", 70 - &geode_gpiochip_node, 67 + geode_restart_gpio_ref = SOFTWARE_NODE_REFERENCE(&geode_gpiochip_node, 71 68 pin, GPIO_ACTIVE_LOW); 72 69 73 70 err = software_node_register_node_group(geode_gpio_keys_swnodes); ··· 100 99 const struct software_node *group[MAX_LEDS + 2] = { 0 }; 101 100 struct software_node *swnodes; 102 101 struct property_entry *props; 102 + struct software_node_ref_args *gpio_refs; 103 103 struct platform_device_info led_info = { 104 104 .name = "leds-gpio", 105 105 .id = PLATFORM_DEVID_NONE, ··· 129 127 goto err_free_swnodes; 130 128 } 131 129 130 + gpio_refs = kzalloc_objs(*gpio_refs, n_leds); 131 + if (!gpio_refs) { 132 + err = -ENOMEM; 133 + goto err_free_props; 134 + } 135 + 132 136 group[0] = &geode_gpio_leds_node; 133 137 for (i = 0; i < n_leds; i++) { 134 138 node_name = kasprintf(GFP_KERNEL, "%s:%d", label, i); ··· 143 135 goto err_free_names; 144 136 } 145 137 138 + gpio_refs[i] = SOFTWARE_NODE_REFERENCE(&geode_gpiochip_node, 139 + leds[i].pin, 140 + GPIO_ACTIVE_LOW); 146 141 props[i * 3 + 0] = 147 - PROPERTY_ENTRY_GPIO("gpios", &geode_gpiochip_node, 148 - leds[i].pin, GPIO_ACTIVE_LOW); 142 + PROPERTY_ENTRY_REF_ARRAY_LEN("gpios", &gpio_refs[i], 1); 149 143 props[i * 3 + 1] = 150 144 PROPERTY_ENTRY_STRING("linux,default-trigger", 151 145 leds[i].default_on ? ··· 181 171 err_free_names: 182 172 while (--i >= 0) 183 173 kfree(swnodes[i].name); 174 + kfree(gpio_refs); 175 + err_free_props: 184 176 kfree(props); 185 177 err_free_swnodes: 186 178 kfree(swnodes);
+13 -40
crypto/af_alg.c
··· 623 623 sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); 624 624 sgl->cur = 0; 625 625 626 - if (sg) 626 + if (sg) { 627 + sg_unmark_end(sg + MAX_SGL_ENTS - 1); 627 628 sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); 629 + } 628 630 629 631 list_add_tail(&sgl->list, &ctx->tsgl_list); 630 632 } ··· 637 635 /** 638 636 * af_alg_count_tsgl - Count number of TX SG entries 639 637 * 640 - * The counting starts from the beginning of the SGL to @bytes. If 641 - * an @offset is provided, the counting of the SG entries starts at the @offset. 638 + * The counting starts from the beginning of the SGL to @bytes. 642 639 * 643 640 * @sk: socket of connection to user space 644 641 * @bytes: Count the number of SG entries holding given number of bytes. 645 - * @offset: Start the counting of SG entries from the given offset. 646 642 * Return: Number of TX SG entries found given the constraints 647 643 */ 648 - unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) 644 + unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes) 649 645 { 650 646 const struct alg_sock *ask = alg_sk(sk); 651 647 const struct af_alg_ctx *ctx = ask->private; ··· 658 658 const struct scatterlist *sg = sgl->sg; 659 659 660 660 for (i = 0; i < sgl->cur; i++) { 661 - size_t bytes_count; 662 - 663 - /* Skip offset */ 664 - if (offset >= sg[i].length) { 665 - offset -= sg[i].length; 666 - bytes -= sg[i].length; 667 - continue; 668 - } 669 - 670 - bytes_count = sg[i].length - offset; 671 - 672 - offset = 0; 673 661 sgl_count++; 674 - 675 - /* If we have seen requested number of bytes, stop */ 676 - if (bytes_count >= bytes) 662 + if (sg[i].length >= bytes) 677 663 return sgl_count; 678 664 679 - bytes -= bytes_count; 665 + bytes -= sg[i].length; 680 666 } 681 667 } 682 668 ··· 674 688 * af_alg_pull_tsgl - Release the specified buffers from TX SGL 675 689 * 676 690 * If @dst is non-null, reassign the pages to @dst. The caller must release 677 - * the pages. If @dst_offset is given only reassign the pages to @dst starting 678 - * at the @dst_offset (byte). The caller must ensure that @dst is large 679 - * enough (e.g. by using af_alg_count_tsgl with the same offset). 691 + * the pages. 680 692 * 681 693 * @sk: socket of connection to user space 682 694 * @used: Number of bytes to pull from TX SGL 683 695 * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The 684 696 * caller must release the buffers in dst. 685 - * @dst_offset: Reassign the TX SGL from given offset. All buffers before 686 - * reaching the offset is released. 687 697 */ 688 - void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, 689 - size_t dst_offset) 698 + void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst) 690 699 { 691 700 struct alg_sock *ask = alg_sk(sk); 692 701 struct af_alg_ctx *ctx = ask->private; ··· 706 725 * SG entries in dst. 707 726 */ 708 727 if (dst) { 709 - if (dst_offset >= plen) { 710 - /* discard page before offset */ 711 - dst_offset -= plen; 712 - } else { 713 - /* reassign page to dst after offset */ 714 - get_page(page); 715 - sg_set_page(dst + j, page, 716 - plen - dst_offset, 717 - sg[i].offset + dst_offset); 718 - dst_offset = 0; 719 - j++; 720 - } 728 + /* reassign page to dst after offset */ 729 + get_page(page); 730 + sg_set_page(dst + j, page, plen, sg[i].offset); 731 + j++; 721 732 } 722 733 723 734 sg[i].length -= plen;
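With the offset parameter gone, af_alg_count_tsgl() reduces to a walk from the head of the TX list. The counting rule in isolation, sketched over a plain array of segment lengths:

#include <stddef.h>

/* Count how many leading segments are needed to cover `bytes`. */
static unsigned int count_segments(const size_t *len, unsigned int nsegs,
                                   size_t bytes)
{
        unsigned int i, count = 0;

        for (i = 0; i < nsegs; i++) {
                count++;
                if (len[i] >= bytes)
                        break;          /* this segment covers the remainder */
                bytes -= len[i];
        }
        return count;
}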
+19 -81
crypto/algif_aead.c
··· 26 26 #include <crypto/internal/aead.h> 27 27 #include <crypto/scatterwalk.h> 28 28 #include <crypto/if_alg.h> 29 - #include <crypto/skcipher.h> 30 29 #include <linux/init.h> 31 30 #include <linux/list.h> 32 31 #include <linux/kernel.h> ··· 71 72 struct alg_sock *pask = alg_sk(psk); 72 73 struct af_alg_ctx *ctx = ask->private; 73 74 struct crypto_aead *tfm = pask->private; 74 - unsigned int i, as = crypto_aead_authsize(tfm); 75 + unsigned int as = crypto_aead_authsize(tfm); 75 76 struct af_alg_async_req *areq; 76 - struct af_alg_tsgl *tsgl, *tmp; 77 77 struct scatterlist *rsgl_src, *tsgl_src = NULL; 78 78 int err = 0; 79 79 size_t used = 0; /* [in] TX bufs to be en/decrypted */ ··· 152 154 outlen -= less; 153 155 } 154 156 157 + /* 158 + * Create a per request TX SGL for this request which tracks the 159 + * SG entries from the global TX SGL. 160 + */ 155 161 processed = used + ctx->aead_assoclen; 156 - list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) { 157 - for (i = 0; i < tsgl->cur; i++) { 158 - struct scatterlist *process_sg = tsgl->sg + i; 159 - 160 - if (!(process_sg->length) || !sg_page(process_sg)) 161 - continue; 162 - tsgl_src = process_sg; 163 - break; 164 - } 165 - if (tsgl_src) 166 - break; 167 - } 168 - if (processed && !tsgl_src) { 169 - err = -EFAULT; 162 + areq->tsgl_entries = af_alg_count_tsgl(sk, processed); 163 + if (!areq->tsgl_entries) 164 + areq->tsgl_entries = 1; 165 + areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl), 166 + areq->tsgl_entries), 167 + GFP_KERNEL); 168 + if (!areq->tsgl) { 169 + err = -ENOMEM; 170 170 goto free; 171 171 } 172 + sg_init_table(areq->tsgl, areq->tsgl_entries); 173 + af_alg_pull_tsgl(sk, processed, areq->tsgl); 174 + tsgl_src = areq->tsgl; 172 175 173 176 /* 174 177 * Copy of AAD from source to destination ··· 178 179 * when user space uses an in-place cipher operation, the kernel 179 180 * will copy the data as it does not see whether such in-place operation 180 181 * is initiated. 181 - * 182 - * To ensure efficiency, the following implementation ensure that the 183 - * ciphers are invoked to perform a crypto operation in-place. This 184 - * is achieved by memory management specified as follows. 185 182 */ 186 183 187 184 /* Use the RX SGL as source (and destination) for crypto op. */ 188 185 rsgl_src = areq->first_rsgl.sgl.sgt.sgl; 189 186 190 - if (ctx->enc) { 191 - /* 192 - * Encryption operation - The in-place cipher operation is 193 - * achieved by the following operation: 194 - * 195 - * TX SGL: AAD || PT 196 - * | | 197 - * | copy | 198 - * v v 199 - * RX SGL: AAD || PT || Tag 200 - */ 201 - memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, 202 - processed); 203 - af_alg_pull_tsgl(sk, processed, NULL, 0); 204 - } else { 205 - /* 206 - * Decryption operation - To achieve an in-place cipher 207 - * operation, the following SGL structure is used: 208 - * 209 - * TX SGL: AAD || CT || Tag 210 - * | | ^ 211 - * | copy | | Create SGL link. 212 - * v v | 213 - * RX SGL: AAD || CT ----+ 214 - */ 215 - 216 - /* Copy AAD || CT to RX SGL buffer for in-place operation. */ 217 - memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen); 218 - 219 - /* Create TX SGL for tag and chain it to RX SGL.
*/ 220 - areq->tsgl_entries = af_alg_count_tsgl(sk, processed, 221 - processed - as); 222 - if (!areq->tsgl_entries) 223 - areq->tsgl_entries = 1; 224 - areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl), 225 - areq->tsgl_entries), 226 - GFP_KERNEL); 227 - if (!areq->tsgl) { 228 - err = -ENOMEM; 229 - goto free; 230 - } 231 - sg_init_table(areq->tsgl, areq->tsgl_entries); 232 - 233 - /* Release TX SGL, except for tag data and reassign tag data. */ 234 - af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as); 235 - 236 - /* chain the areq TX SGL holding the tag with RX SGL */ 237 - if (usedpages) { 238 - /* RX SGL present */ 239 - struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl; 240 - struct scatterlist *sg = sgl_prev->sgt.sgl; 241 - 242 - sg_unmark_end(sg + sgl_prev->sgt.nents - 1); 243 - sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl); 244 - } else 245 - /* no RX SGL present (e.g. authentication only) */ 246 - rsgl_src = areq->tsgl; 247 - } 187 + memcpy_sglist(rsgl_src, tsgl_src, ctx->aead_assoclen); 248 188 249 189 /* Initialize the crypto operation */ 250 - aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src, 190 + aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src, 251 191 areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv); 252 192 aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen); 253 193 aead_request_set_tfm(&areq->cra_u.aead_req, tfm); ··· 388 450 struct crypto_aead *tfm = pask->private; 389 451 unsigned int ivlen = crypto_aead_ivsize(tfm); 390 452 391 - af_alg_pull_tsgl(sk, ctx->used, NULL, 0); 453 + af_alg_pull_tsgl(sk, ctx->used, NULL); 392 454 sock_kzfree_s(sk, ctx->iv, ivlen); 393 455 sock_kfree_s(sk, ctx, ctx->len); 394 456 af_alg_release_parent(sk);
+3 -3
crypto/algif_skcipher.c
··· 138 138 * Create a per request TX SGL for this request which tracks the 139 139 * SG entries from the global TX SGL. 140 140 */ 141 - areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0); 141 + areq->tsgl_entries = af_alg_count_tsgl(sk, len); 142 142 if (!areq->tsgl_entries) 143 143 areq->tsgl_entries = 1; 144 144 areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl), ··· 149 149 goto free; 150 150 } 151 151 sg_init_table(areq->tsgl, areq->tsgl_entries); 152 - af_alg_pull_tsgl(sk, len, areq->tsgl, 0); 152 + af_alg_pull_tsgl(sk, len, areq->tsgl); 153 153 154 154 /* Initialize the crypto operation */ 155 155 skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm); ··· 363 363 struct alg_sock *pask = alg_sk(psk); 364 364 struct crypto_skcipher *tfm = pask->private; 365 365 366 - af_alg_pull_tsgl(sk, ctx->used, NULL, 0); 366 + af_alg_pull_tsgl(sk, ctx->used, NULL); 367 367 sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); 368 368 if (ctx->state) 369 369 sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
+30 -20
crypto/authencesn.c
··· 207 207 u8 *ohash = areq_ctx->tail; 208 208 unsigned int cryptlen = req->cryptlen - authsize; 209 209 unsigned int assoclen = req->assoclen; 210 + struct scatterlist *src = req->src; 210 211 struct scatterlist *dst = req->dst; 211 212 u8 *ihash = ohash + crypto_ahash_digestsize(auth); 212 213 u32 tmp[2]; ··· 215 214 if (!authsize) 216 215 goto decrypt; 217 216 218 - /* Move high-order bits of sequence number back. */ 219 - scatterwalk_map_and_copy(tmp, dst, 4, 4, 0); 220 - scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0); 221 - scatterwalk_map_and_copy(tmp, dst, 0, 8, 1); 217 + if (src == dst) { 218 + /* Move high-order bits of sequence number back. */ 219 + scatterwalk_map_and_copy(tmp, dst, 4, 4, 0); 220 + scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0); 221 + scatterwalk_map_and_copy(tmp, dst, 0, 8, 1); 222 + } else 223 + memcpy_sglist(dst, src, assoclen); 222 224 223 225 if (crypto_memneq(ihash, ohash, authsize)) 224 226 return -EBADMSG; 225 227 226 228 decrypt: 227 229 228 - sg_init_table(areq_ctx->dst, 2); 230 + if (src != dst) 231 + src = scatterwalk_ffwd(areq_ctx->src, src, assoclen); 229 232 dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen); 230 233 231 234 skcipher_request_set_tfm(skreq, ctx->enc); 232 235 skcipher_request_set_callback(skreq, flags, 233 236 req->base.complete, req->base.data); 234 - skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv); 237 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv); 235 238 236 239 return crypto_skcipher_decrypt(skreq); 237 240 } ··· 260 255 unsigned int assoclen = req->assoclen; 261 256 unsigned int cryptlen = req->cryptlen; 262 257 u8 *ihash = ohash + crypto_ahash_digestsize(auth); 258 + struct scatterlist *src = req->src; 263 259 struct scatterlist *dst = req->dst; 264 260 u32 tmp[2]; 265 261 int err; ··· 268 262 if (assoclen < 8) 269 263 return -EINVAL; 270 264 271 - cryptlen -= authsize; 272 - 273 - if (req->src != dst) 274 - memcpy_sglist(dst, req->src, assoclen + cryptlen); 275 - 276 - scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen, 277 - authsize, 0); 278 - 279 265 if (!authsize) 280 266 goto tail; 281 267 282 - /* Move high-order bits of sequence number to the end. */ 283 - scatterwalk_map_and_copy(tmp, dst, 0, 8, 0); 284 - scatterwalk_map_and_copy(tmp, dst, 4, 4, 1); 285 - scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1); 268 + cryptlen -= authsize; 269 + scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen, 270 + authsize, 0); 286 271 287 - sg_init_table(areq_ctx->dst, 2); 288 - dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4); 272 + /* Move high-order bits of sequence number to the end. */ 273 + scatterwalk_map_and_copy(tmp, src, 0, 8, 0); 274 + if (src == dst) { 275 + scatterwalk_map_and_copy(tmp, dst, 4, 4, 1); 276 + scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1); 277 + dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4); 278 + } else { 279 + scatterwalk_map_and_copy(tmp, dst, 0, 4, 1); 280 + scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen - 4, 4, 1); 281 + 282 + src = scatterwalk_ffwd(areq_ctx->src, src, 8); 283 + dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4); 284 + memcpy_sglist(dst, src, assoclen + cryptlen - 8); 285 + dst = req->dst; 286 + } 289 287 290 288 ahash_request_set_tfm(ahreq, auth); 291 289 ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
+7 -4
crypto/deflate.c
··· 164 164 165 165 do { 166 166 unsigned int dcur; 167 + unsigned long avail_in; 167 168 168 169 dcur = acomp_walk_next_dst(&walk); 169 - if (!dcur) { 170 - out_of_space = true; 171 - break; 172 - } 173 170 174 171 stream->avail_out = dcur; 175 172 stream->next_out = walk.dst.virt.addr; 173 + avail_in = stream->avail_in; 176 174 177 175 ret = zlib_inflate(stream, Z_NO_FLUSH); 176 + 177 + if (!dcur && avail_in == stream->avail_in) { 178 + out_of_space = true; 179 + break; 180 + } 178 181 179 182 dcur -= stream->avail_out; 180 183 acomp_walk_done_dst(&walk, dcur);
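The reordered check declares out-of-space only when the pass made no forward progress: no destination room in this iteration and no input consumed by inflate either. A userspace sketch of the same test against standard zlib, assuming the kernel's zlib_inflate() behaves equivalently for this purpose:

#include <zlib.h>
#include <stdbool.h>

/* One inflate pass; out-of-space is only fatal without forward progress. */
static bool inflate_pass(z_stream *zs, unsigned char *dst, unsigned int dcur,
                         bool *out_of_space)
{
        unsigned long avail_in = zs->avail_in;

        zs->next_out = dst;
        zs->avail_out = dcur;
        (void)inflate(zs, Z_NO_FLUSH);

        if (!dcur && avail_in == zs->avail_in) {
                *out_of_space = true;   /* no room and nothing consumed */
                return false;
        }
        return true;
}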
+45 -2
drivers/accel/qaic/qaic_control.c
··· 914 914 */ 915 915 return -ENODEV; 916 916 917 - if (status) { 917 + if (usr && status) { 918 918 /* 919 919 * Releasing resources failed on the device side, which puts 920 920 * us in a bind since they may still be in use, so enable the ··· 1109 1109 mutex_lock(&qdev->cntl_mutex); 1110 1110 if (!list_empty(&elem.list)) 1111 1111 list_del(&elem.list); 1112 + /* resp_worker() processed the response but the wait was interrupted */ 1113 + else if (ret == -ERESTARTSYS) 1114 + ret = 0; 1112 1115 if (!ret && !elem.buf) 1113 1116 ret = -ETIMEDOUT; 1114 1117 else if (ret > 0 && !elem.buf) ··· 1422 1419 } 1423 1420 mutex_unlock(&qdev->cntl_mutex); 1424 1421 1425 - if (!found) 1422 + if (!found) { 1423 + /* 1424 + * The user might have gone away at this point without waiting 1425 + * for QAIC_TRANS_DEACTIVATE_FROM_DEV transaction coming from 1426 + * the device. If this is not handled correctly, the host will 1427 + * not know that the DBC[n] has been freed on the device. 1428 + * Due to this failure in synchronization between the device and 1429 + * the host, if another user requests to activate a network, and 1430 + * the device assigns DBC[n] again, save_dbc_buf() will hang, 1431 + * waiting for dbc[n]->in_use to be set to false, which will not 1432 + * happen unless the qaic_dev_reset_clean_local_state() gets 1433 + * called by resetting the device (or re-inserting the module). 1434 + * 1435 + * As a solution, we look for QAIC_TRANS_DEACTIVATE_FROM_DEV 1436 + * transactions in the message before disposing of it, then 1437 + * handle releasing the DBC resources. 1438 + * 1439 + * Since the user has gone away, if the device could not 1440 + * deactivate the network (status != 0), there is no way to 1441 + * enable and reassign the DBC to the user. We can put trust in 1442 + * the device that it will release all the active DBCs in 1443 + * response to the QAIC_TRANS_TERMINATE_TO_DEV transaction, 1444 + * otherwise, the user can issue an soc_reset to the device. 1445 + */ 1446 + u32 msg_count = le32_to_cpu(msg->hdr.count); 1447 + u32 msg_len = le32_to_cpu(msg->hdr.len); 1448 + u32 len = 0; 1449 + int j; 1450 + 1451 + for (j = 0; j < msg_count && len < msg_len; ++j) { 1452 + struct wire_trans_hdr *trans_hdr; 1453 + 1454 + trans_hdr = (struct wire_trans_hdr *)(msg->data + len); 1455 + if (le32_to_cpu(trans_hdr->type) == QAIC_TRANS_DEACTIVATE_FROM_DEV) { 1456 + if (decode_deactivate(qdev, trans_hdr, &len, NULL)) 1457 + len += le32_to_cpu(trans_hdr->len); 1458 + } else { 1459 + len += le32_to_cpu(trans_hdr->len); 1460 + } 1461 + } 1426 1462 /* request must have timed out, drop packet */ 1427 1463 kfree(msg); 1464 + } 1428 1465 1429 1466 kfree(resp); 1430 1467 }
+7
drivers/acpi/riscv/rimt.c
··· 263 263 if (!rimt_fwnode) 264 264 return -EPROBE_DEFER; 265 265 266 + /* 267 + * EPROBE_DEFER ensures the IOMMU is probed before the devices that 268 + * depend on it. During shutdown, however, the IOMMU may be removed 269 + * first, leading to issues. To avoid this, a device link is added 270 + * which enforces the correct removal order. 271 + */ 272 + device_link_add(dev, rimt_fwnode->dev, DL_FLAG_AUTOREMOVE_CONSUMER); 266 273 return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode); 267 274 }
+5 -3
drivers/android/binder/page_range.rs
··· 13 13 // 14 14 // The shrinker will use trylock methods because it locks them in a different order. 15 15 16 + use crate::AssertSync; 17 + 16 18 use core::{ 17 19 marker::PhantomPinned, 18 20 mem::{size_of, size_of_val, MaybeUninit}, ··· 145 143 } 146 144 147 145 // We do not define any ops. For now, used only to check identity of vmas. 148 - static BINDER_VM_OPS: bindings::vm_operations_struct = pin_init::zeroed(); 146 + static BINDER_VM_OPS: AssertSync<bindings::vm_operations_struct> = AssertSync(pin_init::zeroed()); 149 147 150 148 // To ensure that we do not accidentally install pages into or zap pages from the wrong vma, we 151 149 // check its vm_ops and private data before using it. 152 150 fn check_vma(vma: &virt::VmaRef, owner: *const ShrinkablePageRange) -> Option<&virt::VmaMixedMap> { 153 151 // SAFETY: Just reading the vm_ops pointer of any active vma is safe. 154 152 let vm_ops = unsafe { (*vma.as_ptr()).vm_ops }; 155 - if !ptr::eq(vm_ops, &BINDER_VM_OPS) { 153 + if !ptr::eq(vm_ops, &BINDER_VM_OPS.0) { 156 154 return None; 157 155 } 158 156 ··· 344 342 345 343 // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on 346 344 // `vm_ops`. 347 - unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS }; 345 + unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS.0 }; 348 346 349 347 Ok(num_pages) 350 348 }
+1 -1
drivers/android/binder/rust_binder_main.rs
··· 306 306 /// Makes the inner type Sync. 307 307 #[repr(transparent)] 308 308 pub struct AssertSync<T>(T); 309 - // SAFETY: Used only to insert `file_operations` into a global, which is safe. 309 + // SAFETY: Used only to insert C bindings types into globals, which is safe. 310 310 unsafe impl<T> Sync for AssertSync<T> {} 311 311 312 312 /// File operations that rust_binderfs.c can use.
+14
drivers/ata/ahci.c
··· 68 68 /* board IDs for specific chipsets in alphabetical order */ 69 69 board_ahci_al, 70 70 board_ahci_avn, 71 + board_ahci_jmb585, 71 72 board_ahci_mcp65, 72 73 board_ahci_mcp77, 73 74 board_ahci_mcp89, ··· 212 211 .pio_mask = ATA_PIO4, 213 212 .udma_mask = ATA_UDMA6, 214 213 .port_ops = &ahci_avn_ops, 214 + }, 215 + /* JMicron JMB582/585: 64-bit DMA is broken, force 32-bit */ 216 + [board_ahci_jmb585] = { 217 + AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR | 218 + AHCI_HFLAG_32BIT_ONLY), 219 + .flags = AHCI_FLAG_COMMON, 220 + .pio_mask = ATA_PIO4, 221 + .udma_mask = ATA_UDMA6, 222 + .port_ops = &ahci_ops, 215 223 }, 216 224 [board_ahci_mcp65] = { 217 225 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | ··· 448 438 { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */ 449 439 /* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */ 450 440 { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */ 441 + 442 + /* JMicron JMB582/585: force 32-bit DMA (broken 64-bit implementation) */ 443 + { PCI_VDEVICE(JMICRON, 0x0582), board_ahci_jmb585 }, 444 + { PCI_VDEVICE(JMICRON, 0x0585), board_ahci_jmb585 }, 451 445 452 446 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 453 447 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+5 -3
drivers/comedi/comedi_fops.c
··· 793 793 __comedi_clear_subdevice_runflags(s, COMEDI_SRF_RUNNING | 794 794 COMEDI_SRF_BUSY); 795 795 spin_unlock_irqrestore(&s->spin_lock, flags); 796 - if (comedi_is_runflags_busy(runflags)) { 796 + if (async) { 797 797 /* 798 798 * "Run active" counter was set to 1 when setting up the 799 799 * command. Decrement it and wait for it to become 0. 800 800 */ 801 - comedi_put_is_subdevice_running(s); 802 - wait_for_completion(&async->run_complete); 801 + if (comedi_is_runflags_busy(runflags)) { 802 + comedi_put_is_subdevice_running(s); 803 + wait_for_completion(&async->run_complete); 804 + } 803 805 comedi_buf_reset(s); 804 806 async->inttrig = NULL; 805 807 kfree(async->cmd.chanlist);
+8
drivers/comedi/drivers.c
··· 1063 1063 ret = -EIO; 1064 1064 goto out; 1065 1065 } 1066 + if (IS_ENABLED(CONFIG_LOCKDEP)) { 1067 + /* 1068 + * dev->spinlock is for private use by the attached low-level 1069 + * driver. Reinitialize it to stop lock-dependency tracking 1070 + * between attachments to different low-level drivers. 1071 + */ 1072 + spin_lock_init(&dev->spinlock); 1073 + } 1066 1074 dev->driver = driv; 1067 1075 dev->board_name = dev->board_ptr ? *(const char **)dev->board_ptr 1068 1076 : dev->driver->driver_name;
+12
drivers/comedi/drivers/dt2815.c
··· 175 175 ? current_range_type : voltage_range_type; 176 176 } 177 177 178 + /* 179 + * Check if hardware is present before attempting any I/O operations. 180 + * Reading 0xff from status register typically indicates no hardware 181 + * on the bus (floating bus reads as all 1s). 182 + */ 183 + if (inb(dev->iobase + DT2815_STATUS) == 0xff) { 184 + dev_err(dev->class_dev, 185 + "No hardware detected at I/O base 0x%lx\n", 186 + dev->iobase); 187 + return -ENODEV; 188 + } 189 + 178 190 /* Init the 2815 */ 179 191 outb(0x00, dev->iobase + DT2815_STATUS); 180 192 for (i = 0; i < 100; i++) {
+12 -4
drivers/comedi/drivers/me4000.c
··· 315 315 unsigned int val; 316 316 unsigned int i; 317 317 318 + /* Get data stream length from header. */ 319 + if (size >= 4) { 320 + file_length = (((unsigned int)data[0] & 0xff) << 24) + 321 + (((unsigned int)data[1] & 0xff) << 16) + 322 + (((unsigned int)data[2] & 0xff) << 8) + 323 + ((unsigned int)data[3] & 0xff); 324 + } 325 + if (size < 16 || file_length > size - 16) { 326 + dev_err(dev->class_dev, "Firmware length inconsistency\n"); 327 + return -EINVAL; 328 + } 329 + 318 330 if (!xilinx_iobase) 319 331 return -ENODEV; 320 332 ··· 358 346 outl(val, devpriv->plx_regbase + PLX9052_CNTRL); 359 347 360 348 /* Download Xilinx firmware */ 361 - file_length = (((unsigned int)data[0] & 0xff) << 24) + 362 - (((unsigned int)data[1] & 0xff) << 16) + 363 - (((unsigned int)data[2] & 0xff) << 8) + 364 - ((unsigned int)data[3] & 0xff); 365 349 usleep_range(10, 1000); 366 350 367 351 for (i = 0; i < file_length; i++) {
+19 -16
drivers/comedi/drivers/me_daq.c
··· 344 344 unsigned int file_length; 345 345 unsigned int i; 346 346 347 + /* 348 + * Format of the firmware 349 + * Build longs from the byte-wise coded header 350 + * Byte 1-3: length of the array 351 + * Byte 4-7: version 352 + * Byte 8-11: date 353 + * Byte 12-15: reserved 354 + */ 355 + if (size >= 4) { 356 + file_length = (((unsigned int)data[0] & 0xff) << 24) + 357 + (((unsigned int)data[1] & 0xff) << 16) + 358 + (((unsigned int)data[2] & 0xff) << 8) + 359 + ((unsigned int)data[3] & 0xff); 360 + } 361 + if (size < 16 || file_length > size - 16) { 362 + dev_err(dev->class_dev, "Firmware length inconsistency\n"); 363 + return -EINVAL; 364 + } 365 + 347 366 /* disable irq's on PLX */ 348 367 writel(0x00, devpriv->plx_regbase + PLX9052_INTCSR); 349 368 ··· 375 356 /* Write a dummy value to Xilinx */ 376 357 writeb(0x00, dev->mmio + 0x0); 377 358 sleep(1); 378 - 379 - /* 380 - * Format of the firmware 381 - * Build longs from the byte-wise coded header 382 - * Byte 1-3: length of the array 383 - * Byte 4-7: version 384 - * Byte 8-11: date 385 - * Byte 12-15: reserved 386 - */ 387 - if (size < 16) 388 - return -EINVAL; 389 - 390 - file_length = (((unsigned int)data[0] & 0xff) << 24) + 391 - (((unsigned int)data[1] & 0xff) << 16) + 392 - (((unsigned int)data[2] & 0xff) << 8) + 393 - ((unsigned int)data[3] & 0xff); 394 359 395 360 /* 396 361 * Loop for writing firmware byte by byte to xilinx
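Both me4000 and me_daq now validate the firmware blob before touching the hardware: the first four bytes carry a big-endian payload length that must fit inside the blob after its 16-byte header. The check in isolation, as a sketch:

#include <stdint.h>
#include <stddef.h>

/* Return 0 and the payload length if the firmware header is consistent. */
static int fw_payload_len(const uint8_t *data, size_t size, uint32_t *len)
{
        uint32_t file_length;

        if (size < 16)
                return -1;

        file_length = ((uint32_t)data[0] << 24) | ((uint32_t)data[1] << 16) |
                      ((uint32_t)data[2] << 8) | (uint32_t)data[3];
        if (file_length > size - 16)
                return -1;      /* header claims more payload than the blob holds */

        *len = file_length;
        return 0;
}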
+2 -1
drivers/comedi/drivers/ni_atmio16d.c
··· 698 698 699 699 static void atmio16d_detach(struct comedi_device *dev) 700 700 { 701 - reset_atmio16d(dev); 701 + if (dev->private) 702 + reset_atmio16d(dev); 702 703 comedi_legacy_detach(dev); 703 704 } 704 705
+35 -32
drivers/counter/rz-mtu3-cnt.c
··· 107 107 struct rz_mtu3_cnt *const priv = counter_priv(counter); 108 108 unsigned long tmdr; 109 109 110 - pm_runtime_get_sync(priv->ch->dev); 110 + pm_runtime_get_sync(counter->parent); 111 111 tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3); 112 - pm_runtime_put(priv->ch->dev); 112 + pm_runtime_put(counter->parent); 113 113 114 114 if (id == RZ_MTU3_32_BIT_CH && test_bit(RZ_MTU3_TMDR3_LWA, &tmdr)) 115 115 return false; ··· 165 165 if (ret) 166 166 return ret; 167 167 168 - pm_runtime_get_sync(ch->dev); 168 + pm_runtime_get_sync(counter->parent); 169 169 if (count->id == RZ_MTU3_32_BIT_CH) 170 170 *val = rz_mtu3_32bit_ch_read(ch, RZ_MTU3_TCNTLW); 171 171 else 172 172 *val = rz_mtu3_16bit_ch_read(ch, RZ_MTU3_TCNT); 173 - pm_runtime_put(ch->dev); 173 + pm_runtime_put(counter->parent); 174 174 mutex_unlock(&priv->lock); 175 175 176 176 return 0; ··· 187 187 if (ret) 188 188 return ret; 189 189 190 - pm_runtime_get_sync(ch->dev); 190 + pm_runtime_get_sync(counter->parent); 191 191 if (count->id == RZ_MTU3_32_BIT_CH) 192 192 rz_mtu3_32bit_ch_write(ch, RZ_MTU3_TCNTLW, val); 193 193 else 194 194 rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TCNT, val); 195 - pm_runtime_put(ch->dev); 195 + pm_runtime_put(counter->parent); 196 196 mutex_unlock(&priv->lock); 197 197 198 198 return 0; 199 199 } 200 200 201 201 static int rz_mtu3_count_function_read_helper(struct rz_mtu3_channel *const ch, 202 - struct rz_mtu3_cnt *const priv, 202 + struct counter_device *const counter, 203 203 enum counter_function *function) 204 204 { 205 205 u8 timer_mode; 206 206 207 - pm_runtime_get_sync(ch->dev); 207 + pm_runtime_get_sync(counter->parent); 208 208 timer_mode = rz_mtu3_8bit_ch_read(ch, RZ_MTU3_TMDR1); 209 - pm_runtime_put(ch->dev); 209 + pm_runtime_put(counter->parent); 210 210 211 211 switch (timer_mode & RZ_MTU3_TMDR1_PH_CNT_MODE_MASK) { 212 212 case RZ_MTU3_TMDR1_PH_CNT_MODE_1: ··· 240 240 if (ret) 241 241 return ret; 242 242 243 - ret = rz_mtu3_count_function_read_helper(ch, priv, function); 243 + ret = rz_mtu3_count_function_read_helper(ch, counter, function); 244 244 mutex_unlock(&priv->lock); 245 245 246 246 return ret; ··· 279 279 return -EINVAL; 280 280 } 281 281 282 - pm_runtime_get_sync(ch->dev); 282 + pm_runtime_get_sync(counter->parent); 283 283 rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TMDR1, timer_mode); 284 - pm_runtime_put(ch->dev); 284 + pm_runtime_put(counter->parent); 285 285 mutex_unlock(&priv->lock); 286 286 287 287 return 0; ··· 300 300 if (ret) 301 301 return ret; 302 302 303 - pm_runtime_get_sync(ch->dev); 303 + pm_runtime_get_sync(counter->parent); 304 304 tsr = rz_mtu3_8bit_ch_read(ch, RZ_MTU3_TSR); 305 - pm_runtime_put(ch->dev); 305 + pm_runtime_put(counter->parent); 306 306 307 307 *direction = (tsr & RZ_MTU3_TSR_TCFD) ?
308 308 COUNTER_COUNT_DIRECTION_FORWARD : COUNTER_COUNT_DIRECTION_BACKWARD; ··· 377 377 return -EINVAL; 378 378 } 379 379 380 - pm_runtime_get_sync(ch->dev); 380 + pm_runtime_get_sync(counter->parent); 381 381 if (count->id == RZ_MTU3_32_BIT_CH) 382 382 rz_mtu3_32bit_ch_write(ch, RZ_MTU3_TGRALW, ceiling); 383 383 else 384 384 rz_mtu3_16bit_ch_write(ch, RZ_MTU3_TGRA, ceiling); 385 385 386 386 rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TCR, RZ_MTU3_TCR_CCLR_TGRA); 387 - pm_runtime_put(ch->dev); 387 + pm_runtime_put(counter->parent); 388 388 mutex_unlock(&priv->lock); 389 389 390 390 return 0; ··· 495 495 static int rz_mtu3_count_enable_write(struct counter_device *counter, 496 496 struct counter_count *count, u8 enable) 497 497 { 498 - struct rz_mtu3_channel *const ch = rz_mtu3_get_ch(counter, count->id); 499 498 struct rz_mtu3_cnt *const priv = counter_priv(counter); 500 499 int ret = 0; 501 500 501 + mutex_lock(&priv->lock); 502 + 503 + if (priv->count_is_enabled[count->id] == enable) 504 + goto exit; 505 + 502 506 if (enable) { 503 - mutex_lock(&priv->lock); 504 - pm_runtime_get_sync(ch->dev); 507 + pm_runtime_get_sync(counter->parent); 505 508 ret = rz_mtu3_initialize_counter(counter, count->id); 506 509 if (ret == 0) 507 510 priv->count_is_enabled[count->id] = true; 508 - mutex_unlock(&priv->lock); 509 511 } else { 510 - mutex_lock(&priv->lock); 511 512 rz_mtu3_terminate_counter(counter, count->id); 512 513 priv->count_is_enabled[count->id] = false; 513 - pm_runtime_put(ch->dev); 514 - mutex_unlock(&priv->lock); 514 + pm_runtime_put(counter->parent); 515 515 } 516 + 517 + exit: 518 + mutex_unlock(&priv->lock); 516 519 517 520 return ret; 518 521 } ··· 543 540 if (ret) 544 541 return ret; 545 542 546 - pm_runtime_get_sync(priv->ch->dev); 543 + pm_runtime_get_sync(counter->parent); 547 544 tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3); 548 - pm_runtime_put(priv->ch->dev); 545 + pm_runtime_put(counter->parent); 549 546 *cascade_enable = test_bit(RZ_MTU3_TMDR3_LWA, &tmdr); 550 547 mutex_unlock(&priv->lock); 551 548 ··· 562 559 if (ret) 563 560 return ret; 564 561 565 - pm_runtime_get_sync(priv->ch->dev); 562 + pm_runtime_get_sync(counter->parent); 566 563 rz_mtu3_shared_reg_update_bit(priv->ch, RZ_MTU3_TMDR3, 567 564 RZ_MTU3_TMDR3_LWA, cascade_enable); 568 - pm_runtime_put(priv->ch->dev); 565 + pm_runtime_put(counter->parent); 569 566 mutex_unlock(&priv->lock); 570 567 571 568 return 0; ··· 582 579 if (ret) 583 580 return ret; 584 581 585 - pm_runtime_get_sync(priv->ch->dev); 582 + pm_runtime_get_sync(counter->parent); 586 583 tmdr = rz_mtu3_shared_reg_read(priv->ch, RZ_MTU3_TMDR3); 587 - pm_runtime_put(priv->ch->dev); 584 + pm_runtime_put(counter->parent); 588 585 *ext_input_phase_clock_select = test_bit(RZ_MTU3_TMDR3_PHCKSEL, &tmdr); 589 586 mutex_unlock(&priv->lock); 590 587 ··· 601 598 if (ret) 602 599 return ret; 603 600 604 - pm_runtime_get_sync(priv->ch->dev); 601 + pm_runtime_get_sync(counter->parent); 605 602 rz_mtu3_shared_reg_update_bit(priv->ch, RZ_MTU3_TMDR3, 606 603 RZ_MTU3_TMDR3_PHCKSEL, 607 604 ext_input_phase_clock_select); 608 - pm_runtime_put(priv->ch->dev); 605 + pm_runtime_put(counter->parent); 609 606 mutex_unlock(&priv->lock); 610 607 611 608 return 0; ··· 643 640 if (ret) 644 641 return ret; 645 642 646 - ret = rz_mtu3_count_function_read_helper(ch, priv, &function); 643 + ret = rz_mtu3_count_function_read_helper(ch, counter, &function); 647 644 if (ret) { 648 645 mutex_unlock(&priv->lock); 649 646 return ret;
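Every register access above is now bracketed by runtime-PM calls on counter->parent, the device that registered the counter, rather than the MTU3 channel's own device. The recurring pattern, condensed into a sketch built only from names in the hunks:

/* Read a channel register with the counter's parent device powered up. */
static u8 rz_mtu3_read_mode(struct counter_device *counter,
                            struct rz_mtu3_channel *ch)
{
        u8 val;

        pm_runtime_get_sync(counter->parent);
        val = rz_mtu3_8bit_ch_read(ch, RZ_MTU3_TMDR1);
        pm_runtime_put(counter->parent);

        return val;
}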
+3 -3
drivers/cpufreq/cpufreq_governor.c
··· 468 468 /* Failure, so roll back. */ 469 469 pr_err("initialization failed (dbs_data kobject init error %d)\n", ret); 470 470 471 - kobject_put(&dbs_data->attr_set.kobj); 472 - 473 471 policy->governor_data = NULL; 474 472 475 473 if (!have_governor_per_policy()) 476 474 gov->gdbs_data = NULL; 477 - gov->exit(dbs_data); 475 + 476 + kobject_put(&dbs_data->attr_set.kobj); 477 + goto free_policy_dbs_info; 478 478 479 479 free_dbs_data: 480 480 kfree(dbs_data);
+2 -1
drivers/crypto/caam/caamalg_qi2.c
··· 3326 3326 if (aligned_len < keylen) 3327 3327 return -EOVERFLOW; 3328 3328 3329 - hashed_key = kmemdup(key, aligned_len, GFP_KERNEL); 3329 + hashed_key = kmalloc(aligned_len, GFP_KERNEL); 3330 3330 if (!hashed_key) 3331 3331 return -ENOMEM; 3332 + memcpy(hashed_key, key, keylen); 3332 3333 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); 3333 3334 if (ret) 3334 3335 goto bad_free_key;
+2 -1
drivers/crypto/caam/caamhash.c
··· 441 441 if (aligned_len < keylen) 442 442 return -EOVERFLOW; 443 443 444 - hashed_key = kmemdup(key, keylen, GFP_KERNEL); 444 + hashed_key = kmalloc(aligned_len, GFP_KERNEL); 445 445 if (!hashed_key) 446 446 return -ENOMEM; 447 + memcpy(hashed_key, key, keylen); 447 448 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); 448 449 if (ret) 449 450 goto bad_free_key;
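Both caam hunks replace kmemdup() with kmalloc() plus memcpy(): one variant read aligned_len bytes from a keylen-sized source, the other allocated only keylen bytes for a buffer that hash_digest_key() later writes past. The corrected shape, as a sketch:

/* Allocate the padded buffer, but copy only the caller's keylen bytes. */
static u8 *dup_key_padded(const u8 *key, unsigned int keylen,
                          unsigned int aligned_len)
{
        u8 *buf = kmalloc(aligned_len, GFP_KERNEL);

        if (!buf)
                return NULL;
        memcpy(buf, key, keylen);       /* the tail is scratch for the digest */
        return buf;
}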
+7 -4
drivers/crypto/tegra/tegra-se-aes.c
··· 529 529 .cra_name = "cbc(aes)", 530 530 .cra_driver_name = "cbc-aes-tegra", 531 531 .cra_priority = 500, 532 - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC, 532 + .cra_flags = CRYPTO_ALG_ASYNC, 533 533 .cra_blocksize = AES_BLOCK_SIZE, 534 534 .cra_ctxsize = sizeof(struct tegra_aes_ctx), 535 535 .cra_alignmask = 0xf, ··· 550 550 .cra_name = "ecb(aes)", 551 551 .cra_driver_name = "ecb-aes-tegra", 552 552 .cra_priority = 500, 553 - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC, 553 + .cra_flags = CRYPTO_ALG_ASYNC, 554 554 .cra_blocksize = AES_BLOCK_SIZE, 555 555 .cra_ctxsize = sizeof(struct tegra_aes_ctx), 556 556 .cra_alignmask = 0xf, ··· 572 572 .cra_name = "ctr(aes)", 573 573 .cra_driver_name = "ctr-aes-tegra", 574 574 .cra_priority = 500, 575 - .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC, 575 + .cra_flags = CRYPTO_ALG_ASYNC, 576 576 .cra_blocksize = 1, 577 577 .cra_ctxsize = sizeof(struct tegra_aes_ctx), 578 578 .cra_alignmask = 0xf, ··· 594 594 .cra_name = "xts(aes)", 595 595 .cra_driver_name = "xts-aes-tegra", 596 596 .cra_priority = 500, 597 + .cra_flags = CRYPTO_ALG_ASYNC, 597 598 .cra_blocksize = AES_BLOCK_SIZE, 598 599 .cra_ctxsize = sizeof(struct tegra_aes_ctx), 599 600 .cra_alignmask = (__alignof__(u64) - 1), ··· 1923 1922 .cra_name = "gcm(aes)", 1924 1923 .cra_driver_name = "gcm-aes-tegra", 1925 1924 .cra_priority = 500, 1925 + .cra_flags = CRYPTO_ALG_ASYNC, 1926 1926 .cra_blocksize = 1, 1927 1927 .cra_ctxsize = sizeof(struct tegra_aead_ctx), 1928 1928 .cra_alignmask = 0xf, ··· 1946 1944 .cra_name = "ccm(aes)", 1947 1945 .cra_driver_name = "ccm-aes-tegra", 1948 1946 .cra_priority = 500, 1947 + .cra_flags = CRYPTO_ALG_ASYNC, 1949 1948 .cra_blocksize = 1, 1950 1949 .cra_ctxsize = sizeof(struct tegra_aead_ctx), 1951 1950 .cra_alignmask = 0xf, ··· 1974 1971 .cra_name = "cmac(aes)", 1975 1972 .cra_driver_name = "tegra-se-cmac", 1976 1973 .cra_priority = 300, 1977 - .cra_flags = CRYPTO_ALG_TYPE_AHASH, 1974 + .cra_flags = CRYPTO_ALG_ASYNC, 1978 1975 .cra_blocksize = AES_BLOCK_SIZE, 1979 1976 .cra_ctxsize = sizeof(struct tegra_cmac_ctx), 1980 1977 .cra_alignmask = 0,
+17 -13
drivers/crypto/tegra/tegra-se-hash.c
··· 761 761 .cra_name = "sha1", 762 762 .cra_driver_name = "tegra-se-sha1", 763 763 .cra_priority = 300, 764 - .cra_flags = CRYPTO_ALG_TYPE_AHASH, 764 + .cra_flags = CRYPTO_ALG_ASYNC, 765 765 .cra_blocksize = SHA1_BLOCK_SIZE, 766 766 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 767 767 .cra_alignmask = 0, ··· 786 786 .cra_name = "sha224", 787 787 .cra_driver_name = "tegra-se-sha224", 788 788 .cra_priority = 300, 789 - .cra_flags = CRYPTO_ALG_TYPE_AHASH, 789 + .cra_flags = CRYPTO_ALG_ASYNC, 790 790 .cra_blocksize = SHA224_BLOCK_SIZE, 791 791 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 792 792 .cra_alignmask = 0, ··· 811 811 .cra_name = "sha256", 812 812 .cra_driver_name = "tegra-se-sha256", 813 813 .cra_priority = 300, 814 - .cra_flags = CRYPTO_ALG_TYPE_AHASH, 814 + .cra_flags = CRYPTO_ALG_ASYNC, 815 815 .cra_blocksize = SHA256_BLOCK_SIZE, 816 816 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 817 817 .cra_alignmask = 0, ··· 836 836 .cra_name = "sha384", 837 837 .cra_driver_name = "tegra-se-sha384", 838 838 .cra_priority = 300, 839 - .cra_flags = CRYPTO_ALG_TYPE_AHASH, 839 + .cra_flags = CRYPTO_ALG_ASYNC, 840 840 .cra_blocksize = SHA384_BLOCK_SIZE, 841 841 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 842 842 .cra_alignmask = 0, ··· 861 861 .cra_name = "sha512", 862 862 .cra_driver_name = "tegra-se-sha512", 863 863 .cra_priority = 300, 864 - .cra_flags = CRYPTO_ALG_TYPE_AHASH, 864 + .cra_flags = CRYPTO_ALG_ASYNC, 865 865 .cra_blocksize = SHA512_BLOCK_SIZE, 866 866 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 867 867 .cra_alignmask = 0, ··· 886 886 .cra_name = "sha3-224", 887 887 .cra_driver_name = "tegra-se-sha3-224", 888 888 .cra_priority = 300, 889 - .cra_flags = CRYPTO_ALG_TYPE_AHASH, 889 + .cra_flags = CRYPTO_ALG_ASYNC, 890 890 .cra_blocksize = SHA3_224_BLOCK_SIZE, 891 891 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 892 892 .cra_alignmask = 0, ··· 911 911 .cra_name = "sha3-256", 912 912 .cra_driver_name = "tegra-se-sha3-256", 913 913 .cra_priority = 300, 914 - .cra_flags = CRYPTO_ALG_TYPE_AHASH, 914 + .cra_flags = CRYPTO_ALG_ASYNC, 915 915 .cra_blocksize = SHA3_256_BLOCK_SIZE, 916 916 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 917 917 .cra_alignmask = 0, ··· 936 936 .cra_name = "sha3-384", 937 937 .cra_driver_name = "tegra-se-sha3-384", 938 938 .cra_priority = 300, 939 - .cra_flags = CRYPTO_ALG_TYPE_AHASH, 939 + .cra_flags = CRYPTO_ALG_ASYNC, 940 940 .cra_blocksize = SHA3_384_BLOCK_SIZE, 941 941 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 942 942 .cra_alignmask = 0, ··· 961 961 .cra_name = "sha3-512", 962 962 .cra_driver_name = "tegra-se-sha3-512", 963 963 .cra_priority = 300, 964 - .cra_flags = CRYPTO_ALG_TYPE_AHASH, 964 + .cra_flags = CRYPTO_ALG_ASYNC, 965 965 .cra_blocksize = SHA3_512_BLOCK_SIZE, 966 966 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 967 967 .cra_alignmask = 0, ··· 988 988 .cra_name = "hmac(sha224)", 989 989 .cra_driver_name = "tegra-se-hmac-sha224", 990 990 .cra_priority = 300, 991 - .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, 991 + .cra_flags = CRYPTO_ALG_ASYNC | 992 + CRYPTO_ALG_NEED_FALLBACK, 992 993 .cra_blocksize = SHA224_BLOCK_SIZE, 993 994 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 994 995 .cra_alignmask = 0, ··· 1016 1015 .cra_name = "hmac(sha256)", 1017 1016 .cra_driver_name = "tegra-se-hmac-sha256", 1018 1017 .cra_priority = 300, 1019 - .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, 1018 + .cra_flags = CRYPTO_ALG_ASYNC | 1019 + CRYPTO_ALG_NEED_FALLBACK, 1020 1020 .cra_blocksize = SHA256_BLOCK_SIZE, 1021 1021
.cra_ctxsize = sizeof(struct tegra_sha_ctx), 1022 1022 .cra_alignmask = 0, ··· 1044 1042 .cra_name = "hmac(sha384)", 1045 1043 .cra_driver_name = "tegra-se-hmac-sha384", 1046 1044 .cra_priority = 300, 1047 - .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, 1045 + .cra_flags = CRYPTO_ALG_ASYNC | 1046 + CRYPTO_ALG_NEED_FALLBACK, 1048 1047 .cra_blocksize = SHA384_BLOCK_SIZE, 1049 1048 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 1050 1049 .cra_alignmask = 0, ··· 1072 1069 .cra_name = "hmac(sha512)", 1073 1070 .cra_driver_name = "tegra-se-hmac-sha512", 1074 1071 .cra_priority = 300, 1075 - .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, 1072 + .cra_flags = CRYPTO_ALG_ASYNC | 1073 + CRYPTO_ALG_NEED_FALLBACK, 1076 1074 .cra_blocksize = SHA512_BLOCK_SIZE, 1077 1075 .cra_ctxsize = sizeof(struct tegra_sha_ctx), 1078 1076 .cra_alignmask = 0,
+6 -4
drivers/firmware/microchip/mpfs-auto-update.c
··· 113 113 * be added here. 114 114 */ 115 115 116 - priv->flash = mpfs_sys_controller_get_flash(priv->sys_controller); 117 - if (!priv->flash) 118 - return FW_UPLOAD_ERR_HW_ERROR; 119 - 120 116 erase_size = round_up(erase_size, (u64)priv->flash->erasesize); 121 117 122 118 /* ··· 422 426 if (IS_ERR(priv->sys_controller)) 423 427 return dev_err_probe(dev, PTR_ERR(priv->sys_controller), 424 428 "Could not register as a sub device of the system controller\n"); 429 + 430 + priv->flash = mpfs_sys_controller_get_flash(priv->sys_controller); 431 + if (IS_ERR_OR_NULL(priv->flash)) { 432 + dev_dbg(dev, "No flash connected to the system controller, auto-update not supported\n"); 433 + return -ENODEV; 434 + } 425 435 426 436 priv->dev = dev; 427 437 platform_set_drvdata(pdev, priv);
+1
drivers/gpib/Kconfig
··· 122 122 depends on OF 123 123 select GPIB_COMMON 124 124 select GPIB_NEC7210 125 + depends on HAS_IOMEM 125 126 help 126 127 GPIB driver for Fluke based cda devices. 127 128
+73 -23
drivers/gpib/common/gpib_os.c
··· 888 888 if (read_cmd.completed_transfer_count > read_cmd.requested_transfer_count) 889 889 return -EINVAL; 890 890 891 - desc = handle_to_descriptor(file_priv, read_cmd.handle); 892 - if (!desc) 893 - return -EINVAL; 894 - 895 891 if (WARN_ON_ONCE(sizeof(userbuf) > sizeof(read_cmd.buffer_ptr))) 896 892 return -EFAULT; 897 893 ··· 899 903 /* Check write access to buffer */ 900 904 if (!access_ok(userbuf, remain)) 901 905 return -EFAULT; 906 + 907 + /* Lock descriptors to prevent concurrent close from freeing descriptor */ 908 + if (mutex_lock_interruptible(&file_priv->descriptors_mutex)) 909 + return -ERESTARTSYS; 910 + desc = handle_to_descriptor(file_priv, read_cmd.handle); 911 + if (!desc) { 912 + mutex_unlock(&file_priv->descriptors_mutex); 913 + return -EINVAL; 914 + } 915 + atomic_inc(&desc->descriptor_busy); 916 + mutex_unlock(&file_priv->descriptors_mutex); 902 917 903 918 atomic_set(&desc->io_in_progress, 1); 904 919 ··· 944 937 retval = copy_to_user((void __user *)arg, &read_cmd, sizeof(read_cmd)); 945 938 946 939 atomic_set(&desc->io_in_progress, 0); 940 + atomic_dec(&desc->descriptor_busy); 947 941 948 942 wake_up_interruptible(&board->wait); 949 943 if (retval) ··· 972 964 if (cmd.completed_transfer_count > cmd.requested_transfer_count) 973 965 return -EINVAL; 974 966 975 - desc = handle_to_descriptor(file_priv, cmd.handle); 976 - if (!desc) 977 - return -EINVAL; 978 - 979 967 userbuf = (u8 __user *)(unsigned long)cmd.buffer_ptr; 980 968 userbuf += cmd.completed_transfer_count; 981 969 ··· 983 979 /* Check read access to buffer */ 984 980 if (!access_ok(userbuf, remain)) 985 981 return -EFAULT; 982 + 983 + /* Lock descriptors to prevent concurrent close from freeing descriptor */ 984 + if (mutex_lock_interruptible(&file_priv->descriptors_mutex)) 985 + return -ERESTARTSYS; 986 + desc = handle_to_descriptor(file_priv, cmd.handle); 987 + if (!desc) { 988 + mutex_unlock(&file_priv->descriptors_mutex); 989 + return -EINVAL; 990 + } 991 + atomic_inc(&desc->descriptor_busy); 992 + mutex_unlock(&file_priv->descriptors_mutex); 986 993 987 994 /* 988 995 * Write buffer loads till we empty the user supplied buffer.
··· 1018 1003 userbuf += bytes_written; 1019 1004 if (retval < 0) { 1020 1005 atomic_set(&desc->io_in_progress, 0); 1006 + atomic_dec(&desc->descriptor_busy); 1021 1007 1022 1008 wake_up_interruptible(&board->wait); 1023 1009 break; ··· 1038 1022 */ 1039 1023 if (!no_clear_io_in_prog || fault) 1040 1024 atomic_set(&desc->io_in_progress, 0); 1025 + atomic_dec(&desc->descriptor_busy); 1041 1026 1042 1027 wake_up_interruptible(&board->wait); 1043 1028 if (fault) ··· 1064 1047 if (write_cmd.completed_transfer_count > write_cmd.requested_transfer_count) 1065 1048 return -EINVAL; 1066 1049 1067 - desc = handle_to_descriptor(file_priv, write_cmd.handle); 1068 - if (!desc) 1069 - return -EINVAL; 1070 - 1071 1050 userbuf = (u8 __user *)(unsigned long)write_cmd.buffer_ptr; 1072 1051 userbuf += write_cmd.completed_transfer_count; 1073 1052 ··· 1072 1059 /* Check read access to buffer */ 1073 1060 if (!access_ok(userbuf, remain)) 1074 1061 return -EFAULT; 1062 + 1063 + /* Lock descriptors to prevent concurrent close from freeing descriptor */ 1064 + if (mutex_lock_interruptible(&file_priv->descriptors_mutex)) 1065 + return -ERESTARTSYS; 1066 + desc = handle_to_descriptor(file_priv, write_cmd.handle); 1067 + if (!desc) { 1068 + mutex_unlock(&file_priv->descriptors_mutex); 1069 + return -EINVAL; 1070 + } 1071 + atomic_inc(&desc->descriptor_busy); 1072 + mutex_unlock(&file_priv->descriptors_mutex); 1075 1073 1076 1074 atomic_set(&desc->io_in_progress, 1); 1077 1075 ··· 1118 1094 fault = copy_to_user((void __user *)arg, &write_cmd, sizeof(write_cmd)); 1119 1095 1120 1096 atomic_set(&desc->io_in_progress, 0); 1097 + atomic_dec(&desc->descriptor_busy); 1121 1098 1122 1099 wake_up_interruptible(&board->wait); 1123 1100 if (fault) ··· 1301 1276 { 1302 1277 struct gpib_close_dev_ioctl cmd; 1303 1278 struct gpib_file_private *file_priv = filep->private_data; 1279 + struct gpib_descriptor *desc; 1280 + unsigned int pad; 1281 + int sad; 1304 1282 int retval; 1305 1283 1306 1284 retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd)); ··· 1312 1284 1313 1285 if (cmd.handle >= GPIB_MAX_NUM_DESCRIPTORS) 1314 1286 return -EINVAL; 1315 - if (!file_priv->descriptors[cmd.handle]) 1287 + 1288 + mutex_lock(&file_priv->descriptors_mutex); 1289 + desc = file_priv->descriptors[cmd.handle]; 1290 + if (!desc) { 1291 + mutex_unlock(&file_priv->descriptors_mutex); 1316 1292 return -EINVAL; 1317 - 1318 - retval = decrement_open_device_count(board, &board->device_list, 1319 - file_priv->descriptors[cmd.handle]->pad, 1320 - file_priv->descriptors[cmd.handle]->sad); 1321 - if (retval < 0) 1322 - return retval; 1323 - 1324 - kfree(file_priv->descriptors[cmd.handle]); 1293 + } 1294 + if (atomic_read(&desc->descriptor_busy)) { 1295 + mutex_unlock(&file_priv->descriptors_mutex); 1296 + return -EBUSY; 1297 + } 1298 + /* Remove from table while holding lock to prevent new IO from starting */ 1325 1299 file_priv->descriptors[cmd.handle] = NULL; 1300 + pad = desc->pad; 1301 + sad = desc->sad; 1302 + mutex_unlock(&file_priv->descriptors_mutex); 1326 1303 1327 - return 0; 1304 + retval = decrement_open_device_count(board, &board->device_list, pad, sad); 1305 + 1306 + kfree(desc); 1307 + return retval; 1328 1308 } 1329 1309 1330 1310 static int serial_poll_ioctl(struct gpib_board *board, unsigned long arg) ··· 1367 1331 if (retval) 1368 1332 return -EFAULT; 1369 1333 1334 + /* 1335 + * Lock descriptors to prevent concurrent close from freeing 1336 + * descriptor.
ibwait() releases big_gpib_mutex when wait_mask 1337 + * is non-zero, so desc must be pinned with descriptor_busy. 1338 + */ 1339 + mutex_lock(&file_priv->descriptors_mutex); 1370 1340 desc = handle_to_descriptor(file_priv, wait_cmd.handle); 1371 - if (!desc) 1341 + if (!desc) { 1342 + mutex_unlock(&file_priv->descriptors_mutex); 1372 1343 return -EINVAL; 1344 + } 1345 + atomic_inc(&desc->descriptor_busy); 1346 + mutex_unlock(&file_priv->descriptors_mutex); 1373 1347 1374 1348 retval = ibwait(board, wait_cmd.wait_mask, wait_cmd.clear_mask, 1375 1349 wait_cmd.set_mask, &wait_cmd.ibsta, wait_cmd.usec_timeout, desc); 1350 + 1351 + atomic_dec(&desc->descriptor_busy); 1352 + 1376 1353 if (retval < 0) 1377 1354 return retval; 1378 1355 ··· 2084 2035 desc->is_board = 0; 2085 2036 desc->autopoll_enabled = 0; 2086 2037 atomic_set(&desc->io_in_progress, 0); 2038 + atomic_set(&desc->descriptor_busy, 0); 2087 2039 } 2088 2040 2089 2041 int gpib_register_driver(struct gpib_interface *interface, struct module *provider_module)
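The ioctl handlers all follow the same pin/unpin discipline around descriptor_busy. Factored into one helper for illustration only (the driver open-codes this sequence at each call site):

/* Look up a handle and pin the descriptor against a concurrent close. */
static struct gpib_descriptor *pin_descriptor(struct gpib_file_private *fp,
                                              int handle)
{
        struct gpib_descriptor *desc;

        if (mutex_lock_interruptible(&fp->descriptors_mutex))
                return ERR_PTR(-ERESTARTSYS);

        desc = handle_to_descriptor(fp, handle);
        if (desc)
                atomic_inc(&desc->descriptor_busy);
        mutex_unlock(&fp->descriptors_mutex);

        return desc ?: ERR_PTR(-EINVAL);
}

close_dev_ioctl() refuses with -EBUSY while descriptor_busy is nonzero, so the kfree() can never race an in-flight transfer.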
+8
drivers/gpib/include/gpib_types.h
··· 364 364 unsigned int pad; /* primary gpib address */ 365 365 int sad; /* secondary gpib address (negative means disabled) */ 366 366 atomic_t io_in_progress; 367 + /* 368 + * Kernel-only reference count to prevent descriptor from being 369 + * freed while IO handlers hold a pointer to it. Incremented 370 + * before each IO operation, decremented when done. Unlike 371 + * io_in_progress, this cannot be modified from userspace via 372 + * general_ibstatus(). 373 + */ 374 + atomic_t descriptor_busy; 367 375 unsigned is_board : 1; 368 376 unsigned autopoll_enabled : 1; 369 377 };
+2 -2
drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
··· 406 406 for (j = 0 ; j < MAX_DEV ; j++) { 407 407 if ((assigned_usb_minors & 1 << j) == 0) 408 408 continue; 409 - udev = usb_get_dev(interface_to_usbdev(lpvo_usb_interfaces[j])); 409 + udev = interface_to_usbdev(lpvo_usb_interfaces[j]); 410 410 device_path = kobject_get_path(&udev->dev.kobj, GFP_KERNEL); 411 411 match = gpib_match_device_path(&lpvo_usb_interfaces[j]->dev, 412 412 config->device_path); ··· 421 421 for (j = 0 ; j < MAX_DEV ; j++) { 422 422 if ((assigned_usb_minors & 1 << j) == 0) 423 423 continue; 424 - udev = usb_get_dev(interface_to_usbdev(lpvo_usb_interfaces[j])); 424 + udev = interface_to_usbdev(lpvo_usb_interfaces[j]); 425 425 DIA_LOG(1, "dev. %d: bus %d -> %d dev: %d -> %d\n", j, 426 426 udev->bus->busnum, config->pci_bus, udev->devnum, config->pci_slot); 427 427 if (config->pci_bus == udev->bus->busnum &&
+9 -1
drivers/gpio/gpio-mxc.c
··· 584 584 unsigned long config; 585 585 bool ret = false; 586 586 int i, type; 587 + bool is_imx8qm = of_device_is_compatible(port->dev->of_node, "fsl,imx8qm-gpio"); 587 588 588 589 static const u32 pad_type_map[] = { 589 590 IMX_SCU_WAKEUP_OFF, /* 0 */ 590 591 IMX_SCU_WAKEUP_RISE_EDGE, /* IRQ_TYPE_EDGE_RISING */ 591 592 IMX_SCU_WAKEUP_FALL_EDGE, /* IRQ_TYPE_EDGE_FALLING */ 592 - IMX_SCU_WAKEUP_FALL_EDGE, /* IRQ_TYPE_EDGE_BOTH */ 593 + IMX_SCU_WAKEUP_RISE_EDGE, /* IRQ_TYPE_EDGE_BOTH */ 593 594 IMX_SCU_WAKEUP_HIGH_LVL, /* IRQ_TYPE_LEVEL_HIGH */ 594 595 IMX_SCU_WAKEUP_OFF, /* 5 */ 595 596 IMX_SCU_WAKEUP_OFF, /* 6 */ ··· 605 604 config = pad_type_map[type]; 606 605 else 607 606 config = IMX_SCU_WAKEUP_OFF; 607 + 608 + if (is_imx8qm && config == IMX_SCU_WAKEUP_FALL_EDGE) { 609 + dev_warn_once(port->dev, 610 + "No falling-edge support for wakeup on i.MX8QM\n"); 611 + config = IMX_SCU_WAKEUP_OFF; 612 + } 613 + 608 614 ret |= mxc_gpio_generic_config(port, i, config); 609 615 } 610 616 }
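pad_type_map is indexed by the IRQ trigger type, so IRQ_TYPE_EDGE_BOTH has to map to a single SCU wakeup mode; the fix picks the rising edge, and on i.MX8QM, where falling-edge wakeup is unsupported, the new branch warns once and disables the wakeup rather than programming an unsupported mode. A standalone sketch of the bounded lookup-table idiom (types and names invented for illustration):

#include <stdio.h>

enum wake_cfg { WAKE_OFF, WAKE_RISE, WAKE_FALL, WAKE_HIGH };

/* Indexed by trigger type; unknown types fall back to "off". */
static const enum wake_cfg type_to_cfg[] = {
	[0] = WAKE_OFF,
	[1] = WAKE_RISE,	/* edge rising */
	[2] = WAKE_FALL,	/* edge falling */
	[3] = WAKE_RISE,	/* edge both: pick one supported edge */
	[4] = WAKE_HIGH,	/* level high */
};

static enum wake_cfg lookup_cfg(unsigned int type, int fall_unsupported)
{
	enum wake_cfg cfg = WAKE_OFF;

	if (type < sizeof(type_to_cfg) / sizeof(type_to_cfg[0]))
		cfg = type_to_cfg[type];
	if (fall_unsupported && cfg == WAKE_FALL)
		cfg = WAKE_OFF;	/* degrade rather than misprogram */
	return cfg;
}

int main(void)
{
	printf("%d %d\n", lookup_cfg(3, 0), lookup_cfg(2, 1));	/* 1 0 */
	return 0;
}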
+2 -2
drivers/gpio/gpio-qixis-fpga.c
··· 60 60 return PTR_ERR(reg); 61 61 62 62 regmap = devm_regmap_init_mmio(&pdev->dev, reg, &regmap_config_8r_8v); 63 - if (!regmap) 64 - return -ENODEV; 63 + if (IS_ERR(regmap)) 64 + return PTR_ERR(regmap); 65 65 66 66 /* In this case, the offset of our register is 0 inside the 67 67 * regmap area that we just created.
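devm_regmap_init_mmio() reports failure through an ERR_PTR()-encoded pointer, never NULL, so the old `!regmap` test let error pointers escape and turned the real errno into a bogus -ENODEV. A tiny userspace re-implementation of the convention, assuming the usual -4095..-1 errno window, shows why IS_ERR() is the right check:

#include <stdio.h>

#define MAX_ERRNO 4095	/* errnos live in the top 4095 addresses */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *make_thing(int fail)
{
	static int thing;

	return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)&thing;
}

int main(void)
{
	void *p = make_thing(1);

	if (IS_ERR(p))		/* a NULL check would never fire here */
		printf("failed: %ld\n", PTR_ERR(p));
	return 0;
}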
+41 -16
drivers/gpio/gpiolib-shared.c
··· 443 443 } 444 444 #endif /* CONFIG_RESET_GPIO */ 445 445 446 - int gpio_shared_add_proxy_lookup(struct device *consumer, const char *con_id, 447 - unsigned long lflags) 446 + int gpio_shared_add_proxy_lookup(struct device *consumer, struct fwnode_handle *fwnode, 447 + const char *con_id, unsigned long lflags) 448 448 { 449 449 const char *dev_id = dev_name(consumer); 450 450 struct gpiod_lookup_table *lookup; ··· 458 458 if (!ref->fwnode && device_is_compatible(consumer, "reset-gpio")) { 459 459 if (!gpio_shared_dev_is_reset_gpio(consumer, entry, ref)) 460 460 continue; 461 - } else if (!device_match_fwnode(consumer, ref->fwnode)) { 461 + } else if (fwnode != ref->fwnode) { 462 462 continue; 463 463 } 464 464 ··· 506 506 auxiliary_device_uninit(adev); 507 507 } 508 508 509 - int gpio_device_setup_shared(struct gpio_device *gdev) 509 + int gpiochip_setup_shared(struct gpio_chip *gc) 510 510 { 511 + struct gpio_device *gdev = gc->gpiodev; 511 512 struct gpio_shared_entry *entry; 512 513 struct gpio_shared_ref *ref; 513 514 struct gpio_desc *desc; ··· 539 538 if (list_count_nodes(&entry->refs) <= 1) 540 539 continue; 541 540 542 - desc = &gdev->descs[entry->offset]; 541 + scoped_guard(mutex, &entry->lock) { 542 + #if IS_ENABLED(CONFIG_OF) 543 + if (is_of_node(entry->fwnode) && gc->of_xlate) { 544 + /* 545 + * This is the earliest that we can translate the 546 + * devicetree offset to the chip offset. 547 + */ 548 + struct of_phandle_args gpiospec = { }; 543 549 

544 - __set_bit(GPIOD_FLAG_SHARED, &desc->flags); 545 - /* 546 - * Shared GPIOs are not requested via the normal path. Make 547 - * them inaccessible to anyone even before we register the 548 - * chip. 549 - */ 550 - ret = gpiod_request_commit(desc, "shared"); 551 - if (ret) 552 - return ret; 550 + gpiospec.np = to_of_node(entry->fwnode); 551 + gpiospec.args_count = 2; 552 + gpiospec.args[0] = entry->offset; 553 553 

554 - pr_debug("GPIO %u owned by %s is shared by multiple consumers\n", 555 - entry->offset, gpio_device_get_label(gdev)); 554 + ret = gc->of_xlate(gc, &gpiospec, NULL); 555 + if (ret < 0) 556 + return ret; 557 + 558 + entry->offset = ret; 559 + } 560 + #endif /* CONFIG_OF */ 561 + 562 + desc = &gdev->descs[entry->offset]; 563 + 564 + __set_bit(GPIOD_FLAG_SHARED, &desc->flags); 565 + /* 566 + * Shared GPIOs are not requested via the normal path. Make 567 + * them inaccessible to anyone even before we register the 568 + * chip. 569 + */ 570 + ret = gpiod_request_commit(desc, "shared"); 571 + if (ret) 572 + return ret; 573 + 574 + pr_debug("GPIO %u owned by %s is shared by multiple consumers\n", 575 + entry->offset, gpio_device_get_label(gdev)); 576 + } 556 577 

557 578 list_for_each_entry(ref, &entry->refs, list) { 558 579 pr_debug("Setting up a shared GPIO entry for %s (con_id: '%s')\n", ··· 598 575 struct gpio_shared_ref *ref; 599 576 600 577 list_for_each_entry(entry, &gpio_shared_list, list) { 578 + guard(mutex)(&entry->lock); 579 + 601 580 if (!device_match_fwnode(&gdev->dev, entry->fwnode)) 602 581 continue; 603 582
+7 -4
drivers/gpio/gpiolib-shared.h
··· 11 11 struct gpio_device; 12 12 struct gpio_desc; 13 13 struct device; 14 + struct fwnode_handle; 14 15 15 16 #if IS_ENABLED(CONFIG_GPIO_SHARED) 16 17 17 - int gpio_device_setup_shared(struct gpio_device *gdev); 18 + int gpiochip_setup_shared(struct gpio_chip *gc); 18 19 void gpio_device_teardown_shared(struct gpio_device *gdev); 19 - int gpio_shared_add_proxy_lookup(struct device *consumer, const char *con_id, 20 - unsigned long lflags); 20 + int gpio_shared_add_proxy_lookup(struct device *consumer, 21 + struct fwnode_handle *fwnode, 22 + const char *con_id, unsigned long lflags); 21 23 22 24 #else 23 25 24 - static inline int gpio_device_setup_shared(struct gpio_device *gdev) 26 + static inline int gpiochip_setup_shared(struct gpio_chip *gc) 25 27 { 26 28 return 0; 27 29 } ··· 31 29 static inline void gpio_device_teardown_shared(struct gpio_device *gdev) { } 32 30 33 31 static inline int gpio_shared_add_proxy_lookup(struct device *consumer, 32 + struct fwnode_handle *fwnode, 34 33 const char *con_id, 35 34 unsigned long lflags) 36 35 {
+66 -69
drivers/gpio/gpiolib.c
··· 892 892 #define gcdev_unregister(gdev) device_del(&(gdev)->dev) 893 893 #endif 894 894 895 + /* 896 + * An initial reference count has been held in gpiochip_add_data_with_key(). 897 + * The caller should drop the reference via gpio_device_put() on errors. 898 + */ 895 899 static int gpiochip_setup_dev(struct gpio_device *gdev) 896 900 { 897 901 struct fwnode_handle *fwnode = dev_fwnode(&gdev->dev); 898 902 int ret; 899 - 900 - device_initialize(&gdev->dev); 901 903 902 904 /* 903 905 * If fwnode doesn't belong to another device, it's safe to clear its ··· 966 964 list_for_each_entry_srcu(gdev, &gpio_devices, list, 967 965 srcu_read_lock_held(&gpio_devices_srcu)) { 968 966 ret = gpiochip_setup_dev(gdev); 969 - if (ret) 967 + if (ret) { 968 + gpio_device_put(gdev); 970 969 dev_err(&gdev->dev, 971 970 "Failed to initialize gpio device (%d)\n", ret); 971 + } 972 972 } 973 973 } 974 974 ··· 1051 1047 int base = 0; 1052 1048 int ret; 1053 1049 1054 - /* 1055 - * First: allocate and populate the internal stat container, and 1056 - * set up the struct device. 1057 - */ 1058 1050 gdev = kzalloc(sizeof(*gdev), GFP_KERNEL); 1059 1051 if (!gdev) 1060 1052 return -ENOMEM; 1061 - 1062 - gdev->dev.type = &gpio_dev_type; 1063 - gdev->dev.bus = &gpio_bus_type; 1064 - gdev->dev.parent = gc->parent; 1065 - rcu_assign_pointer(gdev->chip, gc); 1066 - 1067 1053 gc->gpiodev = gdev; 1068 1054 gpiochip_set_data(gc, data); 1069 - 1070 - device_set_node(&gdev->dev, gpiochip_choose_fwnode(gc)); 1071 1055 1072 1056 ret = ida_alloc(&gpio_ida, GFP_KERNEL); 1073 1057 if (ret < 0) 1074 1058 goto err_free_gdev; 1075 1059 gdev->id = ret; 1076 1060 1077 - ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id); 1061 + ret = init_srcu_struct(&gdev->srcu); 1078 1062 if (ret) 1079 1063 goto err_free_ida; 1064 + rcu_assign_pointer(gdev->chip, gc); 1080 1065 1066 + ret = init_srcu_struct(&gdev->desc_srcu); 1067 + if (ret) 1068 + goto err_cleanup_gdev_srcu; 1069 + 1070 + ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id); 1071 + if (ret) 1072 + goto err_cleanup_desc_srcu; 1073 + 1074 + device_initialize(&gdev->dev); 1075 + /* 1076 + * After this point any allocated resources to `gdev` will be 1077 + * free():ed by gpiodev_release(). If you add new resources 1078 + * then make sure they get free():ed there. 
1079 + */ 1080 + gdev->dev.type = &gpio_dev_type; 1081 + gdev->dev.bus = &gpio_bus_type; 1082 + gdev->dev.parent = gc->parent; 1083 + device_set_node(&gdev->dev, gpiochip_choose_fwnode(gc)); 1084 + 1085 + ret = gpiochip_get_ngpios(gc, &gdev->dev); 1086 + if (ret) 1087 + goto err_put_device; 1088 + gdev->ngpio = gc->ngpio; 1089 + 1090 + gdev->descs = kcalloc(gc->ngpio, sizeof(*gdev->descs), GFP_KERNEL); 1091 + if (!gdev->descs) { 1092 + ret = -ENOMEM; 1093 + goto err_put_device; 1094 + } 1095 + 1096 + gdev->label = kstrdup_const(gc->label ?: "unknown", GFP_KERNEL); 1097 + if (!gdev->label) { 1098 + ret = -ENOMEM; 1099 + goto err_put_device; 1100 + } 1101 + 1102 + gdev->can_sleep = gc->can_sleep; 1103 + rwlock_init(&gdev->line_state_lock); 1104 + RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier); 1105 + BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier); 1106 + #ifdef CONFIG_PINCTRL 1107 + INIT_LIST_HEAD(&gdev->pin_ranges); 1108 + #endif 1081 1109 if (gc->parent && gc->parent->driver) 1082 1110 gdev->owner = gc->parent->driver->owner; 1083 1111 else if (gc->owner) ··· 1117 1081 gdev->owner = gc->owner; 1118 1082 else 1119 1083 gdev->owner = THIS_MODULE; 1120 - 1121 - ret = gpiochip_get_ngpios(gc, &gdev->dev); 1122 - if (ret) 1123 - goto err_free_dev_name; 1124 - 1125 - gdev->descs = kcalloc(gc->ngpio, sizeof(*gdev->descs), GFP_KERNEL); 1126 - if (!gdev->descs) { 1127 - ret = -ENOMEM; 1128 - goto err_free_dev_name; 1129 - } 1130 - 1131 - gdev->label = kstrdup_const(gc->label ?: "unknown", GFP_KERNEL); 1132 - if (!gdev->label) { 1133 - ret = -ENOMEM; 1134 - goto err_free_descs; 1135 - } 1136 - 1137 - gdev->ngpio = gc->ngpio; 1138 - gdev->can_sleep = gc->can_sleep; 1139 - 1140 - rwlock_init(&gdev->line_state_lock); 1141 - RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier); 1142 - BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier); 1143 - 1144 - ret = init_srcu_struct(&gdev->srcu); 1145 - if (ret) 1146 - goto err_free_label; 1147 - 1148 - ret = init_srcu_struct(&gdev->desc_srcu); 1149 - if (ret) 1150 - goto err_cleanup_gdev_srcu; 1151 1084 1152 1085 scoped_guard(mutex, &gpio_devices_lock) { 1153 1086 /* ··· 1132 1127 if (base < 0) { 1133 1128 ret = base; 1134 1129 base = 0; 1135 - goto err_cleanup_desc_srcu; 1130 + goto err_put_device; 1136 1131 } 1137 1132 1138 1133 /* ··· 1152 1147 ret = gpiodev_add_to_list_unlocked(gdev); 1153 1148 if (ret) { 1154 1149 gpiochip_err(gc, "GPIO integer space overlap, cannot add chip\n"); 1155 - goto err_cleanup_desc_srcu; 1150 + goto err_put_device; 1156 1151 } 1157 1152 } 1158 - 1159 - #ifdef CONFIG_PINCTRL 1160 - INIT_LIST_HEAD(&gdev->pin_ranges); 1161 - #endif 1162 1153 1163 1154 if (gc->names) 1164 1155 gpiochip_set_desc_names(gc); ··· 1211 1210 if (ret) 1212 1211 goto err_remove_irqchip_mask; 1213 1212 1214 - ret = gpio_device_setup_shared(gdev); 1213 + ret = gpiochip_setup_shared(gc); 1215 1214 if (ret) 1216 1215 goto err_remove_irqchip; 1217 1216 ··· 1249 1248 scoped_guard(mutex, &gpio_devices_lock) 1250 1249 list_del_rcu(&gdev->list); 1251 1250 synchronize_srcu(&gpio_devices_srcu); 1252 - if (gdev->dev.release) { 1253 - /* release() has been registered by gpiochip_setup_dev() */ 1254 - gpio_device_put(gdev); 1255 - goto err_print_message; 1256 - } 1251 + err_put_device: 1252 + gpio_device_put(gdev); 1253 + goto err_print_message; 1254 + 1257 1255 err_cleanup_desc_srcu: 1258 1256 cleanup_srcu_struct(&gdev->desc_srcu); 1259 1257 err_cleanup_gdev_srcu: 1260 1258 cleanup_srcu_struct(&gdev->srcu); 1261 - err_free_label: 1262 - kfree_const(gdev->label); 
1263 - err_free_descs: 1264 - kfree(gdev->descs); 1265 - err_free_dev_name: 1266 - kfree(dev_name(&gdev->dev)); 1267 1259 err_free_ida: 1268 1260 ida_free(&gpio_ida, gdev->id); 1269 1261 err_free_gdev: 1270 1262 kfree(gdev); 1263 + 1271 1264 err_print_message: 1272 1265 /* failures here can mean systems won't boot... */ 1273 1266 if (ret != -EPROBE_DEFER) { ··· 2460 2465 return -EBUSY; 2461 2466 2462 2467 offset = gpiod_hwgpio(desc); 2463 - if (!gpiochip_line_is_valid(guard.gc, offset)) 2464 - return -EINVAL; 2468 + if (!gpiochip_line_is_valid(guard.gc, offset)) { 2469 + ret = -EINVAL; 2470 + goto out_clear_bit; 2471 + } 2465 2472 2466 2473 /* NOTE: gpio_request() can be called in early boot, 2467 2474 * before IRQs are enabled, for non-sleeping (SOC) GPIOs. ··· 4714 4717 * lookup table for the proxy device as previously 4715 4718 * we only knew the consumer's fwnode. 4716 4719 */ 4717 - ret = gpio_shared_add_proxy_lookup(consumer, con_id, 4718 - lookupflags); 4720 + ret = gpio_shared_add_proxy_lookup(consumer, fwnode, 4721 + con_id, lookupflags); 4719 4722 if (ret) 4720 4723 return ERR_PTR(ret); 4721 4724
+6 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2703 2703 if (r) 2704 2704 return r; 2705 2705 2706 - if (amdgpu_acpi_should_gpu_reset(adev)) 2707 - return amdgpu_asic_reset(adev); 2706 + if (amdgpu_acpi_should_gpu_reset(adev)) { 2707 + amdgpu_device_lock_reset_domain(adev->reset_domain); 2708 + r = amdgpu_asic_reset(adev); 2709 + amdgpu_device_unlock_reset_domain(adev->reset_domain); 2710 + return r; 2711 + } 2708 2712 return 0; 2709 2713 } 2710 2714
+44
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
··· 404 404 } 405 405 406 406 /** 407 + * amdgpu_gart_map_gfx9_mqd - map mqd and ctrl_stack dma_addresses into GART entries 408 + * 409 + * @adev: amdgpu_device pointer 410 + * @offset: offset into the GPU's gart aperture 411 + * @pages: number of pages to bind 412 + * @dma_addr: DMA addresses of pages 413 + * @flags: page table entry flags 414 + * 415 + * Map the MQD and control stack addresses into GART entries with the correct 416 + * memory types on gfxv9. The MQD occupies the first 4KB and is followed by 417 + * the control stack. The MQD uses UC (uncached) memory, while the control stack 418 + * uses NC (non-coherent) memory. 419 + */ 420 + void amdgpu_gart_map_gfx9_mqd(struct amdgpu_device *adev, uint64_t offset, 421 + int pages, dma_addr_t *dma_addr, uint64_t flags) 422 + { 423 + uint64_t page_base; 424 + unsigned int i, j, t; 425 + int idx; 426 + uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC); 427 + void *dst; 428 + 429 + if (!adev->gart.ptr) 430 + return; 431 + 432 + if (!drm_dev_enter(adev_to_drm(adev), &idx)) 433 + return; 434 + 435 + t = offset / AMDGPU_GPU_PAGE_SIZE; 436 + dst = adev->gart.ptr; 437 + for (i = 0; i < pages; i++) { 438 + page_base = dma_addr[i]; 439 + for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) { 440 + if ((i == 0) && (j == 0)) 441 + amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags); 442 + else 443 + amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, ctrl_flags); 444 + page_base += AMDGPU_GPU_PAGE_SIZE; 445 + } 446 + } 447 + drm_dev_exit(idx); 448 + } 449 + 450 + /** 407 451 * amdgpu_gart_bind - bind pages into the gart page table 408 452 * 409 453 * @adev: amdgpu_device pointer
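The new helper's walk is two-level: the outer loop steps through CPU pages, the inner loop writes the AMDGPU_GPU_PAGES_IN_CPU_PAGE GART entries inside each one, and only the very first entry (i == 0 && j == 0) keeps the caller's UC flags for the 4K MQD, while every entry after it gets the NC control-stack flags. A standalone sketch of that indexing, assuming 16K CPU pages over 4K GPU pages for illustration:

#include <stdio.h>

#define CPU_PAGE	16384	/* assumed: e.g. a 16K-page kernel */
#define GPU_PAGE	4096	/* GART granularity is fixed at 4K */
#define GPU_PER_CPU	(CPU_PAGE / GPU_PAGE)

int main(void)
{
	unsigned int t = 0;
	int pages = 2;		/* number of CPU pages to map */

	for (int i = 0; i < pages; i++)
		for (int j = 0; j < GPU_PER_CPU; j++, t++)
			printf("entry %u: %s\n", t,
			       (i == 0 && j == 0) ? "MQD flags (UC)"
						  : "ctrl-stack flags (NC)");
	return 0;
}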
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
··· 62 62 void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, 63 63 int pages, dma_addr_t *dma_addr, uint64_t flags, 64 64 void *dst); 65 + void amdgpu_gart_map_gfx9_mqd(struct amdgpu_device *adev, uint64_t offset, 66 + int pages, dma_addr_t *dma_addr, uint64_t flags); 65 67 void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, 66 68 int pages, dma_addr_t *dma_addr, uint64_t flags); 67 69 void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
+4 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
··· 68 68 return -EINVAL; 69 69 70 70 spin_lock(&amdgpu_pasid_idr_lock); 71 + /* TODO: Need to replace the idr with an xarray, and then 72 + * handle the internal locking with ATOMIC safe paths. 73 + */ 71 74 pasid = idr_alloc_cyclic(&amdgpu_pasid_idr, NULL, 1, 72 - 1U << bits, GFP_KERNEL); 75 + 1U << bits, GFP_ATOMIC); 73 76 spin_unlock(&amdgpu_pasid_idr_lock); 74 77 75 78 if (pasid >= 0)
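The allocation sits inside amdgpu_pasid_idr_lock, a spinlock, and GFP_KERNEL allocations may sleep, which is forbidden there; GFP_ATOMIC never sleeps, trading a higher chance of failure for correctness until the idr and its external lock can be replaced with an xarray. A kernel-style sketch of the constraint (demo names, not the amdgpu code):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static DEFINE_IDR(demo_idr);

static int demo_alloc_id(void *payload)
{
	int id;

	spin_lock(&demo_lock);
	/*
	 * GFP_KERNEL could sleep with the spinlock held; GFP_ATOMIC
	 * allocates from reserves and never sleeps.
	 */
	id = idr_alloc(&demo_idr, payload, 1, 0, GFP_ATOMIC);
	spin_unlock(&demo_lock);

	return id;	/* >= 1 on success, -errno on failure */
}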
+3 -13
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 853 853 int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp); 854 854 uint64_t page_idx, pages_per_xcc; 855 855 int i; 856 - uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC); 857 856 858 857 pages_per_xcc = total_pages; 859 858 do_div(pages_per_xcc, num_xcc); 860 859 861 860 for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) { 862 - /* MQD page: use default flags */ 863 - amdgpu_gart_bind(adev, 861 + amdgpu_gart_map_gfx9_mqd(adev, 864 862 gtt->offset + (page_idx << PAGE_SHIFT), 865 - 1, &gtt->ttm.dma_address[page_idx], flags); 866 - /* 867 - * Ctrl pages - modify the memory type to NC (ctrl_flags) from 868 - * the second page of the BO onward. 869 - */ 870 - amdgpu_gart_bind(adev, 871 - gtt->offset + ((page_idx + 1) << PAGE_SHIFT), 872 - pages_per_xcc - 1, 873 - &gtt->ttm.dma_address[page_idx + 1], 874 - ctrl_flags); 863 + pages_per_xcc, &gtt->ttm.dma_address[page_idx], 864 + flags); 875 865 } 876 866 } 877 867
+7
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
··· 600 600 goto unpin_bo; 601 601 } 602 602 603 + /* Validate doorbell_offset is within the doorbell BO */ 604 + if ((u64)db_info->doorbell_offset * db_size + db_size > 605 + amdgpu_bo_size(db_obj->obj)) { 606 + r = -EINVAL; 607 + goto unpin_bo; 608 + } 609 + 603 610 index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj, 604 611 db_info->doorbell_offset, db_size); 605 612 drm_dbg_driver(adev_to_drm(uq_mgr->adev),
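The new bounds check widens doorbell_offset to u64 before multiplying, so a large offset cannot wrap a 32-bit product and slip past the test; the slot must end at or before the end of the doorbell BO. A standalone model of the check, assuming a 32-bit offset and slot size so the widened product provably cannot wrap 64 bits either:

#include <stdint.h>
#include <stdio.h>

/* True when slot [off*size, off*size + size) fits inside buf_size. */
static int slot_in_bounds(uint32_t off, uint32_t size, uint64_t buf_size)
{
	/* widen first: (2^32-1)^2 + (2^32-1) still fits in 64 bits */
	uint64_t end = (uint64_t)off * size + size;

	return end <= buf_size;
}

int main(void)
{
	printf("%d\n", slot_in_bounds(7, 8, 64));	/* 1: last slot */
	printf("%d\n", slot_in_bounds(8, 8, 64));	/* 0: one past end */
	printf("%d\n", slot_in_bounds(UINT32_MAX, 8, 64)); /* 0: no wrap */
	return 0;
}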
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 173 173 #define AMDGPU_VA_RESERVED_SEQ64_SIZE (2ULL << 20) 174 174 #define AMDGPU_VA_RESERVED_SEQ64_START(adev) (AMDGPU_VA_RESERVED_CSA_START(adev) \ 175 175 - AMDGPU_VA_RESERVED_SEQ64_SIZE) 176 - #define AMDGPU_VA_RESERVED_TRAP_SIZE (2ULL << 12) 176 + #define AMDGPU_VA_RESERVED_TRAP_SIZE (1ULL << 16) 177 177 #define AMDGPU_VA_RESERVED_TRAP_START(adev) (AMDGPU_VA_RESERVED_SEQ64_START(adev) \ 178 178 - AMDGPU_VA_RESERVED_TRAP_SIZE) 179 179 #define AMDGPU_VA_RESERVED_BOTTOM (1ULL << 16)
+12 -4
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
··· 324 324 325 325 r = amdgpu_userq_input_va_validate(adev, queue, compute_mqd->eop_va, 326 326 2048); 327 - if (r) 327 + if (r) { 328 + kfree(compute_mqd); 328 329 goto free_mqd; 330 + } 329 331 330 332 userq_props->eop_gpu_addr = compute_mqd->eop_va; 331 333 userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL; ··· 367 365 368 366 r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va, 369 367 shadow_info.shadow_size); 370 - if (r) 368 + if (r) { 369 + kfree(mqd_gfx_v11); 371 370 goto free_mqd; 371 + } 372 372 r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->csa_va, 373 373 shadow_info.csa_size); 374 - if (r) 374 + if (r) { 375 + kfree(mqd_gfx_v11); 375 376 goto free_mqd; 377 + } 376 378 377 379 kfree(mqd_gfx_v11); 378 380 } else if (queue->queue_type == AMDGPU_HW_IP_DMA) { ··· 396 390 } 397 391 r = amdgpu_userq_input_va_validate(adev, queue, mqd_sdma_v11->csa_va, 398 392 32); 399 - if (r) 393 + if (r) { 394 + kfree(mqd_sdma_v11); 400 395 goto free_mqd; 396 + } 401 397 402 398 userq_props->csa_addr = mqd_sdma_v11->csa_va; 403 399 kfree(mqd_sdma_v11);
+2 -1
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
··· 170 170 int retry_loop; 171 171 172 172 /* For a reset done at the end of S3, only wait for TOS to be unloaded */ 173 - if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev)) 173 + if ((adev->in_s4 || adev->in_s3) && !(adev->flags & AMD_IS_APU) && 174 + amdgpu_in_reset(adev)) 174 175 return psp_v11_wait_for_tos_unload(psp); 175 176 176 177 for (retry_loop = 0; retry_loop < 20; retry_loop++) {
+15 -8
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
··· 42 42 struct queue_properties *q) 43 43 { 44 44 if (mm->dev->kfd->cwsr_enabled && 45 - q->type == KFD_QUEUE_TYPE_COMPUTE) 46 - return ALIGN(q->ctl_stack_size, PAGE_SIZE) + 47 - ALIGN(sizeof(struct v9_mqd), PAGE_SIZE); 45 + q->type == KFD_QUEUE_TYPE_COMPUTE) { 46 + 47 + /* On gfxv9, the MQD resides in the first 4K page, 48 + * followed by the control stack. Align both to 49 + * AMDGPU_GPU_PAGE_SIZE to maintain the required 4K boundary. 50 + */ 51 + 52 + return ALIGN(ALIGN(q->ctl_stack_size, AMDGPU_GPU_PAGE_SIZE) + 53 + ALIGN(sizeof(struct v9_mqd), AMDGPU_GPU_PAGE_SIZE), PAGE_SIZE); 54 + } 48 55 49 56 return mm->mqd_size; 50 57 } ··· 158 151 if (!mqd_mem_obj) 159 152 return NULL; 160 153 retval = amdgpu_amdkfd_alloc_kernel_mem(node->adev, 161 - (ALIGN(q->ctl_stack_size, PAGE_SIZE) + 162 - ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) * 154 + (ALIGN(ALIGN(q->ctl_stack_size, AMDGPU_GPU_PAGE_SIZE) + 155 + ALIGN(sizeof(struct v9_mqd), AMDGPU_GPU_PAGE_SIZE), PAGE_SIZE)) * 163 156 NUM_XCC(node->xcc_mask), 164 157 mqd_on_vram(node->adev) ? AMDGPU_GEM_DOMAIN_VRAM : 165 158 AMDGPU_GEM_DOMAIN_GTT, ··· 367 360 struct kfd_context_save_area_header header; 368 361 369 362 /* Control stack is located one page after MQD. */ 370 - void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE); 363 + void *mqd_ctl_stack = (void *)((uintptr_t)mqd + AMDGPU_GPU_PAGE_SIZE); 371 364 372 365 m = get_mqd(mqd); 373 366 ··· 404 397 { 405 398 struct v9_mqd *m; 406 399 /* Control stack is located one page after MQD. */ 407 - void *ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE); 400 + void *ctl_stack = (void *)((uintptr_t)mqd + AMDGPU_GPU_PAGE_SIZE); 408 401 409 402 m = get_mqd(mqd); 410 403 ··· 450 443 *gart_addr = addr; 451 444 452 445 /* Control stack is located one page after MQD. */ 453 - ctl_stack = (void *)((uintptr_t)*mqd + PAGE_SIZE); 446 + ctl_stack = (void *)((uintptr_t)*mqd + AMDGPU_GPU_PAGE_SIZE); 454 447 memcpy(ctl_stack, ctl_stack_src, ctl_stack_size); 455 448 456 449 m->cp_hqd_pq_doorbell_control =
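The size arithmetic now aligns the MQD and the control stack to the 4K GPU page individually, then rounds the total up to the CPU PAGE_SIZE for the allocator. On kernels built with 16K or 64K pages, the old PAGE_SIZE-based rounding padded each piece to a whole CPU page and no longer placed the control stack one 4K page after the MQD, which is where the GART mapping helper above expects it. A worked standalone example of the nested ALIGN():

#include <stdio.h>

/* Round x up to a power-of-two boundary a, as the kernel macro does. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long gpu_page = 4096, cpu_page = 16384;	/* 16K kernel */
	unsigned long mqd = 2304, ctl_stack = 5000;		/* sample sizes */

	unsigned long per_xcc = ALIGN(ctl_stack, gpu_page) +
				ALIGN(mqd, gpu_page);		/* 8192 + 4096 */
	unsigned long alloc = ALIGN(per_xcc, cpu_page);		/* 16384 */

	printf("per_xcc=%lu alloc=%lu\n", per_xcc, alloc);
	return 0;
}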
+2 -2
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 102 102 * The first chunk is the TBA used for the CWSR ISA code. The second 103 103 * chunk is used as TMA for user-mode trap handler setup in daisy-chain mode. 104 104 */ 105 - #define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2) 106 - #define KFD_CWSR_TMA_OFFSET (PAGE_SIZE + 2048) 105 + #define KFD_CWSR_TBA_TMA_SIZE (AMDGPU_GPU_PAGE_SIZE * 2) 106 + #define KFD_CWSR_TMA_OFFSET (AMDGPU_GPU_PAGE_SIZE + 2048) 107 107 108 108 #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ 109 109 (KFD_MAX_NUM_OF_PROCESSES * \
+6 -5
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
··· 249 249 topo_dev->node_props.gfx_target_version < 90000) 250 250 /* metadata_queue_size not supported on GFX7/GFX8 */ 251 251 expected_queue_size = 252 - properties->queue_size / 2; 252 + PAGE_ALIGN(properties->queue_size / 2); 253 253 else 254 254 expected_queue_size = 255 - properties->queue_size + properties->metadata_queue_size; 255 + PAGE_ALIGN(properties->queue_size + properties->metadata_queue_size); 256 256 257 257 vm = drm_priv_to_vm(pdd->drm_priv); 258 258 err = amdgpu_bo_reserve(vm->root.bo, false); ··· 492 492 cu_num = props->simd_count / props->simd_per_cu / NUM_XCC(dev->gpu->xcc_mask); 493 493 wave_num = get_num_waves(props, gfxv, cu_num); 494 494 495 - wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props), PAGE_SIZE); 495 + wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props), 496 + AMDGPU_GPU_PAGE_SIZE); 496 497 ctl_stack_size = wave_num * CNTL_STACK_BYTES_PER_WAVE(gfxv) + 8; 497 498 ctl_stack_size = ALIGN(SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER + ctl_stack_size, 498 - PAGE_SIZE); 499 + AMDGPU_GPU_PAGE_SIZE); 499 500 500 501 if ((gfxv / 10000 * 10000) == 100000) { 501 502 /* HW design limits control stack size to 0x7000. ··· 508 507 509 508 props->ctl_stack_size = ctl_stack_size; 510 509 props->debug_memory_size = ALIGN(wave_num * DEBUGGER_BYTES_PER_WAVE, DEBUGGER_BYTES_ALIGN); 511 - props->cwsr_size = ctl_stack_size + wg_data_size; 510 + props->cwsr_size = ALIGN(ctl_stack_size + wg_data_size, PAGE_SIZE); 512 511 513 512 if (gfxv == 80002) /* GFX_VERSION_TONGA */ 514 513 props->eop_buffer_size = 0x8000;
+11 -6
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
··· 147 147 int edp_num; 148 148 uint32_t backlight = MAX_BACKLIGHT_LEVEL; 149 149 uint32_t user_level = MAX_BACKLIGHT_LEVEL; 150 + bool dchub_ref_freq_changed; 150 151 int current_dchub_ref_freq = 0; 151 152 152 153 if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) { ··· 361 360 dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; 362 361 dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0; 363 362 dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; 363 + 364 + /* sw and fw FAMS versions must match for support */ 364 365 dc->debug.fams2_config.bits.enable &= 365 - dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support 366 - if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box) 367 - || res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) { 366 + dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; 367 + dchub_ref_freq_changed = 368 + res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq; 369 + if ((!dc->debug.fams2_config.bits.enable || dchub_ref_freq_changed) && 370 + dc->res_pool->funcs->update_bw_bounding_box && 371 + dc->clk_mgr && dc->clk_mgr->bw_params) { 368 372 /* update bounding box if FAMS2 disabled, or if dchub clk has changed */ 369 - if (dc->clk_mgr) 370 - dc->res_pool->funcs->update_bw_bounding_box(dc, 371 - dc->clk_mgr->bw_params); 373 + dc->res_pool->funcs->update_bw_bounding_box(dc, 374 + dc->clk_mgr->bw_params); 372 375 } 373 376 } 374 377 }
+41
drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
··· 71 71 #include "dce/dce_dmcu.h" 72 72 #include "dce/dce_aux.h" 73 73 #include "dce/dce_i2c.h" 74 + #include "dio/dcn10/dcn10_dio.h" 74 75 75 76 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL 76 77 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f ··· 444 443 static const struct dcn_hubbub_mask hubbub_mask = { 445 444 HUBBUB_MASK_SH_LIST_DCN10(_MASK) 446 445 }; 446 + 447 + static const struct dcn_dio_registers dio_regs = { 448 + DIO_REG_LIST_DCN10() 449 + }; 450 + 451 + #define DIO_MASK_SH_LIST(mask_sh)\ 452 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 453 + 454 + static const struct dcn_dio_shift dio_shift = { 455 + DIO_MASK_SH_LIST(__SHIFT) 456 + }; 457 + 458 + static const struct dcn_dio_mask dio_mask = { 459 + DIO_MASK_SH_LIST(_MASK) 460 + }; 461 + 462 + static struct dio *dcn10_dio_create(struct dc_context *ctx) 463 + { 464 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 465 + 466 + if (!dio10) 467 + return NULL; 468 + 469 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 470 + 471 + return &dio10->base; 472 + } 447 473 448 474 static int map_transmitter_id_to_phy_instance( 449 475 enum transmitter transmitter) ··· 944 916 945 917 kfree(pool->base.hubbub); 946 918 pool->base.hubbub = NULL; 919 + 920 + if (pool->base.dio != NULL) { 921 + kfree(TO_DCN10_DIO(pool->base.dio)); 922 + pool->base.dio = NULL; 923 + } 947 924 948 925 for (i = 0; i < pool->base.pipe_count; i++) { 949 926 if (pool->base.opps[i] != NULL) ··· 1683 1650 if (pool->base.hubbub == NULL) { 1684 1651 BREAK_TO_DEBUGGER(); 1685 1652 dm_error("DC: failed to create hubbub!\n"); 1653 + goto fail; 1654 + } 1655 + 1656 + /* DIO */ 1657 + pool->base.dio = dcn10_dio_create(ctx); 1658 + if (pool->base.dio == NULL) { 1659 + BREAK_TO_DEBUGGER(); 1660 + dm_error("DC: failed to create dio!\n"); 1686 1661 goto fail; 1687 1662 } 1688 1663
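The DIO object follows DC's usual base/container pattern: dcn10_dio_create() allocates the container with kzalloc_obj() and hands back a pointer to the embedded base, and the destructor converts that base pointer back with TO_DCN10_DIO() before kfree(). The same create/teardown pair recurs in each DCN resource file below. A standalone sketch of the conversion (simplified types, using the classic container_of that TO_DCN10_DIO presumably mirrors):

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dio { int placeholder; };	/* base handed to callers */

struct dcn10_dio {
	struct dio base;
	/* regs, shifts, masks would live here */
};

static struct dio *dio_create(void)
{
	struct dcn10_dio *d = calloc(1, sizeof(*d));

	return d ? &d->base : NULL;	/* callers only see the base */
}

static void dio_destroy(struct dio *base)
{
	/* recover the container first, as TO_DCN10_DIO() does */
	free(container_of(base, struct dcn10_dio, base));
}

int main(void)
{
	struct dio *d = dio_create();

	if (d)
		dio_destroy(d);
	return 0;
}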
+42
drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
··· 82 82 #include "dce/dce_dmcu.h" 83 83 #include "dce/dce_aux.h" 84 84 #include "dce/dce_i2c.h" 85 + #include "dio/dcn10/dcn10_dio.h" 85 86 #include "vm_helper.h" 86 87 87 88 #include "link_enc_cfg.h" ··· 550 549 static const struct dcn_hubbub_mask hubbub_mask = { 551 550 HUBBUB_MASK_SH_LIST_DCN20(_MASK) 552 551 }; 552 + 553 + static const struct dcn_dio_registers dio_regs = { 554 + DIO_REG_LIST_DCN10() 555 + }; 556 + 557 + #define DIO_MASK_SH_LIST(mask_sh)\ 558 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 559 + 560 + static const struct dcn_dio_shift dio_shift = { 561 + DIO_MASK_SH_LIST(__SHIFT) 562 + }; 563 + 564 + static const struct dcn_dio_mask dio_mask = { 565 + DIO_MASK_SH_LIST(_MASK) 566 + }; 567 + 568 + static struct dio *dcn20_dio_create(struct dc_context *ctx) 569 + { 570 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 571 + 572 + if (!dio10) 573 + return NULL; 574 + 575 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 576 + 577 + return &dio10->base; 578 + } 553 579 554 580 #define vmid_regs(id)\ 555 581 [id] = {\ ··· 1132 1104 kfree(pool->base.hubbub); 1133 1105 pool->base.hubbub = NULL; 1134 1106 } 1107 + 1108 + if (pool->base.dio != NULL) { 1109 + kfree(TO_DCN10_DIO(pool->base.dio)); 1110 + pool->base.dio = NULL; 1111 + } 1112 + 1135 1113 for (i = 0; i < pool->base.pipe_count; i++) { 1136 1114 if (pool->base.dpps[i] != NULL) 1137 1115 dcn20_dpp_destroy(&pool->base.dpps[i]); ··· 2723 2689 if (pool->base.hubbub == NULL) { 2724 2690 BREAK_TO_DEBUGGER(); 2725 2691 dm_error("DC: failed to create hubbub!\n"); 2692 + goto create_fail; 2693 + } 2694 + 2695 + /* DIO */ 2696 + pool->base.dio = dcn20_dio_create(ctx); 2697 + if (pool->base.dio == NULL) { 2698 + BREAK_TO_DEBUGGER(); 2699 + dm_error("DC: failed to create dio!\n"); 2726 2700 goto create_fail; 2727 2701 } 2728 2702
+41
drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
··· 56 56 #include "dce/dce_aux.h" 57 57 #include "dce/dce_i2c.h" 58 58 #include "dcn10/dcn10_resource.h" 59 + #include "dio/dcn10/dcn10_dio.h" 59 60 60 61 #include "cyan_skillfish_ip_offset.h" 61 62 ··· 756 755 return &hubbub->base; 757 756 } 758 757 758 + static const struct dcn_dio_registers dio_regs = { 759 + DIO_REG_LIST_DCN10() 760 + }; 761 + 762 + #define DIO_MASK_SH_LIST(mask_sh)\ 763 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 764 + 765 + static const struct dcn_dio_shift dio_shift = { 766 + DIO_MASK_SH_LIST(__SHIFT) 767 + }; 768 + 769 + static const struct dcn_dio_mask dio_mask = { 770 + DIO_MASK_SH_LIST(_MASK) 771 + }; 772 + 773 + static struct dio *dcn201_dio_create(struct dc_context *ctx) 774 + { 775 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 776 + 777 + if (!dio10) 778 + return NULL; 779 + 780 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 781 + 782 + return &dio10->base; 783 + } 784 + 759 785 static struct timing_generator *dcn201_timing_generator_create( 760 786 struct dc_context *ctx, 761 787 uint32_t instance) ··· 956 928 if (pool->base.hubbub != NULL) { 957 929 kfree(pool->base.hubbub); 958 930 pool->base.hubbub = NULL; 931 + } 932 + 933 + if (pool->base.dio != NULL) { 934 + kfree(TO_DCN10_DIO(pool->base.dio)); 935 + pool->base.dio = NULL; 959 936 } 960 937 961 938 for (i = 0; i < pool->base.pipe_count; i++) { ··· 1306 1273 pool->base.hubbub = dcn201_hubbub_create(ctx); 1307 1274 if (pool->base.hubbub == NULL) { 1308 1275 dm_error("DC: failed to create hubbub!\n"); 1276 + goto create_fail; 1277 + } 1278 + 1279 + /* DIO */ 1280 + pool->base.dio = dcn201_dio_create(ctx); 1281 + if (pool->base.dio == NULL) { 1282 + BREAK_TO_DEBUGGER(); 1283 + dm_error("DC: failed to create dio!\n"); 1309 1284 goto create_fail; 1310 1285 } 1311 1286
+34
drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
··· 84 84 #include "dce/dce_dmcu.h" 85 85 #include "dce/dce_aux.h" 86 86 #include "dce/dce_i2c.h" 87 + #include "dio/dcn10/dcn10_dio.h" 87 88 #include "dcn21_resource.h" 88 89 #include "vm_helper.h" 89 90 #include "dcn20/dcn20_vmid.h" ··· 330 329 HUBBUB_MASK_SH_LIST_DCN21(_MASK) 331 330 }; 332 331 332 + static const struct dcn_dio_registers dio_regs = { 333 + DIO_REG_LIST_DCN10() 334 + }; 335 + 336 + static const struct dcn_dio_shift dio_shift = { 0 }; 337 + 338 + static const struct dcn_dio_mask dio_mask = { 0 }; 339 + 340 + static struct dio *dcn21_dio_create(struct dc_context *ctx) 341 + { 342 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 343 + 344 + if (!dio10) 345 + return NULL; 346 + 347 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 348 + 349 + return &dio10->base; 350 + } 333 351 334 352 #define vmid_regs(id)\ 335 353 [id] = {\ ··· 697 677 kfree(pool->base.hubbub); 698 678 pool->base.hubbub = NULL; 699 679 } 680 + 681 + if (pool->base.dio != NULL) { 682 + kfree(TO_DCN10_DIO(pool->base.dio)); 683 + pool->base.dio = NULL; 684 + } 685 + 700 686 for (i = 0; i < pool->base.pipe_count; i++) { 701 687 if (pool->base.dpps[i] != NULL) 702 688 dcn20_dpp_destroy(&pool->base.dpps[i]); ··· 1677 1651 if (pool->base.hubbub == NULL) { 1678 1652 BREAK_TO_DEBUGGER(); 1679 1653 dm_error("DC: failed to create hubbub!\n"); 1654 + goto create_fail; 1655 + } 1656 + 1657 + /* DIO */ 1658 + pool->base.dio = dcn21_dio_create(ctx); 1659 + if (pool->base.dio == NULL) { 1660 + BREAK_TO_DEBUGGER(); 1661 + dm_error("DC: failed to create dio!\n"); 1680 1662 goto create_fail; 1681 1663 } 1682 1664
+42
drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
··· 60 60 #include "dml/display_mode_vba.h" 61 61 #include "dcn30/dcn30_dccg.h" 62 62 #include "dcn10/dcn10_resource.h" 63 + #include "dio/dcn10/dcn10_dio.h" 63 64 #include "link_service.h" 64 65 #include "dce/dce_panel_cntl.h" 65 66 ··· 887 886 return &hubbub3->base; 888 887 } 889 888 889 + static const struct dcn_dio_registers dio_regs = { 890 + DIO_REG_LIST_DCN10() 891 + }; 892 + 893 + #define DIO_MASK_SH_LIST(mask_sh)\ 894 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 895 + 896 + static const struct dcn_dio_shift dio_shift = { 897 + DIO_MASK_SH_LIST(__SHIFT) 898 + }; 899 + 900 + static const struct dcn_dio_mask dio_mask = { 901 + DIO_MASK_SH_LIST(_MASK) 902 + }; 903 + 904 + static struct dio *dcn30_dio_create(struct dc_context *ctx) 905 + { 906 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 907 + 908 + if (!dio10) 909 + return NULL; 910 + 911 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 912 + 913 + return &dio10->base; 914 + } 915 + 890 916 static struct timing_generator *dcn30_timing_generator_create( 891 917 struct dc_context *ctx, 892 918 uint32_t instance) ··· 1123 1095 kfree(pool->base.hubbub); 1124 1096 pool->base.hubbub = NULL; 1125 1097 } 1098 + 1099 + if (pool->base.dio != NULL) { 1100 + kfree(TO_DCN10_DIO(pool->base.dio)); 1101 + pool->base.dio = NULL; 1102 + } 1103 + 1126 1104 for (i = 0; i < pool->base.pipe_count; i++) { 1127 1105 if (pool->base.dpps[i] != NULL) 1128 1106 dcn30_dpp_destroy(&pool->base.dpps[i]); ··· 2495 2461 if (pool->base.hubbub == NULL) { 2496 2462 BREAK_TO_DEBUGGER(); 2497 2463 dm_error("DC: failed to create hubbub!\n"); 2464 + goto create_fail; 2465 + } 2466 + 2467 + /* DIO */ 2468 + pool->base.dio = dcn30_dio_create(ctx); 2469 + if (pool->base.dio == NULL) { 2470 + BREAK_TO_DEBUGGER(); 2471 + dm_error("DC: failed to create dio!\n"); 2498 2472 goto create_fail; 2499 2473 } 2500 2474
+42
drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
··· 59 59 #include "dml/display_mode_vba.h" 60 60 #include "dcn301/dcn301_dccg.h" 61 61 #include "dcn10/dcn10_resource.h" 62 + #include "dio/dcn10/dcn10_dio.h" 62 63 #include "dcn30/dcn30_dio_stream_encoder.h" 63 64 #include "dcn301/dcn301_dio_link_encoder.h" 64 65 #include "dcn301/dcn301_panel_cntl.h" ··· 844 843 return &hubbub3->base; 845 844 } 846 845 846 + static const struct dcn_dio_registers dio_regs = { 847 + DIO_REG_LIST_DCN10() 848 + }; 849 + 850 + #define DIO_MASK_SH_LIST(mask_sh)\ 851 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 852 + 853 + static const struct dcn_dio_shift dio_shift = { 854 + DIO_MASK_SH_LIST(__SHIFT) 855 + }; 856 + 857 + static const struct dcn_dio_mask dio_mask = { 858 + DIO_MASK_SH_LIST(_MASK) 859 + }; 860 + 861 + static struct dio *dcn301_dio_create(struct dc_context *ctx) 862 + { 863 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 864 + 865 + if (!dio10) 866 + return NULL; 867 + 868 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 869 + 870 + return &dio10->base; 871 + } 872 + 847 873 static struct timing_generator *dcn301_timing_generator_create( 848 874 struct dc_context *ctx, uint32_t instance) 849 875 { ··· 1094 1066 kfree(pool->base.hubbub); 1095 1067 pool->base.hubbub = NULL; 1096 1068 } 1069 + 1070 + if (pool->base.dio != NULL) { 1071 + kfree(TO_DCN10_DIO(pool->base.dio)); 1072 + pool->base.dio = NULL; 1073 + } 1074 + 1097 1075 for (i = 0; i < pool->base.pipe_count; i++) { 1098 1076 if (pool->base.dpps[i] != NULL) 1099 1077 dcn301_dpp_destroy(&pool->base.dpps[i]); ··· 1613 1579 if (pool->base.hubbub == NULL) { 1614 1580 BREAK_TO_DEBUGGER(); 1615 1581 dm_error("DC: failed to create hubbub!\n"); 1582 + goto create_fail; 1583 + } 1584 + 1585 + /* DIO */ 1586 + pool->base.dio = dcn301_dio_create(ctx); 1587 + if (pool->base.dio == NULL) { 1588 + BREAK_TO_DEBUGGER(); 1589 + dm_error("DC: failed to create dio!\n"); 1616 1590 goto create_fail; 1617 1591 } 1618 1592
+41
drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
··· 46 46 #include "dml/dcn30/dcn30_fpu.h" 47 47 48 48 #include "dcn10/dcn10_resource.h" 49 + #include "dio/dcn10/dcn10_dio.h" 49 50 50 51 #include "link_service.h" 51 52 ··· 253 252 static const struct dcn20_vmid_mask vmid_masks = { 254 253 DCN20_VMID_MASK_SH_LIST(_MASK) 255 254 }; 255 + 256 + static const struct dcn_dio_registers dio_regs = { 257 + DIO_REG_LIST_DCN10() 258 + }; 259 + 260 + #define DIO_MASK_SH_LIST(mask_sh)\ 261 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 262 + 263 + static const struct dcn_dio_shift dio_shift = { 264 + DIO_MASK_SH_LIST(__SHIFT) 265 + }; 266 + 267 + static const struct dcn_dio_mask dio_mask = { 268 + DIO_MASK_SH_LIST(_MASK) 269 + }; 270 + 271 + static struct dio *dcn302_dio_create(struct dc_context *ctx) 272 + { 273 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 274 + 275 + if (!dio10) 276 + return NULL; 277 + 278 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 279 + 280 + return &dio10->base; 281 + } 256 282 257 283 static struct hubbub *dcn302_hubbub_create(struct dc_context *ctx) 258 284 { ··· 1050 1022 pool->hubbub = NULL; 1051 1023 } 1052 1024 1025 + if (pool->dio != NULL) { 1026 + kfree(TO_DCN10_DIO(pool->dio)); 1027 + pool->dio = NULL; 1028 + } 1029 + 1053 1030 for (i = 0; i < pool->pipe_count; i++) { 1054 1031 if (pool->dpps[i] != NULL) { 1055 1032 kfree(TO_DCN20_DPP(pool->dpps[i])); ··· 1402 1369 if (pool->hubbub == NULL) { 1403 1370 BREAK_TO_DEBUGGER(); 1404 1371 dm_error("DC: failed to create hubbub!\n"); 1372 + goto create_fail; 1373 + } 1374 + 1375 + /* DIO */ 1376 + pool->dio = dcn302_dio_create(ctx); 1377 + if (pool->dio == NULL) { 1378 + BREAK_TO_DEBUGGER(); 1379 + dm_error("DC: failed to create dio!\n"); 1405 1380 goto create_fail; 1406 1381 } 1407 1382
+41
drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
··· 46 46 #include "dml/dcn30/dcn30_fpu.h" 47 47 48 48 #include "dcn10/dcn10_resource.h" 49 + #include "dio/dcn10/dcn10_dio.h" 49 50 50 51 #include "link_service.h" 51 52 ··· 249 248 static const struct dcn20_vmid_mask vmid_masks = { 250 249 DCN20_VMID_MASK_SH_LIST(_MASK) 251 250 }; 251 + 252 + static const struct dcn_dio_registers dio_regs = { 253 + DIO_REG_LIST_DCN10() 254 + }; 255 + 256 + #define DIO_MASK_SH_LIST(mask_sh)\ 257 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 258 + 259 + static const struct dcn_dio_shift dio_shift = { 260 + DIO_MASK_SH_LIST(__SHIFT) 261 + }; 262 + 263 + static const struct dcn_dio_mask dio_mask = { 264 + DIO_MASK_SH_LIST(_MASK) 265 + }; 266 + 267 + static struct dio *dcn303_dio_create(struct dc_context *ctx) 268 + { 269 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 270 + 271 + if (!dio10) 272 + return NULL; 273 + 274 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 275 + 276 + return &dio10->base; 277 + } 252 278 253 279 static struct hubbub *dcn303_hubbub_create(struct dc_context *ctx) 254 280 { ··· 994 966 pool->hubbub = NULL; 995 967 } 996 968 969 + if (pool->dio != NULL) { 970 + kfree(TO_DCN10_DIO(pool->dio)); 971 + pool->dio = NULL; 972 + } 973 + 997 974 for (i = 0; i < pool->pipe_count; i++) { 998 975 if (pool->dpps[i] != NULL) { 999 976 kfree(TO_DCN20_DPP(pool->dpps[i])); ··· 1334 1301 if (pool->hubbub == NULL) { 1335 1302 BREAK_TO_DEBUGGER(); 1336 1303 dm_error("DC: failed to create hubbub!\n"); 1304 + goto create_fail; 1305 + } 1306 + 1307 + /* DIO */ 1308 + pool->dio = dcn303_dio_create(ctx); 1309 + if (pool->dio == NULL) { 1310 + BREAK_TO_DEBUGGER(); 1311 + dm_error("DC: failed to create dio!\n"); 1337 1312 goto create_fail; 1338 1313 } 1339 1314
+40
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
··· 64 64 #include "dce/dce_audio.h" 65 65 #include "dce/dce_hwseq.h" 66 66 #include "clk_mgr.h" 67 + #include "dio/dcn10/dcn10_dio.h" 67 68 #include "dio/virtual/virtual_stream_encoder.h" 68 69 #include "dce110/dce110_resource.h" 69 70 #include "dml/display_mode_vba.h" ··· 811 810 DCN20_VMID_MASK_SH_LIST(_MASK) 812 811 }; 813 812 813 + static const struct dcn_dio_registers dio_regs = { 814 + DIO_REG_LIST_DCN10() 815 + }; 816 + 817 + #define DIO_MASK_SH_LIST(mask_sh)\ 818 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 819 + 820 + static const struct dcn_dio_shift dio_shift = { 821 + DIO_MASK_SH_LIST(__SHIFT) 822 + }; 823 + 824 + static const struct dcn_dio_mask dio_mask = { 825 + DIO_MASK_SH_LIST(_MASK) 826 + }; 827 + 814 828 static const struct resource_caps res_cap_dcn31 = { 815 829 .num_timing_generator = 4, 816 830 .num_opp = 4, ··· 1035 1019 num_rmu); 1036 1020 1037 1021 return &mpc30->base; 1022 + } 1023 + 1024 + static struct dio *dcn31_dio_create(struct dc_context *ctx) 1025 + { 1026 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 1027 + 1028 + if (!dio10) 1029 + return NULL; 1030 + 1031 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 1032 + 1033 + return &dio10->base; 1038 1034 } 1039 1035 1040 1036 static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) ··· 1423 1395 if (pool->base.hubbub != NULL) { 1424 1396 kfree(pool->base.hubbub); 1425 1397 pool->base.hubbub = NULL; 1398 + } 1399 + if (pool->base.dio != NULL) { 1400 + kfree(TO_DCN10_DIO(pool->base.dio)); 1401 + pool->base.dio = NULL; 1426 1402 } 1427 1403 for (i = 0; i < pool->base.pipe_count; i++) { 1428 1404 if (pool->base.dpps[i] != NULL) ··· 2092 2060 if (pool->base.hubbub == NULL) { 2093 2061 BREAK_TO_DEBUGGER(); 2094 2062 dm_error("DC: failed to create hubbub!\n"); 2063 + goto create_fail; 2064 + } 2065 + 2066 + /* DIO */ 2067 + pool->base.dio = dcn31_dio_create(ctx); 2068 + if (pool->base.dio == NULL) { 2069 + BREAK_TO_DEBUGGER(); 2070 + dm_error("DC: failed to create dio!\n"); 2095 2071 goto create_fail; 2096 2072 } 2097 2073
+40
drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
··· 66 66 #include "dce/dce_audio.h" 67 67 #include "dce/dce_hwseq.h" 68 68 #include "clk_mgr.h" 69 + #include "dio/dcn10/dcn10_dio.h" 69 70 #include "dio/virtual/virtual_stream_encoder.h" 70 71 #include "dce110/dce110_resource.h" 71 72 #include "dml/display_mode_vba.h" ··· 823 822 DCN20_VMID_MASK_SH_LIST(_MASK) 824 823 }; 825 824 825 + static const struct dcn_dio_registers dio_regs = { 826 + DIO_REG_LIST_DCN10() 827 + }; 828 + 829 + #define DIO_MASK_SH_LIST(mask_sh)\ 830 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 831 + 832 + static const struct dcn_dio_shift dio_shift = { 833 + DIO_MASK_SH_LIST(__SHIFT) 834 + }; 835 + 836 + static const struct dcn_dio_mask dio_mask = { 837 + DIO_MASK_SH_LIST(_MASK) 838 + }; 839 + 826 840 static const struct resource_caps res_cap_dcn314 = { 827 841 .num_timing_generator = 4, 828 842 .num_opp = 4, ··· 1093 1077 num_rmu); 1094 1078 1095 1079 return &mpc30->base; 1080 + } 1081 + 1082 + static struct dio *dcn314_dio_create(struct dc_context *ctx) 1083 + { 1084 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 1085 + 1086 + if (!dio10) 1087 + return NULL; 1088 + 1089 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 1090 + 1091 + return &dio10->base; 1096 1092 } 1097 1093 1098 1094 static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) ··· 1482 1454 if (pool->base.hubbub != NULL) { 1483 1455 kfree(pool->base.hubbub); 1484 1456 pool->base.hubbub = NULL; 1457 + } 1458 + if (pool->base.dio != NULL) { 1459 + kfree(TO_DCN10_DIO(pool->base.dio)); 1460 + pool->base.dio = NULL; 1485 1461 } 1486 1462 for (i = 0; i < pool->base.pipe_count; i++) { 1487 1463 if (pool->base.dpps[i] != NULL) ··· 2016 1984 if (pool->base.hubbub == NULL) { 2017 1985 BREAK_TO_DEBUGGER(); 2018 1986 dm_error("DC: failed to create hubbub!\n"); 1987 + goto create_fail; 1988 + } 1989 + 1990 + /* DIO */ 1991 + pool->base.dio = dcn314_dio_create(ctx); 1992 + if (pool->base.dio == NULL) { 1993 + BREAK_TO_DEBUGGER(); 1994 + dm_error("DC: failed to create dio!\n"); 2019 1995 goto create_fail; 2020 1996 } 2021 1997
+40
drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
··· 63 63 #include "dce/dce_audio.h" 64 64 #include "dce/dce_hwseq.h" 65 65 #include "clk_mgr.h" 66 + #include "dio/dcn10/dcn10_dio.h" 66 67 #include "dio/virtual/virtual_stream_encoder.h" 67 68 #include "dce110/dce110_resource.h" 68 69 #include "dml/display_mode_vba.h" ··· 810 809 DCN20_VMID_MASK_SH_LIST(_MASK) 811 810 }; 812 811 812 + static const struct dcn_dio_registers dio_regs = { 813 + DIO_REG_LIST_DCN10() 814 + }; 815 + 816 + #define DIO_MASK_SH_LIST(mask_sh)\ 817 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 818 + 819 + static const struct dcn_dio_shift dio_shift = { 820 + DIO_MASK_SH_LIST(__SHIFT) 821 + }; 822 + 823 + static const struct dcn_dio_mask dio_mask = { 824 + DIO_MASK_SH_LIST(_MASK) 825 + }; 826 + 813 827 static const struct resource_caps res_cap_dcn31 = { 814 828 .num_timing_generator = 4, 815 829 .num_opp = 4, ··· 1034 1018 num_rmu); 1035 1019 1036 1020 return &mpc30->base; 1021 + } 1022 + 1023 + static struct dio *dcn315_dio_create(struct dc_context *ctx) 1024 + { 1025 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 1026 + 1027 + if (!dio10) 1028 + return NULL; 1029 + 1030 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 1031 + 1032 + return &dio10->base; 1037 1033 } 1038 1034 1039 1035 static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) ··· 1424 1396 if (pool->base.hubbub != NULL) { 1425 1397 kfree(pool->base.hubbub); 1426 1398 pool->base.hubbub = NULL; 1399 + } 1400 + if (pool->base.dio != NULL) { 1401 + kfree(TO_DCN10_DIO(pool->base.dio)); 1402 + pool->base.dio = NULL; 1427 1403 } 1428 1404 for (i = 0; i < pool->base.pipe_count; i++) { 1429 1405 if (pool->base.dpps[i] != NULL) ··· 2041 2009 if (pool->base.hubbub == NULL) { 2042 2010 BREAK_TO_DEBUGGER(); 2043 2011 dm_error("DC: failed to create hubbub!\n"); 2012 + goto create_fail; 2013 + } 2014 + 2015 + /* DIO */ 2016 + pool->base.dio = dcn315_dio_create(ctx); 2017 + if (pool->base.dio == NULL) { 2018 + BREAK_TO_DEBUGGER(); 2019 + dm_error("DC: failed to create dio!\n"); 2044 2020 goto create_fail; 2045 2021 } 2046 2022
+40
drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
··· 63 63 #include "dce/dce_audio.h" 64 64 #include "dce/dce_hwseq.h" 65 65 #include "clk_mgr.h" 66 + #include "dio/dcn10/dcn10_dio.h" 66 67 #include "dio/virtual/virtual_stream_encoder.h" 67 68 #include "dce110/dce110_resource.h" 68 69 #include "dml/display_mode_vba.h" ··· 805 804 DCN20_VMID_MASK_SH_LIST(_MASK) 806 805 }; 807 806 807 + static const struct dcn_dio_registers dio_regs = { 808 + DIO_REG_LIST_DCN10() 809 + }; 810 + 811 + #define DIO_MASK_SH_LIST(mask_sh)\ 812 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 813 + 814 + static const struct dcn_dio_shift dio_shift = { 815 + DIO_MASK_SH_LIST(__SHIFT) 816 + }; 817 + 818 + static const struct dcn_dio_mask dio_mask = { 819 + DIO_MASK_SH_LIST(_MASK) 820 + }; 821 + 808 822 static const struct resource_caps res_cap_dcn31 = { 809 823 .num_timing_generator = 4, 810 824 .num_opp = 4, ··· 1027 1011 num_rmu); 1028 1012 1029 1013 return &mpc30->base; 1014 + } 1015 + 1016 + static struct dio *dcn316_dio_create(struct dc_context *ctx) 1017 + { 1018 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 1019 + 1020 + if (!dio10) 1021 + return NULL; 1022 + 1023 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 1024 + 1025 + return &dio10->base; 1030 1026 } 1031 1027 1032 1028 static struct hubbub *dcn31_hubbub_create(struct dc_context *ctx) ··· 1419 1391 if (pool->base.hubbub != NULL) { 1420 1392 kfree(pool->base.hubbub); 1421 1393 pool->base.hubbub = NULL; 1394 + } 1395 + if (pool->base.dio != NULL) { 1396 + kfree(TO_DCN10_DIO(pool->base.dio)); 1397 + pool->base.dio = NULL; 1422 1398 } 1423 1399 for (i = 0; i < pool->base.pipe_count; i++) { 1424 1400 if (pool->base.dpps[i] != NULL) ··· 1916 1884 if (pool->base.hubbub == NULL) { 1917 1885 BREAK_TO_DEBUGGER(); 1918 1886 dm_error("DC: failed to create hubbub!\n"); 1887 + goto create_fail; 1888 + } 1889 + 1890 + /* DIO */ 1891 + pool->base.dio = dcn316_dio_create(ctx); 1892 + if (pool->base.dio == NULL) { 1893 + BREAK_TO_DEBUGGER(); 1894 + dm_error("DC: failed to create dio!\n"); 1919 1895 goto create_fail; 1920 1896 } 1921 1897
+43
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 66 66 #include "dce/dce_hwseq.h" 67 67 #include "clk_mgr.h" 68 68 #include "dio/virtual/virtual_stream_encoder.h" 69 + #include "dio/dcn10/dcn10_dio.h" 69 70 #include "dml/display_mode_vba.h" 70 71 #include "dcn32/dcn32_dccg.h" 71 72 #include "dcn10/dcn10_resource.h" ··· 644 643 DCN20_VMID_MASK_SH_LIST(_MASK) 645 644 }; 646 645 646 + static struct dcn_dio_registers dio_regs; 647 + 648 + #define DIO_MASK_SH_LIST(mask_sh)\ 649 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 650 + 651 + static const struct dcn_dio_shift dio_shift = { 652 + DIO_MASK_SH_LIST(__SHIFT) 653 + }; 654 + 655 + static const struct dcn_dio_mask dio_mask = { 656 + DIO_MASK_SH_LIST(_MASK) 657 + }; 658 + 647 659 static const struct resource_caps res_cap_dcn32 = { 648 660 .num_timing_generator = 4, 649 661 .num_opp = 4, ··· 845 831 kfree(clk_src); 846 832 BREAK_TO_DEBUGGER(); 847 833 return NULL; 834 + } 835 + 836 + static struct dio *dcn32_dio_create(struct dc_context *ctx) 837 + { 838 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 839 + 840 + if (!dio10) 841 + return NULL; 842 + 843 + #undef REG_STRUCT 844 + #define REG_STRUCT dio_regs 845 + DIO_REG_LIST_DCN10(); 846 + 847 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 848 + 849 + return &dio10->base; 848 850 } 849 851 850 852 static struct hubbub *dcn32_hubbub_create(struct dc_context *ctx) ··· 1523 1493 1524 1494 if (pool->base.dccg != NULL) 1525 1495 dcn_dccg_destroy(&pool->base.dccg); 1496 + 1497 + if (pool->base.dio != NULL) { 1498 + kfree(TO_DCN10_DIO(pool->base.dio)); 1499 + pool->base.dio = NULL; 1500 + } 1526 1501 1527 1502 if (pool->base.oem_device != NULL) { 1528 1503 struct dc *dc = pool->base.oem_device->ctx->dc; ··· 2405 2370 if (pool->base.hubbub == NULL) { 2406 2371 BREAK_TO_DEBUGGER(); 2407 2372 dm_error("DC: failed to create hubbub!\n"); 2373 + goto create_fail; 2374 + } 2375 + 2376 + /* DIO */ 2377 + pool->base.dio = dcn32_dio_create(ctx); 2378 + if (pool->base.dio == NULL) { 2379 + BREAK_TO_DEBUGGER(); 2380 + dm_error("DC: failed to create dio!\n"); 2408 2381 goto create_fail; 2409 2382 } 2410 2383
+43
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
··· 69 69 #include "dce/dce_hwseq.h" 70 70 #include "clk_mgr.h" 71 71 #include "dio/virtual/virtual_stream_encoder.h" 72 + #include "dio/dcn10/dcn10_dio.h" 72 73 #include "dml/display_mode_vba.h" 73 74 #include "dcn32/dcn32_dccg.h" 74 75 #include "dcn10/dcn10_resource.h" ··· 640 639 DCN20_VMID_MASK_SH_LIST(_MASK) 641 640 }; 642 641 642 + static struct dcn_dio_registers dio_regs; 643 + 644 + #define DIO_MASK_SH_LIST(mask_sh)\ 645 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 646 + 647 + static const struct dcn_dio_shift dio_shift = { 648 + DIO_MASK_SH_LIST(__SHIFT) 649 + }; 650 + 651 + static const struct dcn_dio_mask dio_mask = { 652 + DIO_MASK_SH_LIST(_MASK) 653 + }; 654 + 643 655 static const struct resource_caps res_cap_dcn321 = { 644 656 .num_timing_generator = 4, 645 657 .num_opp = 4, ··· 839 825 kfree(clk_src); 840 826 BREAK_TO_DEBUGGER(); 841 827 return NULL; 828 + } 829 + 830 + static struct dio *dcn321_dio_create(struct dc_context *ctx) 831 + { 832 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 833 + 834 + if (!dio10) 835 + return NULL; 836 + 837 + #undef REG_STRUCT 838 + #define REG_STRUCT dio_regs 839 + DIO_REG_LIST_DCN10(); 840 + 841 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 842 + 843 + return &dio10->base; 842 844 } 843 845 844 846 static struct hubbub *dcn321_hubbub_create(struct dc_context *ctx) ··· 1504 1474 if (pool->base.dccg != NULL) 1505 1475 dcn_dccg_destroy(&pool->base.dccg); 1506 1476 1477 + if (pool->base.dio != NULL) { 1478 + kfree(TO_DCN10_DIO(pool->base.dio)); 1479 + pool->base.dio = NULL; 1480 + } 1481 + 1507 1482 if (pool->base.oem_device != NULL) { 1508 1483 struct dc *dc = pool->base.oem_device->ctx->dc; 1509 1484 ··· 1904 1869 if (pool->base.hubbub == NULL) { 1905 1870 BREAK_TO_DEBUGGER(); 1906 1871 dm_error("DC: failed to create hubbub!\n"); 1872 + goto create_fail; 1873 + } 1874 + 1875 + /* DIO */ 1876 + pool->base.dio = dcn321_dio_create(ctx); 1877 + if (pool->base.dio == NULL) { 1878 + BREAK_TO_DEBUGGER(); 1879 + dm_error("DC: failed to create dio!\n"); 1907 1880 goto create_fail; 1908 1881 } 1909 1882
+43
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
··· 71 71 #include "dce/dce_hwseq.h" 72 72 #include "clk_mgr.h" 73 73 #include "dio/virtual/virtual_stream_encoder.h" 74 + #include "dio/dcn10/dcn10_dio.h" 74 75 #include "dce110/dce110_resource.h" 75 76 #include "dml/display_mode_vba.h" 76 77 #include "dcn35/dcn35_dccg.h" ··· 665 664 DCN20_VMID_MASK_SH_LIST(_MASK) 666 665 }; 667 666 667 + static struct dcn_dio_registers dio_regs; 668 + 669 + #define DIO_MASK_SH_LIST(mask_sh)\ 670 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 671 + 672 + static const struct dcn_dio_shift dio_shift = { 673 + DIO_MASK_SH_LIST(__SHIFT) 674 + }; 675 + 676 + static const struct dcn_dio_mask dio_mask = { 677 + DIO_MASK_SH_LIST(_MASK) 678 + }; 679 + 668 680 static const struct resource_caps res_cap_dcn35 = { 669 681 .num_timing_generator = 4, 670 682 .num_opp = 4, ··· 985 971 num_rmu); 986 972 987 973 return &mpc30->base; 974 + } 975 + 976 + static struct dio *dcn35_dio_create(struct dc_context *ctx) 977 + { 978 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 979 + 980 + if (!dio10) 981 + return NULL; 982 + 983 + #undef REG_STRUCT 984 + #define REG_STRUCT dio_regs 985 + DIO_REG_LIST_DCN10(); 986 + 987 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 988 + 989 + return &dio10->base; 988 990 } 989 991 990 992 static struct hubbub *dcn35_hubbub_create(struct dc_context *ctx) ··· 1593 1563 1594 1564 if (pool->base.dccg != NULL) 1595 1565 dcn_dccg_destroy(&pool->base.dccg); 1566 + 1567 + if (pool->base.dio != NULL) { 1568 + kfree(TO_DCN10_DIO(pool->base.dio)); 1569 + pool->base.dio = NULL; 1570 + } 1596 1571 } 1597 1572 1598 1573 static struct hubp *dcn35_hubp_create( ··· 2065 2030 if (pool->base.hubbub == NULL) { 2066 2031 BREAK_TO_DEBUGGER(); 2067 2032 dm_error("DC: failed to create hubbub!\n"); 2033 + goto create_fail; 2034 + } 2035 + 2036 + /* DIO */ 2037 + pool->base.dio = dcn35_dio_create(ctx); 2038 + if (pool->base.dio == NULL) { 2039 + BREAK_TO_DEBUGGER(); 2040 + dm_error("DC: failed to create dio!\n"); 2068 2041 goto create_fail; 2069 2042 } 2070 2043
+43
drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
··· 50 50 #include "dce/dce_hwseq.h" 51 51 #include "clk_mgr.h" 52 52 #include "dio/virtual/virtual_stream_encoder.h" 53 + #include "dio/dcn10/dcn10_dio.h" 53 54 #include "dce110/dce110_resource.h" 54 55 #include "dml/display_mode_vba.h" 55 56 #include "dcn35/dcn35_dccg.h" ··· 645 644 DCN20_VMID_MASK_SH_LIST(_MASK) 646 645 }; 647 646 647 + static struct dcn_dio_registers dio_regs; 648 + 649 + #define DIO_MASK_SH_LIST(mask_sh)\ 650 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 651 + 652 + static const struct dcn_dio_shift dio_shift = { 653 + DIO_MASK_SH_LIST(__SHIFT) 654 + }; 655 + 656 + static const struct dcn_dio_mask dio_mask = { 657 + DIO_MASK_SH_LIST(_MASK) 658 + }; 659 + 648 660 static const struct resource_caps res_cap_dcn351 = { 649 661 .num_timing_generator = 4, 650 662 .num_opp = 4, ··· 965 951 num_rmu); 966 952 967 953 return &mpc30->base; 954 + } 955 + 956 + static struct dio *dcn351_dio_create(struct dc_context *ctx) 957 + { 958 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 959 + 960 + if (!dio10) 961 + return NULL; 962 + 963 + #undef REG_STRUCT 964 + #define REG_STRUCT dio_regs 965 + DIO_REG_LIST_DCN10(); 966 + 967 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 968 + 969 + return &dio10->base; 968 970 } 969 971 970 972 static struct hubbub *dcn35_hubbub_create(struct dc_context *ctx) ··· 1573 1543 1574 1544 if (pool->base.dccg != NULL) 1575 1545 dcn_dccg_destroy(&pool->base.dccg); 1546 + 1547 + if (pool->base.dio != NULL) { 1548 + kfree(TO_DCN10_DIO(pool->base.dio)); 1549 + pool->base.dio = NULL; 1550 + } 1576 1551 } 1577 1552 1578 1553 static struct hubp *dcn35_hubp_create( ··· 2037 2002 if (pool->base.hubbub == NULL) { 2038 2003 BREAK_TO_DEBUGGER(); 2039 2004 dm_error("DC: failed to create hubbub!\n"); 2005 + goto create_fail; 2006 + } 2007 + 2008 + /* DIO */ 2009 + pool->base.dio = dcn351_dio_create(ctx); 2010 + if (pool->base.dio == NULL) { 2011 + BREAK_TO_DEBUGGER(); 2012 + dm_error("DC: failed to create dio!\n"); 2040 2013 goto create_fail; 2041 2014 } 2042 2015
+43
drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
··· 50 50 #include "dce/dce_hwseq.h" 51 51 #include "clk_mgr.h" 52 52 #include "dio/virtual/virtual_stream_encoder.h" 53 + #include "dio/dcn10/dcn10_dio.h" 53 54 #include "dce110/dce110_resource.h" 54 55 #include "dml/display_mode_vba.h" 55 56 #include "dcn35/dcn35_dccg.h" ··· 652 651 DCN20_VMID_MASK_SH_LIST(_MASK) 653 652 }; 654 653 654 + static struct dcn_dio_registers dio_regs; 655 + 656 + #define DIO_MASK_SH_LIST(mask_sh)\ 657 + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) 658 + 659 + static const struct dcn_dio_shift dio_shift = { 660 + DIO_MASK_SH_LIST(__SHIFT) 661 + }; 662 + 663 + static const struct dcn_dio_mask dio_mask = { 664 + DIO_MASK_SH_LIST(_MASK) 665 + }; 666 + 655 667 static const struct resource_caps res_cap_dcn36 = { 656 668 .num_timing_generator = 4, 657 669 .num_opp = 4, ··· 972 958 num_rmu); 973 959 974 960 return &mpc30->base; 961 + } 962 + 963 + static struct dio *dcn36_dio_create(struct dc_context *ctx) 964 + { 965 + struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio); 966 + 967 + if (!dio10) 968 + return NULL; 969 + 970 + #undef REG_STRUCT 971 + #define REG_STRUCT dio_regs 972 + DIO_REG_LIST_DCN10(); 973 + 974 + dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask); 975 + 976 + return &dio10->base; 975 977 } 976 978 977 979 static struct hubbub *dcn35_hubbub_create(struct dc_context *ctx) ··· 1580 1550 1581 1551 if (pool->base.dccg != NULL) 1582 1552 dcn_dccg_destroy(&pool->base.dccg); 1553 + 1554 + if (pool->base.dio != NULL) { 1555 + kfree(TO_DCN10_DIO(pool->base.dio)); 1556 + pool->base.dio = NULL; 1557 + } 1583 1558 } 1584 1559 1585 1560 static struct hubp *dcn35_hubp_create( ··· 2044 2009 if (pool->base.hubbub == NULL) { 2045 2010 BREAK_TO_DEBUGGER(); 2046 2011 dm_error("DC: failed to create hubbub!\n"); 2012 + goto create_fail; 2013 + } 2014 + 2015 + /* DIO */ 2016 + pool->base.dio = dcn36_dio_create(ctx); 2017 + if (pool->base.dio == NULL) { 2018 + BREAK_TO_DEBUGGER(); 2019 + dm_error("DC: failed to create dio!\n"); 2047 2020 goto create_fail; 2048 2021 } 2049 2022
-1
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
··· 262 262 "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n", 263 263 smu->smc_driver_if_version, if_version, 264 264 smu_program, smu_version, smu_major, smu_minor, smu_debug); 265 - dev_info(smu->adev->dev, "SMU driver if version not matched\n"); 266 265 } 267 266 268 267 return ret;
-1
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
··· 101 101 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n", 102 102 smu->smc_driver_if_version, if_version, 103 103 smu_program, smu_version, smu_major, smu_minor, smu_debug); 104 - dev_info(smu->adev->dev, "SMU driver if version not matched\n"); 105 104 } 106 105 107 106 return ret;
-1
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
··· 284 284 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n", 285 285 smu->smc_driver_if_version, if_version, 286 286 smu_program, smu_version, smu_major, smu_minor, smu_debug); 287 - dev_info(adev->dev, "SMU driver if version not matched\n"); 288 287 } 289 288 290 289 return ret;
+1 -1
drivers/gpu/drm/ast/ast_dp501.c
··· 436 436 /* Finally, clear bits [17:16] of SCU2c */ 437 437 data = ast_read32(ast, 0x1202c); 438 438 data &= 0xfffcffff; 439 - ast_write32(ast, 0, data); 439 + ast_write32(ast, 0x1202c, data); 440 440 441 441 /* Disable DVO */ 442 442 ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x00);
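The ast_dp501.c fix above is a classic read-modify-write slip: the value was read from 0x1202c, masked, and then written back to offset 0 instead of the same register. A helper that takes the offset exactly once makes that class of typo impossible to write. This is an illustrative userspace sketch, not ast driver code; ast_clear_bits32() and the mmio array are made up for the example.

#include <stdint.h>

/* Stand-in for the device's MMIO window. */
static volatile uint32_t mmio[0x20000 / 4];

/*
 * Read-modify-write through a single offset parameter: the read and the
 * write-back cannot silently target different registers.
 */
static void ast_clear_bits32(uint32_t off, uint32_t clear_mask)
{
	uint32_t data = mmio[off / 4];		/* read */

	data &= ~clear_mask;			/* modify */
	mmio[off / 4] = data;			/* write back, same register */
}

/* Equivalent of the fixed hunk: clear bits [17:16] of SCU2c. */
void clear_scu2c_bits(void)
{
	ast_clear_bits32(0x1202c, 0x3u << 16);
}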
+11 -5
drivers/gpu/drm/drm_bridge.c
··· 1569 1569 static void drm_bridge_debugfs_show_bridge(struct drm_printer *p, 1570 1570 struct drm_bridge *bridge, 1571 1571 unsigned int idx, 1572 - bool lingering) 1572 + bool lingering, 1573 + bool scoped) 1573 1574 { 1575 + unsigned int refcount = kref_read(&bridge->refcount); 1576 + 1577 + if (scoped) 1578 + refcount--; 1579 + 1574 1580 drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs); 1575 1581 1576 - drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount), 1582 + drm_printf(p, "\trefcount: %u%s\n", refcount, 1577 1583 lingering ? " [lingering]" : ""); 1578 1584 1579 1585 drm_printf(p, "\ttype: [%d] %s\n", ··· 1613 1607 mutex_lock(&bridge_lock); 1614 1608 1615 1609 list_for_each_entry(bridge, &bridge_list, list) 1616 - drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false); 1610 + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false, false); 1617 1611 1618 1612 list_for_each_entry(bridge, &bridge_lingering_list, list) 1619 - drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true); 1613 + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true, false); 1620 1614 1621 1615 mutex_unlock(&bridge_lock); 1622 1616 ··· 1631 1625 unsigned int idx = 0; 1632 1626 1633 1627 drm_for_each_bridge_in_chain_scoped(encoder, bridge) 1634 - drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false); 1628 + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false, true); 1635 1629 1636 1630 return 0; 1637 1631 }
+1 -4
drivers/gpu/drm/drm_file.c
··· 233 233 void drm_file_free(struct drm_file *file) 234 234 { 235 235 struct drm_device *dev; 236 - int idx; 237 236 238 237 if (!file) 239 238 return; ··· 249 250 250 251 drm_events_release(file); 251 252 252 - if (drm_core_check_feature(dev, DRIVER_MODESET) && 253 - drm_dev_enter(dev, &idx)) { 253 + if (drm_core_check_feature(dev, DRIVER_MODESET)) { 254 254 drm_fb_release(file); 255 255 drm_property_destroy_user_blobs(dev, file); 256 - drm_dev_exit(idx); 257 256 } 258 257 259 258 if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+2
drivers/gpu/drm/drm_ioc32.c
··· 28 28 * IN THE SOFTWARE. 29 29 */ 30 30 #include <linux/compat.h> 31 + #include <linux/nospec.h> 31 32 #include <linux/ratelimit.h> 32 33 #include <linux/export.h> 33 34 ··· 375 374 if (nr >= ARRAY_SIZE(drm_compat_ioctls)) 376 375 return drm_ioctl(filp, cmd, arg); 377 376 377 + nr = array_index_nospec(nr, ARRAY_SIZE(drm_compat_ioctls)); 378 378 fn = drm_compat_ioctls[nr].fn; 379 379 if (!fn) 380 380 return drm_ioctl(filp, cmd, arg);
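For context on the drm_ioc32.c change: the bounds check alone does not stop the CPU from speculatively indexing drm_compat_ioctls[] with an out-of-range nr (Spectre v1), so the index is clamped after the check with array_index_nospec() from <linux/nospec.h>. The sketch below shows the idea with a deliberately simplified mask; the kernel helper generates its mask with architecture-aware, branch-free arithmetic rather than this illustrative comparison.

#include <stddef.h>
#include <stdio.h>

/*
 * Simplified model of index clamping: yield the index when it is in
 * range and 0 otherwise, via a mask instead of a branch the CPU could
 * speculate past. Illustrative only -- not the kernel's implementation.
 */
static size_t index_nospec(size_t index, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(index < size);

	return index & mask;
}

int main(void)
{
	int table[4] = { 10, 20, 30, 40 };
	size_t nr = 2;

	if (nr >= sizeof(table) / sizeof(table[0]))
		return 1;

	nr = index_nospec(nr, sizeof(table) / sizeof(table[0]));
	printf("%d\n", table[nr]);
	return 0;
}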
+3 -6
drivers/gpu/drm/drm_mode_config.c
··· 577 577 */ 578 578 WARN_ON(!list_empty(&dev->mode_config.fb_list)); 579 579 list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { 580 - if (list_empty(&fb->filp_head) || drm_framebuffer_read_refcount(fb) > 1) { 581 - struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]"); 580 + struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]"); 582 581 583 - drm_printf(&p, "framebuffer[%u]:\n", fb->base.id); 584 - drm_framebuffer_print_info(&p, 1, fb); 585 - } 586 - list_del_init(&fb->filp_head); 582 + drm_printf(&p, "framebuffer[%u]:\n", fb->base.id); 583 + drm_framebuffer_print_info(&p, 1, fb); 587 584 drm_framebuffer_free(&fb->base.refcount); 588 585 } 589 586
+1 -1
drivers/gpu/drm/i915/display/g4x_dp.c
··· 137 137 intel_dp->DP |= DP_SYNC_VS_HIGH; 138 138 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 139 139 140 - if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 140 + if (pipe_config->enhanced_framing) 141 141 intel_dp->DP |= DP_ENHANCED_FRAMING; 142 142 143 143 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
+2 -2
drivers/gpu/drm/i915/display/icl_dsi.c
··· 889 889 * non-compressed link speeds, and simplifies down to the ratio between 890 890 * compressed and non-compressed bpp. 891 891 */ 892 - if (crtc_state->dsc.compression_enable) { 892 + if (is_vid_mode(intel_dsi) && crtc_state->dsc.compression_enable) { 893 893 mul = fxp_q4_to_int(crtc_state->dsc.compressed_bpp_x16); 894 894 div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); 895 895 } ··· 1503 1503 struct drm_display_mode *adjusted_mode = 1504 1504 &pipe_config->hw.adjusted_mode; 1505 1505 1506 - if (pipe_config->dsc.compressed_bpp_x16) { 1506 + if (is_vid_mode(intel_dsi) && pipe_config->dsc.compressed_bpp_x16) { 1507 1507 int div = fxp_q4_to_int(pipe_config->dsc.compressed_bpp_x16); 1508 1508 int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); 1509 1509
+54
drivers/gpu/drm/i915/display/intel_cdclk.c
··· 2971 2971 return 0; 2972 2972 } 2973 2973 2974 + static int intel_cdclk_update_crtc_min_voltage_level(struct intel_atomic_state *state, 2975 + struct intel_crtc *crtc, 2976 + u8 old_min_voltage_level, 2977 + u8 new_min_voltage_level, 2978 + bool *need_cdclk_calc) 2979 + { 2980 + struct intel_display *display = to_intel_display(state); 2981 + struct intel_cdclk_state *cdclk_state; 2982 + bool allow_voltage_level_decrease = intel_any_crtc_needs_modeset(state); 2983 + int ret; 2984 + 2985 + if (new_min_voltage_level == old_min_voltage_level) 2986 + return 0; 2987 + 2988 + if (!allow_voltage_level_decrease && 2989 + new_min_voltage_level < old_min_voltage_level) 2990 + return 0; 2991 + 2992 + cdclk_state = intel_atomic_get_cdclk_state(state); 2993 + if (IS_ERR(cdclk_state)) 2994 + return PTR_ERR(cdclk_state); 2995 + 2996 + old_min_voltage_level = cdclk_state->min_voltage_level[crtc->pipe]; 2997 + 2998 + if (new_min_voltage_level == old_min_voltage_level) 2999 + return 0; 3000 + 3001 + if (!allow_voltage_level_decrease && 3002 + new_min_voltage_level < old_min_voltage_level) 3003 + return 0; 3004 + 3005 + cdclk_state->min_voltage_level[crtc->pipe] = new_min_voltage_level; 3006 + 3007 + ret = intel_atomic_lock_global_state(&cdclk_state->base); 3008 + if (ret) 3009 + return ret; 3010 + 3011 + *need_cdclk_calc = true; 3012 + 3013 + drm_dbg_kms(display->drm, 3014 + "[CRTC:%d:%s] min voltage level: %d -> %d\n", 3015 + crtc->base.base.id, crtc->base.name, 3016 + old_min_voltage_level, new_min_voltage_level); 3017 + 3018 + return 0; 3019 + } 3020 + 2974 3021 int intel_cdclk_update_dbuf_bw_min_cdclk(struct intel_atomic_state *state, 2975 3022 int old_min_cdclk, int new_min_cdclk, 2976 3023 bool *need_cdclk_calc) ··· 3431 3384 old_crtc_state->min_cdclk, 3432 3385 new_crtc_state->min_cdclk, 3433 3386 need_cdclk_calc); 3387 + if (ret) 3388 + return ret; 3389 + 3390 + ret = intel_cdclk_update_crtc_min_voltage_level(state, crtc, 3391 + old_crtc_state->min_voltage_level, 3392 + new_crtc_state->min_voltage_level, 3393 + need_cdclk_calc); 3434 3394 if (ret) 3435 3395 return ret; 3436 3396 }
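The new intel_cdclk.c helper tests the voltage level twice by design: the first pair of checks uses the old value the caller already has, so the common no-change case returns before the global cdclk state is acquired; the second pair repeats the test against the value actually stored in that state, which is the authoritative copy and may differ from the crtc state's cached value. A generic sketch of the pattern, with hypothetical names and a plain mutex standing in for the atomic global state:

#include <pthread.h>
#include <stdbool.h>

struct shared_state {
	pthread_mutex_t lock;
	int level;
};

/*
 * Double-checked update: bail out on the cheap comparison first so the
 * no-change case never serializes on shared state, then re-check under
 * the lock because the cached value may be stale.
 */
static void update_level(struct shared_state *st, int cached_old,
			 int new_level, bool *changed)
{
	if (new_level == cached_old)		/* cheap check, no lock */
		return;

	pthread_mutex_lock(&st->lock);
	if (new_level != st->level) {		/* re-check the real value */
		st->level = new_level;
		*changed = true;
	}
	pthread_mutex_unlock(&st->lock);
}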
+2
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 898 898 vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle); 899 899 if (likely(vma && vma->vm == vm)) 900 900 vma = i915_vma_tryget(vma); 901 + else 902 + vma = NULL; 901 903 rcu_read_unlock(); 902 904 if (likely(vma)) 903 905 return vma;
+31 -15
drivers/gpu/drm/sysfb/efidrm.c
··· 151 151 struct drm_sysfb_device *sysfb; 152 152 struct drm_device *dev; 153 153 struct resource *mem = NULL; 154 - void __iomem *screen_base = NULL; 155 154 struct drm_plane *primary_plane; 156 155 struct drm_crtc *crtc; 157 156 struct drm_encoder *encoder; ··· 237 238 238 239 mem_flags = efidrm_get_mem_flags(dev, res->start, vsize); 239 240 240 - if (mem_flags & EFI_MEMORY_WC) 241 - screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem)); 242 - else if (mem_flags & EFI_MEMORY_UC) 243 - screen_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); 244 - else if (mem_flags & EFI_MEMORY_WT) 245 - screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem), 246 - MEMREMAP_WT); 247 - else if (mem_flags & EFI_MEMORY_WB) 248 - screen_base = devm_memremap(&pdev->dev, mem->start, resource_size(mem), 249 - MEMREMAP_WB); 250 - else 241 + if (mem_flags & EFI_MEMORY_WC) { 242 + void __iomem *screen_base = devm_ioremap_wc(&pdev->dev, mem->start, 243 + resource_size(mem)); 244 + 245 + if (!screen_base) 246 + return ERR_PTR(-ENXIO); 247 + iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base); 248 + } else if (mem_flags & EFI_MEMORY_UC) { 249 + void __iomem *screen_base = devm_ioremap(&pdev->dev, mem->start, 250 + resource_size(mem)); 251 + 252 + if (!screen_base) 253 + return ERR_PTR(-ENXIO); 254 + iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base); 255 + } else if (mem_flags & EFI_MEMORY_WT) { 256 + void *screen_base = devm_memremap(&pdev->dev, mem->start, 257 + resource_size(mem), MEMREMAP_WT); 258 + 259 + if (IS_ERR(screen_base)) 260 + return ERR_CAST(screen_base); 261 + iosys_map_set_vaddr(&sysfb->fb_addr, screen_base); 262 + } else if (mem_flags & EFI_MEMORY_WB) { 263 + void *screen_base = devm_memremap(&pdev->dev, mem->start, 264 + resource_size(mem), MEMREMAP_WB); 265 + 266 + if (IS_ERR(screen_base)) 267 + return ERR_CAST(screen_base); 268 + iosys_map_set_vaddr(&sysfb->fb_addr, screen_base); 269 + } else { 251 270 drm_err(dev, "invalid mem_flags: 0x%llx\n", mem_flags); 252 - if (!screen_base) 253 - return ERR_PTR(-ENOMEM); 254 - iosys_map_set_vaddr_iomem(&sysfb->fb_addr, screen_base); 271 + return ERR_PTR(-EINVAL); 272 + } 255 273 256 274 /* 257 275 * Modesetting
+13 -14
drivers/gpu/drm/xe/xe_device.c
··· 837 837 } 838 838 } 839 839 840 + static void xe_device_wedged_fini(struct drm_device *drm, void *arg) 841 + { 842 + struct xe_device *xe = arg; 843 + 844 + if (atomic_read(&xe->wedged.flag)) 845 + xe_pm_runtime_put(xe); 846 + } 847 + 840 848 int xe_device_probe(struct xe_device *xe) 841 849 { 842 850 struct xe_tile *tile; ··· 1020 1012 goto err_unregister_display; 1021 1013 1022 1014 detect_preproduction_hw(xe); 1015 + 1016 + err = drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe); 1017 + if (err) 1018 + goto err_unregister_display; 1023 1019 1024 1020 return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe); 1025 1021 ··· 1228 1216 return address & GENMASK_ULL(xe->info.va_bits - 1, 0); 1229 1217 } 1230 1218 1231 - static void xe_device_wedged_fini(struct drm_device *drm, void *arg) 1232 - { 1233 - struct xe_device *xe = arg; 1234 - 1235 - xe_pm_runtime_put(xe); 1236 - } 1237 - 1238 1219 /** 1239 1220 * DOC: Xe Device Wedging 1240 1221 * ··· 1305 1300 return; 1306 1301 } 1307 1302 1308 - xe_pm_runtime_get_noresume(xe); 1309 - 1310 - if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) { 1311 - drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n"); 1312 - return; 1313 - } 1314 - 1315 1303 if (!atomic_xchg(&xe->wedged.flag, 1)) { 1316 1304 xe->needs_flr_on_fini = true; 1305 + xe_pm_runtime_get_noresume(xe); 1317 1306 drm_err(&xe->drm, 1318 1307 "CRITICAL: Xe has declared device %s as wedged.\n" 1319 1308 "IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
+6
drivers/gpu/drm/xe/xe_pagefault.c
··· 187 187 goto unlock_vm; 188 188 } 189 189 190 + if (xe_vma_read_only(vma) && 191 + pf->consumer.access_type != XE_PAGEFAULT_ACCESS_TYPE_READ) { 192 + err = -EPERM; 193 + goto unlock_vm; 194 + } 195 + 190 196 atomic = xe_pagefault_access_is_atomic(pf->consumer.access_type); 191 197 192 198 if (xe_vma_is_cpu_addr_mirror(vma))
+16 -7
drivers/gpu/drm/xe/xe_pxp.c
··· 380 380 return 0; 381 381 } 382 382 383 + /* 384 + * On PTL, older GSC FWs have a bug that can cause them to crash during 385 + * PXP invalidation events, which leads to a complete loss of power 386 + * management on the media GT. Therefore, we can't use PXP on FWs that 387 + * have this bug, which was fixed in PTL GSC build 1396. 388 + */ 389 + if (xe->info.platform == XE_PANTHERLAKE && 390 + gt->uc.gsc.fw.versions.found[XE_UC_FW_VER_RELEASE].build < 1396) { 391 + drm_info(&xe->drm, "PXP requires PTL GSC build 1396 or newer\n"); 392 + return 0; 393 + } 394 + 383 395 pxp = drmm_kzalloc(&xe->drm, sizeof(struct xe_pxp), GFP_KERNEL); 384 396 if (!pxp) { 385 397 err = -ENOMEM; ··· 524 512 static int pxp_start(struct xe_pxp *pxp, u8 type) 525 513 { 526 514 int ret = 0; 527 - bool restart = false; 515 + bool restart; 528 516 529 517 if (!xe_pxp_is_enabled(pxp)) 530 518 return -ENODEV; ··· 552 540 if (!wait_for_completion_timeout(&pxp->activation, 553 541 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) 554 542 return -ETIMEDOUT; 543 + 544 + restart = false; 555 545 556 546 mutex_lock(&pxp->mutex); 557 547 ··· 597 583 drm_err(&pxp->xe->drm, "PXP termination failed before start\n"); 598 584 mutex_lock(&pxp->mutex); 599 585 pxp->status = XE_PXP_ERROR; 586 + complete_all(&pxp->termination); 600 587 601 588 goto out_unlock; 602 589 } ··· 885 870 pxp->key_instance++; 886 871 needs_queue_inval = true; 887 872 break; 888 - default: 889 - drm_err(&pxp->xe->drm, "unexpected state during PXP suspend: %u", 890 - pxp->status); 891 - ret = -EIO; 892 - goto out; 893 873 } 894 874 895 875 /* ··· 909 899 910 900 pxp->last_suspend_key_instance = pxp->key_instance; 911 901 912 - out: 913 902 return ret; 914 903 } 915 904
+1 -1
drivers/gpu/drm/xe/xe_svm.c
··· 903 903 void xe_svm_close(struct xe_vm *vm) 904 904 { 905 905 xe_assert(vm->xe, xe_vm_is_closed(vm)); 906 - flush_work(&vm->svm.garbage_collector.work); 906 + disable_work_sync(&vm->svm.garbage_collector.work); 907 907 xe_svm_put_pagemaps(vm); 908 908 drm_pagemap_release_owner(&vm->svm.peer); 909 909 }
+12 -4
drivers/gpu/drm/xe/xe_vm_madvise.c
··· 408 408 struct xe_device *xe = to_xe_device(dev); 409 409 struct xe_file *xef = to_xe_file(file); 410 410 struct drm_xe_madvise *args = data; 411 - struct xe_vmas_in_madvise_range madvise_range = {.addr = args->start, 412 - .range = args->range, }; 411 + struct xe_vmas_in_madvise_range madvise_range = { 412 + /* 413 + * Userspace may pass canonical (sign-extended) addresses. 414 + * Strip the sign extension to get the internal non-canonical 415 + * form used by the GPUVM, matching xe_vm_bind_ioctl() behavior. 416 + */ 417 + .addr = xe_device_uncanonicalize_addr(xe, args->start), 418 + .range = args->range, 419 + }; 413 420 struct xe_madvise_details details; 414 421 struct xe_vm *vm; 415 422 struct drm_exec exec; ··· 446 439 if (err) 447 440 goto unlock_vm; 448 441 449 - err = xe_vm_alloc_madvise_vma(vm, args->start, args->range); 442 + err = xe_vm_alloc_madvise_vma(vm, madvise_range.addr, args->range); 450 443 if (err) 451 444 goto madv_fini; 452 445 ··· 489 482 madvise_funcs[attr_type](xe, vm, madvise_range.vmas, madvise_range.num_vmas, args, 490 483 &details); 491 484 492 - err = xe_vm_invalidate_madvise_range(vm, args->start, args->start + args->range); 485 + err = xe_vm_invalidate_madvise_range(vm, madvise_range.addr, 486 + madvise_range.addr + args->range); 493 487 494 488 if (madvise_range.has_svm_userptr_vmas) 495 489 xe_svm_notifier_unlock(vm);
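The xe_vm_madvise.c fix mirrors what the bind ioctl already did: on hardware with, say, 48 VA bits, userspace hands in canonical addresses with bit 47 sign-extended through bit 63, while the GPUVM tracks only the low bits -- the xe_device.c hunk earlier strips them with address & GENMASK_ULL(va_bits - 1, 0). A self-contained sketch of both directions, assuming 48 VA bits:

#include <assert.h>
#include <stdint.h>

#define VA_BITS 48	/* assumption for this sketch */

/* Sign-extend bit (VA_BITS - 1) upward: the canonical form. */
static uint64_t canonicalize(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << (64 - VA_BITS)) >> (64 - VA_BITS));
}

/* Strip the sign extension, keeping only the low VA_BITS bits. */
static uint64_t uncanonicalize(uint64_t addr)
{
	return addr & ((1ULL << VA_BITS) - 1);
}

int main(void)
{
	uint64_t va = 0x0000800000001000ULL;	/* bit 47 set */

	assert(canonicalize(va) == 0xffff800000001000ULL);
	assert(uncanonicalize(canonicalize(va)) == va);
	return 0;
}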
+2 -1
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
··· 413 413 rc = amd_sfh_hid_client_init(mp2); 414 414 if (rc) { 415 415 amd_sfh_clear_intr(mp2); 416 - dev_err(&pdev->dev, "amd_sfh_hid_client_init failed err %d\n", rc); 416 + if (rc != -EOPNOTSUPP) 417 + dev_err(&pdev->dev, "amd_sfh_hid_client_init failed err %d\n", rc); 417 418 return; 418 419 } 419 420
+6
drivers/hid/hid-debug.c
··· 990 990 { 0x0c, 0x01c9, "ALContactSync" }, 991 991 { 0x0c, 0x01ca, "ALNavigation" }, 992 992 { 0x0c, 0x01cb, "ALContextawareDesktopAssistant" }, 993 + { 0x0c, 0x01cc, "ALActionOnSelection" }, 994 + { 0x0c, 0x01cd, "ALContextualInsertion" }, 995 + { 0x0c, 0x01ce, "ALContextualQuery" }, 993 996 { 0x0c, 0x0200, "GenericGUIApplicationControls" }, 994 997 { 0x0c, 0x0201, "ACNew" }, 995 998 { 0x0c, 0x0202, "ACOpen" }, ··· 3378 3375 [KEY_BRIGHTNESS_MIN] = "BrightnessMin", 3379 3376 [KEY_BRIGHTNESS_MAX] = "BrightnessMax", 3380 3377 [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", 3378 + [KEY_ACTION_ON_SELECTION] = "ActionOnSelection", 3379 + [KEY_CONTEXTUAL_INSERT] = "ContextualInsert", 3380 + [KEY_CONTEXTUAL_QUERY] = "ContextualQuery", 3381 3381 [KEY_KBDINPUTASSIST_PREV] = "KbdInputAssistPrev", 3382 3382 [KEY_KBDINPUTASSIST_NEXT] = "KbdInputAssistNext", 3383 3383 [KEY_KBDINPUTASSIST_PREVGROUP] = "KbdInputAssistPrevGroup",
+7
drivers/hid/hid-ids.h
··· 22 22 #define USB_DEVICE_ID_3M2256 0x0502 23 23 #define USB_DEVICE_ID_3M3266 0x0506 24 24 25 + #define USB_VENDOR_ID_8BITDO 0x2dc8 26 + #define USB_DEVICE_ID_8BITDO_PRO_3 0x6009 27 + 25 28 #define USB_VENDOR_ID_A4TECH 0x09da 26 29 #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006 27 30 #define USB_DEVICE_ID_A4TECH_X5_005D 0x000a ··· 1473 1470 1474 1471 #define USB_VENDOR_ID_VTL 0x0306 1475 1472 #define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f 1473 + 1474 + #define USB_VENDOR_ID_VXE 0x3554 1475 + #define USB_DEVICE_ID_VXE_DRAGONFLY_R1_PRO_DONGLE 0xf58a 1476 + #define USB_DEVICE_ID_VXE_DRAGONFLY_R1_PRO_WIRED 0xf58c 1476 1477 1477 1478 #define USB_VENDOR_ID_WACOM 0x056a 1478 1479 #define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
+3
drivers/hid/hid-input.c
··· 1227 1227 case 0x1bc: map_key_clear(KEY_MESSENGER); break; 1228 1228 case 0x1bd: map_key_clear(KEY_INFO); break; 1229 1229 case 0x1cb: map_key_clear(KEY_ASSISTANT); break; 1230 + case 0x1cc: map_key_clear(KEY_ACTION_ON_SELECTION); break; 1231 + case 0x1cd: map_key_clear(KEY_CONTEXTUAL_INSERT); break; 1232 + case 0x1ce: map_key_clear(KEY_CONTEXTUAL_QUERY); break; 1230 1233 case 0x201: map_key_clear(KEY_NEW); break; 1231 1234 case 0x202: map_key_clear(KEY_OPEN); break; 1232 1235 case 0x203: map_key_clear(KEY_CLOSE); break;
+2
drivers/hid/hid-kysona.c
··· 272 272 static const struct hid_device_id kysona_devices[] = { 273 273 { HID_USB_DEVICE(USB_VENDOR_ID_KYSONA, USB_DEVICE_ID_KYSONA_M600_DONGLE) }, 274 274 { HID_USB_DEVICE(USB_VENDOR_ID_KYSONA, USB_DEVICE_ID_KYSONA_M600_WIRED) }, 275 + { HID_USB_DEVICE(USB_VENDOR_ID_VXE, USB_DEVICE_ID_VXE_DRAGONFLY_R1_PRO_DONGLE) }, 276 + { HID_USB_DEVICE(USB_VENDOR_ID_VXE, USB_DEVICE_ID_VXE_DRAGONFLY_R1_PRO_WIRED) }, 275 277 { } 276 278 }; 277 279 MODULE_DEVICE_TABLE(hid, kysona_devices);
+1
drivers/hid/hid-quirks.c
··· 25 25 */ 26 26 27 27 static const struct hid_device_id hid_quirks[] = { 28 + { HID_USB_DEVICE(USB_VENDOR_ID_8BITDO, USB_DEVICE_ID_8BITDO_PRO_3), HID_QUIRK_ALWAYS_POLL }, 28 29 { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD }, 29 30 { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD }, 30 31 { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL },
+2
drivers/hid/hid-roccat.c
··· 257 257 if (!new_value) 258 258 return -ENOMEM; 259 259 260 + mutex_lock(&device->readers_lock); 260 261 mutex_lock(&device->cbuf_lock); 261 262 262 263 report = &device->cbuf[device->cbuf_end]; ··· 280 279 } 281 280 282 281 mutex_unlock(&device->cbuf_lock); 282 + mutex_unlock(&device->readers_lock); 283 283 284 284 wake_up_interruptible(&device->wait); 285 285 return 0;
+7
drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
··· 26 26 .max_interrupt_delay = MAX_RX_INTERRUPT_DELAY, 27 27 }; 28 28 29 + static struct quicki2c_ddata nvl_ddata = { 30 + .max_detect_size = MAX_RX_DETECT_SIZE_NVL, 31 + .max_interrupt_delay = MAX_RX_INTERRUPT_DELAY, 32 + }; 33 + 29 34 /* THC QuickI2C ACPI method to get device properties */ 30 35 /* HIDI2C device method */ 31 36 static guid_t i2c_hid_guid = ··· 1037 1032 { PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2, &ptl_ddata) }, 1038 1033 { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT1, &ptl_ddata) }, 1039 1034 { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT2, &ptl_ddata) }, 1035 + { PCI_DEVICE_DATA(INTEL, THC_NVL_H_DEVICE_ID_I2C_PORT1, &nvl_ddata) }, 1036 + { PCI_DEVICE_DATA(INTEL, THC_NVL_H_DEVICE_ID_I2C_PORT2, &nvl_ddata) }, 1040 1037 { } 1041 1038 }; 1042 1039 MODULE_DEVICE_TABLE(pci, quicki2c_pci_tbl);
+4
drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
··· 15 15 #define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A 16 16 #define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT1 0x4D48 17 17 #define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT2 0x4D4A 18 + #define PCI_DEVICE_ID_INTEL_THC_NVL_H_DEVICE_ID_I2C_PORT1 0xD348 19 + #define PCI_DEVICE_ID_INTEL_THC_NVL_H_DEVICE_ID_I2C_PORT2 0xD34A 18 20 19 21 /* Packet size value, the unit is 16 bytes */ 20 22 #define MAX_PACKET_SIZE_VALUE_LNL 256 ··· 42 40 43 41 /* PTL Max packet size detection capability is 255 Bytes */ 44 42 #define MAX_RX_DETECT_SIZE_PTL 255 43 + /* NVL Max packet size detection capability is 64K Bytes */ 44 + #define MAX_RX_DETECT_SIZE_NVL 65535 45 45 /* Max interrupt delay capability is 2.56ms */ 46 46 #define MAX_RX_INTERRUPT_DELAY 256 47 47
+6
drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
··· 37 37 .max_packet_size_value = MAX_PACKET_SIZE_VALUE_MTL, 38 38 }; 39 39 40 + struct quickspi_driver_data nvl = { 41 + .max_packet_size_value = MAX_PACKET_SIZE_VALUE_LNL, 42 + }; 43 + 40 44 /* THC QuickSPI ACPI method to get device properties */ 41 45 /* HIDSPI Method: {6e2ac436-0fcf-41af-a265-b32a220dcfab} */ 42 46 static guid_t hidspi_guid = ··· 986 982 {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT2, &ptl), }, 987 983 {PCI_DEVICE_DATA(INTEL, THC_ARL_DEVICE_ID_SPI_PORT1, &arl), }, 988 984 {PCI_DEVICE_DATA(INTEL, THC_ARL_DEVICE_ID_SPI_PORT2, &arl), }, 985 + {PCI_DEVICE_DATA(INTEL, THC_NVL_H_DEVICE_ID_SPI_PORT1, &nvl), }, 986 + {PCI_DEVICE_DATA(INTEL, THC_NVL_H_DEVICE_ID_SPI_PORT2, &nvl), }, 989 987 {} 990 988 }; 991 989 MODULE_DEVICE_TABLE(pci, quickspi_pci_tbl);
+2
drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
··· 23 23 #define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT2 0x4D4B 24 24 #define PCI_DEVICE_ID_INTEL_THC_ARL_DEVICE_ID_SPI_PORT1 0x7749 25 25 #define PCI_DEVICE_ID_INTEL_THC_ARL_DEVICE_ID_SPI_PORT2 0x774B 26 + #define PCI_DEVICE_ID_INTEL_THC_NVL_H_DEVICE_ID_SPI_PORT1 0xD349 27 + #define PCI_DEVICE_ID_INTEL_THC_NVL_H_DEVICE_ID_SPI_PORT2 0xD34B 26 28 27 29 /* HIDSPI special ACPI parameters DSM methods */ 28 30 #define ACPI_QUICKSPI_REVISION_NUM 2
+12 -3
drivers/hv/mshv_root_main.c
··· 630 630 { 631 631 struct mshv_partition *p = vp->vp_partition; 632 632 struct mshv_mem_region *region; 633 - bool ret; 633 + bool ret = false; 634 634 u64 gfn; 635 635 #if defined(CONFIG_X86_64) 636 636 struct hv_x64_memory_intercept_message *msg = ··· 641 641 (struct hv_arm64_memory_intercept_message *) 642 642 vp->vp_intercept_msg_page->u.payload; 643 643 #endif 644 + enum hv_intercept_access_type access_type = 645 + msg->header.intercept_access_type; 644 646 645 647 gfn = HVPFN_DOWN(msg->guest_physical_address); 646 648 ··· 650 648 if (!region) 651 649 return false; 652 650 651 + if (access_type == HV_INTERCEPT_ACCESS_WRITE && 652 + !(region->hv_map_flags & HV_MAP_GPA_WRITABLE)) 653 + goto put_region; 654 + 655 + if (access_type == HV_INTERCEPT_ACCESS_EXECUTE && 656 + !(region->hv_map_flags & HV_MAP_GPA_EXECUTABLE)) 657 + goto put_region; 658 + 653 659 /* Only movable memory ranges are supported for GPA intercepts */ 654 660 if (region->mreg_type == MSHV_REGION_TYPE_MEM_MOVABLE) 655 661 ret = mshv_region_handle_gfn_fault(region, gfn); 656 - else 657 - ret = false; 658 662 663 + put_region: 659 664 mshv_region_put(region); 660 665 661 666 return ret;
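The mshv_root_main.c change rejects write and execute intercepts up front when the region was never mapped with the matching permission, instead of treating every GPA fault as a missing page. The shape of the check is a simple access-type-to-flag test; a generic sketch with made-up flag names:

#include <stdbool.h>

enum access_type { ACCESS_READ, ACCESS_WRITE, ACCESS_EXECUTE };

#define MAP_WRITABLE	(1u << 0)	/* hypothetical flag bits */
#define MAP_EXECUTABLE	(1u << 1)

/*
 * A fault is only worth satisfying if the mapping was created with the
 * matching permission; otherwise it is a guest access violation, not a
 * page that still needs to be populated.
 */
static bool access_permitted(enum access_type type, unsigned int map_flags)
{
	if (type == ACCESS_WRITE && !(map_flags & MAP_WRITABLE))
		return false;
	if (type == ACCESS_EXECUTE && !(map_flags & MAP_EXECUTABLE))
		return false;
	return true;
}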
+6 -1
drivers/hwmon/asus-ec-sensors.c
··· 111 111 ec_sensor_temp_mb, 112 112 /* "T_Sensor" temperature sensor reading [℃] */ 113 113 ec_sensor_temp_t_sensor, 114 + /* like ec_sensor_temp_t_sensor, but at an alternate address [℃] */ 115 + ec_sensor_temp_t_sensor_alt1, 114 116 /* VRM temperature [℃] */ 115 117 ec_sensor_temp_vrm, 116 118 /* VRM east (right) temperature [℃] */ ··· 162 160 #define SENSOR_TEMP_CPU_PACKAGE BIT(ec_sensor_temp_cpu_package) 163 161 #define SENSOR_TEMP_MB BIT(ec_sensor_temp_mb) 164 162 #define SENSOR_TEMP_T_SENSOR BIT(ec_sensor_temp_t_sensor) 163 + #define SENSOR_TEMP_T_SENSOR_ALT1 BIT(ec_sensor_temp_t_sensor_alt1) 165 164 #define SENSOR_TEMP_VRM BIT(ec_sensor_temp_vrm) 166 165 #define SENSOR_TEMP_VRME BIT(ec_sensor_temp_vrme) 167 166 #define SENSOR_TEMP_VRMW BIT(ec_sensor_temp_vrmw) ··· 282 279 EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x33), 283 280 [ec_sensor_temp_t_sensor] = 284 281 EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x36), 282 + [ec_sensor_temp_t_sensor_alt1] = 283 + EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x37), 285 284 [ec_sensor_fan_cpu_opt] = 286 285 EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0), 287 286 [ec_sensor_temp_water_in] = ··· 524 519 static const struct ec_board_info board_info_prime_x670e_pro_wifi = { 525 520 .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE | 526 521 SENSOR_TEMP_MB | SENSOR_TEMP_VRM | 527 - SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT, 522 + SENSOR_TEMP_T_SENSOR_ALT1 | SENSOR_FAN_CPU_OPT, 528 523 .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH, 529 524 .family = family_amd_600_series, 530 525 };
+9 -10
drivers/hwmon/occ/common.c
··· 420 420 return sysfs_emit(buf, "%u\n", val); 421 421 } 422 422 423 + static u64 occ_get_powr_avg(u64 accum, u32 samples) 424 + { 425 + return (samples == 0) ? 0 : 426 + mul_u64_u32_div(accum, 1000000UL, samples); 427 + } 428 + 423 429 static ssize_t occ_show_power_1(struct device *dev, 424 430 struct device_attribute *attr, char *buf) 425 431 { ··· 447 441 val = get_unaligned_be16(&power->sensor_id); 448 442 break; 449 443 case 1: 450 - val = get_unaligned_be32(&power->accumulator) / 451 - get_unaligned_be32(&power->update_tag); 452 - val *= 1000000ULL; 444 + val = occ_get_powr_avg(get_unaligned_be32(&power->accumulator), 445 + get_unaligned_be32(&power->update_tag)); 453 446 break; 454 447 case 2: 455 448 val = (u64)get_unaligned_be32(&power->update_tag) * ··· 462 457 } 463 458 464 459 return sysfs_emit(buf, "%llu\n", val); 465 - } 466 - 467 - static u64 occ_get_powr_avg(u64 accum, u32 samples) 468 - { 469 - return (samples == 0) ? 0 : 470 - mul_u64_u32_div(accum, 1000000UL, samples); 471 460 } 472 461 473 462 static ssize_t occ_show_power_2(struct device *dev, ··· 724 725 switch (sattr->nr) { 725 726 case 0: 726 727 if (extn->flags & EXTN_FLAG_SENSOR_ID) { 727 - rc = sysfs_emit(buf, "%u", 728 + rc = sysfs_emit(buf, "%u\n", 728 729 get_unaligned_be32(&extn->sensor_id)); 729 730 } else { 730 731 rc = sysfs_emit(buf, "%4phN\n", extn->name);
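Two details carry the occ/common.c fix: samples == 0 is now defined as an average of 0 rather than a divide-by-zero, and the scaling goes through mul_u64_u32_div(), which computes accum * 1000000 / samples without the 64-bit intermediate wrapping. The ordering matters -- dividing first throws away up to a full sample of precision, while multiplying first in plain 64-bit arithmetic can overflow. A hedged userspace equivalent using the GCC/Clang __int128 extension:

#include <stdint.h>

/*
 * Scale an accumulator to microunits per sample: widen before the
 * multiply so accum * 1000000 cannot wrap, and define 0 samples as 0
 * instead of dividing by zero. Same shape as the kernel's
 * mul_u64_u32_div(), which manages this without requiring __int128.
 */
static uint64_t avg_microunits(uint64_t accum, uint32_t samples)
{
	if (samples == 0)
		return 0;

	return (uint64_t)(((unsigned __int128)accum * 1000000u) / samples);
}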
+1
drivers/hwmon/pmbus/ltc4286.c
··· 173 173 MODULE_AUTHOR("Delphine CC Chiu <Delphine_CC_Chiu@wiwynn.com>"); 174 174 MODULE_DESCRIPTION("PMBUS driver for LTC4286 and compatibles"); 175 175 MODULE_LICENSE("GPL"); 176 + MODULE_IMPORT_NS("PMBUS");
+4 -1
drivers/hwmon/pmbus/pxe1610.c
··· 104 104 * By default this device doesn't boot to page 0, so set page 0 105 105 * to access all pmbus registers. 106 106 */ 107 - i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0); 107 + ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0); 108 + if (ret < 0) 109 + return dev_err_probe(&client->dev, ret, 110 + "Failed to set page 0\n"); 108 111 109 112 /* Read Manufacturer id */ 110 113 ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+5 -5
drivers/hwmon/pmbus/tps53679.c
··· 103 103 } 104 104 105 105 ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf); 106 - if (ret < 0) 107 - return ret; 106 + if (ret <= 0) 107 + return ret < 0 ? ret : -EIO; 108 108 109 - /* Adjust length if null terminator if present */ 109 + /* Adjust length if null terminator is present */ 110 110 buf_len = (buf[ret - 1] != '\x00' ? ret : ret - 1); 111 111 112 112 id_len = strlen(id); ··· 175 175 ret = i2c_smbus_read_block_data(client, PMBUS_IC_DEVICE_ID, buf); 176 176 if (ret < 0) 177 177 return ret; 178 - if (strncmp("TI\x53\x67\x60", buf, 5)) { 179 - dev_err(&client->dev, "Unexpected device ID: %s\n", buf); 178 + if (ret != 6 || memcmp(buf, "TI\x53\x67\x60\x00", 6)) { 179 + dev_err(&client->dev, "Unexpected device ID: %*ph\n", ret, buf); 180 180 return -ENODEV; 181 181 } 182 182
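Two bugs fall to the tps53679.c hunk: a zero-length SMBus block read used to index buf[ret - 1] as buf[-1], and the ID match used strncmp() plus a %s print on a buffer that is raw bytes, not a NUL-terminated string. The replacement demands the exact length and compares every byte, including the trailing NUL that is part of the six-byte ID. A small sketch of the strict form -- the expected bytes come from the diff, the helper name is made up:

#include <stdbool.h>
#include <string.h>

/*
 * Validate a device ID returned by a block read. Block reads hand back
 * a length plus raw bytes, so the check must insist on the exact length
 * and memcmp() the whole thing: strncmp() cannot see past a NUL byte,
 * and the old code never checked how many bytes actually arrived.
 */
static bool id_matches(const unsigned char *buf, int len)
{
	static const unsigned char expected[6] = "TI\x53\x67\x60";

	return len == (int)sizeof(expected) &&
	       memcmp(buf, expected, sizeof(expected)) == 0;
}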
+2
drivers/iio/accel/adxl313_core.c
··· 998 998 999 999 ret = regmap_write(data->regmap, ADXL313_REG_FIFO_CTL, 1000 1000 FIELD_PREP(ADXL313_REG_FIFO_CTL_MODE_MSK, ADXL313_FIFO_BYPASS)); 1001 + if (ret) 1002 + return ret; 1001 1003 1002 1004 ret = regmap_write(data->regmap, ADXL313_REG_INT_ENABLE, 0); 1003 1005 if (ret)
+1 -1
drivers/iio/accel/adxl355_core.c
··· 745 745 BIT(IIO_CHAN_INFO_OFFSET), 746 746 .scan_index = 3, 747 747 .scan_type = { 748 - .sign = 's', 748 + .sign = 'u', 749 749 .realbits = 12, 750 750 .storagebits = 16, 751 751 .endianness = IIO_BE,
+1 -1
drivers/iio/accel/adxl380.c
··· 877 877 ret = regmap_update_bits(st->regmap, ADXL380_FIFO_CONFIG_0_REG, 878 878 ADXL380_FIFO_SAMPLES_8_MSK, 879 879 FIELD_PREP(ADXL380_FIFO_SAMPLES_8_MSK, 880 - (fifo_samples & BIT(8)))); 880 + !!(fifo_samples & BIT(8)))); 881 881 if (ret) 882 882 return ret; 883 883
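The adxl380.c one-liner deserves spelling out: fifo_samples & BIT(8) evaluates to 0 or 0x100, not 0 or 1, and FIELD_PREP() into a single-bit field keeps only the low bit of the value -- so the ninth sample-count bit was silently written as 0 either way. The double negation normalizes the test to 0 or 1 first. Illustrative arithmetic, with the field simplified to bit 0:

#include <assert.h>
#include <stdint.h>

#define BIT(n)	(1u << (n))

int main(void)
{
	uint32_t fifo_samples = 0x130;			/* 304 samples */

	assert((fifo_samples & BIT(8)) == 0x100);	/* not 1 */
	assert(!!(fifo_samples & BIT(8)) == 1);		/* normalized */

	/* A one-bit field keeps only the low bit of the value: */
	assert(((fifo_samples & BIT(8)) & 0x1) == 0);	  /* bug: bit lost */
	assert(((!!(fifo_samples & BIT(8))) & 0x1) == 1); /* fixed */
	return 0;
}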
+3 -5
drivers/iio/adc/ad4062.c
··· 719 719 } 720 720 st->gpo_irq[1] = true; 721 721 722 - return devm_request_threaded_irq(dev, ret, 723 - ad4062_irq_handler_drdy, 724 - NULL, IRQF_ONESHOT, indio_dev->name, 725 - indio_dev); 722 + return devm_request_irq(dev, ret, ad4062_irq_handler_drdy, 723 + IRQF_NO_THREAD, indio_dev->name, indio_dev); 726 724 } 727 725 728 726 static const struct iio_trigger_ops ad4062_trigger_ops = { ··· 953 955 default: 954 956 return -EINVAL; 955 957 } 956 - }; 958 + } 957 959 958 960 static int ad4062_write_raw(struct iio_dev *indio_dev, 959 961 struct iio_chan_spec const *chan, int val,
+6 -6
drivers/iio/adc/ade9000.c
··· 787 787 ADE9000_MIDDLE_PAGE_BIT); 788 788 if (ret) { 789 789 dev_err_ratelimited(dev, "IRQ0 WFB write fail"); 790 - return IRQ_HANDLED; 790 + return ret; 791 791 } 792 792 793 793 ade9000_configure_scan(indio_dev, ADE9000_REG_WF_BUFF); ··· 1123 1123 tmp &= ~ADE9000_PHASE_C_POS_BIT; 1124 1124 1125 1125 switch (tmp) { 1126 - case ADE9000_REG_AWATTOS: 1126 + case ADE9000_REG_AWATT: 1127 1127 return regmap_write(st->regmap, 1128 1128 ADE9000_ADDR_ADJUST(ADE9000_REG_AWATTOS, 1129 1129 chan->channel), val); ··· 1706 1706 1707 1707 init_completion(&st->reset_completion); 1708 1708 1709 + ret = devm_mutex_init(dev, &st->lock); 1710 + if (ret) 1711 + return ret; 1712 + 1709 1713 ret = ade9000_request_irq(dev, "irq0", ade9000_irq0_thread, indio_dev); 1710 1714 if (ret) 1711 1715 return ret; ··· 1719 1715 return ret; 1720 1716 1721 1717 ret = ade9000_request_irq(dev, "dready", ade9000_dready_thread, indio_dev); 1722 - if (ret) 1723 - return ret; 1724 - 1725 - ret = devm_mutex_init(dev, &st->lock); 1726 1718 if (ret) 1727 1719 return ret; 1728 1720
+1
drivers/iio/adc/aspeed_adc.c
··· 415 415 } 416 416 adc_engine_control_reg_val = 417 417 readl(data->base + ASPEED_REG_ENGINE_CONTROL); 418 + adc_engine_control_reg_val &= ~ASPEED_ADC_REF_VOLTAGE; 418 419 419 420 ret = devm_regulator_get_enable_read_voltage(data->dev, "vref"); 420 421 if (ret < 0 && ret != -ENODEV)
+5 -4
drivers/iio/adc/nxp-sar-adc.c
··· 718 718 struct nxp_sar_adc *info = iio_priv(indio_dev); 719 719 int ret; 720 720 721 + info->dma_chan = dma_request_chan(indio_dev->dev.parent, "rx"); 722 + if (IS_ERR(info->dma_chan)) 723 + return PTR_ERR(info->dma_chan); 724 + 721 725 nxp_sar_adc_dma_channels_enable(info, *indio_dev->active_scan_mask); 722 726 723 727 nxp_sar_adc_dma_cfg(info, true); ··· 742 738 out_dma_channels_disable: 743 739 nxp_sar_adc_dma_cfg(info, false); 744 740 nxp_sar_adc_dma_channels_disable(info, *indio_dev->active_scan_mask); 741 + dma_release_channel(info->dma_chan); 745 742 746 743 return ret; 747 744 } ··· 769 764 int current_mode = iio_device_get_current_mode(indio_dev); 770 765 unsigned long channel; 771 766 int ret; 772 - 773 - info->dma_chan = dma_request_chan(indio_dev->dev.parent, "rx"); 774 - if (IS_ERR(info->dma_chan)) 775 - return PTR_ERR(info->dma_chan); 776 767 777 768 info->channels_used = 0; 778 769
+20 -21
drivers/iio/adc/ti-adc161s626.c
··· 15 15 #include <linux/init.h> 16 16 #include <linux/err.h> 17 17 #include <linux/spi/spi.h> 18 + #include <linux/unaligned.h> 18 19 #include <linux/iio/iio.h> 19 20 #include <linux/iio/trigger.h> 20 21 #include <linux/iio/buffer.h> ··· 71 70 72 71 u8 read_size; 73 72 u8 shift; 74 - 75 - u8 buffer[16] __aligned(IIO_DMA_MINALIGN); 73 + u8 buf[3] __aligned(IIO_DMA_MINALIGN); 76 74 }; 77 75 78 76 static int ti_adc_read_measurement(struct ti_adc_data *data, ··· 80 80 int ret; 81 81 82 82 switch (data->read_size) { 83 - case 2: { 84 - __be16 buf; 85 - 86 - ret = spi_read(data->spi, (void *) &buf, 2); 83 + case 2: 84 + ret = spi_read(data->spi, data->buf, 2); 87 85 if (ret) 88 86 return ret; 89 87 90 - *val = be16_to_cpu(buf); 88 + *val = get_unaligned_be16(data->buf); 91 89 break; 92 - } 93 - case 3: { 94 - __be32 buf; 95 - 96 - ret = spi_read(data->spi, (void *) &buf, 3); 90 + case 3: 91 + ret = spi_read(data->spi, data->buf, 3); 97 92 if (ret) 98 93 return ret; 99 94 100 - *val = be32_to_cpu(buf) >> 8; 95 + *val = get_unaligned_be24(data->buf); 101 96 break; 102 - } 103 97 default: 104 98 return -EINVAL; 105 99 } ··· 108 114 struct iio_poll_func *pf = private; 109 115 struct iio_dev *indio_dev = pf->indio_dev; 110 116 struct ti_adc_data *data = iio_priv(indio_dev); 111 - int ret; 117 + struct { 118 + s16 data; 119 + aligned_s64 timestamp; 120 + } scan = { }; 121 + int ret, val; 112 122 113 - ret = ti_adc_read_measurement(data, &indio_dev->channels[0], 114 - (int *) &data->buffer); 115 - if (!ret) 116 - iio_push_to_buffers_with_timestamp(indio_dev, 117 - data->buffer, 118 - iio_get_time_ns(indio_dev)); 123 + ret = ti_adc_read_measurement(data, &indio_dev->channels[0], &val); 124 + if (ret) 125 + goto exit_notify_done; 119 126 127 + scan.data = val; 128 + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); 129 + 130 + exit_notify_done: 120 131 iio_trigger_notify_done(indio_dev->trig); 121 132 122 133 return IRQ_HANDLED;
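The ti-adc161s626.c rework reads into a small DMA-safe byte buffer and assembles the sample with get_unaligned_be16()/get_unaligned_be24() instead of casting the buffer to __be16/__be32 -- a cast that is only safe when the buffer happens to be suitably aligned, and that for the 3-byte read relied on a shift to discard a stray fourth byte. Byte-wise assembly, which is what those helpers reduce to, has no alignment requirement at all:

#include <assert.h>
#include <stdint.h>

/* Byte-wise big-endian loads: valid at any alignment. */
static uint16_t load_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t load_be24(const uint8_t *p)
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
	const uint8_t buf[3] = { 0x12, 0x34, 0x56 };

	assert(load_be16(buf) == 0x1234);
	assert(load_be24(buf) == 0x123456);
	return 0;
}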
+1 -1
drivers/iio/adc/ti-ads1018.c
··· 249 249 struct iio_chan_spec const *chan, u16 *cnv) 250 250 { 251 251 u8 max_drate_mode = ads1018->chip_info->num_data_rate_mode_to_hz - 1; 252 - u8 drate = ads1018->chip_info->data_rate_mode_to_hz[max_drate_mode]; 252 + u32 drate = ads1018->chip_info->data_rate_mode_to_hz[max_drate_mode]; 253 253 u8 pga_mode = ads1018->chan_data[chan->scan_index].pga_mode; 254 254 struct spi_transfer xfer[2] = { 255 255 {
+6 -5
drivers/iio/adc/ti-ads1119.c
··· 274 274 275 275 ret = pm_runtime_resume_and_get(dev); 276 276 if (ret) 277 - goto pdown; 277 + return ret; 278 278 279 279 ret = ads1119_configure_channel(st, mux, gain, datarate); 280 280 if (ret) 281 281 goto pdown; 282 + 283 + if (st->client->irq) 284 + reinit_completion(&st->completion); 282 285 283 286 ret = i2c_smbus_write_byte(st->client, ADS1119_CMD_START_SYNC); 284 287 if (ret) ··· 738 735 return dev_err_probe(dev, ret, "Failed to setup IIO buffer\n"); 739 736 740 737 if (client->irq > 0) { 741 - ret = devm_request_threaded_irq(dev, client->irq, 742 - ads1119_irq_handler, 743 - NULL, IRQF_ONESHOT, 744 - "ads1119", indio_dev); 738 + ret = devm_request_irq(dev, client->irq, ads1119_irq_handler, 739 + IRQF_NO_THREAD, "ads1119", indio_dev); 745 740 if (ret) 746 741 return dev_err_probe(dev, ret, 747 742 "Failed to allocate irq\n");
+5 -3
drivers/iio/adc/ti-ads7950.c
··· 427 427 static int ti_ads7950_get(struct gpio_chip *chip, unsigned int offset) 428 428 { 429 429 struct ti_ads7950_state *st = gpiochip_get_data(chip); 430 + bool state; 430 431 int ret; 431 432 432 433 mutex_lock(&st->slock); 433 434 434 435 /* If set as output, return the output */ 435 436 if (st->gpio_cmd_settings_bitmask & BIT(offset)) { 436 - ret = st->cmd_settings_bitmask & BIT(offset); 437 + state = st->cmd_settings_bitmask & BIT(offset); 438 + ret = 0; 437 439 goto out; 438 440 } 439 441 ··· 446 444 if (ret) 447 445 goto out; 448 446 449 - ret = ((st->single_rx >> 12) & BIT(offset)) ? 1 : 0; 447 + state = (st->single_rx >> 12) & BIT(offset); 450 448 451 449 /* Revert back to original settings */ 452 450 st->cmd_settings_bitmask &= ~TI_ADS7950_CR_GPIO_DATA; ··· 458 456 out: 459 457 mutex_unlock(&st->slock); 460 458 461 - return ret; 459 + return ret ?: state; 462 460 } 463 461 464 462 static int ti_ads7950_get_direction(struct gpio_chip *chip,
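The ti-ads7950.c change separates the two things the old code crammed into one variable: ret now carries only 0 or a negative errno, state carries the pin level, and the final return ret ?: state (the GNU "elvis" extension the kernel uses freely) returns the error if there is one and the level otherwise. A sketch of the shape; read_hw() is a hypothetical stand-in for the locked register access:

#include <stdbool.h>

/* Hypothetical hardware read for the sketch; 0 on success. */
static int read_hw(unsigned int offset, unsigned int *raw)
{
	*raw = 1u << offset;
	return 0;
}

/* Returns a negative errno on failure, otherwise the pin level (0/1). */
static int gpio_get(unsigned int offset)
{
	unsigned int raw;
	bool state = false;
	int ret;

	ret = read_hw(offset, &raw);
	if (ret)
		goto out;

	state = raw & (1u << offset);
out:
	/* GNU extension: "a ?: b" means "a ? a : b", a evaluated once. */
	return ret ?: state;
}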
+30 -18
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
··· 14 14 #include <linux/iio/triggered_buffer.h> 15 15 #include <linux/iio/trigger_consumer.h> 16 16 #include <linux/iio/sysfs.h> 17 + #include <linux/iio/kfifo_buf.h> 17 18 #include "hid-sensor-trigger.h" 18 19 19 20 static ssize_t _hid_sensor_set_report_latency(struct device *dev, ··· 203 202 _hid_sensor_power_state(attrb, true); 204 203 } 205 204 206 - static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig, 207 - bool state) 205 + static int buffer_postenable(struct iio_dev *indio_dev) 208 206 { 209 - return hid_sensor_power_state(iio_trigger_get_drvdata(trig), state); 207 + return hid_sensor_power_state(iio_device_get_drvdata(indio_dev), 1); 210 208 } 209 + 210 + static int buffer_predisable(struct iio_dev *indio_dev) 211 + { 212 + return hid_sensor_power_state(iio_device_get_drvdata(indio_dev), 0); 213 + } 214 + 215 + static const struct iio_buffer_setup_ops hid_sensor_buffer_ops = { 216 + .postenable = buffer_postenable, 217 + .predisable = buffer_predisable, 218 + }; 211 219 212 220 void hid_sensor_remove_trigger(struct iio_dev *indio_dev, 213 221 struct hid_sensor_common *attrb) ··· 229 219 cancel_work_sync(&attrb->work); 230 220 iio_trigger_unregister(attrb->trigger); 231 221 iio_trigger_free(attrb->trigger); 232 - iio_triggered_buffer_cleanup(indio_dev); 233 222 } 234 223 EXPORT_SYMBOL_NS(hid_sensor_remove_trigger, "IIO_HID"); 235 - 236 - static const struct iio_trigger_ops hid_sensor_trigger_ops = { 237 - .set_trigger_state = &hid_sensor_data_rdy_trigger_set_state, 238 - }; 239 224 240 225 int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, 241 226 struct hid_sensor_common *attrb) ··· 244 239 else 245 240 fifo_attrs = NULL; 246 241 247 - ret = iio_triggered_buffer_setup_ext(indio_dev, 248 - &iio_pollfunc_store_time, NULL, 249 - IIO_BUFFER_DIRECTION_IN, 250 - NULL, fifo_attrs); 242 + indio_dev->modes = INDIO_DIRECT_MODE | INDIO_HARDWARE_TRIGGERED; 243 + 244 + ret = devm_iio_kfifo_buffer_setup_ext(&indio_dev->dev, indio_dev, 245 + &hid_sensor_buffer_ops, 246 + fifo_attrs); 251 247 if (ret) { 252 - dev_err(&indio_dev->dev, "Triggered Buffer Setup Failed\n"); 248 + dev_err(&indio_dev->dev, "Kfifo Buffer Setup Failed\n"); 253 249 return ret; 254 250 } 251 + 252 + /* 253 + * The current user space in distro "iio-sensor-proxy" is not working in 254 + * trigerless mode and it expects 255 + * /sys/bus/iio/devices/iio:device0/trigger/current_trigger. 256 + * The change replacing iio_triggered_buffer_setup_ext() with 257 + * devm_iio_kfifo_buffer_setup_ext() will not create attribute without 258 + * registering a trigger with INDIO_HARDWARE_TRIGGERED. 259 + * So the below code fragment is still required. 260 + */ 255 261 256 262 trig = iio_trigger_alloc(indio_dev->dev.parent, 257 263 "%s-dev%d", name, iio_device_id(indio_dev)); 258 264 if (trig == NULL) { 259 265 dev_err(&indio_dev->dev, "Trigger Allocate Failed\n"); 260 - ret = -ENOMEM; 261 - goto error_triggered_buffer_cleanup; 266 + return -ENOMEM; 262 267 } 263 268 264 269 iio_trigger_set_drvdata(trig, attrb); 265 - trig->ops = &hid_sensor_trigger_ops; 266 270 ret = iio_trigger_register(trig); 267 271 268 272 if (ret) { ··· 298 284 iio_trigger_unregister(trig); 299 285 error_free_trig: 300 286 iio_trigger_free(trig); 301 - error_triggered_buffer_cleanup: 302 - iio_triggered_buffer_cleanup(indio_dev); 303 287 return ret; 304 288 } 305 289 EXPORT_SYMBOL_NS(hid_sensor_setup_trigger, "IIO_HID");
+1 -1
drivers/iio/dac/ad5770r.c
··· 322 322 chan->address, 323 323 st->transf_buf, 2); 324 324 if (ret) 325 - return 0; 325 + return ret; 326 326 327 327 buf16 = get_unaligned_le16(st->transf_buf); 328 328 *val = buf16 >> 2;
+22 -29
drivers/iio/dac/mcp47feb02.c
··· 65 65 #define MCP47FEB02_MAX_SCALES_CH 3 66 66 #define MCP47FEB02_DAC_WIPER_UNLOCKED 0 67 67 #define MCP47FEB02_NORMAL_OPERATION 0 68 - #define MCP47FEB02_INTERNAL_BAND_GAP_mV 2440 68 + #define MCP47FEB02_INTERNAL_BAND_GAP_uV 2440000 69 69 #define NV_DAC_ADDR_OFFSET 0x10 70 70 71 71 enum mcp47feb02_vref_mode { ··· 697 697 }; 698 698 699 699 static void mcp47feb02_init_scale(struct mcp47feb02_data *data, enum mcp47feb02_scale scale, 700 - int vref_mV, int scale_avail[]) 700 + int vref_uV, int scale_avail[]) 701 701 { 702 702 u32 value_micro, value_int; 703 703 u64 tmp; 704 704 705 - /* vref_mV should not be negative */ 706 - tmp = (u64)vref_mV * MICRO >> data->chip_features->resolution; 705 + /* vref_uV should not be negative */ 706 + tmp = (u64)vref_uV * MILLI >> data->chip_features->resolution; 707 707 value_int = div_u64_rem(tmp, MICRO, &value_micro); 708 708 scale_avail[scale * 2] = value_int; 709 709 scale_avail[scale * 2 + 1] = value_micro; 710 710 } 711 711 712 - static int mcp47feb02_init_scales_avail(struct mcp47feb02_data *data, int vdd_mV, 713 - int vref_mV, int vref1_mV) 712 + static int mcp47feb02_init_scales_avail(struct mcp47feb02_data *data, int vdd_uV, 713 + int vref_uV, int vref1_uV) 714 714 { 715 - struct device *dev = regmap_get_device(data->regmap); 716 715 int tmp_vref; 717 716 718 - mcp47feb02_init_scale(data, MCP47FEB02_SCALE_VDD, vdd_mV, data->scale); 717 + mcp47feb02_init_scale(data, MCP47FEB02_SCALE_VDD, vdd_uV, data->scale); 719 718 720 719 if (data->use_vref) 721 - tmp_vref = vref_mV; 720 + tmp_vref = vref_uV; 722 721 else 723 - tmp_vref = MCP47FEB02_INTERNAL_BAND_GAP_mV; 722 + tmp_vref = MCP47FEB02_INTERNAL_BAND_GAP_uV; 724 723 725 724 mcp47feb02_init_scale(data, MCP47FEB02_SCALE_GAIN_X1, tmp_vref, data->scale); 726 725 mcp47feb02_init_scale(data, MCP47FEB02_SCALE_GAIN_X2, tmp_vref * 2, data->scale); 727 726 728 727 if (data->phys_channels >= 4) { 729 - mcp47feb02_init_scale(data, MCP47FEB02_SCALE_VDD, vdd_mV, data->scale_1); 730 - 731 - if (data->use_vref1 && vref1_mV <= 0) 732 - return dev_err_probe(dev, vref1_mV, "Invalid voltage for Vref1\n"); 728 + mcp47feb02_init_scale(data, MCP47FEB02_SCALE_VDD, vdd_uV, data->scale_1); 733 729 734 730 if (data->use_vref1) 735 - tmp_vref = vref1_mV; 731 + tmp_vref = vref1_uV; 736 732 else 737 - tmp_vref = MCP47FEB02_INTERNAL_BAND_GAP_mV; 733 + tmp_vref = MCP47FEB02_INTERNAL_BAND_GAP_uV; 738 734 739 735 mcp47feb02_init_scale(data, MCP47FEB02_SCALE_GAIN_X1, 740 736 tmp_vref, data->scale_1); ··· 951 955 u32 num_channels; 952 956 u8 chan_idx = 0; 953 957 954 - guard(mutex)(&data->lock); 955 - 956 958 num_channels = device_get_child_node_count(dev); 957 959 if (num_channels > chip_features->phys_channels) 958 960 return dev_err_probe(dev, -EINVAL, "More channels than the chip supports\n"); ··· 1074 1080 return 0; 1075 1081 } 1076 1082 1077 - static int mcp47feb02_init_ch_scales(struct mcp47feb02_data *data, int vdd_mV, 1078 - int vref_mV, int vref1_mV) 1083 + static int mcp47feb02_init_ch_scales(struct mcp47feb02_data *data, int vdd_uV, 1084 + int vref_uV, int vref1_uV) 1079 1085 { 1080 1086 unsigned int i; 1081 1087 ··· 1083 1089 struct device *dev = regmap_get_device(data->regmap); 1084 1090 int ret; 1085 1091 1086 - ret = mcp47feb02_init_scales_avail(data, vdd_mV, vref_mV, vref1_mV); 1092 + ret = mcp47feb02_init_scales_avail(data, vdd_uV, vref_uV, vref1_uV); 1087 1093 if (ret) 1088 1094 return dev_err_probe(dev, ret, "failed to init scales for ch %u\n", i); 1089 1095 } ··· 1097 1103 struct device *dev = &client->dev; 
1098 1104 struct mcp47feb02_data *data; 1099 1105 struct iio_dev *indio_dev; 1100 - int vref1_mV = 0; 1101 - int vref_mV = 0; 1102 - int vdd_mV; 1103 - int ret; 1106 + int vref1_uV, vref_uV, vdd_uV, ret; 1104 1107 1105 1108 indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); 1106 1109 if (!indio_dev) ··· 1134 1143 if (ret < 0) 1135 1144 return ret; 1136 1145 1137 - vdd_mV = ret / MILLI; 1146 + vdd_uV = ret; 1138 1147 1139 1148 ret = devm_regulator_get_enable_read_voltage(dev, "vref"); 1140 1149 if (ret > 0) { 1141 - vref_mV = ret / MILLI; 1150 + vref_uV = ret; 1142 1151 data->use_vref = true; 1143 1152 } else { 1153 + vref_uV = 0; 1144 1154 dev_dbg(dev, "using internal band gap as voltage reference.\n"); 1145 1155 dev_dbg(dev, "Vref is unavailable.\n"); 1146 1156 } ··· 1149 1157 if (chip_features->have_ext_vref1) { 1150 1158 ret = devm_regulator_get_enable_read_voltage(dev, "vref1"); 1151 1159 if (ret > 0) { 1152 - vref1_mV = ret / MILLI; 1160 + vref1_uV = ret; 1153 1161 data->use_vref1 = true; 1154 1162 } else { 1163 + vref1_uV = 0; 1155 1164 dev_dbg(dev, "using internal band gap as voltage reference 1.\n"); 1156 1165 dev_dbg(dev, "Vref1 is unavailable.\n"); 1157 1166 } ··· 1162 1169 if (ret) 1163 1170 return dev_err_probe(dev, ret, "Error initialising vref register\n"); 1164 1171 1165 - ret = mcp47feb02_init_ch_scales(data, vdd_mV, vref_mV, vref1_mV); 1172 + ret = mcp47feb02_init_ch_scales(data, vdd_uV, vref_uV, vref1_uV); 1166 1173 if (ret) 1167 1174 return ret; 1168 1175
+21 -11
drivers/iio/gyro/mpu3050-core.c
··· 1129 1129 1130 1130 ret = iio_trigger_register(mpu3050->trig); 1131 1131 if (ret) 1132 - return ret; 1132 + goto err_iio_trigger; 1133 1133 1134 1134 indio_dev->trig = iio_trigger_get(mpu3050->trig); 1135 1135 1136 1136 return 0; 1137 + 1138 + err_iio_trigger: 1139 + free_irq(mpu3050->irq, mpu3050->trig); 1140 + 1141 + return ret; 1137 1142 } 1138 1143 1139 1144 int mpu3050_common_probe(struct device *dev, ··· 1226 1221 goto err_power_down; 1227 1222 } 1228 1223 1229 - ret = iio_device_register(indio_dev); 1230 - if (ret) { 1231 - dev_err(dev, "device register failed\n"); 1232 - goto err_cleanup_buffer; 1233 - } 1234 - 1235 1224 dev_set_drvdata(dev, indio_dev); 1236 1225 1237 1226 /* Check if we have an assigned IRQ to use as trigger */ ··· 1248 1249 pm_runtime_use_autosuspend(dev); 1249 1250 pm_runtime_put(dev); 1250 1251 1252 + ret = iio_device_register(indio_dev); 1253 + if (ret) { 1254 + dev_err(dev, "device register failed\n"); 1255 + goto err_iio_device_register; 1256 + } 1257 + 1251 1258 return 0; 1252 1259 1253 - err_cleanup_buffer: 1260 + err_iio_device_register: 1261 + pm_runtime_get_sync(dev); 1262 + pm_runtime_put_noidle(dev); 1263 + pm_runtime_disable(dev); 1264 + if (irq) 1265 + free_irq(mpu3050->irq, mpu3050->trig); 1254 1266 iio_triggered_buffer_cleanup(indio_dev); 1255 1267 err_power_down: 1256 1268 mpu3050_power_down(mpu3050); ··· 1274 1264 struct iio_dev *indio_dev = dev_get_drvdata(dev); 1275 1265 struct mpu3050 *mpu3050 = iio_priv(indio_dev); 1276 1266 1267 + iio_device_unregister(indio_dev); 1277 1268 pm_runtime_get_sync(dev); 1278 1269 pm_runtime_put_noidle(dev); 1279 1270 pm_runtime_disable(dev); 1280 - iio_triggered_buffer_cleanup(indio_dev); 1281 1271 if (mpu3050->irq) 1282 - free_irq(mpu3050->irq, mpu3050); 1283 - iio_device_unregister(indio_dev); 1272 + free_irq(mpu3050->irq, mpu3050->trig); 1273 + iio_triggered_buffer_cleanup(indio_dev); 1284 1274 mpu3050_power_down(mpu3050); 1285 1275 } 1286 1276
+4 -4
drivers/iio/imu/adis16550.c
··· 643 643 case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: 644 644 switch (chan->type) { 645 645 case IIO_ANGL_VEL: 646 - ret = adis16550_get_accl_filter_freq(st, val); 646 + ret = adis16550_get_gyro_filter_freq(st, val); 647 647 if (ret) 648 648 return ret; 649 649 return IIO_VAL_INT; 650 650 case IIO_ACCEL: 651 - ret = adis16550_get_gyro_filter_freq(st, val); 651 + ret = adis16550_get_accl_filter_freq(st, val); 652 652 if (ret) 653 653 return ret; 654 654 return IIO_VAL_INT; ··· 681 681 case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: 682 682 switch (chan->type) { 683 683 case IIO_ANGL_VEL: 684 - return adis16550_set_accl_filter_freq(st, val); 685 - case IIO_ACCEL: 686 684 return adis16550_set_gyro_filter_freq(st, val); 685 + case IIO_ACCEL: 686 + return adis16550_set_accl_filter_freq(st, val); 687 687 default: 688 688 return -EINVAL; 689 689 }
+5 -10
drivers/iio/imu/bmi160/bmi160_core.c
··· 573 573 int_out_ctrl_shift = BMI160_INT1_OUT_CTRL_SHIFT; 574 574 int_latch_mask = BMI160_INT1_LATCH_MASK; 575 575 int_map_mask = BMI160_INT1_MAP_DRDY_EN; 576 + pin_name = "INT1"; 576 577 break; 577 578 case BMI160_PIN_INT2: 578 579 int_out_ctrl_shift = BMI160_INT2_OUT_CTRL_SHIFT; 579 580 int_latch_mask = BMI160_INT2_LATCH_MASK; 580 581 int_map_mask = BMI160_INT2_MAP_DRDY_EN; 582 + pin_name = "INT2"; 581 583 break; 584 + default: 585 + return -EINVAL; 582 586 } 583 587 int_out_ctrl_mask = BMI160_INT_OUT_CTRL_MASK << int_out_ctrl_shift; 584 588 ··· 616 612 ret = bmi160_write_conf_reg(regmap, BMI160_REG_INT_MAP, 617 613 int_map_mask, int_map_mask, 618 614 write_usleep); 619 - if (ret) { 620 - switch (pin) { 621 - case BMI160_PIN_INT1: 622 - pin_name = "INT1"; 623 - break; 624 - case BMI160_PIN_INT2: 625 - pin_name = "INT2"; 626 - break; 627 - } 615 + if (ret) 628 616 dev_err(dev, "Failed to configure %s IRQ pin", pin_name); 629 - } 630 617 631 618 return ret; 632 619 }
+1 -1
drivers/iio/imu/bno055/bno055.c
··· 64 64 #define BNO055_GRAVITY_DATA_X_LSB_REG 0x2E 65 65 #define BNO055_GRAVITY_DATA_Y_LSB_REG 0x30 66 66 #define BNO055_GRAVITY_DATA_Z_LSB_REG 0x32 67 - #define BNO055_SCAN_CH_COUNT ((BNO055_GRAVITY_DATA_Z_LSB_REG - BNO055_ACC_DATA_X_LSB_REG) / 2) 67 + #define BNO055_SCAN_CH_COUNT ((BNO055_GRAVITY_DATA_Z_LSB_REG - BNO055_ACC_DATA_X_LSB_REG) / 2 + 1) 68 68 #define BNO055_TEMP_REG 0x34 69 69 #define BNO055_CALIB_STAT_REG 0x35 70 70 #define BNO055_CALIB_STAT_MAGN_SHIFT 0
+14 -1
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
··· 225 225 const struct st_lsm6dsx_reg *batch_reg; 226 226 u8 data; 227 227 228 + /* Only internal sensors have a FIFO ODR configuration register. */ 229 + if (sensor->id >= ARRAY_SIZE(hw->settings->batch)) 230 + return 0; 231 + 228 232 batch_reg = &hw->settings->batch[sensor->id]; 229 233 if (batch_reg->addr) { 230 234 int val; ··· 862 858 int i, ret; 863 859 864 860 for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) { 861 + const struct iio_dev_attr **attrs; 862 + 865 863 if (!hw->iio_devs[i]) 866 864 continue; 867 865 866 + /* 867 + * For the accelerometer, allow setting FIFO sampling frequency 868 + * values different from the sensor sampling frequency, which 869 + * may be needed to keep FIFO data rate low while sampling 870 + * acceleration data at high rates for accurate event detection. 871 + */ 872 + attrs = i == ST_LSM6DSX_ID_ACC ? st_lsm6dsx_buffer_attrs : NULL; 868 873 ret = devm_iio_kfifo_buffer_setup_ext(hw->dev, hw->iio_devs[i], 869 874 &st_lsm6dsx_buffer_ops, 870 - st_lsm6dsx_buffer_attrs); 875 + attrs); 871 876 if (ret) 872 877 return ret; 873 878 }
+12 -6
drivers/iio/light/vcnl4035.c
··· 103 103 struct iio_dev *indio_dev = pf->indio_dev; 104 104 struct vcnl4035_data *data = iio_priv(indio_dev); 105 105 /* Ensure naturally aligned timestamp */ 106 - u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8) = { }; 106 + struct { 107 + u16 als_data; 108 + aligned_s64 timestamp; 109 + } buffer = { }; 110 + unsigned int val; 107 111 int ret; 108 112 109 - ret = regmap_read(data->regmap, VCNL4035_ALS_DATA, (int *)buffer); 113 + ret = regmap_read(data->regmap, VCNL4035_ALS_DATA, &val); 110 114 if (ret < 0) { 111 115 dev_err(&data->client->dev, 112 116 "Trigger consumer can't read from sensor.\n"); 113 117 goto fail_read; 114 118 } 115 - iio_push_to_buffers_with_timestamp(indio_dev, buffer, 116 - iio_get_time_ns(indio_dev)); 119 + 120 + buffer.als_data = val; 121 + iio_push_to_buffers_with_timestamp(indio_dev, &buffer, 122 + iio_get_time_ns(indio_dev)); 117 123 118 124 fail_read: 119 125 iio_trigger_notify_done(indio_dev->trig); ··· 387 381 .sign = 'u', 388 382 .realbits = 16, 389 383 .storagebits = 16, 390 - .endianness = IIO_LE, 384 + .endianness = IIO_CPU, 391 385 }, 392 386 }, 393 387 { ··· 401 395 .sign = 'u', 402 396 .realbits = 16, 403 397 .storagebits = 16, 404 - .endianness = IIO_LE, 398 + .endianness = IIO_CPU, 405 399 }, 406 400 }, 407 401 };
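The vcnl4035.c fix swaps a hand-aligned opaque byte array -- into which regmap_read() was writing a full int through a cast pointer -- for a scan struct that names the sample and gives the timestamp explicit 8-byte alignment, with a bounce through a proper unsigned int for the register read (and the declared scan endianness becomes IIO_CPU to match). The layout contract can be pinned down at compile time; a userspace sketch of the same idea:

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>

/*
 * One scan record: a 16-bit sample padded so that the 64-bit timestamp
 * lands naturally aligned, the userspace analogue of the kernel's
 * aligned_s64 field.
 */
struct als_scan {
	uint16_t als_data;
	alignas(8) int64_t timestamp;
};

_Static_assert(offsetof(struct als_scan, timestamp) == 8,
	       "timestamp must sit on an 8-byte boundary");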
+1 -3
drivers/iio/light/veml6070.c
··· 134 134 if (ret < 0) 135 135 return ret; 136 136 137 - ret = (msb << 8) | lsb; 138 - 139 - return 0; 137 + return (msb << 8) | lsb; 140 138 } 141 139 142 140 static const struct iio_chan_spec veml6070_channels[] = {
+20 -4
drivers/iio/orientation/hid-sensor-rotation.c
··· 19 19 struct hid_sensor_common common_attributes; 20 20 struct hid_sensor_hub_attribute_info quaternion; 21 21 struct { 22 - s32 sampled_vals[4]; 23 - aligned_s64 timestamp; 22 + IIO_DECLARE_QUATERNION(s32, sampled_vals); 23 + /* 24 + * ABI regression avoidance: There are two copies of the same 25 + * timestamp in case of userspace depending on broken alignment 26 + * from older kernels. 27 + */ 28 + aligned_s64 timestamp[2]; 24 29 } scan; 25 30 int scale_pre_decml; 26 31 int scale_post_decml; ··· 159 154 if (!rot_state->timestamp) 160 155 rot_state->timestamp = iio_get_time_ns(indio_dev); 161 156 162 - iio_push_to_buffers_with_timestamp(indio_dev, &rot_state->scan, 163 - rot_state->timestamp); 157 + /* 158 + * ABI regression avoidance: IIO previously had an incorrect 159 + * implementation of iio_push_to_buffers_with_timestamp() that 160 + * put the timestamp in the last 8 bytes of the buffer, which 161 + * was incorrect according to the IIO ABI. To avoid breaking 162 + * userspace that may be depending on this broken behavior, we 163 + * put the timestamp in both the correct place [0] and the old 164 + * incorrect place [1]. 165 + */ 166 + rot_state->scan.timestamp[0] = rot_state->timestamp; 167 + rot_state->scan.timestamp[1] = rot_state->timestamp; 168 + 169 + iio_push_to_buffers(indio_dev, &rot_state->scan); 164 170 165 171 rot_state->timestamp = 0; 166 172 }
+1 -1
drivers/iio/pressure/abp2030pa.c
··· 520 520 data->p_offset = div_s64(odelta * data->pmin, pdelta) - data->outmin; 521 521 522 522 if (data->irq > 0) { 523 - ret = devm_request_irq(dev, irq, abp2_eoc_handler, IRQF_ONESHOT, 523 + ret = devm_request_irq(dev, irq, abp2_eoc_handler, 0, 524 524 dev_name(dev), data); 525 525 if (ret) 526 526 return ret;
+4 -3
drivers/iio/proximity/rfd77402.c
··· 173 173 struct i2c_client *client = data->client; 174 174 int val, ret; 175 175 176 - if (data->irq_en) { 177 - reinit_completion(&data->completion); 176 + if (data->irq_en) 178 177 return rfd77402_wait_for_irq(data); 179 - } 180 178 181 179 /* 182 180 * As per RFD77402 datasheet section '3.1.1 Single Measure', the ··· 201 203 RFD77402_STATUS_MCPU_ON); 202 204 if (ret < 0) 203 205 return ret; 206 + 207 + if (data->irq_en) 208 + reinit_completion(&data->completion); 204 209 205 210 ret = i2c_smbus_write_byte_data(client, RFD77402_CMD_R, 206 211 RFD77402_CMD_SINGLE |
+5
drivers/input/joystick/xpad.c
··· 313 313 { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, 314 314 { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, 315 315 { 0x1532, 0x0a29, "Razer Wolverine V2", 0, XTYPE_XBOXONE }, 316 + { 0x1532, 0x0a57, "Razer Wolverine V3 Pro (Wired)", 0, XTYPE_XBOX360 }, 317 + { 0x1532, 0x0a59, "Razer Wolverine V3 Pro (2.4 GHz Dongle)", 0, XTYPE_XBOX360 }, 316 318 { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, 317 319 { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, 318 320 { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 }, ··· 362 360 { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, 363 361 { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, 364 362 { 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 }, 363 + { 0x20bc, 0x5134, "BETOP BTP-KP50B Xinput Dongle", 0, XTYPE_XBOX360 }, 364 + { 0x20bc, 0x514a, "BETOP BTP-KP50C Xinput Dongle", 0, XTYPE_XBOX360 }, 365 365 { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, 366 366 { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, 367 367 { 0x20d6, 0x2064, "PowerA Wired Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, ··· 566 562 XPAD_XBOX360_VENDOR(0x1a86), /* Nanjing Qinheng Microelectronics (WCH) */ 567 563 XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */ 568 564 XPAD_XBOX360_VENDOR(0x1ee9), /* ZOTAC Technology Limited */ 565 + XPAD_XBOX360_VENDOR(0x20bc), /* BETOP wireless dongles */ 569 566 XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */ 570 567 XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA controllers */ 571 568 XPAD_XBOX360_VENDOR(0x2345), /* Machenike Controllers */
+41 -1
drivers/input/mouse/bcm5974.c
··· 286 286 const struct tp_finger *index[MAX_FINGERS]; /* finger index data */ 287 287 struct input_mt_pos pos[MAX_FINGERS]; /* position array */ 288 288 int slots[MAX_FINGERS]; /* slot assignments */ 289 + struct work_struct mode_reset_work; 290 + unsigned long last_mode_reset; 289 291 }; 290 292 291 293 /* trackpad finger block data, le16-aligned */ ··· 698 696 return retval; 699 697 } 700 698 699 + /* 700 + * Mode switches sent before the control response are ignored. 701 + * Fixing this state requires switching to normal mode and waiting 702 + * about 1ms before switching back to wellspring mode. 703 + */ 704 + static void bcm5974_mode_reset_work(struct work_struct *work) 705 + { 706 + struct bcm5974 *dev = container_of(work, struct bcm5974, mode_reset_work); 707 + int error; 708 + 709 + guard(mutex)(&dev->pm_mutex); 710 + dev->last_mode_reset = jiffies; 711 + 712 + error = bcm5974_wellspring_mode(dev, false); 713 + if (error) { 714 + dev_err(&dev->intf->dev, "reset to normal mode failed\n"); 715 + return; 716 + } 717 + 718 + fsleep(1000); 719 + 720 + error = bcm5974_wellspring_mode(dev, true); 721 + if (error) 722 + dev_err(&dev->intf->dev, "mode switch after reset failed\n"); 723 + } 724 + 701 725 static void bcm5974_irq_button(struct urb *urb) 702 726 { 703 727 struct bcm5974 *dev = urb->context; ··· 780 752 if (dev->tp_urb->actual_length == 2) 781 753 goto exit; 782 754 783 - if (report_tp_state(dev, dev->tp_urb->actual_length)) 755 + if (report_tp_state(dev, dev->tp_urb->actual_length)) { 784 756 dprintk(1, "bcm5974: bad trackpad package, length: %d\n", 785 757 dev->tp_urb->actual_length); 758 + 759 + /* 760 + * Receiving a HID packet means we aren't in wellspring mode. 761 + * If we haven't tried a reset in the last second, try now. 762 + */ 763 + if (dev->tp_urb->actual_length == 8 && 764 + time_after(jiffies, dev->last_mode_reset + msecs_to_jiffies(1000))) { 765 + schedule_work(&dev->mode_reset_work); 766 + } 767 + } 786 768 787 769 exit: 788 770 error = usb_submit_urb(dev->tp_urb, GFP_ATOMIC); ··· 944 906 dev->intf = iface; 945 907 dev->input = input_dev; 946 908 dev->cfg = *cfg; 909 + INIT_WORK(&dev->mode_reset_work, bcm5974_mode_reset_work); 947 910 mutex_init(&dev->pm_mutex); 948 911 949 912 /* setup urbs */ ··· 1037 998 { 1038 999 struct bcm5974 *dev = usb_get_intfdata(iface); 1039 1000 1001 + disable_work_sync(&dev->mode_reset_work); 1040 1002 usb_set_intfdata(iface, NULL); 1041 1003 1042 1004 input_unregister_device(dev->input);
+2 -2
drivers/input/rmi4/rmi_f54.c
··· 538 538 int error; 539 539 int i; 540 540 541 + mutex_lock(&f54->data_mutex); 542 + 541 543 report_size = rmi_f54_get_report_size(f54); 542 544 if (report_size == 0) { 543 545 dev_err(&fn->dev, "Bad report size, report type=%d\n", ··· 547 545 error = -EINVAL; 548 546 goto error; /* retry won't help */ 549 547 } 550 - 551 - mutex_lock(&f54->data_mutex); 552 548 553 549 /* 554 550 * Need to check if command has completed.
+7
drivers/input/serio/i8042-acpipnpio.h
··· 1189 1189 }, 1190 1190 { 1191 1191 .matches = { 1192 + DMI_MATCH(DMI_BOARD_NAME, "X6KK45xU_X6SP45xU"), 1193 + }, 1194 + .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | 1195 + SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) 1196 + }, 1197 + { 1198 + .matches = { 1192 1199 DMI_MATCH(DMI_BOARD_NAME, "WUJIE Series-X5SP4NAG"), 1193 1200 }, 1194 1201 .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+2 -2
drivers/interconnect/qcom/sm8450.c
··· 800 800 .channels = 1, 801 801 .buswidth = 4, 802 802 .num_links = 1, 803 - .link_nodes = { MASTER_CDSP_NOC_CFG }, 803 + .link_nodes = { &qhm_nsp_noc_config }, 804 804 }; 805 805 806 806 static struct qcom_icc_node qhs_cpr_cx = { ··· 874 874 .channels = 1, 875 875 .buswidth = 4, 876 876 .num_links = 1, 877 - .link_nodes = { MASTER_CNOC_LPASS_AG_NOC }, 877 + .link_nodes = { &qhm_config_noc }, 878 878 }; 879 879 880 880 static struct qcom_icc_node qhs_mss_cfg = {
+6
drivers/iommu/iommu.c
··· 2717 2717 2718 2718 pr_debug("unmapped: iova 0x%lx size 0x%zx\n", 2719 2719 iova, unmapped_page); 2720 + /* 2721 + * If the driver itself isn't using the gather, make sure 2722 + * it looks non-empty so iotlb_sync will still be called. 2723 + */ 2724 + if (iotlb_gather->start >= iotlb_gather->end) 2725 + iommu_iotlb_gather_add_range(iotlb_gather, iova, size); 2720 2726 2721 2727 iova += unmapped_page; 2722 2728 unmapped += unmapped_page;
+2 -2
drivers/irqchip/irq-riscv-aplic-main.c
··· 150 150 struct device *dev = priv->dev; 151 151 152 152 list_del(&priv->head); 153 - if (dev->pm_domain) 153 + if (dev->pm_domain && dev->of_node) 154 154 dev_pm_genpd_remove_notifier(dev); 155 155 } 156 156 ··· 165 165 166 166 priv->saved_hw_regs.srcs = srcs; 167 167 list_add(&priv->head, &aplics); 168 - if (dev->pm_domain) { 168 + if (dev->pm_domain && dev->of_node) { 169 169 priv->genpd_nb.notifier_call = aplic_pm_notifier; 170 170 ret = dev_pm_genpd_add_notifier(dev, &priv->genpd_nb); 171 171 if (ret)
+4 -1
drivers/misc/fastrpc.c
··· 1401 1401 } 1402 1402 err_map: 1403 1403 fastrpc_buf_free(fl->cctx->remote_heap); 1404 + fl->cctx->remote_heap = NULL; 1404 1405 err_name: 1405 1406 kfree(name); 1406 1407 err: ··· 2390 2389 if (!err) { 2391 2390 src_perms = BIT(QCOM_SCM_VMID_HLOS); 2392 2391 2393 - qcom_scm_assign_mem(res.start, resource_size(&res), &src_perms, 2392 + err = qcom_scm_assign_mem(res.start, resource_size(&res), &src_perms, 2394 2393 data->vmperms, data->vmcount); 2394 + if (err) 2395 + goto err_free_data; 2395 2396 } 2396 2397 2397 2398 }
+4 -2
drivers/misc/lis3lv02d/lis3lv02d.c
··· 1230 1230 else 1231 1231 thread_fn = NULL; 1232 1232 1233 + if (thread_fn) 1234 + irq_flags |= IRQF_ONESHOT; 1235 + 1233 1236 err = request_threaded_irq(lis3->irq, lis302dl_interrupt, 1234 1237 thread_fn, 1235 - IRQF_TRIGGER_RISING | IRQF_ONESHOT | 1236 - irq_flags, 1238 + irq_flags | IRQF_TRIGGER_RISING, 1237 1239 DRIVER_NAME, lis3); 1238 1240 1239 1241 if (err < 0) {
+1
drivers/misc/mei/Kconfig
··· 3 3 config INTEL_MEI 4 4 tristate "Intel Management Engine Interface" 5 5 depends on PCI 6 + depends on X86 || DRM_XE!=n || COMPILE_TEST 6 7 default X86_64 || MATOM 7 8 help 8 9 The Intel Management Engine (Intel ME) provides Manageability,
+4 -10
drivers/misc/mei/hw-me.c
··· 1337 1337 /* check if we need to start the dev */ 1338 1338 if (!mei_host_is_ready(dev)) { 1339 1339 if (mei_hw_is_ready(dev)) { 1340 - /* synchronized by dev mutex */ 1341 - if (waitqueue_active(&dev->wait_hw_ready)) { 1342 - dev_dbg(&dev->dev, "we need to start the dev.\n"); 1343 - dev->recvd_hw_ready = true; 1344 - wake_up(&dev->wait_hw_ready); 1345 - } else if (dev->dev_state != MEI_DEV_UNINITIALIZED && 1346 - dev->dev_state != MEI_DEV_POWERING_DOWN && 1347 - dev->dev_state != MEI_DEV_POWER_DOWN) { 1340 + if (dev->dev_state == MEI_DEV_ENABLED) { 1348 1341 dev_dbg(&dev->dev, "Force link reset.\n"); 1349 1342 schedule_work(&dev->reset_work); 1350 1343 } else { 1351 - dev_dbg(&dev->dev, "Ignore this interrupt in state = %d\n", 1352 - dev->dev_state); 1344 + dev_dbg(&dev->dev, "we need to start the dev.\n"); 1345 + dev->recvd_hw_ready = true; 1346 + wake_up(&dev->wait_hw_ready); 1353 1347 } 1354 1348 } else { 1355 1349 dev_dbg(&dev->dev, "Spurious Interrupt\n");
+1 -2
drivers/net/ethernet/airoha/airoha_eth.c
··· 698 698 if (q->skb) { 699 699 dev_kfree_skb(q->skb); 700 700 q->skb = NULL; 701 - } else { 702 - page_pool_put_full_page(q->page_pool, page, true); 703 701 } 702 + page_pool_put_full_page(q->page_pool, page, true); 704 703 } 705 704 airoha_qdma_fill_rx_queue(q); 706 705
+1
drivers/net/ethernet/altera/altera_tse_main.c
··· 570 570 DMA_TO_DEVICE); 571 571 if (dma_mapping_error(priv->device, dma_addr)) { 572 572 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); 573 + dev_kfree_skb_any(skb); 573 574 ret = NETDEV_TX_OK; 574 575 goto out; 575 576 }
+1 -1
drivers/net/ethernet/freescale/Kconfig
··· 28 28 depends on PTP_1588_CLOCK_OPTIONAL 29 29 select CRC32 30 30 select PHYLIB 31 - select FIXED_PHY if M5272 31 + select FIXED_PHY 32 32 select PAGE_POOL 33 33 imply PAGE_POOL_STATS 34 34 imply NET_SELFTESTS
+7 -1
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
··· 496 496 */ 497 497 ret_val = e1000_read_eeprom(hw, first_word, 1, 498 498 &eeprom_buff[0]); 499 + if (ret_val) 500 + goto out; 501 + 499 502 ptr++; 500 503 } 501 - if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { 504 + if ((eeprom->offset + eeprom->len) & 1) { 502 505 /* need read/modify/write of last changed EEPROM word 503 506 * only the first byte of the word is being modified 504 507 */ 505 508 ret_val = e1000_read_eeprom(hw, last_word, 1, 506 509 &eeprom_buff[last_word - first_word]); 510 + if (ret_val) 511 + goto out; 507 512 } 508 513 509 514 /* Device's eeprom is always little-endian, word addressable */ ··· 527 522 if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) 528 523 e1000_update_eeprom_checksum(hw); 529 524 525 + out: 530 526 kfree(eeprom_buff); 531 527 return ret_val; 532 528 }
+19 -11
drivers/net/ethernet/intel/ice/ice_ptp.c
··· 1296 1296 if (pf->hw.reset_ongoing) 1297 1297 return; 1298 1298 1299 - if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) { 1299 + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && 1300 + test_bit(ICE_FLAG_DPLL, pf->flags)) { 1300 1301 int pin, err; 1301 - 1302 - if (!test_bit(ICE_FLAG_DPLL, pf->flags)) 1303 - return; 1304 1302 1305 1303 mutex_lock(&pf->dplls.lock); 1306 1304 for (pin = 0; pin < ICE_SYNCE_CLK_NUM; pin++) { ··· 1312 1314 port_num, 1313 1315 &active, 1314 1316 clk_pin); 1315 - if (WARN_ON_ONCE(err)) { 1316 - mutex_unlock(&pf->dplls.lock); 1317 - return; 1317 + if (err) { 1318 + dev_err_once(ice_pf_to_dev(pf), 1319 + "Failed to read SyncE bypass mux for pin %d, err %d\n", 1320 + pin, err); 1321 + break; 1318 1322 } 1319 1323 1320 1324 err = ice_tspll_cfg_synce_ethdiv_e825c(hw, clk_pin); 1321 - if (active && WARN_ON_ONCE(err)) { 1322 - mutex_unlock(&pf->dplls.lock); 1323 - return; 1325 + if (active && err) { 1326 + dev_err_once(ice_pf_to_dev(pf), 1327 + "Failed to configure SyncE ETH divider for pin %d, err %d\n", 1328 + pin, err); 1329 + break; 1324 1330 } 1325 1331 } 1326 1332 mutex_unlock(&pf->dplls.lock); ··· 3082 3080 struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); 3083 3081 struct ice_ptp *ptp = &pf->ptp; 3084 3082 3085 - if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN) 3083 + if (!ctrl_ptp) { 3084 + dev_info(ice_pf_to_dev(pf), 3085 + "PTP unavailable: no controlling PF\n"); 3086 + return -EOPNOTSUPP; 3087 + } 3088 + 3089 + if (pf->hw.mac_type == ICE_MAC_UNKNOWN) 3086 3090 return -ENODEV; 3087 3091 3088 3092 INIT_LIST_HEAD(&ptp->port.list_node);
+11 -9
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
··· 287 287 return err; 288 288 } 289 289 290 - /* API for virtchnl "transaction" support ("xn" for short). 291 - * 292 - * We are reusing the completion lock to serialize the accesses to the 293 - * transaction state for simplicity, but it could be its own separate synchro 294 - * as well. For now, this API is only used from within a workqueue context; 295 - * raw_spin_lock() is enough. 296 - */ 290 + /* API for virtchnl "transaction" support ("xn" for short). */ 291 + 297 292 /** 298 293 * idpf_vc_xn_lock - Request exclusive access to vc transaction 299 294 * @xn: struct idpf_vc_xn* to access 300 295 */ 301 296 #define idpf_vc_xn_lock(xn) \ 302 - raw_spin_lock(&(xn)->completed.wait.lock) 297 + spin_lock(&(xn)->lock) 303 298 304 299 /** 305 300 * idpf_vc_xn_unlock - Release exclusive access to vc transaction 306 301 * @xn: struct idpf_vc_xn* to access 307 302 */ 308 303 #define idpf_vc_xn_unlock(xn) \ 309 - raw_spin_unlock(&(xn)->completed.wait.lock) 304 + spin_unlock(&(xn)->lock) 310 305 311 306 /** 312 307 * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and ··· 333 338 xn->state = IDPF_VC_XN_IDLE; 334 339 xn->idx = i; 335 340 idpf_vc_xn_release_bufs(xn); 341 + spin_lock_init(&xn->lock); 336 342 init_completion(&xn->completed); 337 343 } 338 344 ··· 402 406 struct idpf_vc_xn *xn) 403 407 { 404 408 idpf_vc_xn_release_bufs(xn); 409 + spin_lock_bh(&vcxn_mngr->xn_bm_lock); 405 410 set_bit(xn->idx, vcxn_mngr->free_xn_bm); 411 + spin_unlock_bh(&vcxn_mngr->xn_bm_lock); 406 412 } 407 413 408 414 /** ··· 615 617 err = -ENXIO; 616 618 goto out_unlock; 617 619 case IDPF_VC_XN_ASYNC: 620 + /* Set reply_sz from the actual payload so that async_handler 621 + * can evaluate the response. 622 + */ 623 + xn->reply_sz = ctlq_msg->data_len; 618 624 err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg); 619 625 idpf_vc_xn_unlock(xn); 620 626 return err;
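The locking change gives each transaction a dedicated spinlock instead of borrowing the completion's internal wait-queue lock. A tiny pthread sketch of the same ownership rule (illustrative names, not the driver's types):

    #include <pthread.h>
    #include <stdio.h>

    /* the transaction owns a lock for its own state, independent
     * of whatever synchronisation the completion primitive uses
     * internally */
    struct xn {
            pthread_mutex_t lock;   /* protects state */
            int state;
    };

    static void xn_set_state(struct xn *xn, int state)
    {
            pthread_mutex_lock(&xn->lock);
            xn->state = state;
            pthread_mutex_unlock(&xn->lock);
    }

    int main(void)
    {
            struct xn xn = {
                    .lock = PTHREAD_MUTEX_INITIALIZER,
                    .state = 0,
            };

            xn_set_state(&xn, 1);
            printf("state = %d\n", xn.state);
            return 0;
    }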
+3 -2
drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
··· 42 42 * struct idpf_vc_xn - Data structure representing virtchnl transactions 43 43 * @completed: virtchnl event loop uses that to signal when a reply is 44 44 * available, uses kernel completion API 45 - * @state: virtchnl event loop stores the data below, protected by the 46 - * completion's lock. 45 + * @lock: protects the transaction state fields below 46 + * @state: virtchnl event loop stores the data below, protected by @lock 47 47 * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be 48 48 * truncated on its way to the receiver thread according to 49 49 * reply_buf.iov_len. ··· 58 58 */ 59 59 struct idpf_vc_xn { 60 60 struct completion completed; 61 + spinlock_t lock; 61 62 enum idpf_vc_xn_state state; 62 63 size_t reply_sz; 63 64 struct kvec reply;
+1 -2
drivers/net/ethernet/intel/igb/igb_main.c
··· 2203 2203 2204 2204 for (i = 0; i < adapter->num_q_vectors; i++) { 2205 2205 if (adapter->q_vector[i]) { 2206 - napi_synchronize(&adapter->q_vector[i]->napi); 2207 - igb_set_queue_napi(adapter, i, NULL); 2208 2206 napi_disable(&adapter->q_vector[i]->napi); 2207 + igb_set_queue_napi(adapter, i, NULL); 2209 2208 } 2210 2209 } 2211 2210
+1 -1
drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
··· 474 474 adapter->flags2 &= ~(IXGBE_FLAG2_API_MISMATCH | 475 475 IXGBE_FLAG2_FW_ROLLBACK); 476 476 477 - return 0; 477 + return ixgbe_refresh_fw_version(adapter); 478 478 } 479 479 480 480 static const struct devlink_ops ixgbe_devlink_ops = {
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 974 974 bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, 975 975 u16 subdevice_id); 976 976 void ixgbe_set_fw_version_e610(struct ixgbe_adapter *adapter); 977 - void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter); 977 + int ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter); 978 978 #ifdef CONFIG_PCI_IOV 979 979 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); 980 980 #endif
+7 -6
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
··· 1155 1155 return ret_val; 1156 1156 } 1157 1157 1158 - void ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter) 1158 + int ixgbe_refresh_fw_version(struct ixgbe_adapter *adapter) 1159 1159 { 1160 1160 struct ixgbe_hw *hw = &adapter->hw; 1161 + int err; 1161 1162 1162 - ixgbe_get_flash_data(hw); 1163 + err = ixgbe_get_flash_data(hw); 1164 + if (err) 1165 + return err; 1166 + 1163 1167 ixgbe_set_fw_version_e610(adapter); 1168 + return 0; 1164 1169 } 1165 1170 1166 1171 static void ixgbe_get_drvinfo(struct net_device *netdev, 1167 1172 struct ethtool_drvinfo *drvinfo) 1168 1173 { 1169 1174 struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev); 1170 - 1171 - /* need to refresh info for e610 in case fw reloads in runtime */ 1172 - if (adapter->hw.mac.type == ixgbe_mac_e610) 1173 - ixgbe_refresh_fw_version(adapter); 1174 1175 1175 1176 strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); 1176 1177
+10
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 6289 6289 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 6290 6290 msleep(2000); 6291 6291 ixgbe_up(adapter); 6292 + 6293 + /* E610 has no FW event to notify all PFs of an EMPR reset, so 6294 + * refresh the FW version here to pick up any new FW version after 6295 + * a hardware reset (e.g. EMPR triggered by another PF's devlink 6296 + * reload). ixgbe_refresh_fw_version() updates both hw->flash and 6297 + * adapter->eeprom_id so ethtool -i reports the correct string. 6298 + */ 6299 + if (adapter->hw.mac.type == ixgbe_mac_e610) 6300 + (void)ixgbe_refresh_fw_version(adapter); 6301 + 6292 6302 clear_bit(__IXGBE_RESETTING, &adapter->state); 6293 6303 } 6294 6304
+7
drivers/net/ethernet/intel/ixgbevf/vf.c
··· 709 709 return err; 710 710 } 711 711 712 + static int ixgbevf_hv_negotiate_features_vf(struct ixgbe_hw *hw, 713 + u32 *pf_features) 714 + { 715 + return -EOPNOTSUPP; 716 + } 717 + 712 718 /** 713 719 * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address 714 720 * @hw: pointer to the HW structure ··· 1148 1142 .setup_link = ixgbevf_setup_mac_link_vf, 1149 1143 .check_link = ixgbevf_hv_check_mac_link_vf, 1150 1144 .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf, 1145 + .negotiate_features = ixgbevf_hv_negotiate_features_vf, 1151 1146 .set_rar = ixgbevf_hv_set_rar_vf, 1152 1147 .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf, 1153 1148 .update_xcast_mode = ixgbevf_hv_update_xcast_mode,
+1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 2285 2285 { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */ 2286 2286 { PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */ 2287 2287 { PCI_VDEVICE(MELLANOX, 0x1027) }, /* ConnectX-10 */ 2288 + { PCI_VDEVICE(MELLANOX, 0x2101) }, /* ConnectX-10 NVLink-C2C */ 2288 2289 { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ 2289 2290 { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ 2290 2291 { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
+24 -4
drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
··· 91 91 pp_params.dma_dir = DMA_BIDIRECTIONAL; 92 92 93 93 rx->page_pool = page_pool_create(&pp_params); 94 + if (unlikely(IS_ERR(rx->page_pool))) 95 + return PTR_ERR(rx->page_pool); 94 96 95 97 for (int i = 0; i < lan966x->num_phys_ports; ++i) { 96 98 struct lan966x_port *port; ··· 119 117 return PTR_ERR(rx->page_pool); 120 118 121 119 err = fdma_alloc_coherent(lan966x->dev, fdma); 122 - if (err) 120 + if (err) { 121 + page_pool_destroy(rx->page_pool); 123 122 return err; 123 + } 124 124 125 125 fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size), 126 126 FDMA_DCB_STATUS_INTR); ··· 812 808 813 809 static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) 814 810 { 811 + struct page *(*old_pages)[FDMA_RX_DCB_MAX_DBS]; 815 812 struct page_pool *page_pool; 816 813 struct fdma fdma_rx_old; 817 - int err; 814 + int err, i, j; 815 + 816 + old_pages = kmemdup(lan966x->rx.page, sizeof(lan966x->rx.page), 817 + GFP_KERNEL); 818 + if (!old_pages) 819 + return -ENOMEM; 818 820 819 821 /* Store these for later to free them */ 820 822 memcpy(&fdma_rx_old, &lan966x->rx.fdma, sizeof(struct fdma)); ··· 831 821 lan966x_fdma_stop_netdev(lan966x); 832 822 833 823 lan966x_fdma_rx_disable(&lan966x->rx); 834 - lan966x_fdma_rx_free_pages(&lan966x->rx); 835 824 lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1; 836 825 lan966x->rx.max_mtu = new_mtu; 837 826 err = lan966x_fdma_rx_alloc(&lan966x->rx); 838 827 if (err) 839 828 goto restore; 840 829 lan966x_fdma_rx_start(&lan966x->rx); 830 + 831 + for (i = 0; i < fdma_rx_old.n_dcbs; ++i) 832 + for (j = 0; j < fdma_rx_old.n_dbs; ++j) 833 + page_pool_put_full_page(page_pool, 834 + old_pages[i][j], false); 841 835 842 836 fdma_free_coherent(lan966x->dev, &fdma_rx_old); 843 837 ··· 850 836 lan966x_fdma_wakeup_netdev(lan966x); 851 837 napi_enable(&lan966x->napi); 852 838 853 - return err; 839 + kfree(old_pages); 840 + return 0; 854 841 restore: 855 842 lan966x->rx.page_pool = page_pool; 856 843 memcpy(&lan966x->rx.fdma, &fdma_rx_old, sizeof(struct fdma)); 857 844 lan966x_fdma_rx_start(&lan966x->rx); 858 845 846 + lan966x_fdma_wakeup_netdev(lan966x); 847 + napi_enable(&lan966x->napi); 848 + 849 + kfree(old_pages); 859 850 return err; 860 851 } 861 852 ··· 974 955 err = lan966x_fdma_tx_alloc(&lan966x->tx); 975 956 if (err) { 976 957 fdma_free_coherent(lan966x->dev, &lan966x->rx.fdma); 958 + page_pool_destroy(lan966x->rx.page_pool); 977 959 return err; 978 960 } 979 961
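The reload path now snapshots the RX page table (kmemdup) before the ring is rebuilt and returns the old pages to the pool only once the new ring is live, freeing the snapshot on both success and error paths. A plain userspace sketch of that snapshot-then-release shape (sizes and names are made up):

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    #define N_PAGES 4

    int main(void)
    {
            void *pages[N_PAGES];
            void **old_pages;
            int i;

            for (i = 0; i < N_PAGES; i++)
                    pages[i] = malloc(64);

            /* snapshot the table (kmemdup-style) before the ring
             * is rebuilt, so the old entries stay reachable */
            old_pages = malloc(sizeof(pages));
            if (!old_pages)
                    return 1;
            memcpy(old_pages, pages, sizeof(pages));

            /* ... rebuild the ring: pages[] now holds new entries ... */
            for (i = 0; i < N_PAGES; i++)
                    pages[i] = malloc(64);

            /* only now release the old entries, via the snapshot */
            for (i = 0; i < N_PAGES; i++)
                    free(old_pages[i]);
            free(old_pages);

            for (i = 0; i < N_PAGES; i++)
                    free(pages[i]);
            puts("old ring released after the new one is live");
            return 0;
    }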
+1 -1
drivers/net/ethernet/qualcomm/qca_uart.c
··· 100 100 if (!qca->rx_skb) { 101 101 netdev_dbg(netdev, "recv: out of RX resources\n"); 102 102 n_stats->rx_errors++; 103 - return i; 103 + return i + 1; 104 104 } 105 105 } 106 106 }
+6 -5
drivers/net/ethernet/stmicro/stmmac/chain_mode.c
··· 20 20 unsigned int nopaged_len = skb_headlen(skb); 21 21 struct stmmac_priv *priv = tx_q->priv_data; 22 22 unsigned int entry = tx_q->cur_tx; 23 - unsigned int bmax, des2; 23 + unsigned int bmax, buf_len, des2; 24 24 unsigned int i = 1, len; 25 25 struct dma_desc *desc; 26 26 ··· 31 31 else 32 32 bmax = BUF_SIZE_2KiB; 33 33 34 - len = nopaged_len - bmax; 34 + buf_len = min_t(unsigned int, nopaged_len, bmax); 35 + len = nopaged_len - buf_len; 35 36 36 37 des2 = dma_map_single(priv->device, skb->data, 37 - bmax, DMA_TO_DEVICE); 38 + buf_len, DMA_TO_DEVICE); 38 39 desc->des2 = cpu_to_le32(des2); 39 40 if (dma_mapping_error(priv->device, des2)) 40 41 return -1; 41 42 tx_q->tx_skbuff_dma[entry].buf = des2; 42 - tx_q->tx_skbuff_dma[entry].len = bmax; 43 + tx_q->tx_skbuff_dma[entry].len = buf_len; 43 44 /* do not close the descriptor and do not set own bit */ 44 - stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE, 45 + stmmac_prepare_tx_desc(priv, desc, 1, buf_len, csum, STMMAC_CHAIN_MODE, 45 46 0, false, skb->len); 46 47 47 48 while (len != 0) {
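The chain-mode fix clamps the mapped length to the per-descriptor cap before computing the remainder, so the unsigned subtraction can no longer wrap for frames shorter than the cap. A compilable demonstration of the underflow being avoided (the values are illustrative):

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            unsigned int bmax = 2048;       /* per-descriptor cap */
            unsigned int nopaged_len = 60;  /* short frame head */

            /* broken: 60 - 2048 wraps to a huge unsigned value */
            unsigned int bad = nopaged_len - bmax;

            /* fixed: map at most min(len, cap), then take the rest */
            unsigned int buf_len = min_u(nopaged_len, bmax);
            unsigned int len = nopaged_len - buf_len;

            printf("bad remainder:   %u\n", bad);  /* 4294965308 */
            printf("fixed remainder: %u\n", len);  /* 0 */
            return 0;
    }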
+8
drivers/net/ethernet/stmicro/stmmac/dwmac-motorcomm.c
··· 6 6 */ 7 7 8 8 #include <linux/bits.h> 9 + #include <linux/delay.h> 9 10 #include <linux/dev_printk.h> 10 11 #include <linux/io.h> 11 12 #include <linux/iopoll.h> ··· 329 328 dev_warn(&pdev->dev, "failed to disable L1 state: %d\n", ret); 330 329 331 330 motorcomm_reset(priv); 331 + 332 + /* 333 + * After system reset, the eFuse controller needs time to load 334 + * its internal data. Without this delay, eFuse reads return 335 + * all zeros, causing MAC address detection to fail. 336 + */ 337 + usleep_range(2000, 5000); 332 338 333 339 ret = motorcomm_efuse_read_mac(&pdev->dev, priv, res.mac); 334 340 if (ret == -ENOENT) {
+17 -2
drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
··· 9 9 #include "stmmac_platform.h" 10 10 11 11 static const char *const mgbe_clks[] = { 12 - "rx-pcs", "tx", "tx-pcs", "mac-divider", "mac", "mgbe", "ptp-ref", "mac" 12 + "rx-pcs", "tx", "tx-pcs", "mac-divider", "mac", "mgbe", "ptp_ref", "mac" 13 13 }; 14 14 15 15 struct tegra_mgbe { ··· 215 215 { 216 216 struct plat_stmmacenet_data *plat; 217 217 struct stmmac_resources res; 218 + bool use_legacy_ptp = false; 218 219 struct tegra_mgbe *mgbe; 219 220 int irq, err, i; 220 221 u32 value; ··· 258 257 if (!mgbe->clks) 259 258 return -ENOMEM; 260 259 261 - for (i = 0; i < ARRAY_SIZE(mgbe_clks); i++) 260 + /* Older device-trees use 'ptp-ref' rather than 'ptp_ref'. 261 + * Fall back when the legacy name is present. 262 + */ 263 + if (of_property_match_string(pdev->dev.of_node, "clock-names", 264 + "ptp-ref") >= 0) 265 + use_legacy_ptp = true; 266 + 267 + for (i = 0; i < ARRAY_SIZE(mgbe_clks); i++) { 262 268 mgbe->clks[i].id = mgbe_clks[i]; 269 + 270 + if (use_legacy_ptp && !strcmp(mgbe_clks[i], "ptp_ref")) { 271 + dev_warn(mgbe->dev, 272 + "Device-tree update needed for PTP clock!\n"); 273 + mgbe->clks[i].id = "ptp-ref"; 274 + } 275 + } 263 276 264 277 err = devm_clk_bulk_get(mgbe->dev, ARRAY_SIZE(mgbe_clks), mgbe->clks); 265 278 if (err < 0)
+4 -4
drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
··· 424 424 char i2c_name[32]; 425 425 char sfp_name[32]; 426 426 char phylink_name[32]; 427 - struct property_entry gpio_props[1]; 428 - struct property_entry i2c_props[3]; 429 - struct property_entry sfp_props[8]; 430 - struct property_entry phylink_props[2]; 427 + struct property_entry gpio_props[2]; 428 + struct property_entry i2c_props[4]; 429 + struct property_entry sfp_props[9]; 430 + struct property_entry phylink_props[3]; 431 431 struct software_node_ref_args i2c_ref[1]; 432 432 struct software_node_ref_args gpio0_ref[1]; 433 433 struct software_node_ref_args gpio1_ref[1];
+5 -4
drivers/net/ipa/reg/gsi_reg-v5.0.c
··· 30 30 31 31 static const u32 reg_ch_c_cntxt_1_fmask[] = { 32 32 [CH_R_LENGTH] = GENMASK(23, 0), 33 - [ERINDEX] = GENMASK(31, 24), 33 + [CH_ERINDEX] = GENMASK(31, 24), 34 34 }; 35 35 36 36 REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1, ··· 156 156 157 157 static const u32 reg_generic_cmd_fmask[] = { 158 158 [GENERIC_OPCODE] = GENMASK(4, 0), 159 - [GENERIC_CHID] = GENMASK(9, 5), 160 - [GENERIC_EE] = GENMASK(13, 10), 161 - /* Bits 14-31 reserved */ 159 + [GENERIC_CHID] = GENMASK(12, 5), 160 + [GENERIC_EE] = GENMASK(16, 13), 161 + /* Bits 17-23 reserved */ 162 + [GENERIC_PARAMS] = GENMASK(31, 24), 162 163 }; 163 164 164 165 REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00025018 + 0x12000 * GSI_EE_AP);
+1 -2
drivers/net/mdio/mdio-realtek-rtl9300.c
··· 466 466 { 467 467 struct device *dev = &pdev->dev; 468 468 struct rtl9300_mdio_priv *priv; 469 - struct fwnode_handle *child; 470 469 int err; 471 470 472 471 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ··· 486 487 if (err) 487 488 return err; 488 489 489 - device_for_each_child_node(dev, child) { 490 + device_for_each_child_node_scoped(dev, child) { 490 491 err = rtl9300_mdiobus_probe_one(dev, priv, child); 491 492 if (err) 492 493 return err;
+16
drivers/net/phy/sfp.c
··· 543 543 SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex, 544 544 sfp_fixup_ignore_tx_fault_and_los), 545 545 546 + // Hisense LXT-010S-H is a GPON ONT SFP (sold as LEOX LXT-010S-H) that 547 + // can operate at 2500base-X, but reports 1000BASE-LX / 1300MBd in its 548 + // EEPROM 549 + SFP_QUIRK("Hisense-Leox", "LXT-010S-H", sfp_quirk_2500basex, 550 + sfp_fixup_ignore_tx_fault), 551 + 552 + // Hisense ZNID-GPON-2311NA can operate at 2500base-X, but reports 553 + // 1000BASE-LX / 1300MBd in its EEPROM 554 + SFP_QUIRK("Hisense", "ZNID-GPON-2311NA", sfp_quirk_2500basex, 555 + sfp_fixup_ignore_tx_fault), 556 + 557 + // HSGQ HSGQ-XPON-Stick can operate at 2500base-X, but reports 558 + // 1000BASE-LX / 1300MBd in its EEPROM 559 + SFP_QUIRK("HSGQ", "HSGQ-XPON-Stick", sfp_quirk_2500basex, 560 + sfp_fixup_ignore_tx_fault), 561 + 546 562 // Lantech 8330-262D-E and 8330-265D can operate at 2500base-X, but 547 563 // incorrectly report 2500MBd NRZ in their EEPROM. 548 564 // Some 8330-265D modules have inverted LOS, while all of them report
+8 -5
drivers/net/wan/lapbether.c
··· 446 446 static int lapbeth_device_event(struct notifier_block *this, 447 447 unsigned long event, void *ptr) 448 448 { 449 - struct lapbethdev *lapbeth; 450 449 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 450 + struct lapbethdev *lapbeth; 451 451 452 452 if (dev_net(dev) != &init_net) 453 453 return NOTIFY_DONE; 454 454 455 - if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev)) 455 + lapbeth = lapbeth_get_x25_dev(dev); 456 + if (!dev_is_ethdev(dev) && !lapbeth) 456 457 return NOTIFY_DONE; 457 458 458 459 switch (event) { 459 460 case NETDEV_UP: 460 461 /* New ethernet device -> new LAPB interface */ 461 - if (!lapbeth_get_x25_dev(dev)) 462 + if (!lapbeth) 462 463 lapbeth_new_device(dev); 463 464 break; 464 465 case NETDEV_GOING_DOWN: 465 466 /* ethernet device closes -> close LAPB interface */ 466 - lapbeth = lapbeth_get_x25_dev(dev); 467 467 if (lapbeth) 468 468 dev_close(lapbeth->axdev); 469 469 break; 470 470 case NETDEV_UNREGISTER: 471 471 /* ethernet device disappears -> remove LAPB interface */ 472 - lapbeth = lapbeth_get_x25_dev(dev); 473 472 if (lapbeth) 474 473 lapbeth_free_device(lapbeth); 475 474 break; 475 + case NETDEV_PRE_TYPE_CHANGE: 476 + /* Our underlying device type must not change. */ 477 + if (lapbeth) 478 + return NOTIFY_BAD; 476 479 } 477 480 478 481 return NOTIFY_DONE;
+5
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
··· 153 153 bphy_err(drvr, "invalid interface index: %u\n", ifevent->ifidx); 154 154 return; 155 155 } 156 + if (ifevent->bsscfgidx >= BRCMF_MAX_IFS) { 157 + bphy_err(drvr, "invalid bsscfg index: %u\n", 158 + ifevent->bsscfgidx); 159 + return; 160 + } 156 161 157 162 ifp = drvr->iflist[ifevent->bsscfgidx]; 158 163
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
··· 483 483 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr 484 484 & boundary)) { 485 485 *alignbits = dma_align_sizetobits(size); 486 - dma_free_coherent(di->dmadev, size, va, *descpa); 486 + dma_free_coherent(di->dmadev, *alloced, va, *descpa); 487 487 va = dma_alloc_consistent(di, size, *alignbits, 488 488 alloced, descpa); 489 489 }
+1 -1
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
··· 826 826 if (retval) 827 827 goto exit_free_device; 828 828 829 - rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev, 829 + rt2x00dev->anchor = devm_kmalloc(&usb_intf->dev, 830 830 sizeof(struct usb_anchor), 831 831 GFP_KERNEL); 832 832 if (!rt2x00dev->anchor) {
+8 -3
drivers/nfc/pn533/uart.c
··· 211 211 212 212 timer_delete(&dev->cmd_timeout); 213 213 for (i = 0; i < count; i++) { 214 + if (!dev->recv_skb) { 215 + dev->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, 216 + GFP_KERNEL); 217 + if (!dev->recv_skb) 218 + return i; 219 + } 220 + 214 221 if (unlikely(!skb_tailroom(dev->recv_skb))) 215 222 skb_trim(dev->recv_skb, 0); 216 223 ··· 226 219 continue; 227 220 228 221 pn533_recv_frame(dev->priv, dev->recv_skb, 0); 229 - dev->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, GFP_KERNEL); 230 - if (!dev->recv_skb) 231 - return 0; 222 + dev->recv_skb = NULL; 232 223 } 233 224 234 225 return i;
+7 -3
drivers/nfc/s3fwrn5/uart.c
··· 57 57 size_t i; 58 58 59 59 for (i = 0; i < count; i++) { 60 + if (!phy->recv_skb) { 61 + phy->recv_skb = alloc_skb(NCI_SKB_BUFF_LEN, GFP_KERNEL); 62 + if (!phy->recv_skb) 63 + return i; 64 + } 65 + 60 66 skb_put_u8(phy->recv_skb, *data++); 61 67 62 68 if (phy->recv_skb->len < S3FWRN82_NCI_HEADER) ··· 74 68 75 69 s3fwrn5_recv_frame(phy->common.ndev, phy->recv_skb, 76 70 phy->common.mode); 77 - phy->recv_skb = alloc_skb(NCI_SKB_BUFF_LEN, GFP_KERNEL); 78 - if (!phy->recv_skb) 79 - return 0; 71 + phy->recv_skb = NULL; 80 72 } 81 73 82 74 return i;
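Both NFC UART hunks (pn533 above and s3fwrn5 here) converge on the same pattern: allocate the assembly buffer lazily at the top of the receive loop and, if allocation fails, report how many input bytes were actually consumed rather than zero. A userspace sketch of that contract (buffer size and names are hypothetical):

    #include <stdlib.h>
    #include <stdio.h>

    struct phy {
            char *recv_buf;
            size_t len;
    };

    static size_t receive(struct phy *phy, const char *data, size_t count)
    {
            size_t i;

            for (i = 0; i < count; i++) {
                    if (!phy->recv_buf) {
                            phy->recv_buf = malloc(256);
                            if (!phy->recv_buf)
                                    return i;   /* bytes consumed so far */
                            phy->len = 0;
                    }

                    phy->recv_buf[phy->len++] = data[i];

                    if (phy->len == 256) {      /* hand off a full frame */
                            free(phy->recv_buf);
                            phy->recv_buf = NULL; /* realloc on next byte */
                    }
            }
            return i;
    }

    int main(void)
    {
            struct phy phy = { 0 };

            printf("consumed %zu bytes\n", receive(&phy, "hello", 5));
            free(phy.recv_buf);
            return 0;
    }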
+1
drivers/nvmem/imx-ocotp-ele.c
··· 131 131 static void imx_ocotp_fixup_dt_cell_info(struct nvmem_device *nvmem, 132 132 struct nvmem_cell_info *cell) 133 133 { 134 + cell->raw_len = round_up(cell->bytes, 4); 134 135 cell->read_post_process = imx_ocotp_cell_pp; 135 136 } 136 137
+1
drivers/nvmem/imx-ocotp.c
··· 589 589 static void imx_ocotp_fixup_dt_cell_info(struct nvmem_device *nvmem, 590 590 struct nvmem_cell_info *cell) 591 591 { 592 + cell->raw_len = round_up(cell->bytes, 4); 592 593 cell->read_post_process = imx_ocotp_cell_pp; 593 594 } 594 595
+4 -4
drivers/nvmem/zynqmp_nvmem.c
··· 66 66 dma_addr_t dma_buf; 67 67 size_t words = bytes / WORD_INBYTES; 68 68 int ret; 69 - int value; 69 + unsigned int value; 70 70 char *data; 71 71 72 72 if (bytes % WORD_INBYTES != 0) { ··· 80 80 } 81 81 82 82 if (pufflag == 1 && flag == EFUSE_WRITE) { 83 - memcpy(&value, val, bytes); 83 + memcpy(&value, val, sizeof(value)); 84 84 if ((offset == EFUSE_PUF_START_OFFSET || 85 85 offset == EFUSE_PUF_MID_OFFSET) && 86 86 value & P_USER_0_64_UPPER_MASK) { ··· 100 100 if (!efuse) 101 101 return -ENOMEM; 102 102 103 - data = dma_alloc_coherent(dev, sizeof(bytes), 103 + data = dma_alloc_coherent(dev, bytes, 104 104 &dma_buf, GFP_KERNEL); 105 105 if (!data) { 106 106 ret = -ENOMEM; ··· 134 134 if (flag == EFUSE_READ) 135 135 memcpy(val, data, bytes); 136 136 efuse_access_err: 137 - dma_free_coherent(dev, sizeof(bytes), 137 + dma_free_coherent(dev, bytes, 138 138 data, dma_buf); 139 139 efuse_data_fail: 140 140 dma_free_coherent(dev, sizeof(struct xilinx_efuse),
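The zynqmp change also fixes a classic sizeof misuse: the DMA buffer was sized with sizeof(bytes), the size of the variable itself (8 on 64-bit), instead of the length it holds. A two-line userspace illustration:

    #include <stdio.h>

    int main(void)
    {
            size_t bytes = 256;     /* requested transfer size */

            printf("sizeof(bytes) = %zu\n", sizeof(bytes)); /* 8: wrong size to allocate */
            printf("bytes         = %zu\n", bytes);         /* 256: the intended size */
            return 0;
    }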
+9 -3
drivers/pci/controller/pci-hyperv.c
··· 2485 2485 if (!hv_dev) 2486 2486 continue; 2487 2487 2488 + /* 2489 + * If the Hyper-V host doesn't provide a NUMA node for the 2490 + * device, default to node 0. With NUMA_NO_NODE the kernel 2491 + * may spread work across NUMA nodes, which degrades 2492 + * performance on Hyper-V. 2493 + */ 2494 + set_dev_node(&dev->dev, 0); 2495 + 2488 2496 if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY && 2489 2497 hv_dev->desc.virtual_numa_node < num_possible_nodes()) 2490 2498 /* ··· 3786 3778 hbus->bridge->domain_nr); 3787 3779 if (!hbus->wq) { 3788 3780 ret = -ENOMEM; 3789 - goto free_dom; 3781 + goto free_bus; 3790 3782 } 3791 3783 3792 3784 hdev->channel->next_request_id_callback = vmbus_next_request_id; ··· 3882 3874 vmbus_close(hdev->channel); 3883 3875 destroy_wq: 3884 3876 destroy_workqueue(hbus->wq); 3885 - free_dom: 3886 - pci_bus_release_emul_domain_nr(hbus->bridge->domain_nr); 3887 3877 free_bus: 3888 3878 kfree(hbus); 3889 3879 return ret;
+9
drivers/platform/x86/amd/pmc/pmc-quirks.c
··· 203 203 DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"), 204 204 } 205 205 }, 206 + /* https://bugzilla.kernel.org/show_bug.cgi?id=221273 */ 207 + { 208 + .ident = "Thinkpad L14 Gen3", 209 + .driver_data = &quirk_s2idle_bug, 210 + .matches = { 211 + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), 212 + DMI_MATCH(DMI_PRODUCT_NAME, "21C6"), 213 + } 214 + }, 206 215 /* https://gitlab.freedesktop.org/drm/amd/-/issues/4434 */ 207 216 { 208 217 .ident = "Lenovo Yoga 6 13ALC6",
+86
drivers/platform/x86/asus-armoury.h
··· 594 594 }, 595 595 { 596 596 .matches = { 597 + DMI_MATCH(DMI_BOARD_NAME, "FA607NU"), 598 + }, 599 + .driver_data = &(struct power_data) { 600 + .ac_data = &(struct power_limits) { 601 + .ppt_pl1_spl_min = 15, 602 + .ppt_pl1_spl_max = 80, 603 + .ppt_pl2_sppt_min = 35, 604 + .ppt_pl2_sppt_max = 80, 605 + .ppt_pl3_fppt_min = 35, 606 + .ppt_pl3_fppt_max = 80, 607 + .nv_dynamic_boost_min = 5, 608 + .nv_dynamic_boost_max = 25, 609 + .nv_temp_target_min = 75, 610 + .nv_temp_target_max = 87, 611 + }, 612 + .dc_data = &(struct power_limits) { 613 + .ppt_pl1_spl_min = 25, 614 + .ppt_pl1_spl_def = 45, 615 + .ppt_pl1_spl_max = 65, 616 + .ppt_pl2_sppt_min = 25, 617 + .ppt_pl2_sppt_def = 54, 618 + .ppt_pl2_sppt_max = 65, 619 + .ppt_pl3_fppt_min = 25, 620 + .ppt_pl3_fppt_max = 65, 621 + .nv_temp_target_min = 75, 622 + .nv_temp_target_max = 87, 623 + }, 624 + }, 625 + }, 626 + { 627 + .matches = { 597 628 DMI_MATCH(DMI_BOARD_NAME, "FA607P"), 598 629 }, 599 630 .driver_data = &(struct power_data) { ··· 1342 1311 }, 1343 1312 { 1344 1313 .matches = { 1314 + DMI_MATCH(DMI_BOARD_NAME, "GU605MU"), 1315 + }, 1316 + .driver_data = &(struct power_data) { 1317 + .ac_data = &(struct power_limits) { 1318 + .ppt_pl1_spl_min = 28, 1319 + .ppt_pl1_spl_max = 90, 1320 + .ppt_pl2_sppt_min = 28, 1321 + .ppt_pl2_sppt_max = 135, 1322 + .nv_dynamic_boost_min = 5, 1323 + .nv_dynamic_boost_max = 20, 1324 + .nv_temp_target_min = 75, 1325 + .nv_temp_target_max = 87, 1326 + .nv_tgp_min = 55, 1327 + .nv_tgp_max = 85, 1328 + }, 1329 + .dc_data = &(struct power_limits) { 1330 + .ppt_pl1_spl_min = 25, 1331 + .ppt_pl1_spl_max = 35, 1332 + .ppt_pl2_sppt_min = 38, 1333 + .ppt_pl2_sppt_max = 53, 1334 + .nv_temp_target_min = 75, 1335 + .nv_temp_target_max = 87, 1336 + }, 1337 + .requires_fan_curve = true, 1338 + }, 1339 + }, 1340 + { 1341 + .matches = { 1345 1342 DMI_MATCH(DMI_BOARD_NAME, "GU605M"), 1346 1343 }, 1347 1344 .driver_data = &(struct power_data) { ··· 1418 1359 .ppt_pl1_spl_max = 45, 1419 1360 .ppt_pl2_sppt_min = 25, 1420 1361 .ppt_pl2_sppt_max = 54, 1362 + .ppt_pl3_fppt_min = 35, 1363 + .ppt_pl3_fppt_max = 65, 1364 + .nv_temp_target_min = 75, 1365 + .nv_temp_target_max = 87, 1366 + }, 1367 + .dc_data = &(struct power_limits) { 1368 + .ppt_pl1_spl_min = 15, 1369 + .ppt_pl1_spl_max = 35, 1370 + .ppt_pl2_sppt_min = 25, 1371 + .ppt_pl2_sppt_max = 35, 1372 + .ppt_pl3_fppt_min = 35, 1373 + .ppt_pl3_fppt_max = 65, 1374 + .nv_temp_target_min = 75, 1375 + .nv_temp_target_max = 87, 1376 + }, 1377 + }, 1378 + }, 1379 + { 1380 + .matches = { 1381 + DMI_MATCH(DMI_BOARD_NAME, "GV302XU"), 1382 + }, 1383 + .driver_data = &(struct power_data) { 1384 + .ac_data = &(struct power_limits) { 1385 + .ppt_pl1_spl_min = 15, 1386 + .ppt_pl1_spl_max = 55, 1387 + .ppt_pl2_sppt_min = 25, 1388 + .ppt_pl2_sppt_max = 60, 1421 1389 .ppt_pl3_fppt_min = 35, 1422 1390 .ppt_pl3_fppt_max = 65, 1423 1391 .nv_temp_target_min = 75,
+3 -1
drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c
··· 36 36 37 37 /* Supported SST hardware version by this driver */ 38 38 #define ISST_MAJOR_VERSION 0 39 - #define ISST_MINOR_VERSION 2 39 + #define ISST_MINOR_VERSION 3 40 40 41 41 /* 42 42 * Used to indicate if value read from MMIO needs to get multiplied ··· 1460 1460 j * SST_TF_RATIO_0_WIDTH, SST_TF_RATIO_0_WIDTH, 1461 1461 SST_MUL_FACTOR_FREQ) 1462 1462 } 1463 + 1464 + memset(turbo_freq.bucket_core_counts, 0, sizeof(turbo_freq.bucket_core_counts)); 1463 1465 1464 1466 if (feature_rev >= 2) { 1465 1467 bool has_tf_info_8 = false;
+8 -2
drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
··· 31 31 #include "uncore-frequency-common.h" 32 32 33 33 #define UNCORE_MAJOR_VERSION 0 34 - #define UNCORE_MINOR_VERSION 2 34 + #define UNCORE_MINOR_VERSION 3 35 35 #define UNCORE_ELC_SUPPORTED_VERSION 2 36 36 #define UNCORE_HEADER_INDEX 0 37 37 #define UNCORE_FABRIC_CLUSTER_OFFSET 8 ··· 537 537 #define UNCORE_VERSION_MASK GENMASK_ULL(7, 0) 538 538 #define UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK GENMASK_ULL(15, 8) 539 539 #define UNCORE_CLUSTER_OFF_MASK GENMASK_ULL(7, 0) 540 + #define UNCORE_AUTONOMOUS_UFS_DISABLED BIT(32) 540 541 #define UNCORE_MAX_CLUSTER_PER_DOMAIN 8 541 542 542 543 static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) ··· 599 598 600 599 for (i = 0; i < num_resources; ++i) { 601 600 struct tpmi_uncore_power_domain_info *pd_info; 601 + bool auto_ufs_enabled; 602 602 struct resource *res; 603 603 u64 cluster_offset; 604 604 u8 cluster_mask; ··· 649 647 continue; 650 648 } 651 649 650 + auto_ufs_enabled = !(header & UNCORE_AUTONOMOUS_UFS_DISABLED); 651 + 652 652 /* Find out number of clusters in this resource */ 653 653 pd_info->cluster_count = hweight8(cluster_mask); 654 654 ··· 693 689 694 690 cluster_info->uncore_root = tpmi_uncore; 695 691 696 - if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION) 692 + if ((TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= 693 + UNCORE_ELC_SUPPORTED_VERSION) && 694 + auto_ufs_enabled) 697 695 cluster_info->elc_supported = true; 698 696 699 697 ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0);
-1
drivers/reset/core.c
··· 856 856 ret = __auxiliary_device_add(adev, "reset"); 857 857 if (ret) { 858 858 auxiliary_device_uninit(adev); 859 - kfree(adev); 860 859 return ret; 861 860 } 862 861
+1 -1
drivers/reset/reset-rzg2l-usbphy-ctrl.c
··· 350 350 351 351 MODULE_LICENSE("GPL v2"); 352 352 MODULE_DESCRIPTION("Renesas RZ/G2L USBPHY Control"); 353 - MODULE_AUTHOR("biju.das.jz@bp.renesas.com>"); 353 + MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+36 -24
drivers/reset/spacemit/reset-spacemit-k3.c
··· 112 112 [RESET_APMU_SDH0] = RESET_DATA(APMU_SDH0_CLK_RES_CTRL, 0, BIT(1)), 113 113 [RESET_APMU_SDH1] = RESET_DATA(APMU_SDH1_CLK_RES_CTRL, 0, BIT(1)), 114 114 [RESET_APMU_SDH2] = RESET_DATA(APMU_SDH2_CLK_RES_CTRL, 0, BIT(1)), 115 - [RESET_APMU_USB2] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, 116 - BIT(1)|BIT(2)|BIT(3)), 117 - [RESET_APMU_USB3_PORTA] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, 118 - BIT(5)|BIT(6)|BIT(7)), 119 - [RESET_APMU_USB3_PORTB] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, 120 - BIT(9)|BIT(10)|BIT(11)), 121 - [RESET_APMU_USB3_PORTC] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, 122 - BIT(13)|BIT(14)|BIT(15)), 123 - [RESET_APMU_USB3_PORTD] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, 124 - BIT(17)|BIT(18)|BIT(19)), 115 + [RESET_APMU_USB2_AHB] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(1)), 116 + [RESET_APMU_USB2_VCC] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(2)), 117 + [RESET_APMU_USB2_PHY] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(3)), 118 + [RESET_APMU_USB3_A_AHB] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(5)), 119 + [RESET_APMU_USB3_A_VCC] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(6)), 120 + [RESET_APMU_USB3_A_PHY] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(7)), 121 + [RESET_APMU_USB3_B_AHB] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(9)), 122 + [RESET_APMU_USB3_B_VCC] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(10)), 123 + [RESET_APMU_USB3_B_PHY] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(11)), 124 + [RESET_APMU_USB3_C_AHB] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(13)), 125 + [RESET_APMU_USB3_C_VCC] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(14)), 126 + [RESET_APMU_USB3_C_PHY] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(15)), 127 + [RESET_APMU_USB3_D_AHB] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(17)), 128 + [RESET_APMU_USB3_D_VCC] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(18)), 129 + [RESET_APMU_USB3_D_PHY] = RESET_DATA(APMU_USB_CLK_RES_CTRL, 0, BIT(19)), 125 130 [RESET_APMU_QSPI] = RESET_DATA(APMU_QSPI_CLK_RES_CTRL, 0, BIT(1)), 126 131 [RESET_APMU_QSPI_BUS] = RESET_DATA(APMU_QSPI_CLK_RES_CTRL, 0, BIT(0)), 127 132 [RESET_APMU_DMA] = RESET_DATA(APMU_DMA_CLK_RES_CTRL, 0, BIT(0)), ··· 156 151 [RESET_APMU_CPU7_SW] = RESET_DATA(APMU_PMU_CC2_AP, BIT(26), 0), 157 152 [RESET_APMU_C1_MPSUB_SW] = RESET_DATA(APMU_PMU_CC2_AP, BIT(28), 0), 158 153 [RESET_APMU_MPSUB_DBG] = RESET_DATA(APMU_PMU_CC2_AP, BIT(29), 0), 159 - [RESET_APMU_UCIE] = RESET_DATA(APMU_UCIE_CTRL, 160 - BIT(1) | BIT(2) | BIT(3), 0), 161 - [RESET_APMU_RCPU] = RESET_DATA(APMU_RCPU_CLK_RES_CTRL, 0, 162 - BIT(3) | BIT(2) | BIT(0)), 154 + [RESET_APMU_UCIE_IP] = RESET_DATA(APMU_UCIE_CTRL, BIT(1), 0), 155 + [RESET_APMU_UCIE_HOT] = RESET_DATA(APMU_UCIE_CTRL, BIT(2), 0), 156 + [RESET_APMU_UCIE_MON] = RESET_DATA(APMU_UCIE_CTRL, BIT(3), 0), 157 + [RESET_APMU_RCPU_AUDIO_SYS] = RESET_DATA(APMU_RCPU_CLK_RES_CTRL, 0, BIT(0)), 158 + [RESET_APMU_RCPU_MCU_CORE] = RESET_DATA(APMU_RCPU_CLK_RES_CTRL, 0, BIT(2)), 159 + [RESET_APMU_RCPU_AUDIO_APMU] = RESET_DATA(APMU_RCPU_CLK_RES_CTRL, 0, BIT(3)), 163 160 [RESET_APMU_DSI4LN2_ESCCLK] = RESET_DATA(APMU_LCD_CLK_RES_CTRL3, 0, BIT(3)), 164 161 [RESET_APMU_DSI4LN2_LCD_SW] = RESET_DATA(APMU_LCD_CLK_RES_CTRL3, 0, BIT(4)), 165 162 [RESET_APMU_DSI4LN2_LCD_MCLK] = RESET_DATA(APMU_LCD_CLK_RES_CTRL4, 0, BIT(9)), ··· 171 164 [RESET_APMU_UFS_ACLK] = RESET_DATA(APMU_UFS_CLK_RES_CTRL, 0, BIT(0)), 172 165 [RESET_APMU_EDP0] = RESET_DATA(APMU_LCD_EDP_CTRL, 0, BIT(0)), 173 166 [RESET_APMU_EDP1] = RESET_DATA(APMU_LCD_EDP_CTRL, 0, BIT(16)), 174 - [RESET_APMU_PCIE_PORTA] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_A, 0, 175 - BIT(5) | BIT(4) | BIT(3)), 176 - [RESET_APMU_PCIE_PORTB] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_B, 0, 177 - BIT(5) | BIT(4) | BIT(3)), 178 - [RESET_APMU_PCIE_PORTC] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_C, 0, 179 - BIT(5) | BIT(4) | BIT(3)), 180 - [RESET_APMU_PCIE_PORTD] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_D, 0, 181 - BIT(5) | BIT(4) | BIT(3)), 182 - [RESET_APMU_PCIE_PORTE] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_E, 0, 183 - BIT(5) | BIT(4) | BIT(3)), 167 + [RESET_APMU_PCIE_A_DBI] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_A, 0, BIT(3)), 168 + [RESET_APMU_PCIE_A_SLAVE] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_A, 0, BIT(4)), 169 + [RESET_APMU_PCIE_A_MASTER] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_A, 0, BIT(5)), 170 + [RESET_APMU_PCIE_B_DBI] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_B, 0, BIT(3)), 171 + [RESET_APMU_PCIE_B_SLAVE] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_B, 0, BIT(4)), 172 + [RESET_APMU_PCIE_B_MASTER] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_B, 0, BIT(5)), 173 + [RESET_APMU_PCIE_C_DBI] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_C, 0, BIT(3)), 174 + [RESET_APMU_PCIE_C_SLAVE] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_C, 0, BIT(4)), 175 + [RESET_APMU_PCIE_C_MASTER] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_C, 0, BIT(5)), 176 + [RESET_APMU_PCIE_D_DBI] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_D, 0, BIT(3)), 177 + [RESET_APMU_PCIE_D_SLAVE] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_D, 0, BIT(4)), 178 + [RESET_APMU_PCIE_D_MASTER] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_D, 0, BIT(5)), 179 + [RESET_APMU_PCIE_E_DBI] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_E, 0, BIT(3)), 180 + [RESET_APMU_PCIE_E_SLAVE] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_E, 0, BIT(4)), 181 + [RESET_APMU_PCIE_E_MASTER] = RESET_DATA(APMU_PCIE_CLK_RES_CTRL_E, 0, BIT(5)), 184 182 [RESET_APMU_EMAC0] = RESET_DATA(APMU_EMAC0_CLK_RES_CTRL, 0, BIT(1)), 185 183 [RESET_APMU_EMAC1] = RESET_DATA(APMU_EMAC1_CLK_RES_CTRL, 0, BIT(1)), 186 184 [RESET_APMU_EMAC2] = RESET_DATA(APMU_EMAC2_CLK_RES_CTRL, 0, BIT(1)),
+14 -18
drivers/s390/crypto/zcrypt_msgtype6.c
··· 953 953 /* 954 954 * The request distributor calls this function if it picked the CEXxC 955 955 * device to handle a modexpo request. 956 + * This function assumes that ap_msg has been initialized with 957 + * ap_init_apmsg() and thus a valid buffer with the size of 958 + * ap_msg->bufsize is available within ap_msg. Also the caller has 959 + * to make sure ap_release_apmsg() is always called even on failure. 956 960 * @zq: pointer to zcrypt_queue structure that identifies the 957 961 * CEXxC device to the request distributor 958 962 * @mex: pointer to the modexpo request buffer ··· 968 964 struct ap_response_type *resp_type = &ap_msg->response; 969 965 int rc; 970 966 971 - ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); 972 - if (!ap_msg->msg) 973 - return -ENOMEM; 974 - ap_msg->bufsize = PAGE_SIZE; 975 967 ap_msg->receive = zcrypt_msgtype6_receive; 976 968 ap_msg->psmid = (((unsigned long)current->pid) << 32) + 977 969 atomic_inc_return(&zcrypt_step); 978 970 rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex); 979 971 if (rc) 980 - goto out_free; 972 + goto out; 981 973 resp_type->type = CEXXC_RESPONSE_TYPE_ICA; 982 974 init_completion(&resp_type->work); 983 975 rc = ap_queue_message(zq->queue, ap_msg); 984 976 if (rc) 985 - goto out_free; 977 + goto out; 986 978 rc = wait_for_completion_interruptible(&resp_type->work); 987 979 if (rc == 0) { 988 980 rc = ap_msg->rc; ··· 991 991 ap_cancel_message(zq->queue, ap_msg); 992 992 } 993 993 994 - out_free: 995 - free_page((unsigned long)ap_msg->msg); 996 - ap_msg->msg = NULL; 994 + out: 997 995 return rc; 998 996 } 999 997 1000 998 /* 1001 999 * The request distributor calls this function if it picked the CEXxC 1002 1000 * device to handle a modexpo_crt request. 1001 + * This function assumes that ap_msg has been initialized with 1002 + * ap_init_apmsg() and thus a valid buffer with the size of 1003 + * ap_msg->bufsize is available within ap_msg. Also the caller has 1004 + * to make sure ap_release_apmsg() is always called even on failure. 1003 1005 * @zq: pointer to zcrypt_queue structure that identifies the 1004 1006 * CEXxC device to the request distributor 1005 1007 * @crt: pointer to the modexpoc_crt request buffer ··· 1013 1011 struct ap_response_type *resp_type = &ap_msg->response; 1014 1012 int rc; 1015 1013 1016 - ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); 1017 - if (!ap_msg->msg) 1018 - return -ENOMEM; 1019 - ap_msg->bufsize = PAGE_SIZE; 1020 1014 ap_msg->receive = zcrypt_msgtype6_receive; 1021 1015 ap_msg->psmid = (((unsigned long)current->pid) << 32) + 1022 1016 atomic_inc_return(&zcrypt_step); 1023 1017 rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt); 1024 1018 if (rc) 1025 - goto out_free; 1019 + goto out; 1026 1020 resp_type->type = CEXXC_RESPONSE_TYPE_ICA; 1027 1021 init_completion(&resp_type->work); 1028 1022 rc = ap_queue_message(zq->queue, ap_msg); 1029 1023 if (rc) 1030 - goto out_free; 1024 + goto out; 1031 1025 rc = wait_for_completion_interruptible(&resp_type->work); 1032 1026 if (rc == 0) { 1033 1027 rc = ap_msg->rc; ··· 1036 1038 ap_cancel_message(zq->queue, ap_msg); 1037 1039 } 1038 1040 1039 - out_free: 1040 - free_page((unsigned long)ap_msg->msg); 1041 - ap_msg->msg = NULL; 1041 + out: 1042 1042 return rc; 1043 1043 } 1044 1044
+1 -1
drivers/soc/aspeed/aspeed-socinfo.c
··· 39 39 unsigned int i; 40 40 41 41 for (i = 0 ; i < ARRAY_SIZE(rev_table) ; ++i) { 42 - if (rev_table[i].id == id) 42 + if ((rev_table[i].id & 0xff00ffff) == id) 43 43 return rev_table[i].name; 44 44 } 45 45
+4 -2
drivers/soc/microchip/mpfs-control-scb.c
··· 14 14 { 15 15 struct device *dev = &pdev->dev; 16 16 17 - return mfd_add_devices(dev, PLATFORM_DEVID_NONE, mpfs_control_scb_devs, 18 - ARRAY_SIZE(mpfs_control_scb_devs), NULL, 0, NULL); 17 + return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, 18 + mpfs_control_scb_devs, 19 + ARRAY_SIZE(mpfs_control_scb_devs), NULL, 0, 20 + NULL); 19 21 } 20 22 21 23 static const struct of_device_id mpfs_control_scb_of_match[] = {
+4 -2
drivers/soc/microchip/mpfs-mss-top-sysreg.c
··· 16 16 struct device *dev = &pdev->dev; 17 17 int ret; 18 18 19 - ret = mfd_add_devices(dev, PLATFORM_DEVID_NONE, mpfs_mss_top_sysreg_devs, 20 - ARRAY_SIZE(mpfs_mss_top_sysreg_devs) , NULL, 0, NULL); 19 + ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, 20 + mpfs_mss_top_sysreg_devs, 21 + ARRAY_SIZE(mpfs_mss_top_sysreg_devs), NULL, 22 + 0, NULL); 21 23 if (ret) 22 24 return ret; 23 25
+1 -1
drivers/soc/qcom/pdr_internal.h
··· 84 84 85 85 struct servreg_loc_pfr_req { 86 86 char service[SERVREG_NAME_LENGTH + 1]; 87 - char reason[257]; 87 + char reason[SERVREG_PFR_LENGTH + 1]; 88 88 }; 89 89 90 90 struct servreg_loc_pfr_resp {
+1 -1
drivers/soc/qcom/qcom_pdr_msg.c
··· 325 325 }, 326 326 { 327 327 .data_type = QMI_STRING, 328 - .elem_len = SERVREG_NAME_LENGTH + 1, 328 + .elem_len = SERVREG_PFR_LENGTH + 1, 329 329 .elem_size = sizeof(char), 330 330 .array_type = VAR_LEN_ARRAY, 331 331 .tlv_type = 0x02,
+12
drivers/spi/spi-amlogic-spifc-a4.c
··· 1066 1066 .finish_io_req = aml_sfc_ecc_finish_io_req, 1067 1067 }; 1068 1068 1069 + static void aml_sfc_unregister_ecc_engine(void *data) 1070 + { 1071 + struct nand_ecc_engine *eng = data; 1072 + 1073 + nand_ecc_unregister_on_host_hw_engine(eng); 1074 + } 1075 + 1069 1076 static int aml_sfc_clk_init(struct aml_sfc *sfc) 1070 1077 { 1071 1078 sfc->gate_clk = devm_clk_get_enabled(sfc->dev, "gate"); ··· 1155 1148 ret = nand_ecc_register_on_host_hw_engine(&sfc->ecc_eng); 1156 1149 if (ret) 1157 1150 return dev_err_probe(&pdev->dev, ret, "failed to register Aml host ecc engine.\n"); 1151 + 1152 + ret = devm_add_action_or_reset(dev, aml_sfc_unregister_ecc_engine, 1153 + &sfc->ecc_eng); 1154 + if (ret) 1155 + return dev_err_probe(dev, ret, "failed to add ECC unregister action\n"); 1158 1156 1159 1157 ret = of_property_read_u32(np, "amlogic,rx-adj", &val); 1160 1158 if (!ret)
+9 -8
drivers/spi/spi-cadence-quadspi.c
··· 1483 1483 if (refcount_read(&cqspi->inflight_ops) == 0) 1484 1484 return -ENODEV; 1485 1485 1486 - if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { 1487 - ret = pm_runtime_resume_and_get(dev); 1488 - if (ret) { 1489 - dev_err(&mem->spi->dev, "resume failed with %d\n", ret); 1490 - return ret; 1491 - } 1492 - } 1493 - 1494 1486 if (!refcount_read(&cqspi->refcount)) 1495 1487 return -EBUSY; 1496 1488 ··· 1494 1502 return -EBUSY; 1495 1503 } 1496 1504 1505 + if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { 1506 + ret = pm_runtime_resume_and_get(dev); 1507 + if (ret) { 1508 + dev_err(&mem->spi->dev, "resume failed with %d\n", ret); 1509 + goto dec_inflight_refcount; 1510 + } 1511 + } 1512 + 1497 1513 ret = cqspi_mem_process(mem, op); 1498 1514 1499 1515 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) ··· 1510 1510 if (ret) 1511 1511 dev_err(&mem->spi->dev, "operation failed with %d\n", ret); 1512 1512 1513 + dec_inflight_refcount: 1513 1514 if (refcount_read(&cqspi->inflight_ops) > 1) 1514 1515 refcount_dec(&cqspi->inflight_ops); 1515 1516
+6 -6
drivers/spi/spi-stm32-ospi.c
··· 928 928 dma_cfg.dst_addr = ospi->regs_phys_base + OSPI_DR; 929 929 ret = stm32_ospi_dma_setup(ospi, &dma_cfg); 930 930 if (ret) 931 - return ret; 931 + goto err_dma_free; 932 932 933 933 mutex_init(&ospi->lock); 934 934 ··· 965 965 if (ret) { 966 966 /* Disable ospi */ 967 967 writel_relaxed(0, ospi->regs_base + OSPI_CR); 968 - goto err_pm_resume; 968 + goto err_reset_control; 969 969 } 970 970 971 971 pm_runtime_put_autosuspend(ospi->dev); 972 972 973 973 return 0; 974 974 975 + err_reset_control: 976 + reset_control_release(ospi->rstc); 975 977 err_pm_resume: 976 978 pm_runtime_put_sync_suspend(ospi->dev); 977 979 978 980 err_pm_enable: 979 981 pm_runtime_force_suspend(ospi->dev); 980 982 mutex_destroy(&ospi->lock); 983 + err_dma_free: 981 984 if (ospi->dma_chtx) 982 985 dma_release_channel(ospi->dma_chtx); 983 986 if (ospi->dma_chrx) ··· 992 989 static void stm32_ospi_remove(struct platform_device *pdev) 993 990 { 994 991 struct stm32_ospi *ospi = platform_get_drvdata(pdev); 995 - int ret; 996 992 997 - ret = pm_runtime_resume_and_get(ospi->dev); 998 - if (ret < 0) 999 - return; 993 + pm_runtime_resume_and_get(ospi->dev); 1000 994 1001 995 spi_unregister_controller(ospi->ctrl); 1002 996 /* Disable ospi */
+27 -5
drivers/thermal/thermal_core.c
··· 41 41 42 42 static bool thermal_pm_suspended; 43 43 44 + static struct workqueue_struct *thermal_wq __ro_after_init; 45 + 44 46 /* 45 47 * Governor section: set of functions to handle thermal governors 46 48 * ··· 315 313 if (delay > HZ) 316 314 delay = round_jiffies_relative(delay); 317 315 318 - mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, delay); 316 + mod_delayed_work(thermal_wq, &tz->poll_queue, delay); 319 317 320 318 static void thermal_zone_recheck(struct thermal_zone_device *tz, int error) ··· 1642 1640 device_del(&tz->device); 1643 1641 release_device: 1644 1642 put_device(&tz->device); 1643 + wait_for_completion(&tz->removal); 1645 1644 remove_id: 1646 1645 ida_free(&thermal_tz_ida, id); 1647 1646 free_tzp: ··· 1788 1785 1789 1786 guard(thermal_zone)(tz); 1790 1787 1788 + /* If the thermal zone is going away, there's nothing to do. */ 1789 + if (tz->state & TZ_STATE_FLAG_EXIT) 1790 + return; 1791 + 1791 1792 tz->state &= ~(TZ_STATE_FLAG_SUSPENDED | TZ_STATE_FLAG_RESUMING); 1792 1793 1793 1794 thermal_debug_tz_resume(tz); ··· 1818 1811 } 1819 1812 1820 1813 tz->state |= TZ_STATE_FLAG_SUSPENDED; 1814 + 1815 + /* Prevent new work from getting to the workqueue subsequently. */ 1816 + cancel_delayed_work(&tz->poll_queue); 1821 1817 } 1822 1818 1823 1819 static void thermal_pm_notify_prepare(void) ··· 1839 1829 { 1840 1830 guard(thermal_zone)(tz); 1841 1831 1842 - cancel_delayed_work(&tz->poll_queue); 1843 - 1844 1832 reinit_completion(&tz->resume); 1845 1833 tz->state |= TZ_STATE_FLAG_RESUMING; 1846 1834 ··· 1848 1840 */ 1849 1841 INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_resume); 1850 1842 /* Queue up the work without a delay. */ 1851 - mod_delayed_work(system_freezable_power_efficient_wq, &tz->poll_queue, 0); 1843 + mod_delayed_work(thermal_wq, &tz->poll_queue, 0); 1852 1844 1853 1845 static void thermal_pm_notify_complete(void) ··· 1871 1863 case PM_RESTORE_PREPARE: 1872 1864 case PM_SUSPEND_PREPARE: 1873 1865 thermal_pm_notify_prepare(); 1866 + /* 1867 + * Allow any leftover thermal work items already on the 1868 + * workqueue to complete so they don't get in the way later. 1869 + */ 1870 + flush_workqueue(thermal_wq); 1874 1871 break; 1875 1872 case PM_POST_HIBERNATION: 1876 1873 case PM_POST_RESTORE: ··· 1908 1895 if (result) 1909 1896 goto error; 1910 1897 1898 + thermal_wq = alloc_workqueue("thermal_events", 1899 + WQ_FREEZABLE | WQ_POWER_EFFICIENT | WQ_PERCPU, 0); 1900 + if (!thermal_wq) { 1901 + result = -ENOMEM; 1902 + goto unregister_netlink; 1903 + } 1904 + 1911 1905 result = thermal_register_governors(); 1912 1906 if (result) 1913 - goto unregister_netlink; 1907 + goto destroy_workqueue; 1914 1908 1915 1909 thermal_class = kzalloc_obj(*thermal_class); 1916 1910 if (!thermal_class) { ··· 1944 1924 1945 1925 unregister_governors: 1946 1926 thermal_unregister_governors(); 1927 + destroy_workqueue: 1928 + destroy_workqueue(thermal_wq); 1948 1930 unregister_netlink: 1949 1931 thermal_netlink_exit(); error:
+1 -1
drivers/thunderbolt/nhi.c
··· 1020 1020 * If power rails are sustainable for wakeup from S4 this 1021 1021 * property is set by the BIOS. 1022 1022 */ 1023 - if (device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val)) 1023 + if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val)) 1024 1024 return !!val; 1025 1025 1026 1026 return true;
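This one-character fix turns on the return convention of the property API: device_property_read_u8() returns 0 on success and a negative errno when the property is absent, so the old unnegated test used val precisely when it had not been read. The corrected shape as a standalone helper (my_wake_supported is an illustrative name):

#include <linux/property.h>

static bool my_wake_supported(struct device *dev)
{
	u8 val;

	/* 0 means "WAKE_SUPPORTED" exists and val now holds it;
	 * a negative errno means the BIOS never set it. */
	if (!device_property_read_u8(dev, "WAKE_SUPPORTED", &val))
		return !!val;

	return true;	/* property absent: assume wakeup works */
}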
+18
drivers/tty/vt/vt.c
··· 1909 1909 dest = ((u16 *)vc->vc_origin) + r * vc->vc_cols; 1910 1910 memcpy(dest, src, 2 * cols); 1911 1911 } 1912 + /* 1913 + * If the console was resized while in the alternate screen, 1914 + * resize the saved unicode buffer to the current dimensions. 1915 + * On allocation failure new_uniscr is NULL, causing the old 1916 + * buffer to be freed and vc_uni_lines to be lazily rebuilt 1917 + * via vc_uniscr_check() when next needed. 1918 + */ 1919 + if (vc->vc_saved_uni_lines && 1920 + (vc->vc_saved_rows != vc->vc_rows || 1921 + vc->vc_saved_cols != vc->vc_cols)) { 1922 + u32 **new_uniscr = vc_uniscr_alloc(vc->vc_cols, vc->vc_rows); 1923 + 1924 + if (new_uniscr) 1925 + vc_uniscr_copy_area(new_uniscr, vc->vc_cols, vc->vc_rows, 1926 + vc->vc_saved_uni_lines, cols, 0, rows); 1927 + vc_uniscr_free(vc->vc_saved_uni_lines); 1928 + vc->vc_saved_uni_lines = new_uniscr; 1929 + } 1912 1930 vc_uniscr_set(vc, vc->vc_saved_uni_lines); 1913 1931 vc->vc_saved_uni_lines = NULL; 1914 1932 restore_cur(vc);
+4
drivers/usb/cdns3/cdns3-gadget.c
··· 2589 2589 struct cdns3_request *priv_req; 2590 2590 int ret = 0; 2591 2591 2592 + if (!ep->desc) 2593 + return -ESHUTDOWN; 2594 + 2592 2595 request->actual = 0; 2593 2596 request->status = -EINPROGRESS; 2594 2597 priv_req = to_cdns3_request(request); ··· 3431 3428 ret = cdns3_gadget_start(cdns); 3432 3429 if (ret) { 3433 3430 pm_runtime_put_sync(cdns->dev); 3431 + cdns_drd_gadget_off(cdns); 3434 3432 return ret; 3435 3433 } 3436 3434
+9
drivers/usb/class/cdc-acm.c
··· 1225 1225 if (!data_interface || !control_interface) 1226 1226 return -ENODEV; 1227 1227 goto skip_normal_probe; 1228 + } else if (quirks == NO_UNION_12) { 1229 + data_interface = usb_ifnum_to_if(usb_dev, 2); 1230 + control_interface = usb_ifnum_to_if(usb_dev, 1); 1231 + if (!data_interface || !control_interface) 1232 + return -ENODEV; 1233 + goto skip_normal_probe; 1228 1234 } 1229 1235 1230 1236 /* normal probing*/ ··· 1753 1747 }, 1754 1748 { USB_DEVICE(0x045b, 0x024D), /* Renesas R-Car E3 USB Download mode */ 1755 1749 .driver_info = DISABLE_ECHO, /* Don't echo banner */ 1750 + }, 1751 + { USB_DEVICE(0x04b8, 0x0d12), /* EPSON HMD Com&Sens */ 1752 + .driver_info = NO_UNION_12, /* union descriptor is garbage */ 1756 1753 }, 1757 1754 { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */ 1758 1755 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+1
drivers/usb/class/cdc-acm.h
··· 114 114 #define SEND_ZERO_PACKET BIT(6) 115 115 #define DISABLE_ECHO BIT(7) 116 116 #define MISSING_CAP_BRK BIT(8) 117 + #define NO_UNION_12 BIT(9)
+3
drivers/usb/class/usbtmc.c
··· 254 254 list_del(&file_data->file_elem); 255 255 256 256 spin_unlock_irq(&file_data->data->dev_lock); 257 + 258 + /* flush anchored URBs */ 259 + usbtmc_draw_down(file_data); 257 260 mutex_unlock(&file_data->data->io_mutex); 258 261 259 262 kref_put(&file_data->data->kref, usbtmc_delete);
+2 -3
drivers/usb/common/ulpi.c
··· 331 331 ulpi->ops = ops; 332 332 333 333 ret = ulpi_register(dev, ulpi); 334 - if (ret) { 335 - kfree(ulpi); 334 + if (ret) 336 335 return ERR_PTR(ret); 337 - } 336 + 338 337 339 338 return ulpi; 340 339 }
+14 -9
drivers/usb/core/driver.c
··· 1415 1415 int status = 0; 1416 1416 int i = 0, n = 0; 1417 1417 struct usb_interface *intf; 1418 + bool offload_active = false; 1418 1419 1419 1420 if (udev->state == USB_STATE_NOTATTACHED || 1420 1421 udev->state == USB_STATE_SUSPENDED) 1421 1422 goto done; 1422 1423 1424 + usb_offload_set_pm_locked(udev, true); 1423 1425 if (msg.event == PM_EVENT_SUSPEND && usb_offload_check(udev)) { 1424 1426 dev_dbg(&udev->dev, "device offloaded, skip suspend.\n"); 1425 - udev->offload_at_suspend = 1; 1427 + offload_active = true; 1426 1428 } 1427 1429 1428 1430 /* Suspend all the interfaces and then udev itself */ ··· 1438 1436 * interrupt urbs, allowing interrupt events to be 1439 1437 * handled during system suspend. 1440 1438 */ 1441 - if (udev->offload_at_suspend && 1442 - intf->needs_remote_wakeup) { 1439 + if (offload_active && intf->needs_remote_wakeup) { 1443 1440 dev_dbg(&intf->dev, 1444 1441 "device offloaded, skip suspend.\n"); 1445 1442 continue; ··· 1453 1452 } 1454 1453 } 1455 1454 if (status == 0) { 1456 - if (!udev->offload_at_suspend) 1455 + if (!offload_active) 1457 1456 status = usb_suspend_device(udev, msg); 1458 1457 1459 1458 /* ··· 1499 1498 */ 1500 1499 } else { 1501 1500 udev->can_submit = 0; 1502 - if (!udev->offload_at_suspend) { 1501 + if (!offload_active) { 1503 1502 for (i = 0; i < 16; ++i) { 1504 1503 usb_hcd_flush_endpoint(udev, udev->ep_out[i]); 1505 1504 usb_hcd_flush_endpoint(udev, udev->ep_in[i]); ··· 1508 1507 } 1509 1508 1510 1509 done: 1510 + if (status != 0) 1511 + usb_offload_set_pm_locked(udev, false); 1511 1512 dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status); 1512 1513 return status; 1513 1514 } ··· 1539 1536 int status = 0; 1540 1537 int i; 1541 1538 struct usb_interface *intf; 1539 + bool offload_active = false; 1542 1540 1543 1541 if (udev->state == USB_STATE_NOTATTACHED) { 1544 1542 status = -ENODEV; 1545 1543 goto done; 1546 1544 } 1547 1545 udev->can_submit = 1; 1546 + if (msg.event == PM_EVENT_RESUME) 1547 + offload_active = usb_offload_check(udev); 1548 1548 1549 1549 /* Resume the device */ 1550 1550 if (udev->state == USB_STATE_SUSPENDED || udev->reset_resume) { 1551 - if (!udev->offload_at_suspend) 1551 + if (!offload_active) 1552 1552 status = usb_resume_device(udev, msg); 1553 1553 else 1554 1554 dev_dbg(&udev->dev, ··· 1568 1562 * pending interrupt urbs, allowing interrupt events 1569 1563 * to be handled during system suspend. 1570 1564 */ 1571 - if (udev->offload_at_suspend && 1572 - intf->needs_remote_wakeup) { 1565 + if (offload_active && intf->needs_remote_wakeup) { 1573 1566 dev_dbg(&intf->dev, 1574 1567 "device offloaded, skip resume.\n"); 1575 1568 continue; ··· 1577 1572 udev->reset_resume); 1578 1573 } 1579 1574 } 1580 - udev->offload_at_suspend = 0; 1581 1575 usb_mark_last_busy(udev); 1582 1576 1583 1577 done: 1584 1578 dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status); 1579 + usb_offload_set_pm_locked(udev, false); 1585 1580 if (!status) 1586 1581 udev->reset_resume = 0; 1587 1582 return status;
+1 -1
drivers/usb/core/hcd.c
··· 2403 2403 if (hcd->rh_registered) { 2404 2404 pm_wakeup_event(&hcd->self.root_hub->dev, 0); 2405 2405 set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); 2406 - queue_work(pm_wq, &hcd->wakeup_work); 2406 + queue_work(system_freezable_wq, &hcd->wakeup_work); 2407 2407 } 2408 2408 spin_unlock_irqrestore (&hcd_root_hub_lock, flags); 2409 2409 }
+59 -43
drivers/usb/core/offload.c
··· 25 25 */ 26 26 int usb_offload_get(struct usb_device *udev) 27 27 { 28 - int ret; 28 + int ret = 0; 29 29 30 - usb_lock_device(udev); 31 - if (udev->state == USB_STATE_NOTATTACHED) { 32 - usb_unlock_device(udev); 30 + if (!usb_get_dev(udev)) 33 31 return -ENODEV; 32 + 33 + if (pm_runtime_get_if_active(&udev->dev) != 1) { 34 + ret = -EBUSY; 35 + goto err_rpm; 34 36 } 35 37 36 - if (udev->state == USB_STATE_SUSPENDED || 37 - udev->offload_at_suspend) { 38 - usb_unlock_device(udev); 39 - return -EBUSY; 40 - } 38 + spin_lock(&udev->offload_lock); 41 39 42 - /* 43 - * offload_usage could only be modified when the device is active, since 44 - * it will alter the suspend flow of the device. 45 - */ 46 - ret = usb_autoresume_device(udev); 47 - if (ret < 0) { 48 - usb_unlock_device(udev); 49 - return ret; 40 + if (udev->offload_pm_locked) { 41 + ret = -EAGAIN; 42 + goto err; 50 43 } 51 44 52 45 udev->offload_usage++; 53 - usb_autosuspend_device(udev); 54 - usb_unlock_device(udev); 46 + 47 + err: 48 + spin_unlock(&udev->offload_lock); 49 + pm_runtime_put_autosuspend(&udev->dev); 50 + err_rpm: 51 + usb_put_dev(udev); 55 52 56 53 return ret; 57 54 } ··· 66 69 */ 67 70 int usb_offload_put(struct usb_device *udev) 68 71 { 69 - int ret; 72 + int ret = 0; 70 73 71 - usb_lock_device(udev); 72 - if (udev->state == USB_STATE_NOTATTACHED) { 73 - usb_unlock_device(udev); 74 + if (!usb_get_dev(udev)) 74 75 return -ENODEV; 76 + 77 + if (pm_runtime_get_if_active(&udev->dev) != 1) { 78 + ret = -EBUSY; 79 + goto err_rpm; 75 80 } 76 81 77 - if (udev->state == USB_STATE_SUSPENDED || 78 - udev->offload_at_suspend) { 79 - usb_unlock_device(udev); 80 - return -EBUSY; 81 - } 82 + spin_lock(&udev->offload_lock); 82 83 83 - /* 84 - * offload_usage could only be modified when the device is active, since 85 - * it will alter the suspend flow of the device. 86 - */ 87 - ret = usb_autoresume_device(udev); 88 - if (ret < 0) { 89 - usb_unlock_device(udev); 90 - return ret; 84 + if (udev->offload_pm_locked) { 85 + ret = -EAGAIN; 86 + goto err; 91 87 } 92 88 93 89 /* Drop the count when it wasn't 0, ignore the operation otherwise. */ 94 90 if (udev->offload_usage) 95 91 udev->offload_usage--; 96 - usb_autosuspend_device(udev); 97 - usb_unlock_device(udev); 92 + 93 + err: 94 + spin_unlock(&udev->offload_lock); 95 + pm_runtime_put_autosuspend(&udev->dev); 96 + err_rpm: 97 + usb_put_dev(udev); 98 98 99 99 return ret; 100 100 } ··· 106 112 * management. 107 113 * 108 114 * The caller must hold @udev's device lock. In addition, the caller should 109 - * ensure downstream usb devices are all either suspended or marked as 110 - * "offload_at_suspend" to ensure the correctness of the return value. 115 + * ensure the device itself and the downstream usb devices are all marked as 116 + * "offload_pm_locked" to ensure the correctness of the return value. 111 117 * 112 118 * Returns true on any offload activity, false otherwise.
113 119 */ 114 120 bool usb_offload_check(struct usb_device *udev) __must_hold(&udev->dev->mutex) 115 121 { 116 122 struct usb_device *child; 117 - bool active; 123 + bool active = false; 118 124 int port1; 125 + 126 + if (udev->offload_usage) 127 + return true; 119 128 120 129 usb_hub_for_each_child(udev, port1, child) { 121 130 usb_lock_device(child); 122 131 active = usb_offload_check(child); 123 132 usb_unlock_device(child); 133 + 124 134 if (active) 125 - return true; 135 + break; 126 136 } 127 137 128 - return !!udev->offload_usage; 138 + return active; 129 139 } 130 140 EXPORT_SYMBOL_GPL(usb_offload_check); 141 + 142 + /** 143 + * usb_offload_set_pm_locked - set the PM lock state of a USB device 144 + * @udev: the USB device to modify 145 + * @locked: the new lock state 146 + * 147 + * Setting @locked to true prevents offload_usage from being modified. This 148 + * ensures that offload activities cannot be started or stopped during critical 149 + * power management transitions, maintaining a stable state for the duration 150 + * of the transition. 151 + */ 152 + void usb_offload_set_pm_locked(struct usb_device *udev, bool locked) 153 + { 154 + spin_lock(&udev->offload_lock); 155 + udev->offload_pm_locked = locked; 156 + spin_unlock(&udev->offload_lock); 157 + } 158 + EXPORT_SYMBOL_GPL(usb_offload_set_pm_locked);
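Both reworked helpers share one shape: pin the struct with usb_get_dev(), take a runtime-PM reference only if the device is already active, then adjust the counter under the new offload_lock unless a system-PM transition has set offload_pm_locked. Condensed into a single hypothetical helper (the real code keeps get and put as separate functions):

#include <linux/pm_runtime.h>
#include <linux/usb.h>

static int offload_usage_adjust(struct usb_device *udev, int delta)
{
	int ret = 0;

	/* Returns 1 only when the device is RPM-active; do not
	 * resume a suspended device just to poke a counter. */
	if (pm_runtime_get_if_active(&udev->dev) != 1)
		return -EBUSY;

	spin_lock(&udev->offload_lock);
	if (udev->offload_pm_locked)
		ret = -EAGAIN;		/* suspend/resume in flight */
	else if (delta > 0 || udev->offload_usage > 0)
		udev->offload_usage += delta;
	spin_unlock(&udev->offload_lock);

	pm_runtime_put_autosuspend(&udev->dev);
	return ret;
}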
+11 -1
drivers/usb/core/phy.c
··· 114 114 struct usb_phy_roothub *usb_phy_roothub_alloc_usb3_phy(struct device *dev) 115 115 { 116 116 struct usb_phy_roothub *phy_roothub; 117 - int num_phys; 117 + int num_phys, usb2_phy_index; 118 118 119 119 if (!IS_ENABLED(CONFIG_GENERIC_PHY)) 120 120 return NULL; ··· 122 122 num_phys = of_count_phandle_with_args(dev->of_node, "phys", 123 123 "#phy-cells"); 124 124 if (num_phys <= 0) 125 + return NULL; 126 + 127 + /* 128 + * If 'usb2-phy' is not present, usb_phy_roothub_alloc() added 129 + * all PHYs to the primary HCD's phy_roothub already, so skip 130 + * adding 'usb3-phy' here to avoid double use of that. 131 + */ 132 + usb2_phy_index = of_property_match_string(dev->of_node, "phy-names", 133 + "usb2-phy"); 134 + if (usb2_phy_index < 0) 125 135 return NULL; 126 136 127 137 phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL);
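The early return added here leans on of_property_match_string() doubling as a presence test: it yields the index of the string inside the named string-list property, or a negative errno when either the property or the string is missing. As a tiny illustrative helper:

#include <linux/device.h>
#include <linux/of.h>

static bool has_named_phy(struct device *dev, const char *name)
{
	/* Negative result: no such entry in "phy-names". */
	return of_property_match_string(dev->of_node, "phy-names",
					name) >= 0;
}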
+3
drivers/usb/core/quirks.c
··· 401 401 402 402 /* Silicon Motion Flash Drive */ 403 403 { USB_DEVICE(0x090c, 0x1000), .driver_info = USB_QUIRK_DELAY_INIT }, 404 + { USB_DEVICE(0x090c, 0x2000), .driver_info = USB_QUIRK_DELAY_INIT }, 404 405 405 406 /* Sound Devices USBPre2 */ 406 407 { USB_DEVICE(0x0926, 0x0202), .driver_info = ··· 493 492 /* Razer - Razer Blade Keyboard */ 494 493 { USB_DEVICE(0x1532, 0x0116), .driver_info = 495 494 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 495 + /* Razer - Razer Kiyo Pro Webcam */ 496 + { USB_DEVICE(0x1532, 0x0e05), .driver_info = USB_QUIRK_NO_LPM }, 496 497 497 498 /* Lenovo ThinkPad OneLink+ Dock twin hub controllers (VIA Labs VL812) */ 498 499 { USB_DEVICE(0x17ef, 0x1018), .driver_info = USB_QUIRK_RESET_RESUME },
+1
drivers/usb/core/usb.c
··· 671 671 set_dev_node(&dev->dev, dev_to_node(bus->sysdev)); 672 672 dev->state = USB_STATE_ATTACHED; 673 673 dev->lpm_disable_count = 1; 674 + spin_lock_init(&dev->offload_lock); 674 675 dev->offload_usage = 0; 675 676 atomic_set(&dev->urbnum, 0); 676 677
+2
drivers/usb/dwc2/gadget.c
··· 4607 4607 /* Exit clock gating when driver is stopped. */ 4608 4608 if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE && 4609 4609 hsotg->bus_suspended && !hsotg->params.no_clock_gating) { 4610 + spin_lock_irqsave(&hsotg->lock, flags); 4610 4611 dwc2_gadget_exit_clock_gating(hsotg, 0); 4612 + spin_unlock_irqrestore(&hsotg->lock, flags); 4611 4613 } 4612 4614 4613 4615 /* all endpoints should be shutdown */
+3 -2
drivers/usb/dwc3/dwc3-google.c
··· 385 385 "google,usb-cfg-csr", 386 386 ARRAY_SIZE(args), args); 387 387 if (IS_ERR(google->usb_cfg_regmap)) { 388 - return dev_err_probe(dev, PTR_ERR(google->usb_cfg_regmap), 389 - "invalid usb cfg csr\n"); 388 + ret = dev_err_probe(dev, PTR_ERR(google->usb_cfg_regmap), 389 + "invalid usb cfg csr\n"); 390 + goto err_deinit_pdom; 390 391 } 391 392 392 393 google->host_cfg_offset = args[0];
+1 -1
drivers/usb/dwc3/dwc3-imx8mp.c
··· 263 263 dwc3 = platform_get_drvdata(dwc3_imx->dwc3_pdev); 264 264 if (!dwc3) { 265 265 err = dev_err_probe(dev, -EPROBE_DEFER, "failed to get dwc3 platform data\n"); 266 - goto depopulate; 266 + goto put_dwc3; 267 267 } 268 268 269 269 dwc3->glue_ops = &dwc3_imx_glue_ops;
+23 -12
drivers/usb/gadget/function/f_ecm.c
··· 681 681 struct usb_ep *ep; 682 682 683 683 struct f_ecm_opts *ecm_opts; 684 + struct net_device *net __free(detach_gadget) = NULL; 684 685 struct usb_request *request __free(free_usb_request) = NULL; 685 686 686 687 if (!can_support_ecm(cdev->gadget)) ··· 689 688 690 689 ecm_opts = container_of(f->fi, struct f_ecm_opts, func_inst); 691 690 692 - mutex_lock(&ecm_opts->lock); 691 + scoped_guard(mutex, &ecm_opts->lock) 692 + if (ecm_opts->bind_count == 0 && !ecm_opts->bound) { 693 + if (!device_is_registered(&ecm_opts->net->dev)) { 694 + gether_set_gadget(ecm_opts->net, cdev->gadget); 695 + status = gether_register_netdev(ecm_opts->net); 696 + } else 697 + status = gether_attach_gadget(ecm_opts->net, cdev->gadget); 693 698 694 - gether_set_gadget(ecm_opts->net, cdev->gadget); 695 - 696 - if (!ecm_opts->bound) { 697 - status = gether_register_netdev(ecm_opts->net); 698 - ecm_opts->bound = true; 699 - } 700 - 701 - mutex_unlock(&ecm_opts->lock); 702 - if (status) 703 - return status; 699 + if (status) 700 + return status; 701 + net = ecm_opts->net; 702 + } 704 703 705 704 ecm_string_defs[1].s = ecm->ethaddr; 706 705 ··· 791 790 792 791 ecm->notify_req = no_free_ptr(request); 793 792 793 + ecm_opts->bind_count++; 794 + retain_and_null_ptr(net); 795 + 794 796 DBG(cdev, "CDC Ethernet: IN/%s OUT/%s NOTIFY/%s\n", 795 797 ecm->port.in_ep->name, ecm->port.out_ep->name, 796 798 ecm->notify->name); ··· 840 836 struct f_ecm_opts *opts; 841 837 842 838 opts = container_of(f, struct f_ecm_opts, func_inst); 843 - if (opts->bound) 839 + if (device_is_registered(&opts->net->dev)) 844 840 gether_cleanup(netdev_priv(opts->net)); 845 841 else 846 842 free_netdev(opts->net); ··· 910 906 static void ecm_unbind(struct usb_configuration *c, struct usb_function *f) 911 907 { 912 908 struct f_ecm *ecm = func_to_ecm(f); 909 + struct f_ecm_opts *ecm_opts; 913 910 914 911 DBG(c->cdev, "ecm unbind\n"); 912 + 913 + ecm_opts = container_of(f->fi, struct f_ecm_opts, func_inst); 915 914 916 915 usb_free_all_descriptors(f); 917 916 ··· 925 918 926 919 kfree(ecm->notify_req->buf); 927 920 usb_ep_free_request(ecm->notify, ecm->notify_req); 921 + 922 + ecm_opts->bind_count--; 923 + if (ecm_opts->bind_count == 0 && !ecm_opts->bound) 924 + gether_detach_gadget(ecm_opts->net); 928 925 } 929 926 930 927 static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
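The __free(detach_gadget) / retain_and_null_ptr() pairing above (repeated in the f_eem, f_rndis and f_subset hunks below) is scope-based cleanup from <linux/cleanup.h>: if bind fails anywhere after the netdev was registered or attached, the compiler-generated exit path detaches it again, and on success the cleanup is disarmed; scoped_guard(mutex, ...) is the matching construct for locks. A generic sketch of the mechanism, with hypothetical names (struct thing and its helpers):

#include <linux/cleanup.h>
#include <linux/errno.h>

struct thing;
struct thing *thing_acquire(void);
void thing_undo(struct thing *t);
int thing_configure(struct thing *t);

/* Cleanup class "thing_undo": runs thing_undo(_T) whenever a
 * __free(thing_undo) pointer goes out of scope still non-NULL. */
DEFINE_FREE(thing_undo, struct thing *, if (_T) thing_undo(_T))

static int thing_bind(void)
{
	struct thing *t __free(thing_undo) = thing_acquire();

	if (!t)
		return -ENOMEM;

	if (thing_configure(t))
		return -EIO;		/* t is undone automatically */

	retain_and_null_ptr(t);		/* success: disarm the cleanup */
	return 0;
}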
+31 -28
drivers/usb/gadget/function/f_eem.c
··· 7 7 * Copyright (C) 2009 EF Johnson Technologies 8 8 */ 9 9 10 + #include <linux/cleanup.h> 10 11 #include <linux/kernel.h> 11 12 #include <linux/module.h> 12 13 #include <linux/device.h> ··· 252 251 struct usb_ep *ep; 253 252 254 253 struct f_eem_opts *eem_opts; 254 + struct net_device *net __free(detach_gadget) = NULL; 255 255 256 256 eem_opts = container_of(f->fi, struct f_eem_opts, func_inst); 257 - /* 258 - * in drivers/usb/gadget/configfs.c:configfs_composite_bind() 259 - * configurations are bound in sequence with list_for_each_entry, 260 - * in each configuration its functions are bound in sequence 261 - * with list_for_each_entry, so we assume no race condition 262 - * with regard to eem_opts->bound access 263 - */ 264 - if (!eem_opts->bound) { 265 - mutex_lock(&eem_opts->lock); 266 - gether_set_gadget(eem_opts->net, cdev->gadget); 267 - status = gether_register_netdev(eem_opts->net); 268 - mutex_unlock(&eem_opts->lock); 269 - if (status) 270 - return status; 271 - eem_opts->bound = true; 272 - } 257 + 258 + scoped_guard(mutex, &eem_opts->lock) 259 + if (eem_opts->bind_count == 0 && !eem_opts->bound) { 260 + if (!device_is_registered(&eem_opts->net->dev)) { 261 + gether_set_gadget(eem_opts->net, cdev->gadget); 262 + status = gether_register_netdev(eem_opts->net); 263 + } else 264 + status = gether_attach_gadget(eem_opts->net, cdev->gadget); 265 + 266 + if (status) 267 + return status; 268 + net = eem_opts->net; 269 + } 273 270 274 271 us = usb_gstrings_attach(cdev, eem_strings, 275 272 ARRAY_SIZE(eem_string_defs)); ··· 278 279 /* allocate instance-specific interface IDs */ 279 280 status = usb_interface_id(c, f); 280 281 if (status < 0) 281 - goto fail; 282 + return status; 282 283 eem->ctrl_id = status; 283 284 eem_intf.bInterfaceNumber = status; 284 - 285 - status = -ENODEV; 286 285 287 286 /* allocate instance-specific endpoints */ 288 287 ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc); 289 288 if (!ep) 290 - goto fail; 289 + return -ENODEV; 291 290 eem->port.in_ep = ep; 292 291 293 292 ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc); 294 293 if (!ep) 295 - goto fail; 294 + return -ENODEV; 296 295 eem->port.out_ep = ep; 297 296 298 297 /* support all relevant hardware speeds...
we expect that when ··· 306 309 status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function, 307 310 eem_ss_function, eem_ss_function); 308 311 if (status) 309 - goto fail; 312 + return status; 313 + 314 + eem_opts->bind_count++; 315 + retain_and_null_ptr(net); 310 316 311 317 DBG(cdev, "CDC Ethernet (EEM): IN/%s OUT/%s\n", 312 318 eem->port.in_ep->name, eem->port.out_ep->name); 313 319 return 0; 314 - 315 - fail: 316 - ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); 317 - 318 - return status; 319 320 } 320 321 321 322 static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) ··· 592 597 struct f_eem_opts *opts; 593 598 594 599 opts = container_of(f, struct f_eem_opts, func_inst); 595 - if (opts->bound) 600 + if (device_is_registered(&opts->net->dev)) 596 601 gether_cleanup(netdev_priv(opts->net)); 597 602 else 598 603 free_netdev(opts->net); ··· 635 640 636 641 static void eem_unbind(struct usb_configuration *c, struct usb_function *f) 637 642 { 643 + struct f_eem_opts *opts; 644 + 638 645 DBG(c->cdev, "eem unbind\n"); 639 646 647 + opts = container_of(f->fi, struct f_eem_opts, func_inst); 648 + 640 649 usb_free_all_descriptors(f); 650 + 651 + opts->bind_count--; 652 + if (opts->bind_count == 0 && !opts->bound) 653 + gether_detach_gadget(opts->net); 641 654 } 642 655 643 656 static struct usb_function *eem_alloc(struct usb_function_instance *fi)
+10 -9
drivers/usb/gadget/function/f_hid.c
··· 1262 1262 if (status) 1263 1263 goto fail; 1264 1264 1265 - spin_lock_init(&hidg->write_spinlock); 1266 1265 hidg->write_pending = 1; 1267 1266 hidg->req = NULL; 1268 - spin_lock_init(&hidg->read_spinlock); 1269 - spin_lock_init(&hidg->get_report_spinlock); 1270 - init_waitqueue_head(&hidg->write_queue); 1271 - init_waitqueue_head(&hidg->read_queue); 1272 - init_waitqueue_head(&hidg->get_queue); 1273 - init_waitqueue_head(&hidg->get_id_queue); 1274 - INIT_LIST_HEAD(&hidg->completed_out_req); 1275 - INIT_LIST_HEAD(&hidg->report_list); 1276 1267 1277 1268 INIT_WORK(&hidg->work, get_report_workqueue_handler); 1278 1269 hidg->workqueue = alloc_workqueue("report_work", ··· 1598 1607 opts = container_of(fi, struct f_hid_opts, func_inst); 1599 1608 1600 1609 mutex_lock(&opts->lock); 1610 + 1611 + spin_lock_init(&hidg->write_spinlock); 1612 + spin_lock_init(&hidg->read_spinlock); 1613 + spin_lock_init(&hidg->get_report_spinlock); 1614 + init_waitqueue_head(&hidg->write_queue); 1615 + init_waitqueue_head(&hidg->read_queue); 1616 + init_waitqueue_head(&hidg->get_queue); 1617 + init_waitqueue_head(&hidg->get_id_queue); 1618 + INIT_LIST_HEAD(&hidg->completed_out_req); 1619 + INIT_LIST_HEAD(&hidg->report_list); 1601 1620 1602 1621 device_initialize(&hidg->dev); 1603 1622 hidg->dev.release = hidg_release;
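The f_hid hunk is purely an ordering fix: bind can run again on a live object while readers and writers still hold those locks, so one-time initialization of spinlocks, waitqueues and list heads belongs with the allocation. In outline, around a hypothetical structure:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_func {
	spinlock_t lock;
	wait_queue_head_t read_queue;
	struct list_head pending;
};

static struct my_func *my_func_alloc(void)
{
	struct my_func *mf = kzalloc(sizeof(*mf), GFP_KERNEL);

	if (!mf)
		return NULL;

	/* Initialize exactly once, here; re-running these in
	 * bind() would reset state that may already be in use. */
	spin_lock_init(&mf->lock);
	init_waitqueue_head(&mf->read_queue);
	INIT_LIST_HEAD(&mf->pending);
	return mf;
}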
+30 -19
drivers/usb/gadget/function/f_rndis.c
··· 11 11 12 12 /* #define VERBOSE_DEBUG */ 13 13 14 + #include <linux/cleanup.h> 14 15 #include <linux/slab.h> 15 16 #include <linux/kernel.h> 16 17 #include <linux/module.h> ··· 666 665 667 666 struct f_rndis_opts *rndis_opts; 668 667 struct usb_os_desc_table *os_desc_table __free(kfree) = NULL; 668 + struct net_device *net __free(detach_gadget) = NULL; 669 669 struct usb_request *request __free(free_usb_request) = NULL; 670 670 671 671 if (!can_support_rndis(c)) ··· 680 678 return -ENOMEM; 681 679 } 682 680 683 - rndis_iad_descriptor.bFunctionClass = rndis_opts->class; 684 - rndis_iad_descriptor.bFunctionSubClass = rndis_opts->subclass; 685 - rndis_iad_descriptor.bFunctionProtocol = rndis_opts->protocol; 681 + scoped_guard(mutex, &rndis_opts->lock) { 682 + rndis_iad_descriptor.bFunctionClass = rndis_opts->class; 683 + rndis_iad_descriptor.bFunctionSubClass = rndis_opts->subclass; 684 + rndis_iad_descriptor.bFunctionProtocol = rndis_opts->protocol; 686 685 687 - /* 688 - * in drivers/usb/gadget/configfs.c:configfs_composite_bind() 689 - * configurations are bound in sequence with list_for_each_entry, 690 - * in each configuration its functions are bound in sequence 691 - * with list_for_each_entry, so we assume no race condition 692 - * with regard to rndis_opts->bound access 693 - */ 694 - if (!rndis_opts->bound) { 695 - gether_set_gadget(rndis_opts->net, cdev->gadget); 696 - status = gether_register_netdev(rndis_opts->net); 697 - if (status) 698 - return status; 699 - rndis_opts->bound = true; 686 + if (rndis_opts->bind_count == 0 && !rndis_opts->borrowed_net) { 687 + if (!device_is_registered(&rndis_opts->net->dev)) { 688 + gether_set_gadget(rndis_opts->net, cdev->gadget); 689 + status = gether_register_netdev(rndis_opts->net); 690 + } else 691 + status = gether_attach_gadget(rndis_opts->net, cdev->gadget); 692 + 693 + if (status) 694 + return status; 695 + net = rndis_opts->net; 696 + } 700 697 } 701 698 702 699 us = usb_gstrings_attach(cdev, rndis_strings, ··· 794 793 } 795 794 rndis->notify_req = no_free_ptr(request); 796 795 797 + rndis_opts->bind_count++; 798 + retain_and_null_ptr(net); 799 + 797 799 /* NOTE: all that is done without knowing or caring about 798 800 * the network link ... which is unavailable to this code 799 801 * until we're activated via set_alt().
··· 813 809 struct f_rndis_opts *opts; 814 810 815 811 opts = container_of(f, struct f_rndis_opts, func_inst); 816 - if (opts->bound) 812 + if (device_is_registered(&opts->net->dev)) 817 813 gether_cleanup(netdev_priv(opts->net)); 818 814 else 819 815 free_netdev(opts->net); 820 - opts->borrowed_net = opts->bound = true; 816 + opts->borrowed_net = true; 821 817 opts->net = net; 822 818 } 823 819 EXPORT_SYMBOL_GPL(rndis_borrow_net); ··· 875 871 876 872 opts = container_of(f, struct f_rndis_opts, func_inst); 877 873 if (!opts->borrowed_net) { 878 - if (opts->bound) 874 + if (device_is_registered(&opts->net->dev)) 879 875 gether_cleanup(netdev_priv(opts->net)); 880 876 else 881 877 free_netdev(opts->net); ··· 944 940 static void rndis_unbind(struct usb_configuration *c, struct usb_function *f) 945 941 { 946 942 struct f_rndis *rndis = func_to_rndis(f); 943 + struct f_rndis_opts *rndis_opts; 944 + 945 + rndis_opts = container_of(f->fi, struct f_rndis_opts, func_inst); 947 946 948 947 kfree(f->os_desc_table); 949 948 f->os_desc_n = 0; ··· 954 947 955 948 kfree(rndis->notify_req->buf); 956 949 usb_ep_free_request(rndis->notify, rndis->notify_req); 950 + 951 + rndis_opts->bind_count--; 952 + if (rndis_opts->bind_count == 0 && !rndis_opts->borrowed_net) 953 + gether_detach_gadget(rndis_opts->net); 957 954 } 958 955 959 956 static struct usb_function *rndis_alloc(struct usb_function_instance *fi)
+35 -28
drivers/usb/gadget/function/f_subset.c
··· 6 6 * Copyright (C) 2008 Nokia Corporation 7 7 */ 8 8 9 + #include <linux/cleanup.h> 9 10 #include <linux/slab.h> 10 11 #include <linux/kernel.h> 11 12 #include <linux/module.h> ··· 299 298 struct usb_ep *ep; 300 299 301 300 struct f_gether_opts *gether_opts; 301 + struct net_device *net __free(detach_gadget) = NULL; 302 302 303 303 gether_opts = container_of(f->fi, struct f_gether_opts, func_inst); 304 304 305 - /* 306 - * in drivers/usb/gadget/configfs.c:configfs_composite_bind() 307 - * configurations are bound in sequence with list_for_each_entry, 308 - * in each configuration its functions are bound in sequence 309 - * with list_for_each_entry, so we assume no race condition 310 - * with regard to gether_opts->bound access 311 - */ 312 - if (!gether_opts->bound) { 313 - mutex_lock(&gether_opts->lock); 314 - gether_set_gadget(gether_opts->net, cdev->gadget); 315 - status = gether_register_netdev(gether_opts->net); 316 - mutex_unlock(&gether_opts->lock); 317 - if (status) 318 - return status; 319 - gether_opts->bound = true; 320 - } 305 + scoped_guard(mutex, &gether_opts->lock) 306 + if (gether_opts->bind_count == 0 && !gether_opts->bound) { 307 + if (!device_is_registered(&gether_opts->net->dev)) { 308 + gether_set_gadget(gether_opts->net, cdev->gadget); 309 + status = gether_register_netdev(gether_opts->net); 310 + } else 311 + status = gether_attach_gadget(gether_opts->net, cdev->gadget); 312 + 313 + if (status) 314 + return status; 315 + net = gether_opts->net; 316 + } 321 317 322 318 us = usb_gstrings_attach(cdev, geth_strings, 323 319 ARRAY_SIZE(geth_string_defs)); ··· 327 329 /* allocate instance-specific interface IDs */ 328 330 status = usb_interface_id(c, f); 329 331 if (status < 0) 330 - goto fail; 332 + return status; 331 333 subset_data_intf.bInterfaceNumber = status; 332 - 333 - status = -ENODEV; 334 334 335 335 /* allocate instance-specific endpoints */ 336 336 ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_in_desc); 337 337 if (!ep) 338 - goto fail; 338 + return -ENODEV; 339 339 geth->port.in_ep = ep; 340 340 341 341 ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_out_desc); 342 342 if (!ep) 343 - goto fail; 343 + return -ENODEV; 344 344 geth->port.out_ep = ep; 345 345 346 346 /* support all relevant hardware speeds... we expect that when ··· 356 360 status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function, 357 361 ss_eth_function, ss_eth_function); 358 362 if (status) 359 - goto fail; 363 + return status; 360 364 361 365 /* NOTE: all that is done without knowing or caring about 362 366 * the network link ... which is unavailable to this code 363 367 * until we're activated via set_alt().
364 368 */ 365 369 370 + gether_opts->bind_count++; 371 + retain_and_null_ptr(net); 372 + 366 373 DBG(cdev, "CDC Subset: IN/%s OUT/%s\n", 367 374 geth->port.in_ep->name, geth->port.out_ep->name); 368 375 return 0; 369 - 370 - fail: 371 - ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); 372 - 373 - return status; 374 376 } 375 377 376 378 static inline struct f_gether_opts *to_f_gether_opts(struct config_item *item) ··· 411 417 struct f_gether_opts *opts; 412 418 413 419 opts = container_of(f, struct f_gether_opts, func_inst); 414 - if (opts->bound) 420 + if (device_is_registered(&opts->net->dev)) 415 421 gether_cleanup(netdev_priv(opts->net)); 416 422 else 417 423 free_netdev(opts->net); ··· 443 449 static void geth_free(struct usb_function *f) 444 450 { 445 451 struct f_gether *eth; 452 + struct f_gether_opts *opts; 453 + 454 + opts = container_of(f->fi, struct f_gether_opts, func_inst); 446 455 447 456 eth = func_to_geth(f); 457 + scoped_guard(mutex, &opts->lock) 458 + opts->refcnt--; 448 459 kfree(eth); 449 460 } 450 461 451 462 static void geth_unbind(struct usb_configuration *c, struct usb_function *f) 452 463 { 464 + struct f_gether_opts *opts; 465 + 466 + opts = container_of(f->fi, struct f_gether_opts, func_inst); 467 + 453 468 geth_string_defs[0].id = 0; 454 469 usb_free_all_descriptors(f); 470 + 471 + opts->bind_count--; 472 + if (opts->bind_count == 0 && !opts->bound) 473 + gether_detach_gadget(opts->net); 455 474 } 456 475 457 476 static struct usb_function *geth_alloc(struct usb_function_instance *fi)
+37 -10
drivers/usb/gadget/function/f_uac1_legacy.c
··· 360 360 static void f_audio_complete(struct usb_ep *ep, struct usb_request *req) 361 361 { 362 362 struct f_audio *audio = req->context; 363 - int status = req->status; 364 - u32 data = 0; 365 363 struct usb_ep *out_ep = audio->out_ep; 366 364 367 - switch (status) { 368 - 369 - case 0: /* normal completion? */ 370 - if (ep == out_ep) 365 + switch (req->status) { 366 + case 0: 367 + if (ep == out_ep) { 371 368 f_audio_out_ep_complete(ep, req); 372 - else if (audio->set_con) { 373 - memcpy(&data, req->buf, req->length); 374 - audio->set_con->set(audio->set_con, audio->set_cmd, 375 - le16_to_cpu(data)); 369 + } else if (audio->set_con) { 370 + struct usb_audio_control *con = audio->set_con; 371 + u8 type = con->type; 372 + u32 data; 373 + bool valid_request = false; 374 + 375 + switch (type) { 376 + case UAC_FU_MUTE: { 377 + u8 value; 378 + 379 + if (req->actual == sizeof(value)) { 380 + memcpy(&value, req->buf, sizeof(value)); 381 + data = value; 382 + valid_request = true; 383 + } 384 + break; 385 + } 386 + case UAC_FU_VOLUME: { 387 + __le16 value; 388 + 389 + if (req->actual == sizeof(value)) { 390 + memcpy(&value, req->buf, sizeof(value)); 391 + data = le16_to_cpu(value); 392 + valid_request = true; 393 + } 394 + break; 395 + } 396 + } 397 + 398 + if (valid_request) 399 + con->set(con, audio->set_cmd, data); 400 + else 401 + usb_ep_set_halt(ep); 402 + 376 403 audio->set_con = NULL; 377 404 } 378 405 break;
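The rewritten completion handler sizes each copy by control type and trusts only req->actual, the byte count the host really transferred, never the length the request was queued with. The same defensive shape, pulled out into a hypothetical helper:

#include <linux/string.h>
#include <linux/usb/gadget.h>

/* Accept a 2-byte little-endian payload only when the host sent
 * exactly two bytes; callers should stall the endpoint otherwise. */
static bool read_le16_payload(const struct usb_request *req, u32 *out)
{
	__le16 raw;

	if (req->actual != sizeof(raw))
		return false;

	memcpy(&raw, req->buf, sizeof(raw));
	*out = le16_to_cpu(raw);
	return true;
}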
+36 -3
drivers/usb/gadget/function/f_uvc.c
··· 413 413 { 414 414 int ret; 415 415 416 + guard(mutex)(&uvc->lock); 417 + if (uvc->func_unbound) { 418 + dev_dbg(&uvc->vdev.dev, "skipping function deactivate (unbound)\n"); 419 + return; 420 + } 421 + 416 422 if ((ret = usb_function_deactivate(&uvc->func)) < 0) 417 423 uvcg_info(&uvc->func, "UVC disconnect failed with %d\n", ret); 418 424 } ··· 437 431 438 432 static DEVICE_ATTR_RO(function_name); 439 433 434 + static void uvc_vdev_release(struct video_device *vdev) 435 + { 436 + struct uvc_device *uvc = video_get_drvdata(vdev); 437 + 438 + /* Signal uvc_function_unbind() that the video device has been released */ 439 + if (uvc->vdev_release_done) 440 + complete(uvc->vdev_release_done); 441 + } 442 + 440 443 static int 441 444 uvc_register_video(struct uvc_device *uvc) 442 445 { ··· 458 443 uvc->vdev.v4l2_dev->dev = &cdev->gadget->dev; 459 444 uvc->vdev.fops = &uvc_v4l2_fops; 460 445 uvc->vdev.ioctl_ops = &uvc_v4l2_ioctl_ops; 461 - uvc->vdev.release = video_device_release_empty; 446 + uvc->vdev.release = uvc_vdev_release; 462 447 uvc->vdev.vfl_dir = VFL_DIR_TX; 463 448 uvc->vdev.lock = &uvc->video.mutex; 464 449 uvc->vdev.device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; ··· 674 659 int ret = -EINVAL; 675 660 676 661 uvcg_info(f, "%s()\n", __func__); 662 + scoped_guard(mutex, &uvc->lock) 663 + uvc->func_unbound = false; 677 664 678 665 opts = fi_to_f_uvc_opts(f->fi); 679 666 /* Sanity check the streaming endpoint module parameters. */ ··· 1005 988 static void uvc_function_unbind(struct usb_configuration *c, 1006 989 struct usb_function *f) 1007 990 { 991 + DECLARE_COMPLETION_ONSTACK(vdev_release_done); 1008 992 struct usb_composite_dev *cdev = c->cdev; 1009 993 struct uvc_device *uvc = to_uvc(f); 1010 994 struct uvc_video *video = &uvc->video; 1011 995 long wait_ret = 1; 996 + bool connected; 1012 997 1013 998 uvcg_info(f, "%s()\n", __func__); 999 + scoped_guard(mutex, &uvc->lock) { 1000 + uvc->func_unbound = true; 1001 + uvc->vdev_release_done = &vdev_release_done; 1002 + connected = uvc->func_connected; 1003 + } 1014 1004 1015 1005 kthread_cancel_work_sync(&video->hw_submit); 1016 1006 ··· 1030 1006 * through the video device removal uevent. Allow some time for the 1031 1007 * application to close out before things get deleted.
1032 1008 */ 1033 - if (uvc->func_connected) { 1009 + if (connected) { 1034 1010 uvcg_dbg(f, "waiting for clean disconnect\n"); 1035 1011 wait_ret = wait_event_interruptible_timeout(uvc->func_connected_queue, 1036 1012 uvc->func_connected == false, msecs_to_jiffies(500)); ··· 1041 1017 video_unregister_device(&uvc->vdev); 1042 1018 v4l2_device_unregister(&uvc->v4l2_dev); 1043 1019 1044 - if (uvc->func_connected) { 1020 + scoped_guard(mutex, &uvc->lock) 1021 + connected = uvc->func_connected; 1022 + 1023 + if (connected) { 1045 1024 /* 1046 1025 * Wait for the release to occur to ensure there are no longer any 1047 1026 * pending operations that may cause panics when resources are cleaned ··· 1055 1028 uvc->func_connected == false, msecs_to_jiffies(1000)); 1056 1029 uvcg_dbg(f, "done waiting for release with ret: %ld\n", wait_ret); 1057 1030 } 1031 + 1032 + /* Wait for the video device to be released */ 1033 + wait_for_completion(&vdev_release_done); 1034 + uvc->vdev_release_done = NULL; 1058 1035 1059 1036 usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); 1060 1037 kfree(uvc->control_buf); ··· 1078 1047 return ERR_PTR(-ENOMEM); 1079 1048 1080 1049 mutex_init(&uvc->video.mutex); 1050 + mutex_init(&uvc->lock); 1051 + uvc->func_unbound = true; 1081 1052 uvc->state = UVC_STATE_DISCONNECTED; 1082 1053 init_waitqueue_head(&uvc->func_connected_queue); 1083 1054 opts = fi_to_f_uvc_opts(fi);
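The teardown above serializes on the video_device release callback with an on-stack completion, so unbind cannot return, and the surrounding memory cannot be freed, while v4l2 still holds a reference. The bare pattern, with hypothetical widget names:

#include <linux/completion.h>

struct widget {
	struct completion *release_done;
};

void widget_unregister(struct widget *w);	/* assumed to exist */

/* The subsystem invokes this when the last reference drops. */
static void widget_release(struct widget *w)
{
	if (w->release_done)
		complete(w->release_done);
}

static void widget_unbind(struct widget *w)
{
	DECLARE_COMPLETION_ONSTACK(done);

	w->release_done = &done;
	widget_unregister(w);		/* the final put may be deferred */
	wait_for_completion(&done);	/* after this, freeing w is safe */
	w->release_done = NULL;
}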
+15 -6
drivers/usb/gadget/function/u_ecm.h
··· 15 15 16 16 #include <linux/usb/composite.h> 17 17 18 + /** 19 + * struct f_ecm_opts - ECM function options 20 + * @func_inst: USB function instance. 21 + * @net: The net_device associated with the ECM function. 22 + * @bound: True if the net_device is shared and pre-registered during the 23 + * legacy composite driver's bind phase (e.g., multi.c). If false, 24 + * the ECM function will register the net_device during its own 25 + * bind phase. 26 + * @bind_count: Tracks the number of configurations the ECM function is 27 + * bound to, preventing double-registration of the @net device. 28 + * @lock: Protects the data from concurrent access by configfs read/write 29 + * and create symlink/remove symlink operations. 30 + * @refcnt: Reference counter for the function instance. 31 + */ 18 32 struct f_ecm_opts { 19 33 struct usb_function_instance func_inst; 20 34 struct net_device *net; 21 35 bool bound; 36 + int bind_count; 22 37 23 - /* 24 - * Read/write access to configfs attributes is handled by configfs. 25 - * 26 - * This is to protect the data from concurrent access by read/write 27 - * and create symlink/remove symlink. 28 - */ 29 38 struct mutex lock; 30 39 int refcnt; 31 40 };
+15 -6
drivers/usb/gadget/function/u_eem.h
··· 15 15 16 16 #include <linux/usb/composite.h> 17 17 18 + /** 19 + * struct f_eem_opts - EEM function options 20 + * @func_inst: USB function instance. 21 + * @net: The net_device associated with the EEM function. 22 + * @bound: True if the net_device is shared and pre-registered during the 23 + * legacy composite driver's bind phase (e.g., multi.c). If false, 24 + * the EEM function will register the net_device during its own 25 + * bind phase. 26 + * @bind_count: Tracks the number of configurations the EEM function is 27 + * bound to, preventing double-registration of the @net device. 28 + * @lock: Protects the data from concurrent access by configfs read/write 29 + * and create symlink/remove symlink operations. 30 + * @refcnt: Reference counter for the function instance. 31 + */ 18 32 struct f_eem_opts { 19 33 struct usb_function_instance func_inst; 20 34 struct net_device *net; 21 35 bool bound; 36 + int bind_count; 22 37 23 - /* 24 - * Read/write access to configfs attributes is handled by configfs. 25 - * 26 - * This is to protect the data from concurrent access by read/write 27 - * and create symlink/remove symlink. 28 - */ 29 38 struct mutex lock; 30 39 int refcnt; 31 40 };
+9 -7
drivers/usb/gadget/function/u_ether.c
··· 113 113 114 114 strscpy(p->driver, "g_ether", sizeof(p->driver)); 115 115 strscpy(p->version, UETH__VERSION, sizeof(p->version)); 116 - strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version)); 117 - strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info)); 116 + if (dev->gadget) { 117 + strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version)); 118 + strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info)); 119 + } 118 120 } 119 121 120 122 /* REVISIT can also support: ··· 1225 1223 1226 1224 DBG(dev, "%s\n", __func__); 1227 1225 1226 + spin_lock(&dev->lock); 1227 + dev->port_usb = NULL; 1228 + link->is_suspend = false; 1229 + spin_unlock(&dev->lock); 1230 + 1228 1231 netif_stop_queue(dev->net); 1229 1232 netif_carrier_off(dev->net); 1230 1233 ··· 1267 1260 dev->header_len = 0; 1268 1261 dev->unwrap = NULL; 1269 1262 dev->wrap = NULL; 1270 - 1271 - spin_lock(&dev->lock); 1272 - dev->port_usb = NULL; 1273 - link->is_suspend = false; 1274 - spin_unlock(&dev->lock); 1275 1263 } 1276 1264 EXPORT_SYMBOL_GPL(gether_disconnect); 1277 1265
+15 -7
drivers/usb/gadget/function/u_gether.h
··· 15 15 16 16 #include <linux/usb/composite.h> 17 17 18 + /** 19 + * struct f_gether_opts - subset function options 20 + * @func_inst: USB function instance. 21 + * @net: The net_device associated with the subset function. 22 + * @bound: True if the net_device is shared and pre-registered during the 23 + * legacy composite driver's bind phase (e.g., multi.c). If false, 24 + * the subset function will register the net_device during its own 25 + * bind phase. 26 + * @bind_count: Tracks the number of configurations the subset function is 27 + * bound to, preventing double-registration of the @net device. 28 + * @lock: Protects the data from concurrent access by configfs read/write 29 + * and create symlink/remove symlink operations. 30 + * @refcnt: Reference counter for the function instance. 31 + */ 18 32 struct f_gether_opts { 19 33 struct usb_function_instance func_inst; 20 34 struct net_device *net; 21 35 bool bound; 22 - 23 - /* 24 - * Read/write access to configfs attributes is handled by configfs. 25 - * 26 - * This is to protect the data from concurrent access by read/write 27 - * and create symlink/remove symlink. 28 - */ 36 + int bind_count; 29 37 struct mutex lock; 30 38 int refcnt; 31 39 };
+15 -6
drivers/usb/gadget/function/u_ncm.h
··· 15 15 16 16 #include <linux/usb/composite.h> 17 17 18 + /** 19 + * struct f_ncm_opts - NCM function options 20 + * @func_inst: USB function instance. 21 + * @net: The net_device associated with the NCM function. 22 + * @bind_count: Tracks the number of configurations the NCM function is 23 + * bound to, preventing double-registration of the @net device. 24 + * @ncm_interf_group: ConfigFS group for NCM interface. 25 + * @ncm_os_desc: USB OS descriptor for NCM. 26 + * @ncm_ext_compat_id: Extended compatibility ID. 27 + * @lock: Protects the data from concurrent access by configfs read/write 28 + * and create symlink/remove symlink operations. 29 + * @refcnt: Reference counter for the function instance. 30 + * @max_segment_size: Maximum segment size. 31 + */ 18 32 struct f_ncm_opts { 19 33 struct usb_function_instance func_inst; 20 34 struct net_device *net; ··· 37 23 struct config_group *ncm_interf_group; 38 24 struct usb_os_desc ncm_os_desc; 39 25 char ncm_ext_compat_id[16]; 40 - /* 41 - * Read/write access to configfs attributes is handled by configfs. 42 - * 43 - * This is to protect the data from concurrent access by read/write 44 - * and create symlink/remove symlink. 45 - */ 26 + 46 27 struct mutex lock; 47 28 int refcnt; 48 29
+23 -8
drivers/usb/gadget/function/u_rndis.h
··· 15 15 16 16 #include <linux/usb/composite.h> 17 17 18 + /** 19 + * struct f_rndis_opts - RNDIS function options 20 + * @func_inst: USB function instance. 21 + * @vendor_id: Vendor ID. 22 + * @manufacturer: Manufacturer string. 23 + * @net: The net_device associated with the RNDIS function. 24 + * @bind_count: Tracks the number of configurations the RNDIS function is 25 + * bound to, preventing double-registration of the @net device. 26 + * @borrowed_net: True if the net_device is shared and pre-registered during 27 + * the legacy composite driver's bind phase (e.g., multi.c). 28 + * If false, the RNDIS function will register the net_device 29 + * during its own bind phase. 30 + * @rndis_interf_group: ConfigFS group for RNDIS interface. 31 + * @rndis_os_desc: USB OS descriptor for RNDIS. 32 + * @rndis_ext_compat_id: Extended compatibility ID. 33 + * @class: USB class. 34 + * @subclass: USB subclass. 35 + * @protocol: USB protocol. 36 + * @lock: Protects the data from concurrent access by configfs read/write 37 + * and create symlink/remove symlink operations. 38 + * @refcnt: Reference counter for the function instance. 39 + */ 18 40 struct f_rndis_opts { 19 41 struct usb_function_instance func_inst; 20 42 u32 vendor_id; 21 43 const char *manufacturer; 22 44 struct net_device *net; 23 - bool bound; 45 + int bind_count; 24 46 bool borrowed_net; 25 47 26 48 struct config_group *rndis_interf_group; ··· 52 30 u8 class; 53 31 u8 subclass; 54 32 u8 protocol; 55 - 56 - /* 57 - * Read/write access to configfs attributes is handled by configfs. 58 - * 59 - * This is to protect the data from concurrent access by read/write 60 - * and create symlink/remove symlink. 61 - */ 62 33 struct mutex lock; 63 34 int refcnt; 64 35 };
+3
drivers/usb/gadget/function/uvc.h
··· 155 155 enum uvc_state state; 156 156 struct usb_function func; 157 157 struct uvc_video video; 158 + struct completion *vdev_release_done; 159 + struct mutex lock; /* protects func_unbound and func_connected */ 160 + bool func_unbound; 158 161 bool func_connected; 159 162 wait_queue_head_t func_connected_queue; 160 163
+4 -1
drivers/usb/gadget/function/uvc_v4l2.c
··· 574 574 if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST) 575 575 return -EINVAL; 576 576 577 + guard(mutex)(&uvc->lock); 578 + 577 579 if (sub->type == UVC_EVENT_SETUP && uvc->func_connected) 578 580 return -EBUSY; 579 581 ··· 597 595 uvc_function_disconnect(uvc); 598 596 uvcg_video_disable(&uvc->video); 599 597 uvcg_free_buffers(&uvc->video.queue); 600 - uvc->func_connected = false; 598 + scoped_guard(mutex, &uvc->lock) 599 + uvc->func_connected = false; 601 600 wake_up_interruptible(&uvc->func_connected_queue); 602 601 } 603 602
+26 -16
drivers/usb/gadget/udc/dummy_hcd.c
··· 462 462 463 463 /* Report reset and disconnect events to the driver */ 464 464 if (dum->ints_enabled && (disconnect || reset)) { 465 - stop_activity(dum); 466 465 ++dum->callback_usage; 466 + /* 467 + * stop_activity() can drop dum->lock, so it must 468 + * not come between the dum->ints_enabled test 469 + * and the ++dum->callback_usage. 470 + */ 471 + stop_activity(dum); 467 472 spin_unlock(&dum->lock); 468 473 if (reset) 469 474 usb_gadget_udc_reset(&dum->gadget, dum->driver); ··· 913 908 spin_lock_irqsave(&dum->lock, flags); 914 909 dum->pullup = (value != 0); 915 910 set_link_state(dum_hcd); 916 - if (value == 0) { 917 - /* 918 - * Emulate synchronize_irq(): wait for callbacks to finish. 919 - * This seems to be the best place to emulate the call to 920 - * synchronize_irq() that's in usb_gadget_remove_driver(). 921 - * Doing it in dummy_udc_stop() would be too late since it 922 - * is called after the unbind callback and unbind shouldn't 923 - * be invoked until all the other callbacks are finished. 924 - */ 925 - while (dum->callback_usage > 0) { 926 - spin_unlock_irqrestore(&dum->lock, flags); 927 - usleep_range(1000, 2000); 928 - spin_lock_irqsave(&dum->lock, flags); 929 - } 930 - } 931 911 spin_unlock_irqrestore(&dum->lock, flags); 932 912 933 913 usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd)); ··· 935 945 936 946 spin_lock_irq(&dum->lock); 937 947 dum->ints_enabled = enable; 948 + if (!enable) { 949 + /* 950 + * Emulate synchronize_irq(): wait for callbacks to finish. 951 + * This has to happen after emulated interrupts are disabled 952 + * (dum->ints_enabled is clear) and before the unbind callback, 953 + * just like the call to synchronize_irq() in 954 + * gadget/udc/core:gadget_unbind_driver(). 955 + */ 956 + while (dum->callback_usage > 0) { 957 + spin_unlock_irq(&dum->lock); 958 + usleep_range(1000, 2000); 959 + spin_lock_irq(&dum->lock); 960 + } 961 + } 938 962 spin_unlock_irq(&dum->lock); 939 963 } 940 964 ··· 1538 1534 /* rescan to continue with any other queued i/o */ 1539 1535 if (rescan) 1540 1536 goto top; 1537 + 1538 + /* request not fully transferred; stop iterating to 1539 + * preserve data ordering across queued requests. 1540 + */ 1541 + if (req->req.actual < req->req.length) 1542 + break; 1541 1543 } 1542 1544 return sent; 1543 1545 }
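The relocated wait loop in the hunk above emulates synchronize_irq(): once ints_enabled is clear no new callback can start, so waiting for callback_usage to reach zero guarantees the in-flight ones have returned, and the lock must be dropped around the sleep because those callbacks take it in order to finish. Reduced to a sketch over a hypothetical structure:

#include <linux/delay.h>
#include <linux/spinlock.h>

struct emu {
	spinlock_t lock;
	int callback_usage;
};

/* Caller must already have prevented new callbacks from starting. */
static void emu_drain_callbacks(struct emu *e)
{
	spin_lock_irq(&e->lock);
	while (e->callback_usage > 0) {
		/* Callbacks need the lock to finish and decrement
		 * callback_usage, so release it while sleeping. */
		spin_unlock_irq(&e->lock);
		usleep_range(1000, 2000);
		spin_lock_irq(&e->lock);
	}
	spin_unlock_irq(&e->lock);
}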
+2 -2
drivers/usb/host/ehci-brcm.c
··· 31 31 int res; 32 32 33 33 /* Wait for next microframe (every 125 usecs) */ 34 - res = readl_relaxed_poll_timeout(&ehci->regs->frame_index, val, 35 - val != frame_idx, 1, 130); 34 + res = readl_relaxed_poll_timeout_atomic(&ehci->regs->frame_index, 35 + val, val != frame_idx, 1, 130); 36 36 if (res) 37 37 ehci_err(ehci, "Error waiting for SOF\n"); 38 38 udelay(delay);
+3 -15
drivers/usb/host/xhci-sideband.c
··· 93 93 static void 94 94 __xhci_sideband_remove_interrupter(struct xhci_sideband *sb) 95 95 { 96 - struct usb_device *udev; 97 - 98 96 lockdep_assert_held(&sb->mutex); 99 97 100 98 if (!sb->ir) ··· 100 102 101 103 xhci_remove_secondary_interrupter(xhci_to_hcd(sb->xhci), sb->ir); 102 104 sb->ir = NULL; 103 - udev = sb->vdev->udev; 104 - 105 - if (udev->state != USB_STATE_NOTATTACHED) 106 - usb_offload_put(udev); 107 105 } 108 106 109 107 /* sideband api functions */ ··· 285 291 * Allow other drivers, such as usb controller driver, to check if there are 286 292 * any sideband activity on the host controller. This information could be used 287 293 * for power management or other forms of resource management. The caller should 288 - * ensure downstream usb devices are all either suspended or marked as 289 - * "offload_at_suspend" to ensure the correctness of the return value. 294 + * ensure downstream usb devices are all marked as "offload_pm_locked" to ensure 295 + * the correctness of the return value. 290 296 * 291 297 * Returns true on any active sideband existence, false otherwise. 292 298 */ ··· 322 328 xhci_sideband_create_interrupter(struct xhci_sideband *sb, int num_seg, 323 329 bool ip_autoclear, u32 imod_interval, int intr_num) 324 330 { 325 - int ret = 0; 326 - struct usb_device *udev; 327 - 328 331 if (!sb || !sb->xhci) 329 332 return -ENODEV; 330 333 ··· 339 348 if (!sb->ir) 340 349 return -ENOMEM; 341 350 342 - udev = sb->vdev->udev; 343 - ret = usb_offload_get(udev); 344 - 345 351 sb->ir->ip_autoclear = ip_autoclear; 346 352 347 - return ret; 353 + return 0; 348 354 } 349 355 EXPORT_SYMBOL_GPL(xhci_sideband_create_interrupter); 350 356
+5 -2
drivers/usb/misc/usbio.c
··· 614 614 usb_fill_bulk_urb(usbio->urb, udev, usbio->rx_pipe, usbio->rxbuf, 615 615 usbio->rxbuf_len, usbio_bulk_recv, usbio); 616 616 ret = usb_submit_urb(usbio->urb, GFP_KERNEL); 617 - if (ret) 618 - return dev_err_probe(dev, ret, "Submitting usb urb\n"); 617 + if (ret) { 618 + dev_err_probe(dev, ret, "Submitting usb urb\n"); 619 + goto err_free_urb; 620 + } 619 621 620 622 mutex_lock(&usbio->ctrl_mutex); 621 623 ··· 665 663 err_unlock: 666 664 mutex_unlock(&usbio->ctrl_mutex); 667 665 usb_kill_urb(usbio->urb); 666 + err_free_urb: 668 667 usb_free_urb(usbio->urb); 669 668 670 669 return ret;
+3
drivers/usb/serial/io_edgeport.c
··· 73 73 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_22I) }, 74 74 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_4) }, 75 75 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_COMPATIBLE) }, 76 + { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_BLACKBOX_IC135A) }, 76 77 { } 77 78 }; 78 79 ··· 122 121 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8R) }, 123 122 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8RR) }, 124 123 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_8) }, 124 + { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_BLACKBOX_IC135A) }, 125 125 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) }, 126 126 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) }, 127 127 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) }, ··· 472 470 case ION_DEVICE_ID_EDGEPORT_2_DIN: 473 471 case ION_DEVICE_ID_EDGEPORT_4_DIN: 474 472 case ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU: 473 + case ION_DEVICE_ID_BLACKBOX_IC135A: 475 474 product_info->IsRS232 = 1; 476 475 break; 477 476
+1
drivers/usb/serial/io_usbvend.h
··· 211 211 212 212 // 213 213 // Definitions for other product IDs 214 + #define ION_DEVICE_ID_BLACKBOX_IC135A 0x0801 // OEM device (rebranded Edgeport/4) 214 215 #define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device 215 216 #define ION_DEVICE_ID_E5805A 0x1A01 // OEM device (rebranded Edgeport/4) 216 217
+4
drivers/usb/serial/option.c
··· 2441 2441 { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x30) }, /* MeiG Smart SRM815 and SRM825L */ 2442 2442 { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x40) }, /* MeiG Smart SRM825L */ 2443 2443 { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x60) }, /* MeiG Smart SRM825L */ 2444 + { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d38, 0xff, 0xff, 0x30) }, /* MeiG Smart SRM825WN (Diag) */ 2445 + { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d38, 0xff, 0xff, 0x40) }, /* MeiG Smart SRM825WN (AT) */ 2446 + { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d38, 0xff, 0xff, 0x60) }, /* MeiG Smart SRM825WN (NMEA) */ 2444 2447 { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ 2445 2448 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ 2446 2449 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ ··· 2464 2461 { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0302, 0xff) }, /* Rolling RW101R-GL (laptop MBIM) */ 2465 2462 { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0802, 0xff), /* Rolling RW350-GL (laptop MBIM) */ 2466 2463 .driver_info = RSVD(5) }, 2464 + { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x1003, 0xff) }, /* Rolling RW135R-GL (laptop MBIM) */ 2467 2465 { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Global */ 2468 2466 { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0x00, 0x40) }, 2469 2467 { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x40) },
+22 -22
drivers/usb/typec/altmodes/thunderbolt.c
··· 39 39 40 40 static int tbt_enter_mode(struct tbt_altmode *tbt) 41 41 { 42 - struct typec_altmode *plug = tbt->plug[TYPEC_PLUG_SOP_P]; 43 - u32 vdo; 44 - 45 - vdo = tbt->alt->vdo & (TBT_VENDOR_SPECIFIC_B0 | TBT_VENDOR_SPECIFIC_B1); 46 - vdo |= tbt->alt->vdo & TBT_INTEL_SPECIFIC_B0; 47 - vdo |= TBT_MODE; 48 - 49 - if (plug) { 50 - if (typec_cable_is_active(tbt->cable)) 51 - vdo |= TBT_ENTER_MODE_ACTIVE_CABLE; 52 - 53 - vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_SPEED(plug->vdo)); 54 - vdo |= plug->vdo & TBT_CABLE_ROUNDED; 55 - vdo |= plug->vdo & TBT_CABLE_OPTICAL; 56 - vdo |= plug->vdo & TBT_CABLE_RETIMER; 57 - vdo |= plug->vdo & TBT_CABLE_LINK_TRAINING; 58 - } else { 59 - vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_USB3_PASSIVE); 60 - } 61 - 62 - tbt->enter_vdo = vdo; 63 - return typec_altmode_enter(tbt->alt, &vdo); 42 + return typec_altmode_enter(tbt->alt, &tbt->enter_vdo); 64 43 } 65 44 66 45 static void tbt_altmode_work(struct work_struct *work) ··· 316 337 { 317 338 struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt); 318 339 struct typec_altmode *plug; 340 + u32 vdo; 319 341 320 342 if (tbt->cable) 321 343 return true; ··· 343 363 344 364 tbt->plug[i] = plug; 345 365 } 366 + 367 + vdo = tbt->alt->vdo & (TBT_VENDOR_SPECIFIC_B0 | TBT_VENDOR_SPECIFIC_B1); 368 + vdo |= tbt->alt->vdo & TBT_INTEL_SPECIFIC_B0; 369 + vdo |= TBT_MODE; 370 + plug = tbt->plug[TYPEC_PLUG_SOP_P]; 371 + 372 + if (plug) { 373 + if (typec_cable_is_active(tbt->cable)) 374 + vdo |= TBT_ENTER_MODE_ACTIVE_CABLE; 375 + 376 + vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_SPEED(plug->vdo)); 377 + vdo |= plug->vdo & TBT_CABLE_ROUNDED; 378 + vdo |= plug->vdo & TBT_CABLE_OPTICAL; 379 + vdo |= plug->vdo & TBT_CABLE_RETIMER; 380 + vdo |= plug->vdo & TBT_CABLE_LINK_TRAINING; 381 + } else { 382 + vdo |= TBT_ENTER_MODE_CABLE_SPEED(TBT_CABLE_USB3_PASSIVE); 383 + } 384 + 385 + tbt->enter_vdo = vdo; 346 386 347 387 return true; 348 388 }
-4
drivers/usb/typec/class.c
··· 686 686 687 687 alt->adev.dev.bus = &typec_bus; 688 688 689 - /* Plug alt modes need a class to generate udev events. */ 690 - if (is_typec_plug(parent)) 691 - alt->adev.dev.class = &typec_class; 692 - 693 689 ret = device_register(&alt->adev.dev); 694 690 if (ret) { 695 691 dev_err(parent, "failed to register alternate mode (%d)\n",
+7 -2
drivers/usb/typec/ucsi/ucsi.c
··· 43 43 if (cci & UCSI_CCI_BUSY) 44 44 return; 45 45 46 - if (UCSI_CCI_CONNECTOR(cci)) 47 - ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci)); 46 + if (UCSI_CCI_CONNECTOR(cci)) { 47 + if (UCSI_CCI_CONNECTOR(cci) <= ucsi->cap.num_connectors) 48 + ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci)); 49 + else 50 + dev_err(ucsi->dev, "bogus connector number in CCI: %lu\n", 51 + UCSI_CCI_CONNECTOR(cci)); 52 + } 48 53 49 54 if (cci & UCSI_CCI_ACK_COMPLETE && 50 55 test_and_clear_bit(ACK_PENDING, &ucsi->flags))
+1 -1
fs/btrfs/extent-tree.c
··· 495 495 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 496 496 if (key.objectid != bytenr || 497 497 key.type != BTRFS_EXTENT_DATA_REF_KEY) 498 - return ret; 498 + return -ENOENT; 499 499 500 500 ref = btrfs_item_ptr(leaf, path->slots[0], 501 501 struct btrfs_extent_data_ref);
+10
fs/ocfs2/inode.c
··· 1505 1505 goto bail; 1506 1506 } 1507 1507 1508 + if (le16_to_cpu(data->id_count) > 1509 + ocfs2_max_inline_data_with_xattr(sb, di)) { 1510 + rc = ocfs2_error(sb, 1511 + "Invalid dinode #%llu: inline data id_count %u exceeds max %d\n", 1512 + (unsigned long long)bh->b_blocknr, 1513 + le16_to_cpu(data->id_count), 1514 + ocfs2_max_inline_data_with_xattr(sb, di)); 1515 + goto bail; 1516 + } 1517 + 1508 1518 if (le64_to_cpu(di->i_size) > le16_to_cpu(data->id_count)) { 1509 1519 rc = ocfs2_error(sb, 1510 1520 "Invalid dinode #%llu: inline data i_size %llu exceeds id_count %u\n",
+4
fs/smb/client/fs_context.c
··· 588 588 while (IS_DELIM(*cursor1)) 589 589 cursor1++; 590 590 591 + /* exit in case of only delimiters */ 592 + if (!*cursor1) 593 + return NULL; 594 + 591 595 /* copy the first letter */ 592 596 *cursor2 = *cursor1; 593 597
+89 -32
fs/smb/server/smb2pdu.c
··· 3402 3402 KSMBD_SHARE_FLAG_ACL_XATTR)) { 3403 3403 struct smb_fattr fattr; 3404 3404 struct smb_ntsd *pntsd; 3405 - int pntsd_size, ace_num = 0; 3405 + int pntsd_size; 3406 + size_t scratch_len; 3406 3407 3407 3408 ksmbd_acls_fattr(&fattr, idmap, inode); 3408 - if (fattr.cf_acls) 3409 - ace_num = fattr.cf_acls->a_count; 3410 - if (fattr.cf_dacls) 3411 - ace_num += fattr.cf_dacls->a_count; 3409 + scratch_len = smb_acl_sec_desc_scratch_len(&fattr, 3410 + NULL, 0, 3411 + OWNER_SECINFO | GROUP_SECINFO | 3412 + DACL_SECINFO); 3413 + if (!scratch_len || scratch_len == SIZE_MAX) { 3414 + rc = -EFBIG; 3415 + posix_acl_release(fattr.cf_acls); 3416 + posix_acl_release(fattr.cf_dacls); 3417 + goto err_out; 3418 + } 3412 3419 3413 - pntsd = kmalloc(sizeof(struct smb_ntsd) + 3414 - sizeof(struct smb_sid) * 3 + 3415 - sizeof(struct smb_acl) + 3416 - sizeof(struct smb_ace) * ace_num * 2, 3417 - KSMBD_DEFAULT_GFP); 3420 + pntsd = kvzalloc(scratch_len, KSMBD_DEFAULT_GFP); 3418 3421 if (!pntsd) { 3422 + rc = -ENOMEM; 3419 3423 posix_acl_release(fattr.cf_acls); 3420 3424 posix_acl_release(fattr.cf_dacls); 3421 3425 goto err_out; ··· 3434 3430 posix_acl_release(fattr.cf_acls); 3435 3431 posix_acl_release(fattr.cf_dacls); 3436 3432 if (rc) { 3437 - kfree(pntsd); 3433 + kvfree(pntsd); 3438 3434 goto err_out; 3439 3435 } 3440 3436 ··· 3444 3440 pntsd, 3445 3441 pntsd_size, 3446 3442 false); 3447 - kfree(pntsd); 3443 + kvfree(pntsd); 3448 3444 if (rc) 3449 3445 pr_err("failed to store ntacl in xattr : %d\n", 3450 3446 rc); ··· 5376 5372 if (test_share_config_flag(work->tcon->share_conf, 5377 5373 KSMBD_SHARE_FLAG_PIPE)) { 5378 5374 /* smb2 info file called for pipe */ 5379 - return smb2_get_info_file_pipe(work->sess, req, rsp, 5375 + rc = smb2_get_info_file_pipe(work->sess, req, rsp, 5380 5376 work->response_buf); 5377 + goto iov_pin_out; 5381 5378 } 5382 5379 5383 5380 if (work->next_smb2_rcv_hdr_off) { ··· 5478 5473 rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength), 5479 5474 rsp, work->response_buf); 5480 5475 ksmbd_fd_put(work, fp); 5476 + 5477 + iov_pin_out: 5478 + if (!rc) 5479 + rc = ksmbd_iov_pin_rsp(work, (void *)rsp, 5480 + offsetof(struct smb2_query_info_rsp, Buffer) + 5481 + le32_to_cpu(rsp->OutputBufferLength)); 5481 5482 return rc; 5482 5483 } ··· 5710 5699 rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength), 5711 5700 rsp, work->response_buf); 5712 5701 path_put(&path); 5702 + 5703 + if (!rc) 5704 + rc = ksmbd_iov_pin_rsp(work, (void *)rsp, 5705 + offsetof(struct smb2_query_info_rsp, Buffer) + 5706 + le32_to_cpu(rsp->OutputBufferLength)); 5713 5707 return rc; 5714 5708 } ··· 5724 5708 { 5725 5709 struct ksmbd_file *fp; 5726 5710 struct mnt_idmap *idmap; 5727 - struct smb_ntsd *pntsd = (struct smb_ntsd *)rsp->Buffer, *ppntsd = NULL; 5711 + struct smb_ntsd *pntsd = NULL, *ppntsd = NULL; 5728 5712 struct smb_fattr fattr = {{0}}; 5729 5713 struct inode *inode; 5730 5714 __u32 secdesclen = 0; 5731 5715 unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID; 5732 5716 int addition_info = le32_to_cpu(req->AdditionalInformation); 5733 - int rc = 0, ppntsd_size = 0; 5717 + int rc = 0, ppntsd_size = 0, max_len; 5718 + size_t scratch_len = 0; 5734 5719 5735 5720 if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO | 5736 5721 PROTECTED_DACL_SECINFO | 5737 5722 UNPROTECTED_DACL_SECINFO)) { 5738 5723 ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n", 5739 5724 addition_info); 5725 + 5726 + pntsd = kzalloc(ALIGN(sizeof(struct smb_ntsd), 8), 5727 +
KSMBD_DEFAULT_GFP); 5728 + if (!pntsd) 5729 + return -ENOMEM; 5740 5730 5741 5731 pntsd->revision = cpu_to_le16(1); 5742 5732 pntsd->type = cpu_to_le16(SELF_RELATIVE | DACL_PROTECTED); ··· 5752 5730 pntsd->dacloffset = 0; 5753 5731 5754 5732 secdesclen = sizeof(struct smb_ntsd); 5755 - rsp->OutputBufferLength = cpu_to_le32(secdesclen); 5756 - 5757 - return 0; 5733 + goto iov_pin; 5758 5734 } 5759 5735 5760 5736 if (work->next_smb2_rcv_hdr_off) { ··· 5784 5764 &ppntsd); 5785 5765 5786 5766 /* Check if sd buffer size exceeds response buffer size */ 5787 - if (smb2_resp_buf_len(work, 8) > ppntsd_size) 5788 - rc = build_sec_desc(idmap, pntsd, ppntsd, ppntsd_size, 5789 - addition_info, &secdesclen, &fattr); 5767 + max_len = smb2_calc_max_out_buf_len(work, 5768 + offsetof(struct smb2_query_info_rsp, Buffer), 5769 + le32_to_cpu(req->OutputBufferLength)); 5770 + if (max_len < 0) { 5771 + rc = -EINVAL; 5772 + goto release_acl; 5773 + } 5774 + 5775 + scratch_len = smb_acl_sec_desc_scratch_len(&fattr, ppntsd, 5776 + ppntsd_size, addition_info); 5777 + if (!scratch_len || scratch_len == SIZE_MAX) { 5778 + rc = -EFBIG; 5779 + goto release_acl; 5780 + } 5781 + 5782 + pntsd = kvzalloc(scratch_len, KSMBD_DEFAULT_GFP); 5783 + if (!pntsd) { 5784 + rc = -ENOMEM; 5785 + goto release_acl; 5786 + } 5787 + 5788 + rc = build_sec_desc(idmap, pntsd, ppntsd, ppntsd_size, 5789 + addition_info, &secdesclen, &fattr); 5790 + 5791 + release_acl: 5790 5792 posix_acl_release(fattr.cf_acls); 5791 5793 posix_acl_release(fattr.cf_dacls); 5792 5794 kfree(ppntsd); 5793 5795 ksmbd_fd_put(work, fp); 5794 - if (rc) 5795 - return rc; 5796 5796 5797 + if (!rc && ALIGN(secdesclen, 8) > scratch_len) 5798 + rc = -EFBIG; 5799 + if (rc) 5800 + goto err_out; 5801 + 5802 + iov_pin: 5797 5803 rsp->OutputBufferLength = cpu_to_le32(secdesclen); 5798 - return 0; 5804 + rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength), 5805 + rsp, work->response_buf); 5806 + if (rc) 5807 + goto err_out; 5808 + 5809 + rc = ksmbd_iov_pin_rsp_read(work, (void *)rsp, 5810 + offsetof(struct smb2_query_info_rsp, Buffer), 5811 + pntsd, secdesclen); 5812 + err_out: 5813 + if (rc) { 5814 + rsp->OutputBufferLength = 0; 5815 + kvfree(pntsd); 5816 + } 5817 + 5818 + return rc; 5799 5819 } 5800 5820 5801 5821 /** ··· 5859 5799 goto err_out; 5860 5800 } 5861 5801 5802 + rsp->StructureSize = cpu_to_le16(9); 5803 + rsp->OutputBufferOffset = cpu_to_le16(72); 5804 + 5862 5805 switch (req->InfoType) { 5863 5806 case SMB2_O_INFO_FILE: 5864 5807 ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n"); ··· 5882 5819 } 5883 5820 ksmbd_revert_fsids(work); 5884 5821 5885 - if (!rc) { 5886 - rsp->StructureSize = cpu_to_le16(9); 5887 - rsp->OutputBufferOffset = cpu_to_le16(72); 5888 - rc = ksmbd_iov_pin_rsp(work, (void *)rsp, 5889 - offsetof(struct smb2_query_info_rsp, Buffer) + 5890 - le32_to_cpu(rsp->OutputBufferLength)); 5891 - } 5892 - 5893 5822 err_out: 5894 5823 if (rc < 0) { 5895 5824 if (rc == -EACCES) ··· 5892 5837 rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR; 5893 5838 else if (rc == -ENOMEM) 5894 5839 rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES; 5840 + else if (rc == -EINVAL && rsp->hdr.Status == 0) 5841 + rsp->hdr.Status = STATUS_INVALID_PARAMETER; 5895 5842 else if (rc == -EOPNOTSUPP || rsp->hdr.Status == 0) 5896 5843 rsp->hdr.Status = STATUS_INVALID_INFO_CLASS; 5897 5844 smb2_set_err_rsp(work);
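The ksmbd hunks above replace a fixed-size kmalloc() with a kvzalloc() of a precomputed worst-case length, then reject the result if the built descriptor overran the estimate (the new ALIGN(secdesclen, 8) > scratch_len test). A minimal userspace sketch of that bound-then-verify pattern follows; build_desc() and the byte counts are invented stand-ins for build_sec_desc() and smb_acl_sec_desc_scratch_len(), not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pretend serializer: fills buf and reports how many bytes it used. */
static size_t build_desc(unsigned char *buf, size_t cap)
{
        size_t used = cap / 2;  /* stays well within the bound here */

        memset(buf, 0xab, used);
        return used;
}

int main(void)
{
        size_t bound = 128;     /* worst-case estimate, computed up front */
        unsigned char *buf = calloc(1, bound);  /* zeroed, like kvzalloc() */
        size_t used;

        if (!buf)
                return 1;
        used = build_desc(buf, bound);
        if (used > bound) {     /* producer overran the estimate: bail out */
                free(buf);
                return 1;
        }
        printf("used %zu of %zu bytes\n", used, bound);
        free(buf);
        return 0;
}

Even with a careful estimator, the producer's actual output is re-validated against the allocation before the buffer is handed onward.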
+43
fs/smb/server/smbacl.c
··· 915 915 return 0; 916 916 } 917 917 918 + size_t smb_acl_sec_desc_scratch_len(struct smb_fattr *fattr, 919 + struct smb_ntsd *ppntsd, int ppntsd_size, int addition_info) 920 + { 921 + size_t len = sizeof(struct smb_ntsd); 922 + size_t tmp; 923 + 924 + if (addition_info & OWNER_SECINFO) 925 + len += sizeof(struct smb_sid); 926 + if (addition_info & GROUP_SECINFO) 927 + len += sizeof(struct smb_sid); 928 + if (!(addition_info & DACL_SECINFO)) 929 + return len; 930 + 931 + len += sizeof(struct smb_acl); 932 + if (ppntsd && ppntsd_size > 0) { 933 + unsigned int dacl_offset = le32_to_cpu(ppntsd->dacloffset); 934 + 935 + if (dacl_offset < ppntsd_size && 936 + check_add_overflow(len, ppntsd_size - dacl_offset, &len)) 937 + return 0; 938 + } 939 + 940 + if (fattr->cf_acls) { 941 + if (check_mul_overflow((size_t)fattr->cf_acls->a_count, 942 + 2 * sizeof(struct smb_ace), &tmp) || 943 + check_add_overflow(len, tmp, &len)) 944 + return 0; 945 + } else { 946 + /* default/minimum DACL */ 947 + if (check_add_overflow(len, 5 * sizeof(struct smb_ace), &len)) 948 + return 0; 949 + } 950 + 951 + if (fattr->cf_dacls) { 952 + if (check_mul_overflow((size_t)fattr->cf_dacls->a_count, 953 + sizeof(struct smb_ace), &tmp) || 954 + check_add_overflow(len, tmp, &len)) 955 + return 0; 956 + } 957 + 958 + return len; 959 + } 960 + 918 961 /* Convert permission bits from mode to equivalent CIFS ACL */ 919 962 int build_sec_desc(struct mnt_idmap *idmap, 920 963 struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd,
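smb_acl_sec_desc_scratch_len() above folds every addend through check_add_overflow()/check_mul_overflow() and returns 0 as soon as a step would wrap, letting callers treat 0 (and SIZE_MAX) as "too big". Below is a standalone sketch of the same idiom using the compiler builtins those kernel helpers wrap; scratch_len_for() and its parameters are illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t scratch_len_for(size_t hdr, size_t n_aces, size_t ace_size)
{
        size_t len = hdr, tmp;

        /* n_aces * ace_size, then hdr + that, each step overflow-checked */
        if (__builtin_mul_overflow(n_aces, ace_size, &tmp) ||
            __builtin_add_overflow(len, tmp, &len))
                return 0;       /* 0 signals "would overflow" to the caller */
        return len;
}

int main(void)
{
        size_t ok = scratch_len_for(64, 1000, 16);
        size_t bad = scratch_len_for(64, SIZE_MAX / 2, 16);

        printf("ok=%zu bad=%zu\n", ok, bad);    /* bad prints 0 */
        return 0;
}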
+2
fs/smb/server/smbacl.h
··· 101 101 bool type_check, bool get_write); 102 102 void id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid); 103 103 void ksmbd_init_domain(u32 *sub_auth); 104 + size_t smb_acl_sec_desc_scratch_len(struct smb_fattr *fattr, 105 + struct smb_ntsd *ppntsd, int ppntsd_size, int addition_info); 104 106 105 107 static inline uid_t posix_acl_uid_translate(struct mnt_idmap *idmap, 106 108 struct posix_acl_entry *pace)
+2 -3
include/crypto/if_alg.h
··· 230 230 return PAGE_SIZE <= af_alg_rcvbuf(sk); 231 231 } 232 232 233 - unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset); 234 - void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, 235 - size_t dst_offset); 233 + unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes); 234 + void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst); 236 235 void af_alg_wmem_wakeup(struct sock *sk); 237 236 int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min); 238 237 int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+36 -12
include/dt-bindings/reset/spacemit,k3-resets.h
··· 97 97 #define RESET_APMU_SDH0 13 98 98 #define RESET_APMU_SDH1 14 99 99 #define RESET_APMU_SDH2 15 100 - #define RESET_APMU_USB2 16 101 - #define RESET_APMU_USB3_PORTA 17 102 - #define RESET_APMU_USB3_PORTB 18 103 - #define RESET_APMU_USB3_PORTC 19 104 - #define RESET_APMU_USB3_PORTD 20 100 + #define RESET_APMU_USB2_AHB 16 101 + #define RESET_APMU_USB2_VCC 17 102 + #define RESET_APMU_USB2_PHY 18 103 + #define RESET_APMU_USB3_A_AHB 19 104 + #define RESET_APMU_USB3_A_VCC 20 105 105 #define RESET_APMU_QSPI 21 106 106 #define RESET_APMU_QSPI_BUS 22 107 107 #define RESET_APMU_DMA 23 ··· 132 132 #define RESET_APMU_CPU7_SW 48 133 133 #define RESET_APMU_C1_MPSUB_SW 49 134 134 #define RESET_APMU_MPSUB_DBG 50 135 - #define RESET_APMU_UCIE 51 136 - #define RESET_APMU_RCPU 52 135 + #define RESET_APMU_USB3_A_PHY 51 /* USB3 A */ 136 + #define RESET_APMU_USB3_B_AHB 52 137 137 #define RESET_APMU_DSI4LN2_ESCCLK 53 138 138 #define RESET_APMU_DSI4LN2_LCD_SW 54 139 139 #define RESET_APMU_DSI4LN2_LCD_MCLK 55 ··· 143 143 #define RESET_APMU_UFS_ACLK 59 144 144 #define RESET_APMU_EDP0 60 145 145 #define RESET_APMU_EDP1 61 146 - #define RESET_APMU_PCIE_PORTA 62 147 - #define RESET_APMU_PCIE_PORTB 63 148 - #define RESET_APMU_PCIE_PORTC 64 149 - #define RESET_APMU_PCIE_PORTD 65 150 - #define RESET_APMU_PCIE_PORTE 66 146 + #define RESET_APMU_USB3_B_VCC 62 /* USB3 B */ 147 + #define RESET_APMU_USB3_B_PHY 63 148 + #define RESET_APMU_USB3_C_AHB 64 149 + #define RESET_APMU_USB3_C_VCC 65 150 + #define RESET_APMU_USB3_C_PHY 66 151 151 #define RESET_APMU_EMAC0 67 152 152 #define RESET_APMU_EMAC1 68 153 153 #define RESET_APMU_EMAC2 69 154 154 #define RESET_APMU_ESPI_MCLK 70 155 155 #define RESET_APMU_ESPI_SCLK 71 156 + #define RESET_APMU_USB3_D_AHB 72 /* USB3 D */ 157 + #define RESET_APMU_USB3_D_VCC 73 158 + #define RESET_APMU_USB3_D_PHY 74 159 + #define RESET_APMU_UCIE_IP 75 160 + #define RESET_APMU_UCIE_HOT 76 161 + #define RESET_APMU_UCIE_MON 77 162 + #define RESET_APMU_RCPU_AUDIO_SYS 78 163 + #define RESET_APMU_RCPU_MCU_CORE 79 164 + #define RESET_APMU_RCPU_AUDIO_APMU 80 165 + #define RESET_APMU_PCIE_A_DBI 81 166 + #define RESET_APMU_PCIE_A_SLAVE 82 167 + #define RESET_APMU_PCIE_A_MASTER 83 168 + #define RESET_APMU_PCIE_B_DBI 84 169 + #define RESET_APMU_PCIE_B_SLAVE 85 170 + #define RESET_APMU_PCIE_B_MASTER 86 171 + #define RESET_APMU_PCIE_C_DBI 87 172 + #define RESET_APMU_PCIE_C_SLAVE 88 173 + #define RESET_APMU_PCIE_C_MASTER 89 174 + #define RESET_APMU_PCIE_D_DBI 90 175 + #define RESET_APMU_PCIE_D_SLAVE 91 176 + #define RESET_APMU_PCIE_D_MASTER 92 177 + #define RESET_APMU_PCIE_E_DBI 93 178 + #define RESET_APMU_PCIE_E_SLAVE 94 179 + #define RESET_APMU_PCIE_E_MASTER 95 156 180 157 181 /* DCIU resets*/ 158 182 #define RESET_DCIU_HDMA 0
+6
include/hyperv/hvgdk_mini.h
··· 1533 1533 u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH]; 1534 1534 } __packed; 1535 1535 1536 + enum hv_intercept_access_type { 1537 + HV_INTERCEPT_ACCESS_READ = 0, 1538 + HV_INTERCEPT_ACCESS_WRITE = 1, 1539 + HV_INTERCEPT_ACCESS_EXECUTE = 2 1540 + }; 1541 + 1536 1542 #endif /* _HV_HVGDK_MINI_H */
+2 -2
include/hyperv/hvhdk.h
··· 779 779 u32 vp_index; 780 780 u8 instruction_length:4; 781 781 u8 cr8:4; /* Only set for exo partitions */ 782 - u8 intercept_access_type; 782 + u8 intercept_access_type; /* enum hv_intercept_access_type */ 783 783 union hv_x64_vp_execution_state execution_state; 784 784 struct hv_x64_segment_register cs_segment; 785 785 u64 rip; ··· 825 825 struct hv_arm64_intercept_message_header { 826 826 u32 vp_index; 827 827 u8 instruction_length; 828 - u8 intercept_access_type; 828 + u8 intercept_access_type; /* enum hv_intercept_access_type */ 829 829 union hv_arm64_vp_execution_state execution_state; 830 830 u64 pc; 831 831 u64 cpsr;
+4
include/linux/bpf.h
··· 1854 1854 * target hook is sleepable, we'll go through tasks trace RCU GP and 1855 1855 * then "classic" RCU GP; this need for chaining tasks trace and 1856 1856 * classic RCU GPs is designated by setting bpf_link->sleepable flag 1857 + * 1858 + * For non-sleepable tracepoint links we go through SRCU gp instead, 1859 + * since RCU is not used in that case. Sleepable tracepoints still 1860 + * follow the scheme above. 1857 1861 */ 1858 1862 void (*dealloc_deferred)(struct bpf_link *link); 1859 1863 int (*detach)(struct bpf_link *link);
+5 -7
include/linux/gpio/gpio-nomadik.h
··· 114 114 } 115 115 116 116 /** 117 - * enum prcm_gpiocr_reg_index 118 - * Used to reference an PRCM GPIOCR register address. 117 + * enum prcm_gpiocr_reg_index - Used to reference a PRCM GPIOCR register address. 119 118 */ 120 119 enum prcm_gpiocr_reg_index { 121 120 PRCM_IDX_GPIOCR1, ··· 122 123 PRCM_IDX_GPIOCR3 123 124 }; 124 125 /** 125 - * enum prcm_gpiocr_altcx_index 126 - * Used to reference an Other alternate-C function. 126 + * enum prcm_gpiocr_altcx_index - Used to reference an Other alternate-C function. 127 127 */ 128 128 enum prcm_gpiocr_altcx_index { 129 129 PRCM_IDX_GPIOCR_ALTC1, ··· 133 135 }; 134 136 135 137 /** 136 - * struct prcm_gpio_altcx - Other alternate-C function 138 + * struct prcm_gpiocr_altcx - Other alternate-C function 137 139 * @used: other alternate-C function availability 138 140 * @reg_index: PRCM GPIOCR register index used to control the function 139 141 * @control_bit: PRCM GPIOCR bit used to control the function ··· 145 147 } __packed; 146 148 147 149 /** 148 - * struct prcm_gpio_altcx_pin_desc - Other alternate-C pin 150 + * struct prcm_gpiocr_altcx_pin_desc - Other alternate-C pin 149 151 * @pin: The pin number 150 152 * @altcx: array of other alternate-C[1-4] functions 151 153 */ ··· 191 193 * numbering. 192 194 * @npins: The number of entries in @pins. 193 195 * @functions: The functions supported on this SoC. 194 - * @nfunction: The number of entries in @functions. 196 + * @nfunctions: The number of entries in @functions. 195 197 * @groups: An array describing all pin groups the pin SoC supports. 196 198 * @ngroups: The number of entries in @groups. 197 199 * @altcx_pins: The pins that support Other alternate-C function on this SoC
+12
include/linux/iio/iio.h
··· 931 931 #define IIO_DECLARE_DMA_BUFFER_WITH_TS(type, name, count) \ 932 932 __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(IIO_DMA_MINALIGN) 933 933 934 + /** 935 + * IIO_DECLARE_QUATERNION() - Declare a quaternion element 936 + * @type: element type of the individual vectors 937 + * @name: identifier name 938 + * 939 + * Quaternions are a vector composed of 4 elements (W, X, Y, Z). Use this macro 940 + * to declare a quaternion element in a struct to ensure proper alignment in 941 + * an IIO buffer. 942 + */ 943 + #define IIO_DECLARE_QUATERNION(type, name) \ 944 + type name[4] __aligned(sizeof(type) * 4) 945 + 934 946 struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv); 935 947 936 948 /* The information at the returned address is guaranteed to be cacheline aligned */
+2 -2
include/linux/lis3lv02d.h
··· 30 30 * @default_rate: Default sampling rate. 0 means reset default 31 31 * @setup_resources: Interrupt line setup call back function 32 32 * @release_resources: Interrupt line release call back function 33 - * @st_min_limits[3]: Selftest acceptance minimum values 34 - * @st_max_limits[3]: Selftest acceptance maximum values 33 + * @st_min_limits: Selftest acceptance minimum values (x, y, z) 34 + * @st_max_limits: Selftest acceptance maximum values (x, y, z) 35 35 * @irq2: Irq line 2 number 36 36 * 37 37 * Platform data is used to setup the sensor chip. Meaning of the different
+3 -3
include/linux/mmap_lock.h
··· 546 546 __mmap_lock_trace_acquire_returned(mm, true, true); 547 547 } 548 548 549 - static inline int mmap_write_lock_killable(struct mm_struct *mm) 549 + static inline int __must_check mmap_write_lock_killable(struct mm_struct *mm) 550 550 { 551 551 int ret; 552 552 ··· 593 593 __mmap_lock_trace_acquire_returned(mm, false, true); 594 594 } 595 595 596 - static inline int mmap_read_lock_killable(struct mm_struct *mm) 596 + static inline int __must_check mmap_read_lock_killable(struct mm_struct *mm) 597 597 { 598 598 int ret; 599 599 ··· 603 603 return ret; 604 604 } 605 605 606 - static inline bool mmap_read_trylock(struct mm_struct *mm) 606 + static inline bool __must_check mmap_read_trylock(struct mm_struct *mm) 607 607 { 608 608 bool ret; 609 609
+1
include/linux/soc/qcom/pdr.h
··· 5 5 #include <linux/soc/qcom/qmi.h> 6 6 7 7 #define SERVREG_NAME_LENGTH 64 8 + #define SERVREG_PFR_LENGTH 256 8 9 9 10 struct pdr_service; 10 11 struct pdr_handle;
+3 -3
include/linux/timb_gpio.h
··· 9 9 10 10 /** 11 11 * struct timbgpio_platform_data - Platform data of the Timberdale GPIO driver 12 - * @gpio_base The number of the first GPIO pin, set to -1 for 12 + * @gpio_base: The number of the first GPIO pin, set to -1 for 13 13 * dynamic number allocation. 14 - * @nr_pins Number of pins that is supported by the hardware (1-32) 15 - * @irq_base If IRQ is supported by the hardware, this is the base 14 + * @nr_pins: Number of pins that is supported by the hardware (1-32) 15 + * @irq_base: If IRQ is supported by the hardware, this is the base 16 16 * number of IRQ:s. One IRQ per pin will be used. Set to 17 17 * -1 if IRQ:s is not supported. 18 18 */
+20
include/linux/tracepoint.h
··· 122 122 { 123 123 return tp->ext && tp->ext->faultable; 124 124 } 125 + /* 126 + * Run RCU callback with the appropriate grace period wait for non-faultable 127 + * tracepoints, e.g., those used in atomic context. 128 + */ 129 + static inline void call_tracepoint_unregister_atomic(struct rcu_head *rcu, rcu_callback_t func) 130 + { 131 + call_srcu(&tracepoint_srcu, rcu, func); 132 + } 133 + /* 134 + * Run RCU callback with the appropriate grace period wait for faultable 135 + * tracepoints, e.g., those used in syscall context. 136 + */ 137 + static inline void call_tracepoint_unregister_syscall(struct rcu_head *rcu, rcu_callback_t func) 138 + { 139 + call_rcu_tasks_trace(rcu, func); 140 + } 125 141 #else 126 142 static inline void tracepoint_synchronize_unregister(void) 127 143 { } ··· 145 129 { 146 130 return false; 147 131 } 132 + static inline void call_tracepoint_unregister_atomic(struct rcu_head *rcu, rcu_callback_t func) 133 + { } 134 + static inline void call_tracepoint_unregister_syscall(struct rcu_head *rcu, rcu_callback_t func) 135 + { } 148 136 #endif 149 137 150 138 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+8 -2
include/linux/usb.h
··· 21 21 #include <linux/completion.h> /* for struct completion */ 22 22 #include <linux/sched.h> /* for current && schedule_timeout */ 23 23 #include <linux/mutex.h> /* for struct mutex */ 24 + #include <linux/spinlock.h> /* for spinlock_t */ 24 25 #include <linux/pm_runtime.h> /* for runtime PM */ 25 26 26 27 struct usb_device; ··· 637 636 * @do_remote_wakeup: remote wakeup should be enabled 638 637 * @reset_resume: needs reset instead of resume 639 638 * @port_is_suspended: the upstream port is suspended (L2 or U3) 640 - * @offload_at_suspend: offload activities during suspend is enabled. 639 + * @offload_pm_locked: prevents offload_usage changes during PM transitions. 641 640 * @offload_usage: number of offload activities happening on this usb device. 641 + * @offload_lock: protects offload_usage and offload_pm_locked 642 642 * @slot_id: Slot ID assigned by xHCI 643 643 * @l1_params: best effor service latency for USB2 L1 LPM state, and L1 timeout. 644 644 * @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout. ··· 728 726 unsigned do_remote_wakeup:1; 729 727 unsigned reset_resume:1; 730 728 unsigned port_is_suspended:1; 731 - unsigned offload_at_suspend:1; 729 + unsigned offload_pm_locked:1; 732 730 int offload_usage; 731 + spinlock_t offload_lock; 733 732 enum usb_link_tunnel_mode tunnel_mode; 734 733 struct device_link *usb4_link; 735 734 ··· 852 849 int usb_offload_get(struct usb_device *udev); 853 850 int usb_offload_put(struct usb_device *udev); 854 851 bool usb_offload_check(struct usb_device *udev); 852 + void usb_offload_set_pm_locked(struct usb_device *udev, bool locked); 855 853 #else 856 854 857 855 static inline int usb_offload_get(struct usb_device *udev) ··· 861 857 { return 0; } 862 858 static inline bool usb_offload_check(struct usb_device *udev) 863 859 { return false; } 860 + static inline void usb_offload_set_pm_locked(struct usb_device *udev, bool locked) 861 + { } 864 862 #endif 865 863 866 864 extern int usb_disable_lpm(struct usb_device *udev);
+1 -1
include/net/ip_tunnels.h
··· 32 32 * recursion involves route lookups and full IP output, consuming much 33 33 * more stack per level, so a lower limit is needed. 34 34 */ 35 - #define IP_TUNNEL_RECURSION_LIMIT 4 35 + #define IP_TUNNEL_RECURSION_LIMIT 5 36 36 37 37 /* Keep error state on tunnel for 30 sec */ 38 38 #define IPTUNNEL_ERR_TIMEO (30*HZ)
+1
include/net/netfilter/nf_conntrack_timeout.h
··· 14 14 struct nf_ct_timeout { 15 15 __u16 l3num; 16 16 const struct nf_conntrack_l4proto *l4proto; 17 + struct rcu_head rcu; 17 18 char data[]; 18 19 }; 19 20
-1
include/net/netfilter/nf_queue.h
··· 23 23 struct nf_hook_state state; 24 24 bool nf_ct_is_unconfirmed; 25 25 u16 size; /* sizeof(entry) + saved route keys */ 26 - u16 queue_num; 27 26 28 27 /* extra space to store route keys */ 29 28 };
+1 -1
include/net/xdp_sock.h
··· 14 14 #include <linux/mm.h> 15 15 #include <net/sock.h> 16 16 17 - #define XDP_UMEM_SG_FLAG (1 << 1) 17 + #define XDP_UMEM_SG_FLAG BIT(3) 18 18 19 19 struct net_device; 20 20 struct xsk_queue;
+22 -1
include/net/xdp_sock_drv.h
··· 41 41 return XDP_PACKET_HEADROOM + pool->headroom; 42 42 } 43 43 44 + static inline u32 xsk_pool_get_tailroom(bool mbuf) 45 + { 46 + return mbuf ? SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : 0; 47 + } 48 + 44 49 static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool) 45 50 { 46 51 return pool->chunk_size; 47 52 } 48 53 49 - static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool) 54 + static inline u32 __xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool) 50 55 { 51 56 return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool); 57 + } 58 + 59 + static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool) 60 + { 61 + u32 frame_size = __xsk_pool_get_rx_frame_size(pool); 62 + struct xdp_umem *umem = pool->umem; 63 + bool mbuf; 64 + 65 + /* Reserve tailroom only for zero-copy pools that opted into 66 + * multi-buffer. The reserved area is used for skb_shared_info, 67 + * matching the XDP core's xdp_data_hard_end() layout. 68 + */ 69 + mbuf = pool->dev && (umem->flags & XDP_UMEM_SG_FLAG); 70 + frame_size -= xsk_pool_get_tailroom(mbuf); 71 + 72 + return ALIGN_DOWN(frame_size, 128); 52 73 } 53 74 54 75 static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
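The reworked xsk_pool_get_rx_frame_size() subtracts the pool headroom from the chunk, reserves skb_shared_info-sized tailroom only when the pool opted into multi-buffer, and rounds the result down to a 128-byte boundary. Here is the arithmetic as a self-contained sketch; the chunk, headroom and tailroom constants are illustrative stand-ins for the pool's real values (in the kernel, XDP_PACKET_HEADROOM is 256 and the tailroom is SKB_DATA_ALIGN(sizeof(struct skb_shared_info))).

#include <stdio.h>

#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))      /* a: power of two */

int main(void)
{
        unsigned int chunk_size = 4096;
        unsigned int headroom = 256 + 64;       /* XDP headroom + pool headroom */
        unsigned int tailroom_mbuf = 320;       /* approx. aligned skb_shared_info */
        unsigned int frame, frame_mbuf;

        frame = ALIGN_DOWN(chunk_size - headroom, 128);
        frame_mbuf = ALIGN_DOWN(chunk_size - headroom - tailroom_mbuf, 128);
        printf("frame=%u frame_mbuf=%u\n", frame, frame_mbuf); /* 3712 3456 */
        return 0;
}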
+3 -1
include/trace/events/rxrpc.h
··· 185 185 EM(rxrpc_skb_put_input, "PUT input ") \ 186 186 EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \ 187 187 EM(rxrpc_skb_put_oob, "PUT oob ") \ 188 + EM(rxrpc_skb_put_old_response, "PUT old-resp ") \ 188 189 EM(rxrpc_skb_put_purge, "PUT purge ") \ 189 190 EM(rxrpc_skb_put_purge_oob, "PUT purge-oob") \ 190 191 EM(rxrpc_skb_put_response, "PUT response ") \ ··· 348 347 EM(rxrpc_call_see_release, "SEE release ") \ 349 348 EM(rxrpc_call_see_userid_exists, "SEE u-exists") \ 350 349 EM(rxrpc_call_see_waiting_call, "SEE q-conn ") \ 351 - E_(rxrpc_call_see_zap, "SEE zap ") 350 + E_(rxrpc_call_see_still_live, "SEE !still-l") 352 351 353 352 #define rxrpc_txqueue_traces \ 354 353 EM(rxrpc_txqueue_await_reply, "AWR") \ ··· 521 520 #define rxrpc_req_ack_traces \ 522 521 EM(rxrpc_reqack_ack_lost, "ACK-LOST ") \ 523 522 EM(rxrpc_reqack_app_stall, "APP-STALL ") \ 523 + EM(rxrpc_reqack_jumbo_win, "JUMBO-WIN ") \ 524 524 EM(rxrpc_reqack_more_rtt, "MORE-RTT ") \ 525 525 EM(rxrpc_reqack_no_srv_last, "NO-SRVLAST") \ 526 526 EM(rxrpc_reqack_old_rtt, "OLD-RTT ") \
+4
include/uapi/linux/input-event-codes.h
··· 643 643 #define KEY_EPRIVACY_SCREEN_ON 0x252 644 644 #define KEY_EPRIVACY_SCREEN_OFF 0x253 645 645 646 + #define KEY_ACTION_ON_SELECTION 0x254 /* AL Action on Selection (HUTRR119) */ 647 + #define KEY_CONTEXTUAL_INSERT 0x255 /* AL Contextual Insertion (HUTRR119) */ 648 + #define KEY_CONTEXTUAL_QUERY 0x256 /* AL Contextual Query (HUTRR119) */ 649 + 646 650 #define KEY_KBDINPUTASSIST_PREV 0x260 647 651 #define KEY_KBDINPUTASSIST_NEXT 0x261 648 652 #define KEY_KBDINPUTASSIST_PREVGROUP 0x262
+5 -2
io_uring/io_uring.c
··· 2015 2015 if (ctx->flags & IORING_SETUP_SQ_REWIND) 2016 2016 entries = ctx->sq_entries; 2017 2017 else 2018 - entries = io_sqring_entries(ctx); 2018 + entries = __io_sqring_entries(ctx); 2019 2019 2020 2020 entries = min(nr, entries); 2021 2021 if (unlikely(!entries)) ··· 2250 2250 */ 2251 2251 poll_wait(file, &ctx->poll_wq, wait); 2252 2252 2253 - if (!io_sqring_full(ctx)) 2253 + rcu_read_lock(); 2254 + 2255 + if (!__io_sqring_full(ctx)) 2254 2256 mask |= EPOLLOUT | EPOLLWRNORM; 2255 2257 2256 2258 /* ··· 2272 2270 if (__io_cqring_events_user(ctx) || io_has_work(ctx)) 2273 2271 mask |= EPOLLIN | EPOLLRDNORM; 2274 2272 2273 + rcu_read_unlock(); 2275 2274 return mask; 2276 2275 } 2277 2276
+29 -5
io_uring/io_uring.h
··· 142 142 #endif 143 143 }; 144 144 145 + static inline struct io_rings *io_get_rings(struct io_ring_ctx *ctx) 146 + { 147 + return rcu_dereference_check(ctx->rings_rcu, 148 + lockdep_is_held(&ctx->uring_lock) || 149 + lockdep_is_held(&ctx->completion_lock)); 150 + } 151 + 145 152 static inline bool io_should_wake(struct io_wait_queue *iowq) 146 153 { 147 154 struct io_ring_ctx *ctx = iowq->ctx; 148 - int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail; 155 + struct io_rings *rings; 156 + int dist; 157 + 158 + guard(rcu)(); 159 + rings = io_get_rings(ctx); 149 160 150 161 /* 151 162 * Wake up if we have enough events, or if a timeout occurred since we 152 163 * started waiting. For timeouts, we always want to return to userspace, 153 164 * regardless of event count. 154 165 */ 166 + dist = READ_ONCE(rings->cq.tail) - (int) iowq->cq_tail; 155 167 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; 156 168 } 157 169 ··· 443 431 __io_wq_wake(&ctx->cq_wait); 444 432 } 445 433 446 - static inline bool io_sqring_full(struct io_ring_ctx *ctx) 434 + static inline bool __io_sqring_full(struct io_ring_ctx *ctx) 447 435 { 448 - struct io_rings *r = ctx->rings; 436 + struct io_rings *r = io_get_rings(ctx); 449 437 450 438 /* 451 439 * SQPOLL must use the actual sqring head, as using the cached_sq_head ··· 457 445 return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries; 458 446 } 459 447 460 - static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx) 448 + static inline bool io_sqring_full(struct io_ring_ctx *ctx) 461 449 { 462 - struct io_rings *rings = ctx->rings; 450 + guard(rcu)(); 451 + return __io_sqring_full(ctx); 452 + } 453 + 454 + static inline unsigned int __io_sqring_entries(struct io_ring_ctx *ctx) 455 + { 456 + struct io_rings *rings = io_get_rings(ctx); 463 457 unsigned int entries; 464 458 465 459 /* make sure SQ entry isn't read before tail */ 466 460 entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head; 467 461 return min(entries, ctx->sq_entries); 462 + } 463 + 464 + static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx) 465 + { 466 + guard(rcu)(); 467 + return __io_sqring_entries(ctx); 468 468 } 469 469 470 470 /*
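The io_uring.h changes split each ring accessor into a double-underscore body that assumes the RCU read lock and a thin wrapper that takes it with guard(rcu)(), so hot paths already inside a read section avoid re-locking. A userspace sketch of that locked/unlocked split, with a pthread rwlock standing in for RCU and all names invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t ring_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int sq_head, sq_tail, sq_entries = 8;

/* Caller must already hold ring_lock for reading. */
static bool __sqring_full(void)
{
        return sq_tail - sq_head == sq_entries;
}

/* Convenience wrapper for callers that do not hold the lock. */
static bool sqring_full(void)
{
        bool full;

        pthread_rwlock_rdlock(&ring_lock);
        full = __sqring_full();
        pthread_rwlock_unlock(&ring_lock);
        return full;
}

int main(void)
{
        sq_tail = sq_head + sq_entries;
        printf("full=%d\n", sqring_full());     /* prints full=1 */
        return 0;
}

The same convention shows up in __io_sqring_entries()/io_sqring_entries() above: callers that batch several reads take the lock once and call the raw helpers directly.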
+4
io_uring/net.c
··· 421 421 422 422 sr->done_io = 0; 423 423 sr->len = READ_ONCE(sqe->len); 424 + if (unlikely(sr->len < 0)) 425 + return -EINVAL; 424 426 sr->flags = READ_ONCE(sqe->ioprio); 425 427 if (sr->flags & ~SENDMSG_FLAGS) 426 428 return -EINVAL; ··· 793 791 794 792 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); 795 793 sr->len = READ_ONCE(sqe->len); 794 + if (unlikely(sr->len < 0)) 795 + return -EINVAL; 796 796 sr->flags = READ_ONCE(sqe->ioprio); 797 797 if (sr->flags & ~RECVMSG_FLAGS) 798 798 return -EINVAL;
+9 -1
io_uring/register.c
··· 178 178 return -EBUSY; 179 179 180 180 ret = io_parse_restrictions(arg, nr_args, &ctx->restrictions); 181 - /* Reset all restrictions if an error happened */ 181 + /* 182 + * Reset all restrictions if an error happened, but retain any COW'ed 183 + * settings. 184 + */ 182 185 if (ret < 0) { 186 + struct io_bpf_filters *bpf = ctx->restrictions.bpf_filters; 187 + bool cowed = ctx->restrictions.bpf_filters_cow; 188 + 183 189 memset(&ctx->restrictions, 0, sizeof(ctx->restrictions)); 190 + ctx->restrictions.bpf_filters = bpf; 191 + ctx->restrictions.bpf_filters_cow = cowed; 184 192 return ret; 185 193 } 186 194 if (ctx->restrictions.op_registered)
+4
io_uring/rsrc.c
··· 1061 1061 return ret; 1062 1062 if (!(imu->dir & (1 << ddir))) 1063 1063 return -EFAULT; 1064 + if (unlikely(!len)) { 1065 + iov_iter_bvec(iter, ddir, NULL, 0, 0); 1066 + return 0; 1067 + } 1064 1068 1065 1069 offset = buf_addr - imu->ubuf; 1066 1070
+31 -19
io_uring/wait.c
··· 79 79 if (io_has_work(ctx)) 80 80 goto out_wake; 81 81 /* got events since we started waiting, min timeout is done */ 82 - if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail)) 83 - goto out_wake; 84 - /* if we have any events and min timeout expired, we're done */ 85 - if (io_cqring_events(ctx)) 86 - goto out_wake; 82 + scoped_guard(rcu) { 83 + struct io_rings *rings = io_get_rings(ctx); 87 84 85 + if (iowq->cq_min_tail != READ_ONCE(rings->cq.tail)) 86 + goto out_wake; 87 + /* if we have any events and min timeout expired, we're done */ 88 + if (io_cqring_events(ctx)) 89 + goto out_wake; 90 + } 88 91 /* 89 92 * If using deferred task_work running and application is waiting on 90 93 * more than one request, ensure we reset it now where we are switching ··· 189 186 struct ext_arg *ext_arg) 190 187 { 191 188 struct io_wait_queue iowq; 192 - struct io_rings *rings = ctx->rings; 189 + struct io_rings *rings; 193 190 ktime_t start_time; 194 - int ret; 191 + int ret, nr_wait; 195 192 196 193 min_events = min_t(int, min_events, ctx->cq_entries); 197 194 ··· 204 201 205 202 if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))) 206 203 io_cqring_do_overflow_flush(ctx); 207 - if (__io_cqring_events_user(ctx) >= min_events) 204 + 205 + rcu_read_lock(); 206 + rings = io_get_rings(ctx); 207 + if (__io_cqring_events_user(ctx) >= min_events) { 208 + rcu_read_unlock(); 208 209 return 0; 210 + } 209 211 210 212 init_waitqueue_func_entry(&iowq.wq, io_wake_function); 211 213 iowq.wq.private = current; 212 214 INIT_LIST_HEAD(&iowq.wq.entry); 213 215 iowq.ctx = ctx; 214 - iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events; 215 - iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail); 216 + iowq.cq_tail = READ_ONCE(rings->cq.head) + min_events; 217 + iowq.cq_min_tail = READ_ONCE(rings->cq.tail); 218 + nr_wait = (int) iowq.cq_tail - READ_ONCE(rings->cq.tail); 219 + rcu_read_unlock(); 220 + rings = NULL; 216 221 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); 217 222 iowq.hit_timeout = 0; 218 223 iowq.min_timeout = ext_arg->min_time; ··· 251 240 trace_io_uring_cqring_wait(ctx, min_events); 252 241 do { 253 242 unsigned long check_cq; 254 - int nr_wait; 255 - 256 - /* if min timeout has been hit, don't reset wait count */ 257 - if (!iowq.hit_timeout) 258 - nr_wait = (int) iowq.cq_tail - 259 - READ_ONCE(ctx->rings->cq.tail); 260 - else 261 - nr_wait = 1; 262 243 263 244 if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) { 264 245 atomic_set(&ctx->cq_wait_nr, nr_wait); ··· 301 298 break; 302 299 } 303 300 cond_resched(); 301 + 302 + /* if min timeout has been hit, don't reset wait count */ 303 + if (!iowq.hit_timeout) 304 + scoped_guard(rcu) 305 + nr_wait = (int) iowq.cq_tail - 306 + READ_ONCE(io_get_rings(ctx)->cq.tail); 307 + else 308 + nr_wait = 1; 304 309 } while (1); 305 310 306 311 if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) 307 312 finish_wait(&ctx->cq_wait, &iowq.wq); 308 313 restore_saved_sigmask_unless(ret == -EINTR); 309 314 310 - return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; 315 + guard(rcu)(); 316 + return READ_ONCE(io_get_rings(ctx)->cq.head) == READ_ONCE(io_get_rings(ctx)->cq.tail) ? ret : 0; 311 317 }
+5 -2
io_uring/wait.h
··· 28 28 29 29 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx) 30 30 { 31 - return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head); 31 + struct io_rings *rings = io_get_rings(ctx); 32 + return ctx->cached_cq_tail - READ_ONCE(rings->cq.head); 32 33 } 33 34 34 35 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx) 35 36 { 36 - return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head); 37 + struct io_rings *rings = io_get_rings(ctx); 38 + 39 + return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head); 37 40 } 38 41 39 42 /*
+23 -2
kernel/bpf/syscall.c
··· 3261 3261 bpf_link_dealloc(link); 3262 3262 } 3263 3263 3264 + static bool bpf_link_is_tracepoint(struct bpf_link *link) 3265 + { 3266 + /* 3267 + * Only these combinations support a tracepoint bpf_link. 3268 + * BPF_LINK_TYPE_TRACING raw_tp progs are hardcoded to use 3269 + * bpf_raw_tp_link_lops and thus dealloc_deferred(), see 3270 + * bpf_raw_tp_link_attach(). 3271 + */ 3272 + return link->type == BPF_LINK_TYPE_RAW_TRACEPOINT || 3273 + (link->type == BPF_LINK_TYPE_TRACING && link->attach_type == BPF_TRACE_RAW_TP); 3274 + } 3275 + 3264 3276 static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu) 3265 3277 { 3266 3278 if (rcu_trace_implies_rcu_gp()) ··· 3291 3279 if (link->prog) 3292 3280 ops->release(link); 3293 3281 if (ops->dealloc_deferred) { 3294 - /* Schedule BPF link deallocation, which will only then 3282 + /* 3283 + * Schedule BPF link deallocation, which will only then 3295 3284 * trigger putting BPF program refcount. 3296 3285 * If underlying BPF program is sleepable or BPF link's target 3297 3286 * attach hookpoint is sleepable or otherwise requires RCU GPs 3298 3287 * to ensure link and its underlying BPF program is not 3299 3288 * reachable anymore, we need to first wait for RCU tasks 3300 - * trace sync, and then go through "classic" RCU grace period 3289 + * trace sync, and then go through "classic" RCU grace period. 3290 + * 3291 + * For tracepoint BPF links, we need to go through SRCU grace 3292 + * period wait instead when non-faultable tracepoint is used. We 3293 + * don't need to chain SRCU grace period waits, however, for the 3294 + * faultable case, since it exclusively uses RCU Tasks Trace. 3301 3295 */ 3302 3296 if (link->sleepable || (link->prog && link->prog->sleepable)) 3303 3297 call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); 3298 + /* We need to do a SRCU grace period wait for non-faultable tracepoint BPF links. */ 3299 + else if (bpf_link_is_tracepoint(link)) 3300 + call_tracepoint_unregister_atomic(&link->rcu, bpf_link_defer_dealloc_rcu_gp); 3304 3301 else 3305 3302 call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); 3306 3303 } else if (ops->dealloc) {
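bpf_link_free() above now routes the deferred free through one of three grace-period flavors: tasks-trace RCU chained into classic RCU for sleepable links, SRCU for non-faultable tracepoint links, and plain RCU otherwise. A compilable sketch of just the routing decision; the enum and struct are invented, and the real code hands an rcu_head callback to call_rcu_tasks_trace(), call_srcu() or call_rcu().

#include <stdio.h>

enum gp_flavor { GP_TASKS_TRACE, GP_SRCU, GP_CLASSIC };

struct link {
        int sleepable;
        int is_tracepoint;
};

static enum gp_flavor pick_flavor(const struct link *l)
{
        if (l->sleepable)
                return GP_TASKS_TRACE;  /* chained: tasks-trace, then classic */
        if (l->is_tracepoint)
                return GP_SRCU;         /* non-faultable tracepoints use SRCU */
        return GP_CLASSIC;
}

int main(void)
{
        struct link tp = { .is_tracepoint = 1 };

        printf("flavor=%d\n", pick_flavor(&tp));        /* 1 == GP_SRCU */
        return 0;
}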
+32 -5
kernel/bpf/verifier.c
··· 617 617 insn->imm == BPF_LOAD_ACQ; 618 618 } 619 619 620 + static bool is_atomic_fetch_insn(const struct bpf_insn *insn) 621 + { 622 + return BPF_CLASS(insn->code) == BPF_STX && 623 + BPF_MODE(insn->code) == BPF_ATOMIC && 624 + (insn->imm & BPF_FETCH); 625 + } 626 + 620 627 static int __get_spi(s32 off) 621 628 { 622 629 return (-off - 1) / BPF_REG_SIZE; ··· 4454 4447 * dreg still needs precision before this insn 4455 4448 */ 4456 4449 } 4457 - } else if (class == BPF_LDX || is_atomic_load_insn(insn)) { 4458 - if (!bt_is_reg_set(bt, dreg)) 4450 + } else if (class == BPF_LDX || 4451 + is_atomic_load_insn(insn) || 4452 + is_atomic_fetch_insn(insn)) { 4453 + u32 load_reg = dreg; 4454 + 4455 + /* 4456 + * Atomic fetch operation writes the old value into 4457 + * a register (sreg or r0) and if it was tracked for 4458 + * precision, propagate to the stack slot like we do 4459 + * in regular ldx. 4460 + */ 4461 + if (is_atomic_fetch_insn(insn)) 4462 + load_reg = insn->imm == BPF_CMPXCHG ? 4463 + BPF_REG_0 : sreg; 4464 + 4465 + if (!bt_is_reg_set(bt, load_reg)) 4459 4466 return 0; 4460 - bt_clear_reg(bt, dreg); 4467 + bt_clear_reg(bt, load_reg); 4461 4468 4462 4469 /* scalars can only be spilled into stack w/o losing precision. 4463 4470 * Load from any other memory can be zero extended. ··· 7926 7905 } else if (reg->type == CONST_PTR_TO_MAP) { 7927 7906 err = check_ptr_to_map_access(env, regs, regno, off, size, t, 7928 7907 value_regno); 7929 - } else if (base_type(reg->type) == PTR_TO_BUF) { 7908 + } else if (base_type(reg->type) == PTR_TO_BUF && 7909 + !type_may_be_null(reg->type)) { 7930 7910 bool rdonly_mem = type_is_rdonly_mem(reg->type); 7931 7911 u32 *max_access; 7932 7912 ··· 19937 19915 * since someone could have accessed through (ptr - k), or 19938 19916 * even done ptr -= k in a register, to get a safe access. 19939 19917 */ 19940 - if (rold->range > rcur->range) 19918 + if (rold->range < 0 || rcur->range < 0) { 19919 + /* special case for [BEYOND|AT]_PKT_END */ 19920 + if (rold->range != rcur->range) 19921 + return false; 19922 + } else if (rold->range > rcur->range) { 19941 19923 return false; 19924 + } 19942 19925 /* If the offsets don't match, we can't trust our alignment; 19943 19926 * nor can we be sure that we won't fall out of range. 19944 19927 */
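The backtracking fix above propagates precision through whichever register receives the atomic operation's old value: R0 for BPF_CMPXCHG, the source register for the other BPF_FETCH forms. C11 atomics exhibit the same "where does the old value land" split, sketched below; the analogy is illustrative and says nothing about the verifier's internals.

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
        atomic_int v = 5;
        int expected = 0;
        int old;

        /* fetch-add: the old value lands in the result (like sreg) */
        old = atomic_fetch_add(&v, 1);          /* old = 5, v = 6 */

        /* cmpxchg: the observed value lands in 'expected' (like BPF's R0) */
        atomic_compare_exchange_strong(&v, &expected, 42); /* fails; expected = 6 */

        printf("old=%d expected=%d v=%d\n", old, expected, atomic_load(&v));
        return 0;
}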
+7 -2
kernel/liveupdate/luo_session.c
··· 558 558 } 559 559 560 560 scoped_guard(mutex, &session->mutex) { 561 - luo_file_deserialize(&session->file_set, 562 - &sh->ser[i].file_set_ser); 561 + err = luo_file_deserialize(&session->file_set, 562 + &sh->ser[i].file_set_ser); 563 + } 564 + if (err) { 565 + pr_warn("Failed to deserialize files for session [%s] %pe\n", 566 + session->name, ERR_PTR(err)); 567 + return err; 563 568 } 564 569 } 565 570
+2
kernel/power/em_netlink.c
··· 109 109 110 110 id = nla_get_u32(info->attrs[DEV_ENERGYMODEL_A_PERF_DOMAIN_PERF_DOMAIN_ID]); 111 111 pd = em_perf_domain_get_by_id(id); 112 + if (!pd) 113 + return -EINVAL; 112 114 113 115 __em_nl_get_pd_size(pd, &msg_sz); 114 116 msg = genlmsg_new(msg_sz, GFP_KERNEL);
+3 -1
kernel/sched/debug.c
··· 902 902 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) 903 903 { 904 904 s64 left_vruntime = -1, zero_vruntime, right_vruntime = -1, left_deadline = -1, spread; 905 + u64 avruntime; 905 906 struct sched_entity *last, *first, *root; 906 907 struct rq *rq = cpu_rq(cpu); 907 908 unsigned long flags; ··· 926 925 if (last) 927 926 right_vruntime = last->vruntime; 928 927 zero_vruntime = cfs_rq->zero_vruntime; 928 + avruntime = avg_vruntime(cfs_rq); 929 929 raw_spin_rq_unlock_irqrestore(rq, flags); 930 930 931 931 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_deadline", ··· 936 934 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "zero_vruntime", 937 935 SPLIT_NS(zero_vruntime)); 938 936 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "avg_vruntime", 939 - SPLIT_NS(avg_vruntime(cfs_rq))); 937 + SPLIT_NS(avruntime)); 940 938 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "right_vruntime", 941 939 SPLIT_NS(right_vruntime)); 942 940 spread = right_vruntime - left_vruntime;
+35 -14
kernel/sched/ext.c
··· 1110 1110 p->scx.dsq = dsq; 1111 1111 1112 1112 /* 1113 - * scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the 1114 - * direct dispatch path, but we clear them here because the direct 1115 - * dispatch verdict may be overridden on the enqueue path during e.g. 1116 - * bypass. 1117 - */ 1118 - p->scx.ddsp_dsq_id = SCX_DSQ_INVALID; 1119 - p->scx.ddsp_enq_flags = 0; 1120 - 1121 - /* 1122 1113 * We're transitioning out of QUEUEING or DISPATCHING. store_release to 1123 1114 * match waiters' load_acquire. 1124 1115 */ ··· 1274 1283 p->scx.ddsp_enq_flags = enq_flags; 1275 1284 } 1276 1285 1286 + /* 1287 + * Clear @p direct dispatch state when leaving the scheduler. 1288 + * 1289 + * Direct dispatch state must be cleared in the following cases: 1290 + * - direct_dispatch(): cleared on the synchronous enqueue path, deferred 1291 + * dispatch keeps the state until consumed 1292 + * - process_ddsp_deferred_locals(): cleared after consuming deferred state, 1293 + * - do_enqueue_task(): cleared on enqueue fallbacks where the dispatch 1294 + * verdict is ignored (local/global/bypass) 1295 + * - dequeue_task_scx(): cleared after dispatch_dequeue(), covering deferred 1296 + * cancellation and holding_cpu races 1297 + * - scx_disable_task(): cleared for queued wakeup tasks, which are excluded by 1298 + * the scx_bypass() loop, so that stale state is not reused by a subsequent 1299 + * scheduler instance 1300 + */ 1301 + static inline void clear_direct_dispatch(struct task_struct *p) 1302 + { 1303 + p->scx.ddsp_dsq_id = SCX_DSQ_INVALID; 1304 + p->scx.ddsp_enq_flags = 0; 1305 + } 1306 + 1277 1307 static void direct_dispatch(struct scx_sched *sch, struct task_struct *p, 1278 1308 u64 enq_flags) 1279 1309 { 1280 1310 struct rq *rq = task_rq(p); 1281 1311 struct scx_dispatch_q *dsq = 1282 1312 find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); 1313 + u64 ddsp_enq_flags; 1283 1314 1284 1315 touch_core_sched_dispatch(rq, p); 1285 1316 ··· 1342 1329 return; 1343 1330 } 1344 1331 1345 - dispatch_enqueue(sch, dsq, p, 1346 - p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS); 1332 + ddsp_enq_flags = p->scx.ddsp_enq_flags; 1333 + clear_direct_dispatch(p); 1334 + 1335 + dispatch_enqueue(sch, dsq, p, ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS); 1347 1336 } 1348 1337 1349 1338 static bool scx_rq_online(struct rq *rq) ··· 1454 1439 */ 1455 1440 touch_core_sched(rq, p); 1456 1441 refill_task_slice_dfl(sch, p); 1442 + clear_direct_dispatch(p); 1457 1443 dispatch_enqueue(sch, dsq, p, enq_flags); 1458 1444 } 1459 1445 ··· 1626 1610 sub_nr_running(rq, 1); 1627 1611 1628 1612 dispatch_dequeue(rq, p); 1613 + clear_direct_dispatch(p); 1629 1614 return true; 1630 1615 } 1631 1616 ··· 2310 2293 struct task_struct, scx.dsq_list.node))) { 2311 2294 struct scx_sched *sch = scx_root; 2312 2295 struct scx_dispatch_q *dsq; 2296 + u64 dsq_id = p->scx.ddsp_dsq_id; 2297 + u64 enq_flags = p->scx.ddsp_enq_flags; 2313 2298 2314 2299 list_del_init(&p->scx.dsq_list.node); 2300 + clear_direct_dispatch(p); 2315 2301 2316 - dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p); 2302 + dsq = find_dsq_for_dispatch(sch, rq, dsq_id, p); 2317 2303 if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL)) 2318 - dispatch_to_local_dsq(sch, rq, dsq, p, 2319 - p->scx.ddsp_enq_flags); 2304 + dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags); 2320 2305 } 2321 2306 } 2322 2307 ··· 3033 3014 3034 3015 lockdep_assert_rq_held(rq); 3035 3016 WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED); 3017 + 3018 + clear_direct_dispatch(p); 3036 3019 3037 3020 if 
(SCX_HAS_OP(sch, disable)) 3038 3021 SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p);
+19 -12
kernel/sched/ext_idle.c
··· 860 860 * code. 861 861 * 862 862 * We can't simply check whether @p->migration_disabled is set in a 863 - * sched_ext callback, because migration is always disabled for the current 864 - * task while running BPF code. 863 + * sched_ext callback, because the BPF prolog (__bpf_prog_enter) may disable 864 + * migration for the current task while running BPF code. 865 865 * 866 - * The prolog (__bpf_prog_enter) and epilog (__bpf_prog_exit) respectively 867 - * disable and re-enable migration. For this reason, the current task 868 - * inside a sched_ext callback is always a migration-disabled task. 866 + * Since the BPF prolog calls migrate_disable() only when CONFIG_PREEMPT_RCU 867 + * is enabled (via rcu_read_lock_dont_migrate()), migration_disabled == 1 for 868 + * the current task is ambiguous only in that case: it could be from the BPF 869 + * prolog rather than a real migrate_disable() call. 869 870 * 870 - * Therefore, when @p->migration_disabled == 1, check whether @p is the 871 - * current task or not: if it is, then migration was not disabled before 872 - * entering the callback, otherwise migration was disabled. 871 + * Without CONFIG_PREEMPT_RCU, the BPF prolog never calls migrate_disable(), 872 + * so migration_disabled == 1 always means the task is truly 873 + * migration-disabled. 874 + * 875 + * Therefore, when migration_disabled == 1 and CONFIG_PREEMPT_RCU is enabled, 876 + * check whether @p is the current task or not: if it is, then migration was 877 + * not disabled before entering the callback, otherwise migration was disabled. 873 878 * 874 879 * Returns true if @p is migration-disabled, false otherwise. 875 880 */ 876 881 static bool is_bpf_migration_disabled(const struct task_struct *p) 877 882 { 878 - if (p->migration_disabled == 1) 879 - return p != current; 880 - else 881 - return p->migration_disabled; 883 + if (p->migration_disabled == 1) { 884 + if (IS_ENABLED(CONFIG_PREEMPT_RCU)) 885 + return p != current; 886 + return true; 887 + } 888 + return p->migration_disabled; 882 889 } 883 890 884 891 static s32 select_cpu_from_kfunc(struct scx_sched *sch, struct task_struct *p,
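The rewritten helper encodes a small truth table: a migration_disabled count of 1 is ambiguous only under CONFIG_PREEMPT_RCU, where the BPF prolog may have taken it for the current task. A standalone sketch of that table; preempt_rcu and is_current are stand-ins for IS_ENABLED(CONFIG_PREEMPT_RCU) and p == current.

#include <stdbool.h>
#include <stdio.h>

static bool migration_disabled(int md_count, bool preempt_rcu, bool is_current)
{
        if (md_count == 1) {
                /* a count of 1 may come from the BPF prolog under PREEMPT_RCU */
                if (preempt_rcu)
                        return !is_current;
                return true;
        }
        return md_count != 0;
}

int main(void)
{
        /* current task, count 1, PREEMPT_RCU: the prolog disabled it */
        printf("%d\n", migration_disabled(1, true, true));      /* 0 */
        /* some other task, count 1: genuinely migration-disabled */
        printf("%d\n", migration_disabled(1, true, false));     /* 1 */
        /* no PREEMPT_RCU: count 1 always means disabled */
        printf("%d\n", migration_disabled(1, false, true));     /* 1 */
        return 0;
}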
+3 -7
kernel/sched/fair.c
··· 707 707 * Called in: 708 708 * - place_entity() -- before enqueue 709 709 * - update_entity_lag() -- before dequeue 710 - * - entity_tick() 710 + * - update_deadline() -- slice expiration 711 711 * 712 712 * This means it is one entry 'behind' but that puts it close enough to where 713 713 * the bound on entity_key() is at most two lag bounds. ··· 1131 1131 * EEVDF: vd_i = ve_i + r_i / w_i 1132 1132 */ 1133 1133 se->deadline = se->vruntime + calc_delta_fair(se->slice, se); 1134 + avg_vruntime(cfs_rq); 1134 1135 1135 1136 /* 1136 1137 * The task has consumed its request, reschedule. ··· 5594 5593 update_load_avg(cfs_rq, curr, UPDATE_TG); 5595 5594 update_cfs_group(curr); 5596 5595 5597 - /* 5598 - * Pulls along cfs_rq::zero_vruntime. 5599 - */ 5600 - avg_vruntime(cfs_rq); 5601 - 5602 5596 #ifdef CONFIG_SCHED_HRTICK 5603 5597 /* 5604 5598 * queued ticks are scheduled to match the slice, so don't bother ··· 9124 9128 */ 9125 9129 if (entity_eligible(cfs_rq, se)) { 9126 9130 se->vruntime = se->deadline; 9127 - se->deadline += calc_delta_fair(se->slice, se); 9131 + update_deadline(cfs_rq, se); 9128 9132 } 9129 9133 } 9130 9134
+4
kernel/trace/bpf_trace.c
··· 2752 2752 if (!is_kprobe_multi(prog)) 2753 2753 return -EINVAL; 2754 2754 2755 + /* kprobe_multi is not allowed to be sleepable. */ 2756 + if (prog->sleepable) 2757 + return -EINVAL; 2758 + 2755 2759 /* Writing to context is not allowed for kprobes. */ 2756 2760 if (prog->aux->kprobe_write_ctx) 2757 2761 return -EINVAL;
+7
mm/damon/stat.c
··· 245 245 { 246 246 int err; 247 247 248 + if (damon_stat_context) { 249 + if (damon_is_running(damon_stat_context)) 250 + return -EAGAIN; 251 + damon_destroy_ctx(damon_stat_context); 252 + } 253 + 248 254 damon_stat_context = damon_stat_build_ctx(); 249 255 if (!damon_stat_context) 250 256 return -ENOMEM; ··· 267 261 { 268 262 damon_stop(&damon_stat_context, 1); 269 263 damon_destroy_ctx(damon_stat_context); 264 + damon_stat_context = NULL; 270 265 } 271 266 272 267 static int damon_stat_enabled_store(
+2 -1
mm/damon/sysfs.c
··· 1670 1670 repeat_call_control->data = kdamond; 1671 1671 repeat_call_control->repeat = true; 1672 1672 repeat_call_control->dealloc_on_cancel = true; 1673 - damon_call(ctx, repeat_call_control); 1673 + if (damon_call(ctx, repeat_call_control)) 1674 + kfree(repeat_call_control); 1674 1675 return err; 1675 1676 } 1676 1677
+8 -3
mm/filemap.c
··· 3883 3883 unsigned int nr_pages = 0, folio_type; 3884 3884 unsigned short mmap_miss = 0, mmap_miss_saved; 3885 3885 3886 + /* 3887 + * Recalculate end_pgoff based on file_end before calling 3888 + * next_uptodate_folio() to avoid races with concurrent 3889 + * truncation. 3890 + */ 3891 + file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1; 3892 + end_pgoff = min(end_pgoff, file_end); 3893 + 3886 3894 rcu_read_lock(); 3887 3895 folio = next_uptodate_folio(&xas, mapping, end_pgoff); 3888 3896 if (!folio) 3889 3897 goto out; 3890 - 3891 - file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1; 3892 - end_pgoff = min(end_pgoff, file_end); 3893 3898 3894 3899 /* 3895 3900 * Do not allow to map with PMD across i_size to preserve
+20
mm/memory_hotplug.c
··· 1209 1209 1210 1210 if (node_arg.nid >= 0) 1211 1211 node_set_state(nid, N_MEMORY); 1212 + /* 1213 + * Check whether we are adding normal memory to the node for the first 1214 + * time. 1215 + */ 1216 + if (!node_state(nid, N_NORMAL_MEMORY) && zone_idx(zone) <= ZONE_NORMAL) 1217 + node_set_state(nid, N_NORMAL_MEMORY); 1218 + 1212 1219 if (need_zonelists_rebuild) 1213 1220 build_all_zonelists(NULL); 1214 1221 ··· 1915 1908 unsigned long flags; 1916 1909 char *reason; 1917 1910 int ret; 1911 + unsigned long normal_pages = 0; 1912 + enum zone_type zt; 1918 1913 1919 1914 /* 1920 1915 * {on,off}lining is constrained to full memory sections (or more ··· 2064 2055 /* reinitialise watermarks and update pcp limits */ 2065 2056 init_per_zone_wmark_min(); 2066 2057 2058 + /* 2059 + * Check whether this operation removes the last normal memory from 2060 + * the node. We do this before clearing N_MEMORY to avoid the possible 2061 + * transient "!N_MEMORY && N_NORMAL_MEMORY" state. 2062 + */ 2063 + if (zone_idx(zone) <= ZONE_NORMAL) { 2064 + for (zt = 0; zt <= ZONE_NORMAL; zt++) 2065 + normal_pages += pgdat->node_zones[zt].present_pages; 2066 + if (!normal_pages) 2067 + node_clear_state(node, N_NORMAL_MEMORY); 2068 + } 2067 2069 /* 2068 2070 * Make sure to mark the node as memory-less before rebuilding the zone 2069 2071 * list. Otherwise this node would still appear in the fallback lists.
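Both hunks above keep the N_NORMAL_MEMORY node state in step with hotplug: set it when the first page lands in a zone at or below ZONE_NORMAL, clear it when the last such page goes offline. A toy sketch of the offline-side accounting; the zone array and state mask are stand-ins for pgdat->node_zones[] and node_clear_state().

#include <stdio.h>

enum { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };
#define N_NORMAL_MEMORY (1u << 0)

int main(void)
{
        /* only ZONE_MOVABLE still has pages after the offline operation */
        unsigned long present[MAX_NR_ZONES] = { 0, 0, 0, 4096 };
        unsigned int node_state = N_NORMAL_MEMORY;
        unsigned long normal_pages = 0;
        int zt;

        for (zt = 0; zt <= ZONE_NORMAL; zt++)
                normal_pages += present[zt];
        if (!normal_pages)
                node_state &= ~N_NORMAL_MEMORY; /* last normal page is gone */

        printf("normal_pages=%lu state=%#x\n", normal_pages, node_state);
        return 0;
}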
+21
mm/page-writeback.c
··· 1858 1858 break; 1859 1859 } 1860 1860 1861 + /* 1862 + * Unconditionally start background writeback if it's not 1863 + * already in progress. We need to do this because the global 1864 + * dirty threshold check above (nr_dirty > gdtc->bg_thresh) 1865 + * doesn't account for these cases: 1866 + * 1867 + * a) strictlimit BDIs: throttling is calculated using per-wb 1868 + * thresholds. The per-wb threshold can be exceeded even when 1869 + * nr_dirty < gdtc->bg_thresh 1870 + * 1871 + * b) memcg-based throttling: memcg uses its own dirty count and 1872 + * thresholds and can trigger throttling even when global 1873 + * nr_dirty < gdtc->bg_thresh 1874 + * 1875 + * Writeback needs to be started else the writer stalls in the 1876 + * throttle loop waiting for dirty pages to be written back 1877 + * while no writeback is running. 1878 + */ 1879 + if (unlikely(!writeback_in_progress(wb))) 1880 + wb_start_background_writeback(wb); 1881 + 1861 1882 mem_cgroup_flush_foreign(wb); 1862 1883 1863 1884 /*
+7
mm/vma.c
··· 2781 2781 if (map.charged) 2782 2782 vm_unacct_memory(map.charged); 2783 2783 abort_munmap: 2784 + /* 2785 + * This indicates that .mmap_prepare has set a new file, differing from 2786 + * desc->vm_file. But since we're aborting the operation, only the 2787 + * original file will be cleaned up. Ensure we clean up both. 2788 + */ 2789 + if (map.file_doesnt_need_get) 2790 + fput(map.file); 2784 2791 vms_abort_munmap_vmas(&map.vms, &map.mas_detach); 2785 2792 return error; 2786 2793 }
+18 -9
net/batman-adv/bridge_loop_avoidance.c
··· 2130 2130 struct batadv_bla_claim *claim) 2131 2131 { 2132 2132 const u8 *primary_addr = primary_if->net_dev->dev_addr; 2133 + struct batadv_bla_backbone_gw *backbone_gw; 2133 2134 u16 backbone_crc; 2134 2135 bool is_own; 2135 2136 void *hdr; ··· 2146 2145 2147 2146 genl_dump_check_consistent(cb, hdr); 2148 2147 2149 - is_own = batadv_compare_eth(claim->backbone_gw->orig, 2150 - primary_addr); 2148 + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); 2151 2149 2152 - spin_lock_bh(&claim->backbone_gw->crc_lock); 2153 - backbone_crc = claim->backbone_gw->crc; 2154 - spin_unlock_bh(&claim->backbone_gw->crc_lock); 2150 + is_own = batadv_compare_eth(backbone_gw->orig, primary_addr); 2151 + 2152 + spin_lock_bh(&backbone_gw->crc_lock); 2153 + backbone_crc = backbone_gw->crc; 2154 + spin_unlock_bh(&backbone_gw->crc_lock); 2155 2155 2156 2156 if (is_own) 2157 2157 if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) { 2158 2158 genlmsg_cancel(msg, hdr); 2159 - goto out; 2159 + goto put_backbone_gw; 2160 2160 } 2161 2161 2162 2162 if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) || 2163 2163 nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) || 2164 2164 nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN, 2165 - claim->backbone_gw->orig) || 2165 + backbone_gw->orig) || 2166 2166 nla_put_u16(msg, BATADV_ATTR_BLA_CRC, 2167 2167 backbone_crc)) { 2168 2168 genlmsg_cancel(msg, hdr); 2169 - goto out; 2169 + goto put_backbone_gw; 2170 2170 } 2171 2171 2172 2172 genlmsg_end(msg, hdr); 2173 2173 ret = 0; 2174 2174 2175 + put_backbone_gw: 2176 + batadv_backbone_gw_put(backbone_gw); 2175 2177 out: 2176 2178 return ret; 2177 2179 } ··· 2452 2448 bool batadv_bla_check_claim(struct batadv_priv *bat_priv, 2453 2449 u8 *addr, unsigned short vid) 2454 2450 { 2451 + struct batadv_bla_backbone_gw *backbone_gw; 2455 2452 struct batadv_bla_claim search_claim; 2456 2453 struct batadv_bla_claim *claim = NULL; 2457 2454 struct batadv_hard_iface *primary_if = NULL; ··· 2475 2470 * return false. 2476 2471 */ 2477 2472 if (claim) { 2478 - if (!batadv_compare_eth(claim->backbone_gw->orig, 2473 + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); 2474 + 2475 + if (!batadv_compare_eth(backbone_gw->orig, 2479 2476 primary_if->net_dev->dev_addr)) 2480 2477 ret = false; 2478 + 2479 + batadv_backbone_gw_put(backbone_gw); 2481 2480 batadv_claim_put(claim); 2482 2481 } 2483 2482
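The bla fix stops dereferencing claim->backbone_gw directly and instead takes a counted reference via batadv_bla_claim_get_backbone_gw(), dropping it on every exit path. A compact C11 sketch of that get/use/put discipline; the structs and helpers are invented, and the real kernel additionally relies on RCU for the initial pointer fetch.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct gw {
        atomic_int refcount;
        int orig;
};

struct claim {
        struct gw *backbone_gw;
};

/* Take a reference before using the pointer beyond the owner's scope. */
static struct gw *claim_get_gw(struct claim *c)
{
        struct gw *g = c->backbone_gw;

        atomic_fetch_add(&g->refcount, 1);
        return g;
}

static void gw_put(struct gw *g)
{
        if (atomic_fetch_sub(&g->refcount, 1) == 1)
                free(g);        /* last reference dropped */
}

int main(void)
{
        struct gw *g = calloc(1, sizeof(*g));
        struct claim c;
        struct gw *held;

        if (!g)
                return 1;
        atomic_store(&g->refcount, 1);  /* the claim's own reference */
        c.backbone_gw = g;

        held = claim_get_gw(&c);
        printf("orig=%d\n", held->orig);        /* safe: we hold a reference */
        gw_put(held);

        gw_put(g);      /* drop the claim's reference; this frees g */
        return 0;
}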
+7 -2
net/batman-adv/translation-table.c
··· 798 798 { 799 799 u16 num_vlan = 0; 800 800 u16 num_entries = 0; 801 - u16 change_offset; 802 - u16 tvlv_len; 801 + u16 tvlv_len = 0; 802 + unsigned int change_offset; 803 803 struct batadv_tvlv_tt_vlan_data *tt_vlan; 804 804 struct batadv_orig_node_vlan *vlan; 805 805 u8 *tt_change_ptr; ··· 815 815 /* if tt_len is negative, allocate the space needed by the full table */ 816 816 if (*tt_len < 0) 817 817 *tt_len = batadv_tt_len(num_entries); 818 + 819 + if (change_offset > U16_MAX || *tt_len > U16_MAX - change_offset) { 820 + *tt_len = 0; 821 + goto out; 822 + } 818 823 819 824 tvlv_len = *tt_len; 820 825 tvlv_len += change_offset;
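The tvlv length fix widens change_offset and refuses to proceed when the sum would not fit the on-wire u16, instead of silently truncating. Here is the guard in isolation as a compilable sketch; pack_len() is an invented name.

#include <stdint.h>
#include <stdio.h>

static int pack_len(unsigned int change_offset, unsigned int tt_len,
                    uint16_t *out)
{
        /* reject anything that would wrap or truncate in 16 bits */
        if (change_offset > UINT16_MAX || tt_len > UINT16_MAX - change_offset)
                return -1;
        *out = (uint16_t)(change_offset + tt_len);
        return 0;
}

int main(void)
{
        uint16_t len;

        printf("%d\n", pack_len(100, 200, &len));       /* 0, len = 300 */
        printf("%d\n", pack_len(60000, 10000, &len));   /* -1, would wrap */
        return 0;
}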
+6
net/bridge/br_fdb.c
··· 597 597 dev = br->dev; 598 598 } 599 599 600 + if (!vg) 601 + return; 602 + 600 603 list_for_each_entry(v, &vg->vlan_list, vlist) 601 604 br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid); 602 605 } ··· 632 629 vg = br_vlan_group(br); 633 630 dev = br->dev; 634 631 } 632 + 633 + if (!vg) 634 + return 0; 635 635 636 636 list_for_each_entry(v, &vg->vlan_list, vlist) { 637 637 if (!br_vlan_should_use(v))
+1 -1
net/core/netdev_rx_queue.c
··· 117 117 struct netdev_rx_queue *rxq; 118 118 int ret; 119 119 120 - if (!netdev_need_ops_lock(dev)) 120 + if (!qops) 121 121 return -EOPNOTSUPP; 122 122 123 123 if (rxq_idx >= dev->real_num_rx_queues) {
+27 -13
net/core/rtnetlink.c
··· 3894 3894 goto out; 3895 3895 } 3896 3896 3897 - static struct net *rtnl_get_peer_net(const struct rtnl_link_ops *ops, 3897 + static struct net *rtnl_get_peer_net(struct sk_buff *skb, 3898 + const struct rtnl_link_ops *ops, 3898 3899 struct nlattr *tbp[], 3899 3900 struct nlattr *data[], 3900 3901 struct netlink_ext_ack *extack) 3901 3902 { 3902 - struct nlattr *tb[IFLA_MAX + 1]; 3903 + struct nlattr *tb[IFLA_MAX + 1], **attrs; 3904 + struct net *net; 3903 3905 int err; 3904 3906 3905 - if (!data || !data[ops->peer_type]) 3906 - return rtnl_link_get_net_ifla(tbp); 3907 - 3908 - err = rtnl_nla_parse_ifinfomsg(tb, data[ops->peer_type], extack); 3909 - if (err < 0) 3910 - return ERR_PTR(err); 3911 - 3912 - if (ops->validate) { 3913 - err = ops->validate(tb, NULL, extack); 3907 + if (!data || !data[ops->peer_type]) { 3908 + attrs = tbp; 3909 + } else { 3910 + err = rtnl_nla_parse_ifinfomsg(tb, data[ops->peer_type], extack); 3914 3911 if (err < 0) 3915 3912 return ERR_PTR(err); 3913 + 3914 + if (ops->validate) { 3915 + err = ops->validate(tb, NULL, extack); 3916 + if (err < 0) 3917 + return ERR_PTR(err); 3918 + } 3919 + 3920 + attrs = tb; 3916 3921 } 3917 3922 3918 - return rtnl_link_get_net_ifla(tb); 3923 + net = rtnl_link_get_net_ifla(attrs); 3924 + if (IS_ERR_OR_NULL(net)) 3925 + return net; 3926 + 3927 + if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { 3928 + put_net(net); 3929 + return ERR_PTR(-EPERM); 3930 + } 3931 + 3932 + return net; 3919 3933 } 3920 3934 3921 3935 static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, ··· 4068 4054 } 4069 4055 4070 4056 if (ops->peer_type) { 4071 - peer_net = rtnl_get_peer_net(ops, tb, data, extack); 4057 + peer_net = rtnl_get_peer_net(skb, ops, tb, data, extack); 4072 4058 if (IS_ERR(peer_net)) { 4073 4059 ret = PTR_ERR(peer_net); 4074 4060 goto put_ops;
+1 -4
net/core/skbuff.c
··· 1073 1073 1074 1074 static void skb_kfree_head(void *head, unsigned int end_offset) 1075 1075 { 1076 - if (end_offset == SKB_SMALL_HEAD_HEADROOM) 1077 - kmem_cache_free(net_hotdata.skb_small_head_cache, head); 1078 - else 1079 - kfree(head); 1076 + kfree(head); 1080 1077 } 1081 1078 1082 1079 static void skb_free_head(struct sk_buff *skb)
+8 -5
net/core/skmsg.c
··· 1267 1267 1268 1268 static void sk_psock_verdict_data_ready(struct sock *sk) 1269 1269 { 1270 - struct socket *sock = sk->sk_socket; 1271 - const struct proto_ops *ops; 1270 + const struct proto_ops *ops = NULL; 1271 + struct socket *sock; 1272 1272 int copied; 1273 1273 1274 1274 trace_sk_data_ready(sk); 1275 1275 1276 - if (unlikely(!sock)) 1277 - return; 1278 - ops = READ_ONCE(sock->ops); 1276 + rcu_read_lock(); 1277 + sock = READ_ONCE(sk->sk_socket); 1278 + if (likely(sock)) 1279 + ops = READ_ONCE(sock->ops); 1280 + rcu_read_unlock(); 1279 1281 if (!ops || !ops->read_skb) 1280 1282 return; 1283 + 1281 1284 copied = ops->read_skb(sk, sk_psock_verdict_recv); 1282 1285 if (copied >= 0) { 1283 1286 struct sk_psock *psock;
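The skmsg change reads sk->sk_socket exactly once under RCU and keeps using that snapshot, rather than re-reading a pointer another CPU may clear between the NULL test and the dereference. A userspace sketch of the snapshot-once idiom, with a C11 atomic standing in for READ_ONCE(); in the kernel, the RCU read section additionally keeps the snapshot alive, and the types below are invented.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct proto_ops { const char *name; };
struct socket { const struct proto_ops *ops; };

static _Atomic(struct socket *) sk_socket;      /* cleared concurrently */

static void data_ready(void)
{
        const struct proto_ops *ops = NULL;
        struct socket *sock;

        /* snapshot once; never read sk_socket a second time */
        sock = atomic_load(&sk_socket);
        if (sock)
                ops = sock->ops;
        if (!ops)
                return;
        printf("using %s ops\n", ops->name);
}

int main(void)
{
        static const struct proto_ops tcp_ops = { "tcp" };
        static struct socket s = { &tcp_ops };

        atomic_store(&sk_socket, &s);
        data_ready();
        atomic_store(&sk_socket, NULL);
        data_ready();   /* safely does nothing */
        return 0;
}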
+1 -1
net/devlink/health.c
··· 1327 1327 if (sk) { 1328 1328 devlink_fmsg_pair_nest_start(fmsg, "sk"); 1329 1329 devlink_fmsg_obj_nest_start(fmsg); 1330 - devlink_fmsg_put(fmsg, "family", sk->sk_type); 1330 + devlink_fmsg_put(fmsg, "family", sk->sk_family); 1331 1331 devlink_fmsg_put(fmsg, "type", sk->sk_type); 1332 1332 devlink_fmsg_put(fmsg, "proto", sk->sk_protocol); 1333 1333 devlink_fmsg_obj_nest_end(fmsg);
+28 -13
net/ipv4/nexthop.c
··· 902 902 goto nla_put_failure; 903 903 904 904 if (op_flags & NHA_OP_FLAG_DUMP_STATS && 905 - (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) || 906 - nla_put_nh_group_stats(skb, nh, op_flags))) 905 + nla_put_nh_group_stats(skb, nh, op_flags)) 907 906 goto nla_put_failure; 908 907 909 908 return 0; ··· 1003 1004 nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */ 1004 1005 } 1005 1006 1006 - static size_t nh_nlmsg_size_grp(struct nexthop *nh) 1007 + static size_t nh_nlmsg_size_grp(struct nexthop *nh, u32 op_flags) 1007 1008 { 1008 1009 struct nh_group *nhg = rtnl_dereference(nh->nh_grp); 1009 1010 size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh; 1010 1011 size_t tot = nla_total_size(sz) + 1011 - nla_total_size(2); /* NHA_GROUP_TYPE */ 1012 + nla_total_size(2) + /* NHA_GROUP_TYPE */ 1013 + nla_total_size(0); /* NHA_FDB */ 1012 1014 1013 1015 if (nhg->resilient) 1014 1016 tot += nh_nlmsg_size_grp_res(nhg); 1017 + 1018 + if (op_flags & NHA_OP_FLAG_DUMP_STATS) { 1019 + tot += nla_total_size(0) + /* NHA_GROUP_STATS */ 1020 + nla_total_size(4); /* NHA_HW_STATS_ENABLE */ 1021 + tot += nhg->num_nh * 1022 + (nla_total_size(0) + /* NHA_GROUP_STATS_ENTRY */ 1023 + nla_total_size(4) + /* NHA_GROUP_STATS_ENTRY_ID */ 1024 + nla_total_size_64bit(8)); /* NHA_GROUP_STATS_ENTRY_PACKETS */ 1025 + 1026 + if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS) { 1027 + tot += nhg->num_nh * 1028 + nla_total_size_64bit(8); /* NHA_GROUP_STATS_ENTRY_PACKETS_HW */ 1029 + tot += nla_total_size(4); /* NHA_HW_STATS_USED */ 1030 + } 1031 + } 1015 1032 1016 1033 return tot; 1017 1034 } ··· 1063 1048 return sz; 1064 1049 } 1065 1050 1066 - static size_t nh_nlmsg_size(struct nexthop *nh) 1051 + static size_t nh_nlmsg_size(struct nexthop *nh, u32 op_flags) 1067 1052 { 1068 1053 size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg)); 1069 1054 1070 1055 sz += nla_total_size(4); /* NHA_ID */ 1071 1056 1072 1057 if (nh->is_group) 1073 - sz += nh_nlmsg_size_grp(nh) + 1058 + sz += nh_nlmsg_size_grp(nh, op_flags) + 1074 1059 nla_total_size(4) + /* NHA_OP_FLAGS */ 1075 1060 0; 1076 1061 else ··· 1086 1071 struct sk_buff *skb; 1087 1072 int err = -ENOBUFS; 1088 1073 1089 - skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any()); 1074 + skb = nlmsg_new(nh_nlmsg_size(nh, 0), gfp_any()); 1090 1075 if (!skb) 1091 1076 goto errout; 1092 1077 ··· 3394 3379 if (err) 3395 3380 return err; 3396 3381 3397 - err = -ENOBUFS; 3398 - skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 3399 - if (!skb) 3400 - goto out; 3401 - 3402 3382 err = -ENOENT; 3403 3383 nh = nexthop_find_by_id(net, id); 3404 3384 if (!nh) 3405 - goto errout_free; 3385 + goto out; 3386 + 3387 + err = -ENOBUFS; 3388 + skb = nlmsg_new(nh_nlmsg_size(nh, op_flags), GFP_KERNEL); 3389 + if (!skb) 3390 + goto out; 3406 3391 3407 3392 err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid, 3408 3393 nlh->nlmsg_seq, 0, op_flags);
+4 -1
net/ipv4/xfrm4_input.c
··· 50 50 { 51 51 struct xfrm_offload *xo = xfrm_offload(skb); 52 52 struct iphdr *iph = ip_hdr(skb); 53 + struct net_device *dev = skb->dev; 53 54 54 55 iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol; 55 56 ··· 74 73 } 75 74 76 75 NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, 77 - dev_net(skb->dev), NULL, skb, skb->dev, NULL, 76 + dev_net(dev), NULL, skb, dev, NULL, 78 77 xfrm4_rcv_encap_finish); 78 + if (async) 79 + dev_put(dev); 79 80 return 0; 80 81 } 81 82
+21 -12
net/ipv6/ioam6.c
··· 710 710 struct ioam6_schema *sc, 711 711 unsigned int sclen, bool is_input) 712 712 { 713 - struct net_device *dev = skb_dst_dev(skb); 713 + /* Note: skb_dst_dev_rcu() can't be NULL at this point. */ 714 + struct net_device *dev = skb_dst_dev_rcu(skb); 715 + struct inet6_dev *i_skb_dev, *idev; 714 716 struct timespec64 ts; 715 717 ktime_t tstamp; 716 718 u64 raw64; ··· 723 721 724 722 data = trace->data + trace->remlen * 4 - trace->nodelen * 4 - sclen * 4; 725 723 724 + i_skb_dev = skb->dev ? __in6_dev_get(skb->dev) : NULL; 725 + idev = __in6_dev_get(dev); 726 + 726 727 /* hop_lim and node_id */ 727 728 if (trace->type.bit0) { 728 729 byte = ipv6_hdr(skb)->hop_limit; 729 730 if (is_input) 730 731 byte--; 731 732 732 - raw32 = dev_net(dev)->ipv6.sysctl.ioam6_id; 733 + raw32 = READ_ONCE(dev_net(dev)->ipv6.sysctl.ioam6_id); 733 734 734 735 *(__be32 *)data = cpu_to_be32((byte << 24) | raw32); 735 736 data += sizeof(__be32); ··· 740 735 741 736 /* ingress_if_id and egress_if_id */ 742 737 if (trace->type.bit1) { 743 - if (!skb->dev) 738 + if (!i_skb_dev) 744 739 raw16 = IOAM6_U16_UNAVAILABLE; 745 740 else 746 - raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id); 741 + raw16 = (__force u16)READ_ONCE(i_skb_dev->cnf.ioam6_id); 747 742 748 743 *(__be16 *)data = cpu_to_be16(raw16); 749 744 data += sizeof(__be16); 750 745 751 - if (dev->flags & IFF_LOOPBACK) 746 + if ((dev->flags & IFF_LOOPBACK) || !idev) 752 747 raw16 = IOAM6_U16_UNAVAILABLE; 753 748 else 754 - raw16 = (__force u16)READ_ONCE(__in6_dev_get(dev)->cnf.ioam6_id); 749 + raw16 = (__force u16)READ_ONCE(idev->cnf.ioam6_id); 755 750 756 751 *(__be16 *)data = cpu_to_be16(raw16); 757 752 data += sizeof(__be16); ··· 803 798 struct Qdisc *qdisc; 804 799 __u32 qlen, backlog; 805 800 806 - if (dev->flags & IFF_LOOPBACK) { 801 + if (dev->flags & IFF_LOOPBACK || 802 + skb_get_queue_mapping(skb) >= dev->num_tx_queues) { 807 803 *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); 808 804 } else { 809 805 queue = skb_get_tx_queue(dev, skb); 810 806 qdisc = rcu_dereference(queue->qdisc); 807 + 808 + spin_lock_bh(qdisc_lock(qdisc)); 811 809 qdisc_qstats_qlen_backlog(qdisc, &qlen, &backlog); 810 + spin_unlock_bh(qdisc_lock(qdisc)); 812 811 813 812 *(__be32 *)data = cpu_to_be32(backlog); 814 813 } ··· 831 822 if (is_input) 832 823 byte--; 833 824 834 - raw64 = dev_net(dev)->ipv6.sysctl.ioam6_id_wide; 825 + raw64 = READ_ONCE(dev_net(dev)->ipv6.sysctl.ioam6_id_wide); 835 826 836 827 *(__be64 *)data = cpu_to_be64(((u64)byte << 56) | raw64); 837 828 data += sizeof(__be64); ··· 839 830 840 831 /* ingress_if_id and egress_if_id (wide) */ 841 832 if (trace->type.bit9) { 842 - if (!skb->dev) 833 + if (!i_skb_dev) 843 834 raw32 = IOAM6_U32_UNAVAILABLE; 844 835 else 845 - raw32 = READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id_wide); 836 + raw32 = READ_ONCE(i_skb_dev->cnf.ioam6_id_wide); 846 837 847 838 *(__be32 *)data = cpu_to_be32(raw32); 848 839 data += sizeof(__be32); 849 840 850 - if (dev->flags & IFF_LOOPBACK) 841 + if ((dev->flags & IFF_LOOPBACK) || !idev) 851 842 raw32 = IOAM6_U32_UNAVAILABLE; 852 843 else 853 - raw32 = READ_ONCE(__in6_dev_get(dev)->cnf.ioam6_id_wide); 844 + raw32 = READ_ONCE(idev->cnf.ioam6_id_wide); 854 845 855 846 *(__be32 *)data = cpu_to_be32(raw32); 856 847 data += sizeof(__be32);
+1 -2
net/ipv6/netfilter/ip6t_eui64.c
··· 22 22 unsigned char eui64[8]; 23 23 24 24 if (!(skb_mac_header(skb) >= skb->head && 25 - skb_mac_header(skb) + ETH_HLEN <= skb->data) && 26 - par->fragoff != 0) { 25 + skb_mac_header(skb) + ETH_HLEN <= skb->data)) { 27 26 par->hotdrop = true; 28 27 return false; 29 28 }
+22 -12
net/ipv6/seg6_iptunnel.c
··· 48 48 } 49 49 50 50 struct seg6_lwt { 51 - struct dst_cache cache; 51 + struct dst_cache cache_input; 52 + struct dst_cache cache_output; 52 53 struct in6_addr tunsrc; 53 54 struct seg6_iptunnel_encap tuninfo[]; 54 55 }; ··· 504 503 slwt = seg6_lwt_lwtunnel(lwtst); 505 504 506 505 local_bh_disable(); 507 - dst = dst_cache_get(&slwt->cache); 506 + dst = dst_cache_get(&slwt->cache_input); 508 507 local_bh_enable(); 509 508 510 509 err = seg6_do_srh(skb, dst); ··· 520 519 /* cache only if we don't create a dst reference loop */ 521 520 if (!dst->error && lwtst != dst->lwtstate) { 522 521 local_bh_disable(); 523 - dst_cache_set_ip6(&slwt->cache, dst, 522 + dst_cache_set_ip6(&slwt->cache_input, dst, 524 523 &ipv6_hdr(skb)->saddr); 525 524 local_bh_enable(); 526 525 } ··· 580 579 slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); 581 580 582 581 local_bh_disable(); 583 - dst = dst_cache_get(&slwt->cache); 582 + dst = dst_cache_get(&slwt->cache_output); 584 583 local_bh_enable(); 585 584 586 585 err = seg6_do_srh(skb, dst); ··· 607 606 /* cache only if we don't create a dst reference loop */ 608 607 if (orig_dst->lwtstate != dst->lwtstate) { 609 608 local_bh_disable(); 610 - dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr); 609 + dst_cache_set_ip6(&slwt->cache_output, dst, &fl6.saddr); 611 610 local_bh_enable(); 612 611 } 613 612 ··· 721 720 722 721 slwt = seg6_lwt_lwtunnel(newts); 723 722 724 - err = dst_cache_init(&slwt->cache, GFP_ATOMIC); 723 + err = dst_cache_init(&slwt->cache_input, GFP_ATOMIC); 725 724 if (err) 726 - goto free_lwt_state; 725 + goto err_free_newts; 726 + 727 + err = dst_cache_init(&slwt->cache_output, GFP_ATOMIC); 728 + if (err) 729 + goto err_destroy_input; 727 730 728 731 memcpy(&slwt->tuninfo, tuninfo, tuninfo_len); 729 732 ··· 739 734 ipv6_addr_loopback(&slwt->tunsrc)) { 740 735 NL_SET_ERR_MSG(extack, "invalid tunsrc address"); 741 736 err = -EINVAL; 742 - goto free_dst_cache; 737 + goto err_destroy_output; 743 738 } 744 739 } 745 740 ··· 755 750 756 751 return 0; 757 752 758 - free_dst_cache: 759 - dst_cache_destroy(&slwt->cache); 760 - free_lwt_state: 753 + err_destroy_output: 754 + dst_cache_destroy(&slwt->cache_output); 755 + err_destroy_input: 756 + dst_cache_destroy(&slwt->cache_input); 757 + err_free_newts: 761 758 kfree(newts); 762 759 return err; 763 760 } 764 761 765 762 static void seg6_destroy_state(struct lwtunnel_state *lwt) 766 763 { 767 - dst_cache_destroy(&seg6_lwt_lwtunnel(lwt)->cache); 764 + struct seg6_lwt *slwt = seg6_lwt_lwtunnel(lwt); 765 + 766 + dst_cache_destroy(&slwt->cache_input); 767 + dst_cache_destroy(&slwt->cache_output); 768 768 } 769 769 770 770 static int seg6_fill_encap_info(struct sk_buff *skb,
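Splitting the single dst_cache into cache_input and cache_output gives seg6_build_state() two init calls that can fail, so the error path becomes the usual reverse-order goto ladder. The shape of that unwind, reduced to a hypothetical two-resource userspace example (cache_init/cache_destroy here are stand-ins, not the kernel dst_cache API):

    #include <stdlib.h>

    struct cache { void *buf; };

    static int cache_init(struct cache *c)
    {
            c->buf = malloc(64);
            return c->buf ? 0 : -1;
    }

    static void cache_destroy(struct cache *c)
    {
            free(c->buf);
    }

    struct lwt { struct cache cache_input, cache_output; };

    static int lwt_build(struct lwt **out)
    {
            struct lwt *lwt = calloc(1, sizeof(*lwt));

            if (!lwt)
                    return -1;
            if (cache_init(&lwt->cache_input))
                    goto err_free;
            if (cache_init(&lwt->cache_output))
                    goto err_destroy_input; /* unwind strictly in reverse order */
            *out = lwt;
            return 0;

    err_destroy_input:
            cache_destroy(&lwt->cache_input);
    err_free:
            free(lwt);
            return -1;
    }

    int main(void)
    {
            struct lwt *lwt;
            return lwt_build(&lwt);
    }

Each label undoes exactly the steps that succeeded before the failing one, which is why renaming the labels to match the resources makes the ladder easier to audit.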
+4 -1
net/ipv6/xfrm6_input.c
··· 43 43 int xfrm6_transport_finish(struct sk_buff *skb, int async) 44 44 { 45 45 struct xfrm_offload *xo = xfrm_offload(skb); 46 + struct net_device *dev = skb->dev; 46 47 int nhlen = -skb_network_offset(skb); 47 48 48 49 skb_network_header(skb)[IP6CB(skb)->nhoff] = ··· 69 68 } 70 69 71 70 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, 72 - dev_net(skb->dev), NULL, skb, skb->dev, NULL, 71 + dev_net(dev), NULL, skb, dev, NULL, 73 72 xfrm6_transport_finish2); 73 + if (async) 74 + dev_put(dev); 74 75 return 0; 75 76 } 76 77
+34 -18
net/key/af_key.c
··· 757 757 return 0; 758 758 } 759 759 760 + static unsigned int pfkey_sockaddr_fill_zero_tail(const xfrm_address_t *xaddr, 761 + __be16 port, 762 + struct sockaddr *sa, 763 + unsigned short family) 764 + { 765 + unsigned int prefixlen; 766 + int sockaddr_len = pfkey_sockaddr_len(family); 767 + int sockaddr_size = pfkey_sockaddr_size(family); 768 + 769 + prefixlen = pfkey_sockaddr_fill(xaddr, port, sa, family); 770 + if (sockaddr_size > sockaddr_len) 771 + memset((u8 *)sa + sockaddr_len, 0, sockaddr_size - sockaddr_len); 772 + 773 + return prefixlen; 774 + } 775 + 760 776 static struct sk_buff *__pfkey_xfrm_state2msg(const struct xfrm_state *x, 761 777 int add_keys, int hsc) 762 778 { ··· 3222 3206 addr->sadb_address_proto = 0; 3223 3207 addr->sadb_address_reserved = 0; 3224 3208 addr->sadb_address_prefixlen = 3225 - pfkey_sockaddr_fill(&x->props.saddr, 0, 3226 - (struct sockaddr *) (addr + 1), 3227 - x->props.family); 3209 + pfkey_sockaddr_fill_zero_tail(&x->props.saddr, 0, 3210 + (struct sockaddr *)(addr + 1), 3211 + x->props.family); 3228 3212 if (!addr->sadb_address_prefixlen) 3229 3213 BUG(); 3230 3214 ··· 3237 3221 addr->sadb_address_proto = 0; 3238 3222 addr->sadb_address_reserved = 0; 3239 3223 addr->sadb_address_prefixlen = 3240 - pfkey_sockaddr_fill(&x->id.daddr, 0, 3241 - (struct sockaddr *) (addr + 1), 3242 - x->props.family); 3224 + pfkey_sockaddr_fill_zero_tail(&x->id.daddr, 0, 3225 + (struct sockaddr *)(addr + 1), 3226 + x->props.family); 3243 3227 if (!addr->sadb_address_prefixlen) 3244 3228 BUG(); 3245 3229 ··· 3437 3421 addr->sadb_address_proto = 0; 3438 3422 addr->sadb_address_reserved = 0; 3439 3423 addr->sadb_address_prefixlen = 3440 - pfkey_sockaddr_fill(&x->props.saddr, 0, 3441 - (struct sockaddr *) (addr + 1), 3442 - x->props.family); 3424 + pfkey_sockaddr_fill_zero_tail(&x->props.saddr, 0, 3425 + (struct sockaddr *)(addr + 1), 3426 + x->props.family); 3443 3427 if (!addr->sadb_address_prefixlen) 3444 3428 BUG(); 3445 3429 ··· 3459 3443 addr->sadb_address_proto = 0; 3460 3444 addr->sadb_address_reserved = 0; 3461 3445 addr->sadb_address_prefixlen = 3462 - pfkey_sockaddr_fill(ipaddr, 0, 3463 - (struct sockaddr *) (addr + 1), 3464 - x->props.family); 3446 + pfkey_sockaddr_fill_zero_tail(ipaddr, 0, 3447 + (struct sockaddr *)(addr + 1), 3448 + x->props.family); 3465 3449 if (!addr->sadb_address_prefixlen) 3466 3450 BUG(); 3467 3451 ··· 3490 3474 switch (type) { 3491 3475 case SADB_EXT_ADDRESS_SRC: 3492 3476 addr->sadb_address_prefixlen = sel->prefixlen_s; 3493 - pfkey_sockaddr_fill(&sel->saddr, 0, 3494 - (struct sockaddr *)(addr + 1), 3495 - sel->family); 3477 + pfkey_sockaddr_fill_zero_tail(&sel->saddr, 0, 3478 + (struct sockaddr *)(addr + 1), 3479 + sel->family); 3496 3480 break; 3497 3481 case SADB_EXT_ADDRESS_DST: 3498 3482 addr->sadb_address_prefixlen = sel->prefixlen_d; 3499 - pfkey_sockaddr_fill(&sel->daddr, 0, 3500 - (struct sockaddr *)(addr + 1), 3501 - sel->family); 3483 + pfkey_sockaddr_fill_zero_tail(&sel->daddr, 0, 3484 + (struct sockaddr *)(addr + 1), 3485 + sel->family); 3502 3486 break; 3503 3487 default: 3504 3488 return -EINVAL;
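pfkey reserves pfkey_sockaddr_size() bytes per address in the message but pfkey_sockaddr_fill() only writes pfkey_sockaddr_len() of them, so the new wrapper zeroes the gap and the leftover heap bytes never reach userspace. The fill-then-zero-tail idiom in miniature (fill_addr() is a made-up stand-in for the sockaddr fill):

    #include <stdio.h>
    #include <string.h>

    /* made-up stand-in for pfkey_sockaddr_fill(): returns bytes written */
    static size_t fill_addr(char *buf, size_t cap)
    {
            return (size_t)snprintf(buf, cap, "192.0.2.1") + 1;
    }

    int main(void)
    {
            char slot[32];                  /* the reserved message slot */
            size_t written = fill_addr(slot, sizeof(slot));

            if (sizeof(slot) > written)     /* zero the never-written tail */
                    memset(slot + written, 0, sizeof(slot) - written);
            return 0;
    }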
+5
net/l2tp/l2tp_core.c
··· 1290 1290 uh->source = inet->inet_sport; 1291 1291 uh->dest = inet->inet_dport; 1292 1292 udp_len = uhlen + session->hdr_len + data_len; 1293 + if (udp_len > U16_MAX) { 1294 + kfree_skb(skb); 1295 + ret = NET_XMIT_DROP; 1296 + goto out_unlock; 1297 + } 1293 1298 uh->len = htons(udp_len); 1294 1299 1295 1300 /* Calculate UDP checksum if configured to do so */
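uh->len is a 16-bit wire field, so a session whose headers plus payload exceed 65535 bytes would previously have its length silently truncated by htons(); the hunk drops such frames instead. The guard reduces to a one-line range check before the narrowing store, sketched here (udp_set_len() is illustrative, not a kernel helper):

    #include <stdint.h>
    #include <stdio.h>

    /* refuse to narrow a length that no longer fits the 16-bit field */
    static int udp_set_len(uint16_t *field, size_t udp_len)
    {
            if (udp_len > UINT16_MAX)
                    return -1;              /* caller drops the frame */
            *field = (uint16_t)udp_len;     /* htons() omitted for brevity */
            return 0;
    }

    int main(void)
    {
            uint16_t len;

            printf("%d\n", udp_set_len(&len, 70000));   /* -1: would truncate */
            return 0;
    }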
+5 -19
net/mptcp/pm_kernel.c
··· 720 720 721 721 static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, 722 722 struct mptcp_pm_addr_entry *entry, 723 - bool needs_id, bool replace) 723 + bool replace) 724 724 { 725 725 struct mptcp_pm_addr_entry *cur, *del_entry = NULL; 726 726 int ret = -EINVAL; ··· 779 779 } 780 780 } 781 781 782 - if (!entry->addr.id && needs_id) { 782 + if (!entry->addr.id) { 783 783 find_next: 784 784 entry->addr.id = find_next_zero_bit(pernet->id_bitmap, 785 785 MPTCP_PM_MAX_ADDR_ID + 1, ··· 790 790 } 791 791 } 792 792 793 - if (!entry->addr.id && needs_id) 793 + if (!entry->addr.id) 794 794 goto out; 795 795 796 796 __set_bit(entry->addr.id, pernet->id_bitmap); ··· 923 923 return -ENOMEM; 924 924 925 925 entry->addr.port = 0; 926 - ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true, false); 926 + ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, false); 927 927 if (ret < 0) 928 928 kfree(entry); 929 929 ··· 977 977 return 0; 978 978 } 979 979 980 - static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr, 981 - struct genl_info *info) 982 - { 983 - struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; 984 - 985 - if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, 986 - mptcp_pm_address_nl_policy, info->extack) && 987 - tb[MPTCP_PM_ADDR_ATTR_ID]) 988 - return true; 989 - return false; 990 - } 991 - 992 980 /* Add an MPTCP endpoint */ 993 981 int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info) 994 982 { ··· 1025 1037 goto out_free; 1026 1038 } 1027 1039 } 1028 - ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, 1029 - !mptcp_pm_has_addr_attr_id(attr, info), 1030 - true); 1040 + ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true); 1031 1041 if (ret < 0) { 1032 1042 GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret); 1033 1043 goto out_free;
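With the needs_id special case removed, every endpoint added without an explicit ID is assigned one from the pernet bitmap via find_next_zero_bit(). A simplified single-word userspace model of that allocation (alloc_id() is hypothetical; the kernel bitmap is larger and protected by a lock):

    #include <stdio.h>

    #define MAX_ID 32
    static unsigned long long id_bitmap;    /* bit n set => ID n in use */

    static int alloc_id(unsigned int next_id)
    {
            for (unsigned int id = next_id; id <= MAX_ID; id++) {
                    if (!(id_bitmap & (1ULL << id))) {
                            id_bitmap |= 1ULL << id;    /* __set_bit() */
                            return (int)id;
                    }
            }
            return -1;                      /* all IDs in use */
    }

    int main(void)
    {
            alloc_id(1);
            printf("next free: %d\n", alloc_id(1));     /* 2 */
            return 0;
    }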
+2
net/mptcp/protocol.c
··· 4688 4688 { 4689 4689 int err; 4690 4690 4691 + mptcp_subflow_v6_init(); 4692 + 4691 4693 mptcp_v6_prot = mptcp_prot; 4692 4694 strscpy(mptcp_v6_prot.name, "MPTCPv6", sizeof(mptcp_v6_prot.name)); 4693 4695 mptcp_v6_prot.slab = NULL;
+1
net/mptcp/protocol.h
··· 911 911 void __init mptcp_proto_init(void); 912 912 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 913 913 int __init mptcp_proto_v6_init(void); 914 + void __init mptcp_subflow_v6_init(void); 914 915 #endif 915 916 916 917 struct sock *mptcp_sk_clone_init(const struct sock *sk,
+9 -6
net/mptcp/subflow.c
··· 2165 2165 tcp_prot_override.psock_update_sk_prot = NULL; 2166 2166 #endif 2167 2167 2168 + mptcp_diag_subflow_init(&subflow_ulp_ops); 2169 + 2170 + if (tcp_register_ulp(&subflow_ulp_ops) != 0) 2171 + panic("MPTCP: failed to register subflows to ULP\n"); 2172 + } 2173 + 2168 2174 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 2175 + void __init mptcp_subflow_v6_init(void) 2176 + { 2169 2177 /* In struct mptcp_subflow_request_sock, we assume the TCP request sock 2170 2178 * structures for v4 and v6 have the same size. It should not changed in 2171 2179 * the future but better to make sure to be warned if it is no longer ··· 2211 2203 /* Disable sockmap processing for subflows */ 2212 2204 tcpv6_prot_override.psock_update_sk_prot = NULL; 2213 2205 #endif 2214 - #endif 2215 - 2216 - mptcp_diag_subflow_init(&subflow_ulp_ops); 2217 - 2218 - if (tcp_register_ulp(&subflow_ulp_ops) != 0) 2219 - panic("MPTCP: failed to register subflows to ULP\n"); 2220 2206 } 2207 + #endif
-1
net/netfilter/ipvs/ip_vs_ctl.c
··· 1752 1752 ret = ip_vs_bind_scheduler(svc, sched); 1753 1753 if (ret) 1754 1754 goto out_err; 1755 - sched = NULL; 1756 1755 } 1757 1756 1758 1757 ret = ip_vs_start_estimator(ipvs, &svc->stats);
+1 -1
net/netfilter/nft_ct.c
··· 974 974 nf_queue_nf_hook_drop(ctx->net); 975 975 nf_ct_untimeout(ctx->net, timeout); 976 976 nf_ct_netns_put(ctx->net, ctx->family); 977 - kfree(priv->timeout); 977 + kfree_rcu(priv->timeout, rcu); 978 978 } 979 979 980 980 static int nft_ct_timeout_obj_dump(struct sk_buff *skb,
+30 -4
net/netfilter/xt_multiport.c
··· 105 105 return ports_match_v1(multiinfo, ntohs(pptr[0]), ntohs(pptr[1])); 106 106 } 107 107 108 + static bool 109 + multiport_valid_ranges(const struct xt_multiport_v1 *multiinfo) 110 + { 111 + unsigned int i; 112 + 113 + for (i = 0; i < multiinfo->count; i++) { 114 + if (!multiinfo->pflags[i]) 115 + continue; 116 + 117 + if (++i >= multiinfo->count) 118 + return false; 119 + 120 + if (multiinfo->pflags[i]) 121 + return false; 122 + 123 + if (multiinfo->ports[i - 1] > multiinfo->ports[i]) 124 + return false; 125 + } 126 + 127 + return true; 128 + } 129 + 108 130 static inline bool 109 131 check(u_int16_t proto, 110 132 u_int8_t ip_invflags, ··· 149 127 const struct ipt_ip *ip = par->entryinfo; 150 128 const struct xt_multiport_v1 *multiinfo = par->matchinfo; 151 129 152 - return check(ip->proto, ip->invflags, multiinfo->flags, 153 - multiinfo->count) ? 0 : -EINVAL; 130 + if (!check(ip->proto, ip->invflags, multiinfo->flags, multiinfo->count)) 131 + return -EINVAL; 132 + 133 + return multiport_valid_ranges(multiinfo) ? 0 : -EINVAL; 154 134 } 155 135 156 136 static int multiport_mt6_check(const struct xt_mtchk_param *par) ··· 160 136 const struct ip6t_ip6 *ip = par->entryinfo; 161 137 const struct xt_multiport_v1 *multiinfo = par->matchinfo; 162 138 163 - return check(ip->proto, ip->invflags, multiinfo->flags, 164 - multiinfo->count) ? 0 : -EINVAL; 139 + if (!check(ip->proto, ip->invflags, multiinfo->flags, multiinfo->count)) 140 + return -EINVAL; 141 + 142 + return multiport_valid_ranges(multiinfo) ? 0 : -EINVAL; 165 143 } 166 144 167 145 static struct xt_match multiport_mt_reg[] __read_mostly = {
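v1 multiport encodes a port range as two consecutive array slots with pflags set on the opening one, and the match walks them in pairs at runtime, so checkentry now rejects encodings that walk cannot interpret: an opener in the last slot, two openers back to back, or an inverted range. The same walk, lifted onto plain arrays with a quick check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool valid_ranges(const uint16_t *ports, const uint8_t *pflags,
                             unsigned int count)
    {
            for (unsigned int i = 0; i < count; i++) {
                    if (!pflags[i])
                            continue;       /* plain port, not a range opener */
                    if (++i >= count)
                            return false;   /* opener with no upper bound */
                    if (pflags[i])
                            return false;   /* two openers back to back */
                    if (ports[i - 1] > ports[i])
                            return false;   /* inverted range */
            }
            return true;
    }

    int main(void)
    {
            uint16_t ports[] = { 80, 443, 1024 };
            uint8_t flags[]  = { 0, 1, 0 };     /* 80, then range 443-1024 */

            printf("%d\n", valid_ranges(ports, flags, 3));  /* 1: ok */
            return 0;
    }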
+24 -11
net/rfkill/core.c
··· 73 73 struct rfkill_event_ext ev; 74 74 }; 75 75 76 + /* Max rfkill events that can be "in-flight" for one data source */ 77 + #define MAX_RFKILL_EVENT 1000 76 78 struct rfkill_data { 77 79 struct list_head list; 78 80 struct list_head events; 79 81 struct mutex mtx; 80 82 wait_queue_head_t read_wait; 83 + u32 event_count; 81 84 bool input_handler; 82 85 u8 max_size; 83 86 }; ··· 258 255 } 259 256 #endif /* CONFIG_RFKILL_LEDS */ 260 257 261 - static void rfkill_fill_event(struct rfkill_event_ext *ev, 262 - struct rfkill *rfkill, 263 - enum rfkill_operation op) 258 + static int rfkill_fill_event(struct rfkill_int_event *int_ev, 259 + struct rfkill *rfkill, 260 + struct rfkill_data *data, 261 + enum rfkill_operation op) 264 262 { 263 + struct rfkill_event_ext *ev = &int_ev->ev; 265 264 unsigned long flags; 266 265 267 266 ev->idx = rfkill->idx; ··· 276 271 RFKILL_BLOCK_SW_PREV)); 277 272 ev->hard_block_reasons = rfkill->hard_block_reasons; 278 273 spin_unlock_irqrestore(&rfkill->lock, flags); 274 + 275 + scoped_guard(mutex, &data->mtx) { 276 + if (data->event_count++ > MAX_RFKILL_EVENT) { 277 + data->event_count--; 278 + return -ENOSPC; 279 + } 280 + list_add_tail(&int_ev->list, &data->events); 281 + } 282 + return 0; 279 283 } 280 284 281 285 static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op) ··· 296 282 ev = kzalloc_obj(*ev); 297 283 if (!ev) 298 284 continue; 299 - rfkill_fill_event(&ev->ev, rfkill, op); 300 - mutex_lock(&data->mtx); 301 - list_add_tail(&ev->list, &data->events); 302 - mutex_unlock(&data->mtx); 285 + if (rfkill_fill_event(ev, rfkill, data, op)) { 286 + kfree(ev); 287 + continue; 288 + } 303 289 wake_up_interruptible(&data->read_wait); 304 290 } 305 291 } ··· 1200 1186 if (!ev) 1201 1187 goto free; 1202 1188 rfkill_sync(rfkill); 1203 - rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD); 1204 - mutex_lock(&data->mtx); 1205 - list_add_tail(&ev->list, &data->events); 1206 - mutex_unlock(&data->mtx); 1189 + if (rfkill_fill_event(ev, rfkill, data, RFKILL_OP_ADD)) 1190 + kfree(ev); 1207 1191 } 1208 1192 list_add(&data->list, &rfkill_fds); 1209 1193 mutex_unlock(&rfkill_global_mutex); ··· 1271 1259 ret = -EFAULT; 1272 1260 1273 1261 list_del(&ev->list); 1262 + data->event_count--; 1274 1263 kfree(ev); 1275 1264 out: 1276 1265 mutex_unlock(&data->mtx);
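An rfkill fd whose owner never calls read() could previously queue kernel events without bound; fill now enqueues under data->mtx and fails once MAX_RFKILL_EVENT is exceeded, and the read side releases a slot. The accounting in miniature (slightly simplified against the kernel's post-increment check, and without the mutex):

    #include <stdio.h>

    #define MAX_EVENTS 1000
    static unsigned int event_count;        /* kernel: guarded by data->mtx */

    static int enqueue_event(void)
    {
            if (event_count >= MAX_EVENTS)
                    return -1;              /* -ENOSPC: drop, don't queue */
            event_count++;
            return 0;
    }

    static void dequeue_event(void)
    {
            event_count--;                  /* read() releases a slot */
    }

    int main(void)
    {
            int queued = 0;

            for (int i = 0; i < 1500; i++)
                    if (!enqueue_event())
                            queued++;
            dequeue_event();
            printf("queued %d of 1500, %u pending\n", queued, event_count);
            return 0;
    }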
-6
net/rxrpc/af_rxrpc.c
··· 654 654 goto success; 655 655 656 656 case RXRPC_SECURITY_KEY: 657 - ret = -EINVAL; 658 - if (rx->key) 659 - goto error; 660 657 ret = -EISCONN; 661 658 if (rx->sk.sk_state != RXRPC_UNBOUND) 662 659 goto error; ··· 661 664 goto error; 662 665 663 666 case RXRPC_SECURITY_KEYRING: 664 - ret = -EINVAL; 665 - if (rx->key) 666 - goto error; 667 667 ret = -EISCONN; 668 668 if (rx->sk.sk_state != RXRPC_UNBOUND) 669 669 goto error;
+1 -1
net/rxrpc/ar-internal.h
··· 117 117 atomic_t stat_tx_jumbo[10]; 118 118 atomic_t stat_rx_jumbo[10]; 119 119 120 - atomic_t stat_why_req_ack[8]; 120 + atomic_t stat_why_req_ack[9]; 121 121 122 122 atomic_t stat_io_loop; 123 123 };
+10 -15
net/rxrpc/call_object.c
··· 654 654 if (dead) { 655 655 ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE); 656 656 657 - if (!list_empty(&call->link)) { 658 - spin_lock(&rxnet->call_lock); 659 - list_del_init(&call->link); 660 - spin_unlock(&rxnet->call_lock); 661 - } 657 + spin_lock(&rxnet->call_lock); 658 + list_del_rcu(&call->link); 659 + spin_unlock(&rxnet->call_lock); 662 660 663 661 rxrpc_cleanup_call(call); 664 662 } ··· 692 694 rxrpc_put_bundle(call->bundle, rxrpc_bundle_put_call); 693 695 rxrpc_put_peer(call->peer, rxrpc_peer_put_call); 694 696 rxrpc_put_local(call->local, rxrpc_local_put_call); 697 + key_put(call->key); 695 698 call_rcu(&call->rcu, rxrpc_rcu_free_call); 696 699 } 697 700 ··· 729 730 _enter(""); 730 731 731 732 if (!list_empty(&rxnet->calls)) { 733 + int shown = 0; 734 + 732 735 spin_lock(&rxnet->call_lock); 733 736 734 - while (!list_empty(&rxnet->calls)) { 735 - call = list_entry(rxnet->calls.next, 736 - struct rxrpc_call, link); 737 - _debug("Zapping call %p", call); 738 - 739 - rxrpc_see_call(call, rxrpc_call_see_zap); 740 - list_del_init(&call->link); 737 + list_for_each_entry(call, &rxnet->calls, link) { 738 + rxrpc_see_call(call, rxrpc_call_see_still_live); 741 739 742 740 pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n", 743 741 call, refcount_read(&call->ref), 744 742 rxrpc_call_states[__rxrpc_call_state(call)], 745 743 call->flags, call->events); 746 744 747 - spin_unlock(&rxnet->call_lock); 748 - cond_resched(); 749 - spin_lock(&rxnet->call_lock); 745 + if (++shown >= 10) 746 + break; 750 747 } 751 748 752 749 spin_unlock(&rxnet->call_lock);
+15 -4
net/rxrpc/conn_event.c
··· 247 247 struct sk_buff *skb) 248 248 { 249 249 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 250 + bool secured = false; 250 251 int ret; 251 252 252 253 if (conn->state == RXRPC_CONN_ABORTED) ··· 263 262 return ret; 264 263 265 264 case RXRPC_PACKET_TYPE_RESPONSE: 265 + spin_lock_irq(&conn->state_lock); 266 + if (conn->state != RXRPC_CONN_SERVICE_CHALLENGING) { 267 + spin_unlock_irq(&conn->state_lock); 268 + return 0; 269 + } 270 + spin_unlock_irq(&conn->state_lock); 271 + 266 272 ret = conn->security->verify_response(conn, skb); 267 273 if (ret < 0) 268 274 return ret; ··· 280 272 return ret; 281 273 282 274 spin_lock_irq(&conn->state_lock); 283 - if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) 275 + if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { 284 276 conn->state = RXRPC_CONN_SERVICE; 277 + secured = true; 278 + } 285 279 spin_unlock_irq(&conn->state_lock); 286 280 287 - if (conn->state == RXRPC_CONN_SERVICE) { 281 + if (secured) { 288 282 /* Offload call state flipping to the I/O thread. As 289 283 * we've already received the packet, put it on the 290 284 * front of the queue. ··· 567 557 spin_lock_irq(&local->lock); 568 558 old = conn->tx_response; 569 559 if (old) { 570 - struct rxrpc_skb_priv *osp = rxrpc_skb(skb); 560 + struct rxrpc_skb_priv *osp = rxrpc_skb(old); 571 561 572 562 /* Always go with the response to the most recent challenge. */ 573 563 if (after(sp->resp.challenge_serial, osp->resp.challenge_serial)) 574 - conn->tx_response = old; 564 + conn->tx_response = skb; 575 565 else 576 566 old = skb; 577 567 } else { ··· 579 569 } 580 570 spin_unlock_irq(&local->lock); 581 571 rxrpc_poke_conn(conn, rxrpc_conn_get_poke_response); 572 + rxrpc_free_skb(old, rxrpc_skb_put_old_response); 582 573 }
+1 -1
net/rxrpc/input_rack.c
··· 413 413 break; 414 414 //case RXRPC_CALL_RACKTIMER_ZEROWIN: 415 415 default: 416 - pr_warn("Unexpected rack timer %u", call->rack_timer_mode); 416 + pr_warn("Unexpected rack timer %u", mode); 417 417 } 418 418 }
+2 -1
net/rxrpc/io_thread.c
··· 419 419 420 420 if (sp->hdr.callNumber > chan->call_id) { 421 421 if (rxrpc_to_client(sp)) { 422 - rxrpc_put_call(call, rxrpc_call_put_input); 422 + if (call) 423 + rxrpc_put_call(call, rxrpc_call_put_input); 423 424 return rxrpc_protocol_error(skb, 424 425 rxrpc_eproto_unexpected_implicit_end); 425 426 }
+23 -17
net/rxrpc/key.c
··· 13 13 #include <crypto/skcipher.h> 14 14 #include <linux/module.h> 15 15 #include <linux/net.h> 16 + #include <linux/overflow.h> 16 17 #include <linux/skbuff.h> 17 18 #include <linux/key-type.h> 18 19 #include <linux/ctype.h> ··· 73 72 return -EKEYREJECTED; 74 73 75 74 plen = sizeof(*token) + sizeof(*token->kad) + tktlen; 76 - prep->quotalen = datalen + plen; 75 + prep->quotalen += datalen + plen; 77 76 78 77 plen -= sizeof(*token); 79 78 token = kzalloc_obj(*token); ··· 172 171 size_t plen; 173 172 const __be32 *ticket, *key; 174 173 s64 tmp; 175 - u32 tktlen, keylen; 174 + size_t raw_keylen, raw_tktlen, keylen, tktlen; 176 175 177 176 _enter(",{%x,%x,%x,%x},%x", 178 177 ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), ntohl(xdr[3]), ··· 182 181 goto reject; 183 182 184 183 key = xdr + (6 * 2 + 1); 185 - keylen = ntohl(key[-1]); 186 - _debug("keylen: %x", keylen); 187 - keylen = round_up(keylen, 4); 184 + raw_keylen = ntohl(key[-1]); 185 + _debug("keylen: %zx", raw_keylen); 186 + if (raw_keylen > AFSTOKEN_GK_KEY_MAX) 187 + goto reject; 188 + keylen = round_up(raw_keylen, 4); 188 189 if ((6 * 2 + 2) * 4 + keylen > toklen) 189 190 goto reject; 190 191 191 192 ticket = xdr + (6 * 2 + 1 + (keylen / 4) + 1); 192 - tktlen = ntohl(ticket[-1]); 193 - _debug("tktlen: %x", tktlen); 194 - tktlen = round_up(tktlen, 4); 193 + raw_tktlen = ntohl(ticket[-1]); 194 + _debug("tktlen: %zx", raw_tktlen); 195 + if (raw_tktlen > AFSTOKEN_GK_TOKEN_MAX) 196 + goto reject; 197 + tktlen = round_up(raw_tktlen, 4); 195 198 if ((6 * 2 + 2) * 4 + keylen + tktlen != toklen) { 196 - kleave(" = -EKEYREJECTED [%x!=%x, %x,%x]", 199 + kleave(" = -EKEYREJECTED [%zx!=%x, %zx,%zx]", 197 200 (6 * 2 + 2) * 4 + keylen + tktlen, toklen, 198 201 keylen, tktlen); 199 202 goto reject; 200 203 } 201 204 202 205 plen = sizeof(*token) + sizeof(*token->rxgk) + tktlen + keylen; 203 - prep->quotalen = datalen + plen; 206 + prep->quotalen += datalen + plen; 204 207 205 208 plen -= sizeof(*token); 206 209 token = kzalloc_obj(*token); 207 210 if (!token) 208 211 goto nomem; 209 212 210 - token->rxgk = kzalloc(sizeof(*token->rxgk) + keylen, GFP_KERNEL); 213 + token->rxgk = kzalloc(struct_size_t(struct rxgk_key, _key, raw_keylen), GFP_KERNEL); 211 214 if (!token->rxgk) 212 215 goto nomem_token; 213 216 ··· 226 221 token->rxgk->enctype = tmp = xdr_dec64(xdr + 5 * 2); 227 222 if (tmp < 0 || tmp > UINT_MAX) 228 223 goto reject_token; 229 - token->rxgk->key.len = ntohl(key[-1]); 224 + token->rxgk->key.len = raw_keylen; 230 225 token->rxgk->key.data = token->rxgk->_key; 231 - token->rxgk->ticket.len = ntohl(ticket[-1]); 226 + token->rxgk->ticket.len = raw_tktlen; 232 227 233 228 if (token->rxgk->endtime != 0) { 234 229 expiry = rxrpc_s64_to_time64(token->rxgk->endtime); ··· 241 236 memcpy(token->rxgk->key.data, key, token->rxgk->key.len); 242 237 243 238 /* Pad the ticket so that we can use it directly in XDR */ 244 - token->rxgk->ticket.data = kzalloc(round_up(token->rxgk->ticket.len, 4), 245 - GFP_KERNEL); 239 + token->rxgk->ticket.data = kzalloc(tktlen, GFP_KERNEL); 246 240 if (!token->rxgk->ticket.data) 247 241 goto nomem_yrxgk; 248 242 memcpy(token->rxgk->ticket.data, ticket, token->rxgk->ticket.len); ··· 278 274 nomem: 279 275 return -ENOMEM; 280 276 reject_token: 277 + kfree(token->rxgk); 281 278 kfree(token); 282 279 reject: 283 280 return -EKEYREJECTED; ··· 465 460 memcpy(&kver, prep->data, sizeof(kver)); 466 461 prep->data += sizeof(kver); 467 462 prep->datalen -= sizeof(kver); 463 + prep->quotalen = 0; 468 464 469 465 _debug("KEY I/F
VERSION: %u", kver); 470 466 ··· 503 497 goto error; 504 498 505 499 plen = sizeof(*token->kad) + v1->ticket_length; 506 - prep->quotalen = plen + sizeof(*token); 500 + prep->quotalen += plen + sizeof(*token); 507 501 508 502 ret = -ENOMEM; 509 503 token = kzalloc_obj(*token); ··· 622 616 623 617 _enter(""); 624 618 625 - if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities) 619 + if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->key) 626 620 return -EINVAL; 627 621 628 622 description = memdup_sockptr_nul(optval, optlen);
+2
net/rxrpc/output.c
··· 477 477 why = rxrpc_reqack_old_rtt; 478 478 else if (!last && !after(READ_ONCE(call->send_top), txb->seq)) 479 479 why = rxrpc_reqack_app_stall; 480 + else if (call->tx_winsize <= (2 * req->n) || call->cong_cwnd <= (2 * req->n)) 481 + why = rxrpc_reqack_jumbo_win; 480 482 else 481 483 goto dont_set_request_ack; 482 484
+21 -16
net/rxrpc/proc.c
··· 10 10 #include <net/af_rxrpc.h> 11 11 #include "ar-internal.h" 12 12 13 + #define RXRPC_PROC_ADDRBUF_SIZE \ 14 + (sizeof("[xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255]") + \ 15 + sizeof(":12345")) 16 + 13 17 static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = { 14 18 [RXRPC_CONN_UNUSED] = "Unused ", 15 19 [RXRPC_CONN_CLIENT_UNSECURED] = "ClUnsec ", ··· 57 53 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 58 54 enum rxrpc_call_state state; 59 55 rxrpc_seq_t tx_bottom; 60 - char lbuff[50], rbuff[50]; 56 + char lbuff[RXRPC_PROC_ADDRBUF_SIZE], rbuff[RXRPC_PROC_ADDRBUF_SIZE]; 61 57 long timeout = 0; 62 58 63 59 if (v == &rxnet->calls) { ··· 73 69 74 70 local = call->local; 75 71 if (local) 76 - sprintf(lbuff, "%pISpc", &local->srx.transport); 72 + scnprintf(lbuff, sizeof(lbuff), "%pISpc", &local->srx.transport); 77 73 else 78 74 strcpy(lbuff, "no_local"); 79 75 80 - sprintf(rbuff, "%pISpc", &call->dest_srx.transport); 76 + scnprintf(rbuff, sizeof(rbuff), "%pISpc", &call->dest_srx.transport); 81 77 82 78 state = rxrpc_call_state(call); 83 79 if (state != RXRPC_CALL_SERVER_PREALLOC) ··· 146 142 struct rxrpc_connection *conn; 147 143 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 148 144 const char *state; 149 - char lbuff[50], rbuff[50]; 145 + char lbuff[RXRPC_PROC_ADDRBUF_SIZE], rbuff[RXRPC_PROC_ADDRBUF_SIZE]; 150 146 151 147 if (v == &rxnet->conn_proc_list) { 152 148 seq_puts(seq, ··· 165 161 goto print; 166 162 } 167 163 168 - sprintf(lbuff, "%pISpc", &conn->local->srx.transport); 169 - sprintf(rbuff, "%pISpc", &conn->peer->srx.transport); 164 + scnprintf(lbuff, sizeof(lbuff), "%pISpc", &conn->local->srx.transport); 165 + scnprintf(rbuff, sizeof(rbuff), "%pISpc", &conn->peer->srx.transport); 170 166 print: 171 167 state = rxrpc_is_conn_aborted(conn) ?
172 168 rxrpc_call_completions[conn->completion] : ··· 232 228 { 233 229 struct rxrpc_bundle *bundle; 234 230 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 235 - char lbuff[50], rbuff[50]; 231 + char lbuff[RXRPC_PROC_ADDRBUF_SIZE], rbuff[RXRPC_PROC_ADDRBUF_SIZE]; 236 232 237 233 if (v == &rxnet->bundle_proc_list) { 238 234 seq_puts(seq, ··· 246 242 247 243 bundle = list_entry(v, struct rxrpc_bundle, proc_link); 248 244 249 - sprintf(lbuff, "%pISpc", &bundle->local->srx.transport); 250 - sprintf(rbuff, "%pISpc", &bundle->peer->srx.transport); 245 + scnprintf(lbuff, sizeof(lbuff), "%pISpc", &bundle->local->srx.transport); 246 + scnprintf(rbuff, sizeof(rbuff), "%pISpc", &bundle->peer->srx.transport); 251 247 seq_printf(seq, 252 248 "UDP %-47.47s %-47.47s %4x %3u %3d" 253 249 " %c%c%c %08x | %08x %08x %08x %08x %08x\n", ··· 283 279 { 284 280 struct rxrpc_peer *peer; 285 281 time64_t now; 286 - char lbuff[50], rbuff[50]; 282 + char lbuff[RXRPC_PROC_ADDRBUF_SIZE], rbuff[RXRPC_PROC_ADDRBUF_SIZE]; 287 283 288 284 if (v == SEQ_START_TOKEN) { 289 285 seq_puts(seq, ··· 294 290 295 291 peer = list_entry(v, struct rxrpc_peer, hash_link); 296 292 297 - sprintf(lbuff, "%pISpc", &peer->local->srx.transport); 293 + scnprintf(lbuff, sizeof(lbuff), "%pISpc", &peer->local->srx.transport); 298 294 299 - sprintf(rbuff, "%pISpc", &peer->srx.transport); 295 + scnprintf(rbuff, sizeof(rbuff), "%pISpc", &peer->srx.transport); 300 296 301 297 now = ktime_get_seconds(); 302 298 seq_printf(seq, ··· 405 401 static int rxrpc_local_seq_show(struct seq_file *seq, void *v) 406 402 { 407 403 struct rxrpc_local *local; 408 - char lbuff[50]; 404 + char lbuff[RXRPC_PROC_ADDRBUF_SIZE]; 409 405 410 406 if (v == SEQ_START_TOKEN) { 411 407 seq_puts(seq, ··· 416 412 417 413 local = hlist_entry(v, struct rxrpc_local, link); 418 414 419 - sprintf(lbuff, "%pISpc", &local->srx.transport); 415 + scnprintf(lbuff, sizeof(lbuff), "%pISpc", &local->srx.transport); 420 416 421 417 seq_printf(seq, 422 418 "UDP %-47.47s %3u %3u %3u\n", ··· 522 518 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_IDLE]), 523 519 atomic_read(&rxnet->stat_rx_acks[0])); 524 520 seq_printf(seq, 525 - "Why-Req-A: acklost=%u mrtt=%u ortt=%u stall=%u\n", 521 + "Why-Req-A: acklost=%u mrtt=%u ortt=%u stall=%u jwin=%u\n", 526 522 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_ack_lost]), 527 523 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_more_rtt]), 528 524 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_old_rtt]), 529 - atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_app_stall])); 525 + atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_app_stall]), 526 + atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_jumbo_win])); 530 527 seq_printf(seq, 531 528 "Why-Req-A: nolast=%u retx=%u slows=%u smtxw=%u\n", 532 529 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_no_srv_last]),
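%pISpc can expand to a full IPv6 address, and the port suffix pushes the worst case past the old 50-byte guess, hence the sizeof-derived constant plus scnprintf(), which truncates rather than overruns. How the constant's arithmetic works out, using snprintf() as the userspace stand-in:

    #include <stdio.h>

    #define ADDRBUF_SIZE \
            (sizeof("[xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255]") + \
             sizeof(":12345"))

    int main(void)
    {
            char buf[ADDRBUF_SIZE];     /* 48 + 7 = 55 bytes */

            /* snprintf (like the kernel's scnprintf) truncates instead of
             * overrunning if the formatted address ever grows */
            snprintf(buf, sizeof(buf), "[%s]:%u", "2001:db8::1", 4500);
            printf("%s (buf holds %zu bytes)\n", buf, sizeof(buf));
            return 0;
    }

Deriving the size from the worst-case literal with sizeof means the bound and the format can never drift apart silently.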
+13 -6
net/rxrpc/rxgk.c
··· 1085 1085 1086 1086 _enter(""); 1087 1087 1088 + if ((end - p) * sizeof(__be32) < 24) 1089 + return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO, 1090 + rxgk_abort_resp_short_auth); 1088 1091 if (memcmp(p, conn->rxgk.nonce, 20) != 0) 1089 1092 return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO, 1090 1093 rxgk_abort_resp_bad_nonce); ··· 1101 1098 p += xdr_round_up(app_len) / sizeof(__be32); 1102 1099 if (end - p < 4) 1103 1100 return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO, 1104 - rxgk_abort_resp_short_applen); 1101 + rxgk_abort_resp_short_auth); 1105 1102 1106 1103 level = ntohl(*p++); 1107 1104 epoch = ntohl(*p++); ··· 1167 1164 } 1168 1165 1169 1166 p = auth; 1170 - ret = rxgk_do_verify_authenticator(conn, krb5, skb, p, p + auth_len); 1167 + ret = rxgk_do_verify_authenticator(conn, krb5, skb, p, 1168 + p + auth_len / sizeof(*p)); 1171 1169 error: 1172 1170 kfree(auth); 1173 1171 return ret; ··· 1212 1208 1213 1209 token_offset = offset; 1214 1210 token_len = ntohl(rhdr.token_len); 1215 - if (xdr_round_up(token_len) + sizeof(__be32) > len) 1211 + if (token_len > len || 1212 + xdr_round_up(token_len) + sizeof(__be32) > len) 1216 1213 goto short_packet; 1217 1214 1218 1215 trace_rxrpc_rx_response(conn, sp->hdr.serial, 0, sp->hdr.cksum, token_len); ··· 1228 1223 1229 1224 auth_offset = offset; 1230 1225 auth_len = ntohl(xauth_len); 1231 - if (auth_len < len) 1226 + if (auth_len > len) 1232 1227 goto short_packet; 1233 1228 if (auth_len & 3) 1234 1229 goto inconsistent; ··· 1273 1268 if (ret < 0) { 1274 1269 rxrpc_abort_conn(conn, skb, RXGK_SEALEDINCON, ret, 1275 1270 rxgk_abort_resp_auth_dec); 1276 - goto out; 1271 + goto out_gk; 1277 1272 } 1278 1273 1279 1274 ret = rxgk_verify_authenticator(conn, krb5, skb, auth_offset, auth_len); 1280 1275 if (ret < 0) 1281 - goto out; 1276 + goto out_gk; 1282 1277 1283 1278 conn->key = key; 1284 1279 key = NULL; 1285 1280 ret = 0; 1281 + out_gk: 1282 + rxgk_put(gk); 1286 1283 out: 1287 1284 key_put(key); 1288 1285 _leave(" = %d", ret);
+43 -20
net/rxrpc/rxkad.c
··· 197 197 struct rxrpc_crypt iv; 198 198 __be32 *tmpbuf; 199 199 size_t tmpsize = 4 * sizeof(__be32); 200 + int ret; 200 201 201 202 _enter(""); 202 203 ··· 226 225 skcipher_request_set_sync_tfm(req, ci); 227 226 skcipher_request_set_callback(req, 0, NULL, NULL); 228 227 skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x); 229 - crypto_skcipher_encrypt(req); 228 + ret = crypto_skcipher_encrypt(req); 230 229 skcipher_request_free(req); 231 230 232 231 memcpy(&conn->rxkad.csum_iv, tmpbuf + 2, sizeof(conn->rxkad.csum_iv)); 233 232 kfree(tmpbuf); 234 - _leave(" = 0"); 235 - return 0; 233 + _leave(" = %d", ret); 234 + return ret; 236 235 } 237 236 238 237 /* ··· 265 264 struct scatterlist sg; 266 265 size_t pad; 267 266 u16 check; 267 + int ret; 268 268 269 269 _enter(""); 270 270 ··· 288 286 skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher); 289 287 skcipher_request_set_callback(req, 0, NULL, NULL); 290 288 skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); 291 - crypto_skcipher_encrypt(req); 289 + ret = crypto_skcipher_encrypt(req); 292 290 skcipher_request_zero(req); 293 291 294 - _leave(" = 0"); 295 - return 0; 292 + _leave(" = %d", ret); 293 + return ret; 296 294 } 297 295 298 296 /* ··· 347 345 union { 348 346 __be32 buf[2]; 349 347 } crypto __aligned(8); 350 - u32 x, y; 348 + u32 x, y = 0; 351 349 int ret; 352 350 353 351 _enter("{%d{%x}},{#%u},%u,", ··· 378 376 skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher); 379 377 skcipher_request_set_callback(req, 0, NULL, NULL); 380 378 skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); 381 - crypto_skcipher_encrypt(req); 379 + ret = crypto_skcipher_encrypt(req); 382 380 skcipher_request_zero(req); 381 + if (ret < 0) 382 + goto out; 383 383 384 384 y = ntohl(crypto.buf[1]); 385 385 y = (y >> 16) & 0xffff; ··· 417 413 memset(p + txb->pkt_len, 0, gap); 418 414 } 419 415 416 + out: 420 417 skcipher_request_free(req); 421 418 _leave(" = %d [set %x]", ret, y); 422 419 return ret; ··· 458 453 skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher); 459 454 skcipher_request_set_callback(req, 0, NULL, NULL); 460 455 skcipher_request_set_crypt(req, sg, sg, 8, iv.x); 461 - crypto_skcipher_decrypt(req); 456 + ret = crypto_skcipher_decrypt(req); 462 457 skcipher_request_zero(req); 458 + if (ret < 0) 459 + return ret; 463 460 464 461 /* Extract the decrypted packet length */ 465 462 if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) ··· 538 531 skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher); 539 532 skcipher_request_set_callback(req, 0, NULL, NULL); 540 533 skcipher_request_set_crypt(req, sg, sg, sp->len, iv.x); 541 - crypto_skcipher_decrypt(req); 534 + ret = crypto_skcipher_decrypt(req); 542 535 skcipher_request_zero(req); 543 536 if (sg != _sg) 544 537 kfree(sg); 538 + if (ret < 0) { 539 + WARN_ON_ONCE(ret != -ENOMEM); 540 + return ret; 541 + } 545 542 546 543 /* Extract the decrypted packet length */ 547 544 if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) ··· 613 602 skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher); 614 603 skcipher_request_set_callback(req, 0, NULL, NULL); 615 604 skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); 616 - crypto_skcipher_encrypt(req); 605 + ret = crypto_skcipher_encrypt(req); 617 606 skcipher_request_zero(req); 607 + if (ret < 0) 608 + goto out; 618 609 619 610 y = ntohl(crypto.buf[1]); 620 611 cksum = (y >> 16) & 0xffff; ··· 971 958 struct in_addr addr; 972 959 unsigned int life; 973 960 time64_t issue, now; 961 + int ret; 974 962
bool little_endian; 975 963 u8 *p, *q, *name, *end; 976 964 ··· 991 977 sg_init_one(&sg[0], ticket, ticket_len); 992 978 skcipher_request_set_callback(req, 0, NULL, NULL); 993 979 skcipher_request_set_crypt(req, sg, sg, ticket_len, iv.x); 994 - crypto_skcipher_decrypt(req); 980 + ret = crypto_skcipher_decrypt(req); 995 981 skcipher_request_free(req); 982 + if (ret < 0) 983 + return rxrpc_abort_conn(conn, skb, RXKADBADTICKET, -EPROTO, 984 + rxkad_abort_resp_tkt_short); 996 985 997 986 p = ticket; 998 987 end = p + ticket_len; ··· 1090 1073 /* 1091 1074 * decrypt the response packet 1092 1075 */ 1093 - static void rxkad_decrypt_response(struct rxrpc_connection *conn, 1094 - struct rxkad_response *resp, 1095 - const struct rxrpc_crypt *session_key) 1076 + static int rxkad_decrypt_response(struct rxrpc_connection *conn, 1077 + struct rxkad_response *resp, 1078 + const struct rxrpc_crypt *session_key) 1096 1079 { 1097 1080 struct skcipher_request *req = rxkad_ci_req; 1098 1081 struct scatterlist sg[1]; 1099 1082 struct rxrpc_crypt iv; 1083 + int ret; 1100 1084 1101 1085 _enter(",,%08x%08x", 1102 1086 ntohl(session_key->n[0]), ntohl(session_key->n[1])); 1103 1087 1104 1088 mutex_lock(&rxkad_ci_mutex); 1105 - if (crypto_sync_skcipher_setkey(rxkad_ci, session_key->x, 1106 - sizeof(*session_key)) < 0) 1107 - BUG(); 1089 + ret = crypto_sync_skcipher_setkey(rxkad_ci, session_key->x, 1090 + sizeof(*session_key)); 1091 + if (ret < 0) 1092 + goto unlock; 1108 1093 1109 1094 memcpy(&iv, session_key, sizeof(iv)); 1110 1095 ··· 1115 1096 skcipher_request_set_sync_tfm(req, rxkad_ci); 1116 1097 skcipher_request_set_callback(req, 0, NULL, NULL); 1117 1098 skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x); 1118 - crypto_skcipher_decrypt(req); 1099 + ret = crypto_skcipher_decrypt(req); 1119 1100 skcipher_request_zero(req); 1120 1101 1102 + unlock: 1121 1103 mutex_unlock(&rxkad_ci_mutex); 1122 1104 1123 1105 _leave(""); 1106 + return ret; 1124 1107 } 1125 1108 1126 1109 /* ··· 1215 1194 1216 1195 /* use the session key from inside the ticket to decrypt the 1217 1196 * response */ 1218 - rxkad_decrypt_response(conn, response, &session_key); 1197 + ret = rxkad_decrypt_response(conn, response, &session_key); 1198 + if (ret < 0) 1199 + goto temporary_error_free_ticket; 1219 1200 1220 1201 if (ntohl(response->encrypted.epoch) != conn->proto.epoch || 1221 1202 ntohl(response->encrypted.cid) != conn->proto.cid ||
+1 -1
net/rxrpc/sendmsg.c
··· 637 637 memset(&cp, 0, sizeof(cp)); 638 638 cp.local = rx->local; 639 639 cp.peer = peer; 640 - cp.key = rx->key; 640 + cp.key = key; 641 641 cp.security_level = rx->min_sec_level; 642 642 cp.exclusive = rx->exclusive | p->exclusive; 643 643 cp.upgrade = p->upgrade;
+3
net/rxrpc/server_key.c
··· 125 125 126 126 _enter(""); 127 127 128 + if (rx->securities) 129 + return -EINVAL; 130 + 128 131 if (optlen <= 0 || optlen > PAGE_SIZE - 1) 129 132 return -EINVAL; 130 133
+5 -1
net/sched/act_csum.c
··· 604 604 protocol = skb->protocol; 605 605 orig_vlan_tag_present = true; 606 606 } else { 607 - struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data; 607 + struct vlan_hdr *vlan; 608 608 609 + if (!pskb_may_pull(skb, VLAN_HLEN)) 610 + goto drop; 611 + 612 + vlan = (struct vlan_hdr *)skb->data; 609 613 protocol = vlan->h_vlan_encapsulated_proto; 610 614 skb_pull(skb, VLAN_HLEN); 611 615 skb_reset_network_header(skb);
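Casting skb->data to a vlan_hdr assumed 4 linear bytes were present; a runt frame could read past the buffer, so the fix pulls the header first and drops on failure. The check-before-parse shape on a flat buffer (parse_vlan() is illustrative; pskb_may_pull() also linearizes paged data, which this does not):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct vlan_hdr { uint16_t tci; uint16_t encapsulated_proto; };

    static int parse_vlan(const uint8_t *data, size_t len, struct vlan_hdr *out)
    {
            if (len < sizeof(*out))     /* the pskb_may_pull() analogue */
                    return -1;          /* runt frame: drop, don't read */
            memcpy(out, data, sizeof(*out));
            return 0;
    }

    int main(void)
    {
            uint8_t runt[2] = { 0 };
            struct vlan_hdr vh;

            printf("%d\n", parse_vlan(runt, sizeof(runt), &vh));    /* -1 */
            return 0;
    }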
+5 -1
net/tipc/group.c
··· 746 746 u32 port = msg_origport(hdr); 747 747 struct tipc_member *m, *pm; 748 748 u16 remitted, in_flight; 749 + u16 acked; 749 750 750 751 if (!grp) 751 752 return; ··· 799 798 case GRP_ACK_MSG: 800 799 if (!m) 801 800 return; 802 - m->bc_acked = msg_grp_bc_acked(hdr); 801 + acked = msg_grp_bc_acked(hdr); 802 + if (less_eq(acked, m->bc_acked)) 803 + return; 804 + m->bc_acked = acked; 803 805 if (--grp->bc_ackers) 804 806 return; 805 807 list_del_init(&m->small_win);
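Group broadcast acks carry 16-bit sequence numbers that wrap, so comparing them with a plain <= misorders values near the wrap point; the fix ignores an ack unless it modularly advances m->bc_acked. The wraparound-safe comparison it relies on, in two lines (the 32768 cutoff splits the sequence space in half, as in TIPC's own helpers):

    #include <stdint.h>
    #include <stdio.h>

    /* "a <= b" in sequence space iff b - a lands in the lower half */
    static int less_eq(uint16_t a, uint16_t b)
    {
            return (uint16_t)(b - a) < 32768;
    }

    int main(void)
    {
            /* 65530 -> 5 wrapped forward, so 65530 "<=" 5 here */
            printf("%d %d\n", less_eq(65530, 5), less_eq(5, 65530));    /* 1 0 */
            return 0;
    }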
+10
net/tls/tls_sw.c
··· 584 584 if (rc == -EBUSY) { 585 585 rc = tls_encrypt_async_wait(ctx); 586 586 rc = rc ?: -EINPROGRESS; 587 + /* 588 + * The async callback tls_encrypt_done() has already 589 + * decremented encrypt_pending and restored the sge on 590 + * both success and error. Skip the synchronous cleanup 591 + * below on error, just remove the record and return. 592 + */ 593 + if (rc != -EINPROGRESS) { 594 + list_del(&rec->list); 595 + return rc; 596 + } 587 597 } 588 598 if (!rc || rc != -EINPROGRESS) { 589 599 atomic_dec(&ctx->encrypt_pending);
+13 -8
net/unix/diag.c
··· 28 28 29 29 static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb) 30 30 { 31 - struct dentry *dentry = unix_sk(sk)->path.dentry; 31 + struct unix_diag_vfs uv; 32 + struct dentry *dentry; 33 + bool have_vfs = false; 32 34 35 + unix_state_lock(sk); 36 + dentry = unix_sk(sk)->path.dentry; 33 37 if (dentry) { 34 - struct unix_diag_vfs uv = { 35 - .udiag_vfs_ino = d_backing_inode(dentry)->i_ino, 36 - .udiag_vfs_dev = dentry->d_sb->s_dev, 37 - }; 38 - 39 - return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv); 38 + uv.udiag_vfs_ino = d_backing_inode(dentry)->i_ino; 39 + uv.udiag_vfs_dev = dentry->d_sb->s_dev; 40 + have_vfs = true; 40 41 } 42 + unix_state_unlock(sk); 41 43 42 - return 0; 44 + if (!have_vfs) 45 + return 0; 46 + 47 + return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv); 43 48 } 44 49 45 50 static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
+2 -1
net/xdp/xdp_umem.c
··· 203 203 if (!unaligned_chunks && chunks_rem) 204 204 return -EINVAL; 205 205 206 - if (headroom >= chunk_size - XDP_PACKET_HEADROOM) 206 + if (headroom > chunk_size - XDP_PACKET_HEADROOM - 207 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - 128) 207 208 return -EINVAL; 208 209 209 210 if (mr->flags & XDP_UMEM_TX_METADATA_LEN) {
+2 -2
net/xdp/xsk.c
··· 239 239 240 240 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) 241 241 { 242 - u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool); 242 + u32 frame_size = __xsk_pool_get_rx_frame_size(xs->pool); 243 243 void *copy_from = xsk_copy_xdp_start(xdp), *copy_to; 244 244 u32 from_len, meta_len, rem, num_desc; 245 245 struct xdp_buff_xsk *xskb; ··· 338 338 if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) 339 339 return -EINVAL; 340 340 341 - if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) { 341 + if (len > __xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) { 342 342 xs->rx_dropped++; 343 343 return -ENOSPC; 344 344 }
+29 -3
net/xdp/xsk_buff_pool.c
··· 10 10 #include "xdp_umem.h" 11 11 #include "xsk.h" 12 12 13 + #define ETH_PAD_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN) 14 + 13 15 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) 14 16 { 15 17 if (!xs->tx) ··· 159 157 int xp_assign_dev(struct xsk_buff_pool *pool, 160 158 struct net_device *netdev, u16 queue_id, u16 flags) 161 159 { 160 + u32 needed = netdev->mtu + ETH_PAD_LEN; 161 + u32 segs = netdev->xdp_zc_max_segs; 162 + bool mbuf = flags & XDP_USE_SG; 162 163 bool force_zc, force_copy; 163 164 struct netdev_bpf bpf; 165 + u32 frame_size; 164 166 int err = 0; 165 167 166 168 ASSERT_RTNL(); ··· 184 178 if (err) 185 179 return err; 186 180 187 - if (flags & XDP_USE_SG) 181 + if (mbuf) 188 182 pool->umem->flags |= XDP_UMEM_SG_FLAG; 189 183 190 184 if (flags & XDP_USE_NEED_WAKEUP) ··· 206 200 goto err_unreg_pool; 207 201 } 208 202 209 - if (netdev->xdp_zc_max_segs == 1 && (flags & XDP_USE_SG)) { 210 - err = -EOPNOTSUPP; 203 + if (mbuf) { 204 + if (segs == 1) { 205 + err = -EOPNOTSUPP; 206 + goto err_unreg_pool; 207 + } 208 + } else { 209 + segs = 1; 210 + } 211 + 212 + /* open-code xsk_pool_get_rx_frame_size() as pool->dev is not 213 + * set yet at this point; we are before getting down to driver 214 + */ 215 + frame_size = __xsk_pool_get_rx_frame_size(pool) - 216 + xsk_pool_get_tailroom(mbuf); 217 + frame_size = ALIGN_DOWN(frame_size, 128); 218 + 219 + if (needed > frame_size * segs) { 220 + err = -EINVAL; 211 221 goto err_unreg_pool; 212 222 } 213 223 ··· 269 247 struct xdp_umem *umem = umem_xs->umem; 270 248 271 249 flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY; 250 + 251 + if (umem->flags & XDP_UMEM_SG_FLAG) 252 + flags |= XDP_USE_SG; 253 + 272 254 if (umem_xs->pool->uses_need_wakeup) 273 255 flags |= XDP_USE_NEED_WAKEUP; 274 256
+14 -4
net/xfrm/xfrm_input.c
··· 506 506 /* An encap_type of -1 indicates async resumption. */ 507 507 if (encap_type == -1) { 508 508 async = 1; 509 - dev_put(skb->dev); 510 509 seq = XFRM_SKB_CB(skb)->seq.input.low; 511 510 spin_lock(&x->lock); 512 511 goto resume; ··· 658 659 dev_hold(skb->dev); 659 660 660 661 nexthdr = x->type->input(x, skb); 661 - if (nexthdr == -EINPROGRESS) 662 + if (nexthdr == -EINPROGRESS) { 663 + if (async) 664 + dev_put(skb->dev); 662 665 return 0; 666 + } 663 667 664 668 dev_put(skb->dev); 665 669 spin_lock(&x->lock); ··· 697 695 XFRM_MODE_SKB_CB(skb)->protocol = nexthdr; 698 696 699 697 err = xfrm_inner_mode_input(x, skb); 700 - if (err == -EINPROGRESS) 698 + if (err == -EINPROGRESS) { 699 + if (async) 700 + dev_put(skb->dev); 701 701 return 0; 702 - else if (err) { 702 + } else if (err) { 703 703 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR); 704 704 goto drop; 705 705 } ··· 738 734 sp->olen = 0; 739 735 if (skb_valid_dst(skb)) 740 736 skb_dst_drop(skb); 737 + if (async) 738 + dev_put(skb->dev); 741 739 gro_cells_receive(&gro_cells, skb); 742 740 return 0; 743 741 } else { ··· 759 753 sp->olen = 0; 760 754 if (skb_valid_dst(skb)) 761 755 skb_dst_drop(skb); 756 + if (async) 757 + dev_put(skb->dev); 762 758 gro_cells_receive(&gro_cells, skb); 763 759 return err; 764 760 } ··· 771 763 drop_unlock: 772 764 spin_unlock(&x->lock); 773 765 drop: 766 + if (async) 767 + dev_put(skb->dev); 774 768 xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1); 775 769 kfree_skb(skb); 776 770 return 0;
+2 -3
net/xfrm/xfrm_policy.c
··· 4290 4290 #endif 4291 4291 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false); 4292 4292 4293 + synchronize_rcu(); 4294 + 4293 4295 WARN_ON(!list_empty(&net->xfrm.policy_all)); 4294 4296 4295 4297 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { ··· 4528 4526 pol = xfrm_policy_lookup_bytype(net, type, &fl, sel->family, dir, if_id); 4529 4527 if (IS_ERR_OR_NULL(pol)) 4530 4528 goto out_unlock; 4531 - 4532 - if (!xfrm_pol_hold_rcu(pol)) 4533 - pol = NULL; 4534 4529 out_unlock: 4535 4530 rcu_read_unlock(); 4536 4531 return pol;
+12 -2
net/xfrm/xfrm_user.c
··· 2677 2677 + nla_total_size(4) /* XFRM_AE_RTHR */ 2678 2678 + nla_total_size(4) /* XFRM_AE_ETHR */ 2679 2679 + nla_total_size(sizeof(x->dir)) /* XFRMA_SA_DIR */ 2680 - + nla_total_size(4); /* XFRMA_SA_PCPU */ 2680 + + nla_total_size(4) /* XFRMA_SA_PCPU */ 2681 + + nla_total_size(sizeof(x->if_id)); /* XFRMA_IF_ID */ 2681 2682 } 2682 2683 2683 2684 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c) ··· 2790 2789 c.portid = nlh->nlmsg_pid; 2791 2790 2792 2791 err = build_aevent(r_skb, x, &c); 2793 - BUG_ON(err < 0); 2792 + if (err < 0) { 2793 + spin_unlock_bh(&x->lock); 2794 + xfrm_state_put(x); 2795 + kfree_skb(r_skb); 2796 + return err; 2797 + } 2794 2798 2795 2799 err = nlmsg_unicast(xfrm_net_nlsk(net, skb), r_skb, NETLINK_CB(skb).portid); 2796 2800 spin_unlock_bh(&x->lock); ··· 3966 3960 return err; 3967 3961 } 3968 3962 upe->hard = !!hard; 3963 + /* clear the padding bytes */ 3964 + memset_after(upe, 0, hard); 3969 3965 3970 3966 nlmsg_end(skb, nlh); 3971 3967 return 0; ··· 4125 4117 return -EMSGSIZE; 4126 4118 4127 4119 ur = nlmsg_data(nlh); 4120 + memset(ur, 0, sizeof(*ur)); 4128 4121 ur->proto = proto; 4129 4122 memcpy(&ur->sel, sel, sizeof(ur->sel)); 4130 4123 ··· 4173 4164 4174 4165 um = nlmsg_data(nlh); 4175 4166 4167 + memset(&um->id, 0, sizeof(um->id)); 4176 4168 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr)); 4177 4169 um->id.spi = x->id.spi; 4178 4170 um->id.family = x->props.family;
+9 -1
sound/usb/qcom/qc_audio_offload.c
··· 699 699 uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE, 700 700 PAGE_SIZE); 701 701 xhci_sideband_remove_interrupter(uadev[dev->chip->card->number].sb); 702 + usb_offload_put(dev->udev); 702 703 } 703 704 } 704 705 ··· 1183 1182 dma_coherent = dev_is_dma_coherent(subs->dev->bus->sysdev); 1184 1183 er_pa = 0; 1185 1184 1185 + ret = usb_offload_get(subs->dev); 1186 + if (ret < 0) 1187 + goto exit; 1188 + 1186 1189 /* event ring */ 1187 1190 ret = xhci_sideband_create_interrupter(uadev[card_num].sb, 1, false, 1188 1191 0, uaudio_qdev->data->intr_num); 1189 1192 if (ret < 0) { 1190 1193 dev_err(&subs->dev->dev, "failed to fetch interrupter\n"); 1191 - goto exit; 1194 + goto put_offload; 1192 1195 } 1193 1196 1194 1197 sgt = xhci_sideband_get_event_buffer(uadev[card_num].sb); ··· 1224 1219 mem_info->dma = 0; 1225 1220 remove_interrupter: 1226 1221 xhci_sideband_remove_interrupter(uadev[card_num].sb); 1222 + put_offload: 1223 + usb_offload_put(subs->dev); 1227 1224 exit: 1228 1225 return ret; 1229 1226 } ··· 1489 1482 uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE, PAGE_SIZE); 1490 1483 free_sec_ring: 1491 1484 xhci_sideband_remove_interrupter(uadev[card_num].sb); 1485 + usb_offload_put(subs->dev); 1492 1486 drop_sync_ep: 1493 1487 if (subs->sync_endpoint) { 1494 1488 uaudio_iommu_unmap(MEM_XFER_RING,
+26 -29
tools/testing/selftests/bpf/prog_tests/test_xsk.c
··· 179 179 return xsk_socket__create(&xsk->xsk, ifobject->ifindex, 0, umem->umem, rxr, txr, &cfg); 180 180 } 181 181 182 - #define MAX_SKB_FRAGS_PATH "/proc/sys/net/core/max_skb_frags" 183 - static unsigned int get_max_skb_frags(void) 184 - { 185 - unsigned int max_skb_frags = 0; 186 - FILE *file; 187 - 188 - file = fopen(MAX_SKB_FRAGS_PATH, "r"); 189 - if (!file) { 190 - ksft_print_msg("Error opening %s\n", MAX_SKB_FRAGS_PATH); 191 - return 0; 192 - } 193 - 194 - if (fscanf(file, "%u", &max_skb_frags) != 1) 195 - ksft_print_msg("Error reading %s\n", MAX_SKB_FRAGS_PATH); 196 - 197 - fclose(file); 198 - return max_skb_frags; 199 - } 200 - 201 182 static int set_ring_size(struct ifobject *ifobj) 202 183 { 203 184 int ret; ··· 1959 1978 1960 1979 int testapp_stats_rx_dropped(struct test_spec *test) 1961 1980 { 1981 + u32 umem_tr = test->ifobj_tx->umem_tailroom; 1982 + 1962 1983 if (test->mode == TEST_MODE_ZC) { 1963 1984 ksft_print_msg("Can not run RX_DROPPED test for ZC mode\n"); 1964 1985 return TEST_SKIP; 1965 1986 } 1966 1987 1967 - if (pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0)) 1988 + if (pkt_stream_replace_half(test, (MIN_PKT_SIZE * 3) + umem_tr, 0)) 1968 1989 return TEST_FAILURE; 1969 1990 test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size - 1970 - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3; 1991 + XDP_PACKET_HEADROOM - (MIN_PKT_SIZE * 2) - umem_tr; 1971 1992 if (pkt_stream_receive_half(test)) 1972 1993 return TEST_FAILURE; 1973 1994 test->ifobj_rx->validation_func = validate_rx_dropped; ··· 2225 2242 if (test->mode == TEST_MODE_ZC) { 2226 2243 max_frags = test->ifobj_tx->xdp_zc_max_segs; 2227 2244 } else { 2228 - max_frags = get_max_skb_frags(); 2229 - if (!max_frags) { 2230 - ksft_print_msg("Can't get MAX_SKB_FRAGS from system, using default (17)\n"); 2231 - max_frags = 17; 2232 - } 2245 + max_frags = test->ifobj_tx->max_skb_frags; 2233 2246 max_frags += 1; 2234 2247 } 2235 2248 ··· 2530 2551 2531 2552 int testapp_adjust_tail_grow(struct test_spec *test) 2532 2553 { 2554 + if (test->mode == TEST_MODE_SKB) 2555 + return TEST_SKIP; 2556 + 2533 2557 /* Grow by 4 bytes for testing purpose */ 2534 2558 return testapp_adjust_tail(test, 4, MIN_PKT_SIZE * 2); 2535 2559 } 2536 2560 2537 2561 int testapp_adjust_tail_grow_mb(struct test_spec *test) 2538 2562 { 2563 + u32 grow_size; 2564 + 2565 + if (test->mode == TEST_MODE_SKB) 2566 + return TEST_SKIP; 2567 + 2568 + /* worst case scenario is when underlying setup will work on 3k 2569 + * buffers, let us account for it; given that we will use 6k as 2570 + * pkt_len, expect that it will be broken down to 2 descs each 2571 + * with 3k payload; 2572 + * 2573 + * 4k is truesize, 3k payload, 256 HR, 320 TR; 2574 + */ 2575 + grow_size = XSK_UMEM__MAX_FRAME_SIZE - 2576 + XSK_UMEM__LARGE_FRAME_SIZE - 2577 + XDP_PACKET_HEADROOM - 2578 + test->ifobj_tx->umem_tailroom; 2539 2579 test->mtu = MAX_ETH_JUMBO_SIZE; 2540 - /* Grow by (frag_size - last_frag_Size) - 1 to stay inside the last fragment */ 2541 - return testapp_adjust_tail(test, (XSK_UMEM__MAX_FRAME_SIZE / 2) - 1, 2542 - XSK_UMEM__LARGE_FRAME_SIZE * 2); 2580 + 2581 + return testapp_adjust_tail(test, grow_size, XSK_UMEM__LARGE_FRAME_SIZE * 2); 2543 2582 } 2544 2583 2545 2584 int testapp_tx_queue_consumer(struct test_spec *test)
+23
tools/testing/selftests/bpf/prog_tests/test_xsk.h
··· 31 31 #define SOCK_RECONF_CTR 10 32 32 #define USLEEP_MAX 10000 33 33 34 + #define MAX_SKB_FRAGS_PATH "/proc/sys/net/core/max_skb_frags" 35 + #define SMP_CACHE_BYTES_PATH "/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size" 36 + 34 37 extern bool opt_verbose; 35 38 #define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0) 36 39 ··· 46 43 static inline u64 ceil_u64(u64 a, u64 b) 47 44 { 48 45 return (a + b - 1) / b; 46 + } 47 + 48 + static inline unsigned int read_procfs_val(const char *path) 49 + { 50 + unsigned int read_val = 0; 51 + FILE *file; 52 + 53 + file = fopen(path, "r"); 54 + if (!file) { 55 + ksft_print_msg("Error opening %s\n", path); 56 + return 0; 57 + } 58 + 59 + if (fscanf(file, "%u", &read_val) != 1) 60 + ksft_print_msg("Error reading %s\n", path); 61 + 62 + fclose(file); 63 + return read_val; 49 64 } 50 65 51 66 /* Simple test */ ··· 136 115 int mtu; 137 116 u32 bind_flags; 138 117 u32 xdp_zc_max_segs; 118 + u32 umem_tailroom; 119 + u32 max_skb_frags; 139 120 bool tx_on; 140 121 bool rx_on; 141 122 bool use_poll;
+19
tools/testing/selftests/bpf/prog_tests/xsk.c
··· 62 62 63 63 static void test_xsk(const struct test_spec *test_to_run, enum test_mode mode) 64 64 { 65 + u32 max_frags, umem_tailroom, cache_line_size; 65 66 struct ifobject *ifobj_tx, *ifobj_rx; 66 67 struct test_spec test; 67 68 int ret; ··· 84 83 ifobj_tx->set_ring.default_tx = ifobj_tx->ring.tx_pending; 85 84 ifobj_tx->set_ring.default_rx = ifobj_tx->ring.rx_pending; 86 85 } 86 + 87 + cache_line_size = read_procfs_val(SMP_CACHE_BYTES_PATH); 88 + if (!cache_line_size) 89 + cache_line_size = 64; 90 + 91 + max_frags = read_procfs_val(MAX_SKB_FRAGS_PATH); 92 + if (!max_frags) 93 + max_frags = 17; 94 + 95 + ifobj_tx->max_skb_frags = max_frags; 96 + ifobj_rx->max_skb_frags = max_frags; 97 + 98 + /* 48 bytes is a part of skb_shared_info w/o frags array; 99 + * 16 bytes is sizeof(skb_frag_t) 100 + */ 101 + umem_tailroom = ALIGN(48 + (max_frags * 16), cache_line_size); 102 + ifobj_tx->umem_tailroom = umem_tailroom; 103 + ifobj_rx->umem_tailroom = umem_tailroom; 87 104 88 105 if (!ASSERT_OK(init_iface(ifobj_rx, worker_testapp_validate_rx), "init RX")) 89 106 goto delete_rx;
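The tailroom constant mirrors the kernel's skb_shared_info footprint: 48 bytes of fixed fields plus one 16-byte skb_frag_t per fragment (both sizes are assumptions baked into the test, per the comment), rounded up to the cache line. Worked through with the usual defaults read from procfs above:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int max_frags = 17, cache_line = 64;   /* common defaults */
            unsigned int tailroom = ALIGN(48 + max_frags * 16, cache_line);

            printf("umem tailroom = %u bytes\n", tailroom); /* 320 */
            return 0;
    }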
+341
tools/testing/selftests/bpf/progs/verifier_precision.c
··· 5 5 #include "../../../include/linux/filter.h"
6 6 #include "bpf_misc.h"
7 7 
8 + struct {
9 + __uint(type, BPF_MAP_TYPE_ARRAY);
10 + __uint(max_entries, 1);
11 + __type(key, __u32);
12 + __type(value, __u64);
13 + } precision_map SEC(".maps");
14 + 
8 15 SEC("?raw_tp")
9 16 __success __log_level(2)
10 17 __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
··· 306 299 "r0 = -r0;"
307 300 "exit;"
308 301 ::: __clobber_all);
302 + }
303 + 
304 + SEC("?raw_tp")
305 + __success __log_level(2)
306 + __msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
307 + __msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)")
308 + __msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
309 + __msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
310 + __msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
311 + __naked int bpf_atomic_fetch_add_precision(void)
312 + {
313 + asm volatile (
314 + "r1 = 8;"
315 + "*(u64 *)(r10 - 8) = r1;"
316 + "r2 = 0;"
317 + ".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */
318 + "r3 = r10;"
319 + "r3 += r2;" /* mark_precise */
320 + "r0 = 0;"
321 + "exit;"
322 + :
323 + : __imm_insn(fetch_add_insn,
324 + BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
325 + : __clobber_all);
326 + }
327 + 
328 + SEC("?raw_tp")
329 + __success __log_level(2)
330 + __msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
331 + __msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_xchg((u64 *)(r10 -8), r2)")
332 + __msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
333 + __msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
334 + __msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
335 + __naked int bpf_atomic_xchg_precision(void)
336 + {
337 + asm volatile (
338 + "r1 = 8;"
339 + "*(u64 *)(r10 - 8) = r1;"
340 + "r2 = 0;"
341 + ".8byte %[xchg_insn];" /* r2 = atomic_xchg(*(u64 *)(r10 - 8), r2) */
342 + "r3 = r10;"
343 + "r3 += r2;" /* mark_precise */
344 + "r0 = 0;"
345 + "exit;"
346 + :
347 + : __imm_insn(xchg_insn,
348 + BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_10, BPF_REG_2, -8))
349 + : __clobber_all);
350 + }
351 + 
352 + SEC("?raw_tp")
353 + __success __log_level(2)
354 + __msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
355 + __msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_or((u64 *)(r10 -8), r2)")
356 + __msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
357 + __msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
358 + __msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
359 + __naked int bpf_atomic_fetch_or_precision(void)
360 + {
361 + asm volatile (
362 + "r1 = 8;"
363 + "*(u64 *)(r10 - 8) = r1;"
364 + "r2 = 0;"
365 + ".8byte %[fetch_or_insn];" /* r2 = atomic_fetch_or(*(u64 *)(r10 - 8), r2) */
366 + "r3 = r10;"
367 + "r3 += r2;" /* mark_precise */
368 + "r0 = 0;"
369 + "exit;"
370 + :
371 + : __imm_insn(fetch_or_insn,
372 + BPF_ATOMIC_OP(BPF_DW, BPF_OR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
373 + : __clobber_all);
374 + }
375 + 
376 + SEC("?raw_tp")
377 + __success __log_level(2)
378 + __msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
379 + __msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_and((u64 *)(r10 -8), r2)")
380 + __msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
381 + __msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
382 + __msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
383 + __naked int bpf_atomic_fetch_and_precision(void)
384 + {
385 + asm volatile (
386 + "r1 = 8;"
387 + "*(u64 *)(r10 - 8) = r1;"
388 + "r2 = 0;"
389 + ".8byte %[fetch_and_insn];" /* r2 = atomic_fetch_and(*(u64 *)(r10 - 8), r2) */
390 + "r3 = r10;"
391 + "r3 += r2;" /* mark_precise */
392 + "r0 = 0;"
393 + "exit;"
394 + :
395 + : __imm_insn(fetch_and_insn,
396 + BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
397 + : __clobber_all);
398 + }
399 + 
400 + SEC("?raw_tp")
401 + __success __log_level(2)
402 + __msg("mark_precise: frame0: regs=r2 stack= before 4: (bf) r3 = r10")
403 + __msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_xor((u64 *)(r10 -8), r2)")
404 + __msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
405 + __msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
406 + __msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
407 + __naked int bpf_atomic_fetch_xor_precision(void)
408 + {
409 + asm volatile (
410 + "r1 = 8;"
411 + "*(u64 *)(r10 - 8) = r1;"
412 + "r2 = 0;"
413 + ".8byte %[fetch_xor_insn];" /* r2 = atomic_fetch_xor(*(u64 *)(r10 - 8), r2) */
414 + "r3 = r10;"
415 + "r3 += r2;" /* mark_precise */
416 + "r0 = 0;"
417 + "exit;"
418 + :
419 + : __imm_insn(fetch_xor_insn,
420 + BPF_ATOMIC_OP(BPF_DW, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
421 + : __clobber_all);
422 + }
423 + 
424 + SEC("?raw_tp")
425 + __success __log_level(2)
426 + __msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r3 = r10")
427 + __msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)")
428 + __msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0")
429 + __msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 0")
430 + __msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
431 + __msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
432 + __naked int bpf_atomic_cmpxchg_precision(void)
433 + {
434 + asm volatile (
435 + "r1 = 8;"
436 + "*(u64 *)(r10 - 8) = r1;"
437 + "r0 = 0;"
438 + "r2 = 0;"
439 + ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */
440 + "r3 = r10;"
441 + "r3 += r0;" /* mark_precise */
442 + "r0 = 0;"
443 + "exit;"
444 + :
445 + : __imm_insn(cmpxchg_insn,
446 + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8))
447 + : __clobber_all);
448 + }
449 + 
450 + /* Regression test for dual precision: Both the fetched value (r2) and
451 + * a reread of the same stack slot (r3) are tracked for precision. After
452 + * the atomic operation, the stack slot is STACK_MISC. Thus, the ldx at
453 + * insn 4 does NOT set INSN_F_STACK_ACCESS. Precision for the stack slot
454 + * propagates solely through the atomic fetch's load side (insn 3).
455 + */
456 + SEC("?raw_tp")
457 + __success __log_level(2)
458 + __msg("mark_precise: frame0: regs=r2,r3 stack= before 4: (79) r3 = *(u64 *)(r10 -8)")
459 + __msg("mark_precise: frame0: regs=r2 stack= before 3: (db) r2 = atomic64_fetch_add((u64 *)(r10 -8), r2)")
460 + __msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r2 = 0")
461 + __msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
462 + __msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
463 + __naked int bpf_atomic_fetch_add_dual_precision(void)
464 + {
465 + asm volatile (
466 + "r1 = 8;"
467 + "*(u64 *)(r10 - 8) = r1;"
468 + "r2 = 0;"
469 + ".8byte %[fetch_add_insn];" /* r2 = atomic_fetch_add(*(u64 *)(r10 - 8), r2) */
470 + "r3 = *(u64 *)(r10 - 8);"
471 + "r4 = r2;"
472 + "r4 += r3;"
473 + "r4 &= 7;"
474 + "r5 = r10;"
475 + "r5 += r4;" /* mark_precise */
476 + "r0 = 0;"
477 + "exit;"
478 + :
479 + : __imm_insn(fetch_add_insn,
480 + BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_2, -8))
481 + : __clobber_all);
482 + }
483 + 
484 + SEC("?raw_tp")
485 + __success __log_level(2)
486 + __msg("mark_precise: frame0: regs=r0,r3 stack= before 5: (79) r3 = *(u64 *)(r10 -8)")
487 + __msg("mark_precise: frame0: regs=r0 stack= before 4: (db) r0 = atomic64_cmpxchg((u64 *)(r10 -8), r0, r2)")
488 + __msg("mark_precise: frame0: regs= stack=-8 before 3: (b7) r2 = 0")
489 + __msg("mark_precise: frame0: regs= stack=-8 before 2: (b7) r0 = 8")
490 + __msg("mark_precise: frame0: regs= stack=-8 before 1: (7b) *(u64 *)(r10 -8) = r1")
491 + __msg("mark_precise: frame0: regs=r1 stack= before 0: (b7) r1 = 8")
492 + __naked int bpf_atomic_cmpxchg_dual_precision(void)
493 + {
494 + asm volatile (
495 + "r1 = 8;"
496 + "*(u64 *)(r10 - 8) = r1;"
497 + "r0 = 8;"
498 + "r2 = 0;"
499 + ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r10 - 8), r0, r2) */
500 + "r3 = *(u64 *)(r10 - 8);"
501 + "r4 = r0;"
502 + "r4 += r3;"
503 + "r4 &= 7;"
504 + "r5 = r10;"
505 + "r5 += r4;" /* mark_precise */
506 + "r0 = 0;"
507 + "exit;"
508 + :
509 + : __imm_insn(cmpxchg_insn,
510 + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8))
511 + : __clobber_all);
512 + }
513 + 
514 + SEC("?raw_tp")
515 + __success __log_level(2)
516 + __msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7")
517 + __msg("mark_precise: frame0: regs=r1 stack= before 9: (db) r1 = atomic64_fetch_add((u64 *)(r0 +0), r1)")
518 + __not_msg("falling back to forcing all scalars precise")
519 + __naked int bpf_atomic_fetch_add_map_precision(void)
520 + {
521 + asm volatile (
522 + "r1 = 0;"
523 + "*(u64 *)(r10 - 8) = r1;"
524 + "r2 = r10;"
525 + "r2 += -8;"
526 + "r1 = %[precision_map] ll;"
527 + "call %[bpf_map_lookup_elem];"
528 + "if r0 == 0 goto 1f;"
529 + "r1 = 0;"
530 + ".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u64 *)(r0 + 0), r1) */
531 + "r1 &= 7;"
532 + "r2 = r10;"
533 + "r2 += r1;" /* mark_precise */
534 + "1: r0 = 0;"
535 + "exit;"
536 + :
537 + : __imm_addr(precision_map),
538 + __imm(bpf_map_lookup_elem),
539 + __imm_insn(fetch_add_insn,
540 + BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0))
541 + : __clobber_all);
542 + }
543 + 
544 + SEC("?raw_tp")
545 + __success __log_level(2)
546 + __msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7")
547 + __msg("mark_precise: frame0: regs=r0 stack= before 11: (db) r0 = atomic64_cmpxchg((u64 *)(r6 +0), r0, r1)")
548 + __not_msg("falling back to forcing all scalars precise")
549 + __naked int bpf_atomic_cmpxchg_map_precision(void)
550 + {
551 + asm volatile (
552 + "r1 = 0;"
553 + "*(u64 *)(r10 - 8) = r1;"
554 + "r2 = r10;"
555 + "r2 += -8;"
556 + "r1 = %[precision_map] ll;"
557 + "call %[bpf_map_lookup_elem];"
558 + "if r0 == 0 goto 1f;"
559 + "r6 = r0;"
560 + "r0 = 0;"
561 + "r1 = 0;"
562 + ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u64 *)(r6 + 0), r0, r1) */
563 + "r0 &= 7;"
564 + "r2 = r10;"
565 + "r2 += r0;" /* mark_precise */
566 + "1: r0 = 0;"
567 + "exit;"
568 + :
569 + : __imm_addr(precision_map),
570 + __imm(bpf_map_lookup_elem),
571 + __imm_insn(cmpxchg_insn,
572 + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0))
573 + : __clobber_all);
574 + }
575 + 
576 + SEC("?raw_tp")
577 + __success __log_level(2)
578 + __msg("mark_precise: frame0: regs=r1 stack= before 10: (57) r1 &= 7")
579 + __msg("mark_precise: frame0: regs=r1 stack= before 9: (c3) r1 = atomic_fetch_add((u32 *)(r0 +0), r1)")
580 + __not_msg("falling back to forcing all scalars precise")
581 + __naked int bpf_atomic_fetch_add_32bit_precision(void)
582 + {
583 + asm volatile (
584 + "r1 = 0;"
585 + "*(u64 *)(r10 - 8) = r1;"
586 + "r2 = r10;"
587 + "r2 += -8;"
588 + "r1 = %[precision_map] ll;"
589 + "call %[bpf_map_lookup_elem];"
590 + "if r0 == 0 goto 1f;"
591 + "r1 = 0;"
592 + ".8byte %[fetch_add_insn];" /* r1 = atomic_fetch_add(*(u32 *)(r0 + 0), r1) */
593 + "r1 &= 7;"
594 + "r2 = r10;"
595 + "r2 += r1;" /* mark_precise */
596 + "1: r0 = 0;"
597 + "exit;"
598 + :
599 + : __imm_addr(precision_map),
600 + __imm(bpf_map_lookup_elem),
601 + __imm_insn(fetch_add_insn,
602 + BPF_ATOMIC_OP(BPF_W, BPF_ADD | BPF_FETCH, BPF_REG_0, BPF_REG_1, 0))
603 + : __clobber_all);
604 + }
605 + 
606 + SEC("?raw_tp")
607 + __success __log_level(2)
608 + __msg("mark_precise: frame0: regs=r0 stack= before 12: (57) r0 &= 7")
609 + __msg("mark_precise: frame0: regs=r0 stack= before 11: (c3) r0 = atomic_cmpxchg((u32 *)(r6 +0), r0, r1)")
610 + __not_msg("falling back to forcing all scalars precise")
611 + __naked int bpf_atomic_cmpxchg_32bit_precision(void)
612 + {
613 + asm volatile (
614 + "r1 = 0;"
615 + "*(u64 *)(r10 - 8) = r1;"
616 + "r2 = r10;"
617 + "r2 += -8;"
618 + "r1 = %[precision_map] ll;"
619 + "call %[bpf_map_lookup_elem];"
620 + "if r0 == 0 goto 1f;"
621 + "r6 = r0;"
622 + "r0 = 0;"
623 + "r1 = 0;"
624 + ".8byte %[cmpxchg_insn];" /* r0 = atomic_cmpxchg(*(u32 *)(r6 + 0), r0, r1) */
625 + "r0 &= 7;"
626 + "r2 = r10;"
627 + "r2 += r0;" /* mark_precise */
628 + "1: r0 = 0;"
629 + "exit;"
630 + :
631 + : __imm_addr(precision_map),
632 + __imm(bpf_map_lookup_elem),
633 + __imm_insn(cmpxchg_insn,
634 + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_6, BPF_REG_1, 0))
635 + : __clobber_all);
309 636 }
310 637 
311 638 char _license[] SEC("license") = "GPL";
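These tests inject the atomic instructions as raw .8byte words because BPF C has no spelling for them. Below is a userspace sketch of the layout that BPF_ATOMIC_OP() from tools/include/linux/filter.h packs into struct bpf_insn; it also explains the (db) and (c3) opcode bytes quoted in the expected logs.

#include <stdio.h>
#include <linux/bpf.h>

/* Same layout BPF_ATOMIC_OP() produces: the opcode byte is
 * BPF_STX | size | BPF_ATOMIC and the atomic operation goes in imm.
 */
static struct bpf_insn atomic_op(int size, int op, int dst, int src, int off)
{
    return (struct bpf_insn) {
        .code = BPF_STX | size | BPF_ATOMIC,
        .dst_reg = dst,
        .src_reg = src,
        .off = off,
        .imm = op,
    };
}

int main(void)
{
    struct bpf_insn dw = atomic_op(BPF_DW, BPF_ADD | BPF_FETCH,
                                   BPF_REG_10, BPF_REG_2, -8);
    struct bpf_insn w = atomic_op(BPF_W, BPF_ADD | BPF_FETCH,
                                  BPF_REG_0, BPF_REG_1, 0);

    /* prints code=0xdb then code=0xc3, matching (db)/(c3) in the logs */
    printf("code=%#x imm=%#x\n", dw.code, dw.imm);
    printf("code=%#x imm=%#x\n", w.code, w.imm);
    return 0;
}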
+3 -1
tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
··· 26 26 27 27 SEC("xdp.frags") int xsk_xdp_drop(struct xdp_md *xdp) 28 28 { 29 + static unsigned int drop_idx; 30 + 29 31 /* Drop every other packet */ 30 - if (idx++ % 2) 32 + if (drop_idx++ % 2) 31 33 return XDP_DROP; 32 34 33 35 return bpf_redirect_map(&xsk, 0, XDP_DROP);
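The fix works because a function-local static in BPF still lives in the object's .bss map, so it persists across invocations, while no longer sharing storage with a same-named counter used elsewhere in the file (that motivation is inferred from the rename, not stated in the diff). A minimal sketch, built with clang -target bpf:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int drop_every_other(struct xdp_md *ctx)
{
    /* Lives in this object's .bss map, persisting across packets,
     * but the function scope keeps other programs from touching it.
     */
    static __u32 counter;

    return (counter++ & 1) ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";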
+23
tools/testing/selftests/bpf/xskxceiver.c
··· 80 80 #include <linux/mman.h> 81 81 #include <linux/netdev.h> 82 82 #include <linux/ethtool.h> 83 + #include <linux/align.h> 83 84 #include <arpa/inet.h> 84 85 #include <net/if.h> 85 86 #include <locale.h> ··· 334 333 int main(int argc, char **argv) 335 334 { 336 335 const size_t total_tests = ARRAY_SIZE(tests) + ARRAY_SIZE(ci_skip_tests); 336 + u32 cache_line_size, max_frags, umem_tailroom; 337 337 struct pkt_stream *rx_pkt_stream_default; 338 338 struct pkt_stream *tx_pkt_stream_default; 339 339 struct ifobject *ifobj_tx, *ifobj_rx; ··· 355 353 exit_with_error(ENOMEM); 356 354 357 355 setlocale(LC_ALL, ""); 356 + 357 + cache_line_size = read_procfs_val(SMP_CACHE_BYTES_PATH); 358 + if (!cache_line_size) { 359 + ksft_print_msg("Can't get SMP_CACHE_BYTES from system, using default (64)\n"); 360 + cache_line_size = 64; 361 + } 362 + 363 + max_frags = read_procfs_val(MAX_SKB_FRAGS_PATH); 364 + if (!max_frags) { 365 + ksft_print_msg("Can't get MAX_SKB_FRAGS from system, using default (17)\n"); 366 + max_frags = 17; 367 + } 368 + ifobj_tx->max_skb_frags = max_frags; 369 + ifobj_rx->max_skb_frags = max_frags; 370 + 371 + /* 48 bytes is a part of skb_shared_info w/o frags array; 372 + * 16 bytes is sizeof(skb_frag_t) 373 + */ 374 + umem_tailroom = ALIGN(48 + (max_frags * 16), cache_line_size); 375 + ifobj_tx->umem_tailroom = umem_tailroom; 376 + ifobj_rx->umem_tailroom = umem_tailroom; 358 377 359 378 parse_command_line(ifobj_tx, ifobj_rx, argc, argv); 360 379
+1
tools/testing/selftests/net/Makefile
··· 92 92 srv6_end_x_next_csid_l3vpn_test.sh \ 93 93 srv6_hencap_red_l3vpn_test.sh \ 94 94 srv6_hl2encap_red_l2vpn_test.sh \ 95 + srv6_iptunnel_cache.sh \ 95 96 stress_reuseport_listen.sh \ 96 97 tcp_fastopen_backup_key.sh \ 97 98 test_bpf.sh \
+1
tools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh
··· 414 414 bridge vlan add vid 10 dev br1 self pvid untagged 415 415 ip link set dev $h1 master br1 416 416 ip link set dev br1 up 417 + setup_wait_dev $h1 0 417 418 bridge vlan add vid 10 dev $h1 master 418 419 bridge vlan global set vid 10 dev br1 mcast_snooping 1 mcast_querier 1 419 420 sleep 2
+44 -6
tools/testing/selftests/net/netfilter/nf_queue.c
··· 19 19 bool count_packets; 20 20 bool gso_enabled; 21 21 bool failopen; 22 + bool out_of_order; 23 + bool bogus_verdict; 22 24 int verbose; 23 25 unsigned int queue_num; 24 26 unsigned int timeout; ··· 33 31 34 32 static void help(const char *p) 35 33 { 36 - printf("Usage: %s [-c|-v [-vv] ] [-o] [-t timeout] [-q queue_num] [-Qdst_queue ] [ -d ms_delay ] [-G]\n", p); 34 + printf("Usage: %s [-c|-v [-vv] ] [-o] [-O] [-b] [-t timeout] [-q queue_num] [-Qdst_queue ] [ -d ms_delay ] [-G]\n", p); 37 35 } 38 36 39 37 static int parse_attr_cb(const struct nlattr *attr, void *data) ··· 277 275 unsigned int buflen = 64 * 1024 + MNL_SOCKET_BUFFER_SIZE; 278 276 struct mnl_socket *nl; 279 277 struct nlmsghdr *nlh; 278 + uint32_t ooo_ids[16]; 280 279 unsigned int portid; 280 + int ooo_count = 0; 281 281 char *buf; 282 282 int ret; 283 283 ··· 312 308 313 309 ret = mnl_cb_run(buf, ret, 0, portid, queue_cb, NULL); 314 310 if (ret < 0) { 311 + /* bogus verdict mode will generate ENOENT error messages */ 312 + if (opts.bogus_verdict && errno == ENOENT) 313 + continue; 315 314 perror("mnl_cb_run"); 316 315 exit(EXIT_FAILURE); 317 316 } ··· 323 316 if (opts.delay_ms) 324 317 sleep_ms(opts.delay_ms); 325 318 326 - nlh = nfq_build_verdict(buf, id, opts.queue_num, opts.verdict); 327 - if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) { 328 - perror("mnl_socket_sendto"); 329 - exit(EXIT_FAILURE); 319 + if (opts.bogus_verdict) { 320 + for (int i = 0; i < 50; i++) { 321 + nlh = nfq_build_verdict(buf, id + 0x7FFFFFFF + i, 322 + opts.queue_num, opts.verdict); 323 + mnl_socket_sendto(nl, nlh, nlh->nlmsg_len); 324 + } 325 + } 326 + 327 + if (opts.out_of_order) { 328 + ooo_ids[ooo_count] = id; 329 + if (ooo_count >= 15) { 330 + for (ooo_count; ooo_count >= 0; ooo_count--) { 331 + nlh = nfq_build_verdict(buf, ooo_ids[ooo_count], 332 + opts.queue_num, opts.verdict); 333 + if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) { 334 + perror("mnl_socket_sendto"); 335 + exit(EXIT_FAILURE); 336 + } 337 + } 338 + ooo_count = 0; 339 + } else { 340 + ooo_count++; 341 + } 342 + } else { 343 + nlh = nfq_build_verdict(buf, id, opts.queue_num, opts.verdict); 344 + if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) { 345 + perror("mnl_socket_sendto"); 346 + exit(EXIT_FAILURE); 347 + } 330 348 } 331 349 } 332 350 ··· 364 332 { 365 333 int c; 366 334 367 - while ((c = getopt(argc, argv, "chvot:q:Q:d:G")) != -1) { 335 + while ((c = getopt(argc, argv, "chvoObt:q:Q:d:G")) != -1) { 368 336 switch (c) { 369 337 case 'c': 370 338 opts.count_packets = true; ··· 406 374 break; 407 375 case 'v': 408 376 opts.verbose++; 377 + break; 378 + case 'O': 379 + opts.out_of_order = true; 380 + break; 381 + case 'b': 382 + opts.bogus_verdict = true; 409 383 break; 410 384 } 411 385 }
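The -O path above buffers verdict IDs and releases them newest-first, so reinjection happens out of order. A standalone sketch of that batching logic, with a stub issue_verdict() standing in for nfq_build_verdict() plus mnl_socket_sendto(); the flush is written with an empty loop initializer, a slightly tidier form of the no-op `for (ooo_count; ...)` clause in the tool:

#include <stdint.h>
#include <stdio.h>

#define OOO_BATCH 16

/* Stub standing in for nfq_build_verdict() + mnl_socket_sendto(). */
static void issue_verdict(uint32_t id)
{
    printf("verdict for packet %u\n", id);
}

int main(void)
{
    uint32_t ooo_ids[OOO_BATCH];
    int ooo_count = 0;

    for (uint32_t id = 1; id <= 40; id++) {
        ooo_ids[ooo_count] = id;
        if (++ooo_count == OOO_BATCH) {
            /* flush newest-first: verdicts leave in reverse arrival order */
            while (ooo_count--)
                issue_verdict(ooo_ids[ooo_count]);
            ooo_count = 0;
        }
    }
    /* ids 33..40 stay buffered, like packets awaiting the tool's timeout */
    return 0;
}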
+71 -12
tools/testing/selftests/net/netfilter/nft_queue.sh
··· 11 11 timeout=5 12 12 13 13 SCTP_TEST_TIMEOUT=60 14 + STRESS_TEST_TIMEOUT=30 14 15 15 16 cleanup() 16 17 { ··· 720 719 fi 721 720 } 722 721 722 + check_tainted() 723 + { 724 + local msg="$1" 725 + 726 + if [ "$tainted_then" -ne 0 ];then 727 + return 728 + fi 729 + 730 + read tainted_now < /proc/sys/kernel/tainted 731 + if [ "$tainted_now" -eq 0 ];then 732 + echo "PASS: $msg" 733 + else 734 + echo "TAINT: $msg" 735 + dmesg 736 + ret=1 737 + fi 738 + } 739 + 740 + test_queue_stress() 741 + { 742 + read tainted_then < /proc/sys/kernel/tainted 743 + local i 744 + 745 + ip netns exec "$nsrouter" nft -f /dev/stdin <<EOF 746 + flush ruleset 747 + table inet t { 748 + chain forward { 749 + type filter hook forward priority 0; policy accept; 750 + 751 + queue flags bypass to numgen random mod 8 752 + } 753 + } 754 + EOF 755 + timeout "$STRESS_TEST_TIMEOUT" ip netns exec "$ns2" \ 756 + socat -u UDP-LISTEN:12345,fork,pf=ipv4 STDOUT > /dev/null & 757 + 758 + timeout "$STRESS_TEST_TIMEOUT" ip netns exec "$ns3" \ 759 + socat -u UDP-LISTEN:12345,fork,pf=ipv4 STDOUT > /dev/null & 760 + 761 + for i in $(seq 0 7); do 762 + ip netns exec "$nsrouter" timeout "$STRESS_TEST_TIMEOUT" \ 763 + ./nf_queue -q $i -t 2 -O -b > /dev/null & 764 + done 765 + 766 + ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \ 767 + ping -q -f 10.0.2.99 > /dev/null 2>&1 & 768 + ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \ 769 + ping -q -f 10.0.3.99 > /dev/null 2>&1 & 770 + ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \ 771 + ping -q -f "dead:2::99" > /dev/null 2>&1 & 772 + ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \ 773 + ping -q -f "dead:3::99" > /dev/null 2>&1 & 774 + 775 + busywait "$BUSYWAIT_TIMEOUT" udp_listener_ready "$ns2" 12345 776 + busywait "$BUSYWAIT_TIMEOUT" udp_listener_ready "$ns3" 12345 777 + 778 + for i in $(seq 1 4);do 779 + ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \ 780 + socat -u STDIN UDP-DATAGRAM:10.0.2.99:12345 < /dev/zero > /dev/null & 781 + ip netns exec "$ns1" timeout "$STRESS_TEST_TIMEOUT" \ 782 + socat -u STDIN UDP-DATAGRAM:10.0.3.99:12345 < /dev/zero > /dev/null & 783 + done 784 + 785 + wait 786 + 787 + check_tainted "concurrent queueing" 788 + } 789 + 723 790 test_queue_removal() 724 791 { 725 792 read tainted_then < /proc/sys/kernel/tainted ··· 811 742 812 743 ip netns exec "$ns1" nft flush ruleset 813 744 814 - if [ "$tainted_then" -ne 0 ];then 815 - return 816 - fi 817 - 818 - read tainted_now < /proc/sys/kernel/tainted 819 - if [ "$tainted_now" -eq 0 ];then 820 - echo "PASS: queue program exiting while packets queued" 821 - else 822 - echo "TAINT: queue program exiting while packets queued" 823 - dmesg 824 - ret=1 825 - fi 745 + check_tainted "queue program exiting while packets queued" 826 746 } 827 747 828 748 ip netns exec "$nsrouter" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null ··· 857 799 test_sctp_output 858 800 test_udp_nat_race 859 801 test_udp_gro_ct 802 + test_queue_stress 860 803 861 804 # should be last, adds vrf device in ns1 and changes routes 862 805 test_icmp_vrf
+197
tools/testing/selftests/net/srv6_iptunnel_cache.sh
··· 1 + #!/bin/bash
2 + # SPDX-License-Identifier: GPL-2.0
3 + #
4 + # author: Andrea Mayer <andrea.mayer@uniroma2.it>
5 + 
6 + # This test verifies that the seg6 lwtunnel does not share the dst_cache
7 + # between the input (forwarding) and output (locally generated) paths.
8 + #
9 + # A shared dst_cache allows a forwarded packet to populate the cache and a
10 + # subsequent locally generated packet to silently reuse that entry, bypassing
11 + # its own route lookup. To expose this, the SID is made reachable only for
12 + # forwarded traffic (via an ip rule matching iif) and blackholed for everything
13 + # else. A local ping on ns_router must always hit the blackhole;
14 + # if it succeeds after a forwarded packet has populated the
15 + # cache, the bug is confirmed.
16 + #
17 + # Both forwarded and local packets are pinned to the same CPU with taskset,
18 + # since dst_cache is per-cpu.
19 + #
20 + #
21 + # +--------------------+ +--------------------+
22 + # | ns_src | | ns_dst |
23 + # | | | |
24 + # | veth-s0 | | veth-d0 |
25 + # | fd00::1/64 | | fd01::2/64 |
26 + # +-------+------------+ +----------+---------+
27 + # | |
28 + # | +--------------------+ |
29 + # | | ns_router | |
30 + # | | | |
31 + # +------------+ veth-r0 veth-r1 +--------------+
32 + # | fd00::2 fd01::1 |
33 + # +--------------------+
34 + #
35 + #
36 + # ns_router: encap (main table)
37 + # +---------+---------------------------------------+
38 + # | dst | action |
39 + # +---------+---------------------------------------+
40 + # | cafe::1 | encap seg6 mode encap segs fc00::100 |
41 + # +---------+---------------------------------------+
42 + #
43 + # ns_router: post-encap SID resolution
44 + # +-------+------------+----------------------------+
45 + # | table | dst | action |
46 + # +-------+------------+----------------------------+
47 + # | 100 | fc00::100 | via fd01::2 dev veth-r1 |
48 + # +-------+------------+----------------------------+
49 + # | main | fc00::100 | blackhole |
50 + # +-------+------------+----------------------------+
51 + #
52 + # ns_router: ip rule
53 + # +------------------+------------------------------+
54 + # | match | action |
55 + # +------------------+------------------------------+
56 + # | iif veth-r0 | lookup 100 |
57 + # +------------------+------------------------------+
58 + #
59 + # ns_dst: SRv6 decap (main table)
60 + # +--------------+----------------------------------+
61 + # | SID | action |
62 + # +--------------+----------------------------------+
63 + # | fc00::100 | End.DT6 table 255 (local) |
64 + # +--------------+----------------------------------+
65 + 
66 + source lib.sh
67 + 
68 + readonly SID="fc00::100"
69 + readonly DEST="cafe::1"
70 + 
71 + readonly SRC_MAC="02:00:00:00:00:01"
72 + readonly RTR_R0_MAC="02:00:00:00:00:02"
73 + readonly RTR_R1_MAC="02:00:00:00:00:03"
74 + readonly DST_MAC="02:00:00:00:00:04"
75 + 
76 + cleanup()
77 + {
78 + cleanup_ns "${NS_SRC}" "${NS_RTR}" "${NS_DST}"
79 + }
80 + 
81 + check_prerequisites()
82 + {
83 + if ! command -v ip &>/dev/null; then
84 + echo "SKIP: ip tool not found"
85 + exit "${ksft_skip}"
86 + fi
87 + 
88 + if ! command -v ping &>/dev/null; then
89 + echo "SKIP: ping not found"
90 + exit "${ksft_skip}"
91 + fi
92 + 
93 + if ! command -v sysctl &>/dev/null; then
94 + echo "SKIP: sysctl not found"
95 + exit "${ksft_skip}"
96 + fi
97 + 
98 + if ! command -v taskset &>/dev/null; then
99 + echo "SKIP: taskset not found"
100 + exit "${ksft_skip}"
101 + fi
102 + }
103 + 
104 + setup()
105 + {
106 + setup_ns NS_SRC NS_RTR NS_DST
107 + 
108 + ip link add veth-s0 netns "${NS_SRC}" type veth \
109 + peer name veth-r0 netns "${NS_RTR}"
110 + ip link add veth-r1 netns "${NS_RTR}" type veth \
111 + peer name veth-d0 netns "${NS_DST}"
112 + 
113 + ip -n "${NS_SRC}" link set veth-s0 address "${SRC_MAC}"
114 + ip -n "${NS_RTR}" link set veth-r0 address "${RTR_R0_MAC}"
115 + ip -n "${NS_RTR}" link set veth-r1 address "${RTR_R1_MAC}"
116 + ip -n "${NS_DST}" link set veth-d0 address "${DST_MAC}"
117 + 
118 + # ns_src
119 + ip -n "${NS_SRC}" link set veth-s0 up
120 + ip -n "${NS_SRC}" addr add fd00::1/64 dev veth-s0 nodad
121 + ip -n "${NS_SRC}" -6 route add "${DEST}"/128 via fd00::2
122 + 
123 + # ns_router
124 + ip -n "${NS_RTR}" link set veth-r0 up
125 + ip -n "${NS_RTR}" addr add fd00::2/64 dev veth-r0 nodad
126 + ip -n "${NS_RTR}" link set veth-r1 up
127 + ip -n "${NS_RTR}" addr add fd01::1/64 dev veth-r1 nodad
128 + ip netns exec "${NS_RTR}" sysctl -qw net.ipv6.conf.all.forwarding=1
129 + 
130 + ip -n "${NS_RTR}" -6 route add "${DEST}"/128 \
131 + encap seg6 mode encap segs "${SID}" dev veth-r0
132 + ip -n "${NS_RTR}" -6 route add "${SID}"/128 table 100 \
133 + via fd01::2 dev veth-r1
134 + ip -n "${NS_RTR}" -6 route add blackhole "${SID}"/128
135 + ip -n "${NS_RTR}" -6 rule add iif veth-r0 lookup 100
136 + 
137 + # ns_dst
138 + ip -n "${NS_DST}" link set veth-d0 up
139 + ip -n "${NS_DST}" addr add fd01::2/64 dev veth-d0 nodad
140 + ip -n "${NS_DST}" addr add "${DEST}"/128 dev lo nodad
141 + ip -n "${NS_DST}" -6 route add "${SID}"/128 \
142 + encap seg6local action End.DT6 table 255 dev veth-d0
143 + ip -n "${NS_DST}" -6 route add fd00::/64 via fd01::1
144 + 
145 + # static neighbors
146 + ip -n "${NS_SRC}" -6 neigh add fd00::2 dev veth-s0 \
147 + lladdr "${RTR_R0_MAC}" nud permanent
148 + ip -n "${NS_RTR}" -6 neigh add fd00::1 dev veth-r0 \
149 + lladdr "${SRC_MAC}" nud permanent
150 + ip -n "${NS_RTR}" -6 neigh add fd01::2 dev veth-r1 \
151 + lladdr "${DST_MAC}" nud permanent
152 + ip -n "${NS_DST}" -6 neigh add fd01::1 dev veth-d0 \
153 + lladdr "${RTR_R1_MAC}" nud permanent
154 + }
155 + 
156 + test_cache_isolation()
157 + {
158 + RET=0
159 + 
160 + # local ping with empty cache: must fail (SID is blackholed)
161 + if ip netns exec "${NS_RTR}" taskset -c 0 \
162 + ping -c 1 -W 2 "${DEST}" &>/dev/null; then
163 + echo "SKIP: local ping succeeded, topology broken"
164 + exit "${ksft_skip}"
165 + fi
166 + 
167 + # forward from ns_src to populate the input cache
168 + if ! ip netns exec "${NS_SRC}" taskset -c 0 \
169 + ping -c 1 -W 2 "${DEST}" &>/dev/null; then
170 + echo "SKIP: forwarded ping failed, topology broken"
171 + exit "${ksft_skip}"
172 + fi
173 + 
174 + # local ping again: must still fail; if the output path reuses
175 + # the input cache, it bypasses the blackhole and the ping succeeds
176 + if ip netns exec "${NS_RTR}" taskset -c 0 \
177 + ping -c 1 -W 2 "${DEST}" &>/dev/null; then
178 + echo "FAIL: output path used dst cached by input path"
179 + RET="${ksft_fail}"
180 + else
181 + echo "PASS: output path dst_cache is independent"
182 + fi
183 + 
184 + return "${RET}"
185 + }
186 + 
187 + if [ "$(id -u)" -ne 0 ]; then
188 + echo "SKIP: Need root privileges"
189 + exit "${ksft_skip}"
190 + fi
191 + 
192 + trap cleanup EXIT
193 + 
194 + check_prerequisites
195 + setup
196 + test_cache_isolation
197 + exit "${RET}"
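Structurally, the property this script probes corresponds to keeping one cache per traffic direction in the seg6 lwtunnel state. The sketch below is illustrative only: dst_cache is a kernel per-CPU type, modeled here as an opaque struct so the sketch compiles on its own, and the field names are not the upstream layout.

#include <stdio.h>

/* dst_cache is a kernel type (a per-CPU cached route plus a cookie);
 * modeled here as an opaque struct for a self-contained sketch.
 */
struct dst_cache { void *percpu_slot; };

/* Illustrative (not upstream) seg6 lwtunnel state with split caches:
 * entries cached while forwarding can never be handed to local output.
 */
struct seg6_lwt_sketch {
    struct dst_cache cache_in;  /* input path: forwarded packets */
    struct dst_cache cache_out; /* output path: locally generated packets */
};

int main(void)
{
    printf("%zu bytes of per-tunnel cache state\n",
           sizeof(struct seg6_lwt_sketch));
    return 0;
}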
+11 -8
tools/testing/selftests/riscv/vector/validate_v_ptrace.c
··· 290 290 291 291 /* verify initial vsetvli settings */ 292 292 293 - if (is_xtheadvector_supported()) 293 + if (is_xtheadvector_supported()) { 294 294 EXPECT_EQ(5UL, regset_data->vtype); 295 - else 295 + } else { 296 296 EXPECT_EQ(9UL, regset_data->vtype); 297 + } 297 298 298 299 EXPECT_EQ(regset_data->vlenb, regset_data->vl); 299 300 EXPECT_EQ(vlenb, regset_data->vlenb); ··· 347 346 { 348 347 } 349 348 350 - #define VECTOR_1_0 BIT(0) 351 - #define XTHEAD_VECTOR_0_7 BIT(1) 349 + #define VECTOR_1_0 _BITUL(0) 350 + #define XTHEAD_VECTOR_0_7 _BITUL(1) 352 351 353 352 #define vector_test(x) ((x) & VECTOR_1_0) 354 353 #define xthead_test(x) ((x) & XTHEAD_VECTOR_0_7) ··· 620 619 621 620 /* verify initial vsetvli settings */ 622 621 623 - if (is_xtheadvector_supported()) 622 + if (is_xtheadvector_supported()) { 624 623 EXPECT_EQ(5UL, regset_data->vtype); 625 - else 624 + } else { 626 625 EXPECT_EQ(9UL, regset_data->vtype); 626 + } 627 627 628 628 EXPECT_EQ(regset_data->vlenb, regset_data->vl); 629 629 EXPECT_EQ(vlenb, regset_data->vlenb); ··· 829 827 830 828 /* verify initial vsetvli settings */ 831 829 832 - if (is_xtheadvector_supported()) 830 + if (is_xtheadvector_supported()) { 833 831 EXPECT_EQ(5UL, regset_data->vtype); 834 - else 832 + } else { 835 833 EXPECT_EQ(9UL, regset_data->vtype); 834 + } 836 835 837 836 EXPECT_EQ(regset_data->vlenb, regset_data->vl); 838 837 EXPECT_EQ(vlenb, regset_data->vlenb);
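The BIT() to _BITUL() switch matters because _BITUL() is guaranteed by the uapi <linux/const.h>, while BIT() is a kernel-side helper a userspace test cannot assume (that rationale is inferred from the headers, not spelled out in the diff). A quick check of what the flags expand to:

#include <linux/const.h>
#include <stdio.h>

#define VECTOR_1_0        _BITUL(0)
#define XTHEAD_VECTOR_0_7 _BITUL(1)

int main(void)
{
    /* _BITUL(x) expands to (1UL << (x)), so these are 0x1 and 0x2 */
    printf("%#lx %#lx\n", VECTOR_1_0, XTHEAD_VECTOR_0_7);
    return 0;
}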
+6 -2
tools/testing/vsock/util.c
··· 344 344 ret = send(fd, buf + nwritten, len - nwritten, flags); 345 345 timeout_check("send"); 346 346 347 - if (ret == 0 || (ret < 0 && errno != EINTR)) 347 + if (ret < 0 && errno == EINTR) 348 + continue; 349 + if (ret <= 0) 348 350 break; 349 351 350 352 nwritten += ret; ··· 398 396 ret = recv(fd, buf + nread, len - nread, flags); 399 397 timeout_check("recv"); 400 398 401 - if (ret == 0 || (ret < 0 && errno != EINTR)) 399 + if (ret < 0 && errno == EINTR) 400 + continue; 401 + if (ret <= 0) 402 402 break; 403 403 404 404 nread += ret;
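The reordering above fixes a subtle bug: the old combined check let a send()/recv() that failed with EINTR fall through, so the negative return value was added to the running byte count. The corrected pattern as a standalone sketch:

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Minimal sketch of the corrected pattern: retry on EINTR before the
 * byte counter is updated, so a -1 return can never be added to nread.
 */
static ssize_t recv_all(int fd, void *buf, size_t len, int flags)
{
    size_t nread = 0;
    ssize_t ret;

    while (nread < len) {
        ret = recv(fd, (char *)buf + nread, len - nread, flags);
        if (ret < 0 && errno == EINTR)
            continue; /* interrupted: just retry */
        if (ret <= 0)
            break; /* EOF or hard error */
        nread += ret;
    }
    return nread;
}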