Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'drm-misc-next-2025-12-01-1' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

Extra drm-misc-next for v6.19-rc1:

UAPI Changes:
- Add support for drm colorop pipeline.
- Add COLOR PIPELINE plane property.
- Add DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE.

Cross-subsystem Changes:
- Attempt to use higher order mappings in system heap allocator.
- Always taint kernel with sw-sync.

Core Changes:
- Small fixes to drm/gem.
- Support emergency restore to drm-client.
- Allocate and release fb_info in single place.
- Rework ttm pipelined eviction fence handling.

Driver Changes:
- Support the drm color pipeline in vkms, amdgpu.
- Add NVJPG driver for tegra.
- Assorted small fixes and updates to rockchip, bridge/dw-hdmi-qp,
panthor.
- Add ASL CS5263 DP-to-HDMI simple bridge.
- Add and improve support for LG LD070WX3-SL01 MIPI DSI, Samsung LTL106AL01,
  Raystar RFF500F-AWH-DNN, Winstar WF70A8SYJHLNGA,
Wanchanglong w552946aaa, Samsung SOFEF00, Lenovo X13s panel.
- Add support for it66122 to it66121.
- Support mali-G1 gpu in panthor.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patch.msgid.link/aa5cbd50-7676-4a59-bbed-e8428af86804@linux.intel.com

+7928 -818
+1
Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
··· 19 19 compatible: 20 20 enum: 21 21 - ite,it66121 22 + - ite,it66122 22 23 - ite,it6610 23 24 24 25 reg:
+2
Documentation/devicetree/bindings/display/bridge/simple-bridge.yaml
··· 27 27 - const: adi,adv7123 28 28 - enum: 29 29 - adi,adv7123 30 + - asl-tek,cs5263 30 31 - dumb-vga-dac 32 + - parade,ps185hdm 31 33 - radxa,ra620 32 34 - realtek,rtd2171 33 35 - ti,opa362
+1
Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml
··· 24 24 - raspberrypi,dsi-7inch 25 25 - startek,kd050hdfia020 26 26 - tdo,tl050hdv35 27 + - wanchanglong,w552946aaa 27 28 - wanchanglong,w552946aba 28 29 - const: ilitek,ili9881c 29 30
+60
Documentation/devicetree/bindings/display/panel/lg,ld070wx3-sl01.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/lg,ld070wx3-sl01.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: LG Corporation 7" WXGA TFT LCD panel 8 + 9 + maintainers: 10 + - Svyatoslav Ryhel <clamor95@gmail.com> 11 + 12 + allOf: 13 + - $ref: panel-common.yaml# 14 + 15 + properties: 16 + compatible: 17 + items: 18 + - const: lg,ld070wx3-sl01 19 + 20 + reg: 21 + maxItems: 1 22 + 23 + vdd-supply: true 24 + vcc-supply: true 25 + 26 + backlight: true 27 + port: true 28 + 29 + required: 30 + - compatible 31 + - vdd-supply 32 + - vcc-supply 33 + 34 + additionalProperties: false 35 + 36 + examples: 37 + - | 38 + #include <dt-bindings/gpio/gpio.h> 39 + 40 + dsi { 41 + #address-cells = <1>; 42 + #size-cells = <0>; 43 + 44 + panel@0 { 45 + compatible = "lg,ld070wx3-sl01"; 46 + reg = <0>; 47 + 48 + vdd-supply = <&vdd_3v3_lcd>; 49 + vcc-supply = <&vcc_1v8_lcd>; 50 + 51 + backlight = <&backlight>; 52 + 53 + port { 54 + endpoint { 55 + remote-endpoint = <&dsi0_out>; 56 + }; 57 + }; 58 + }; 59 + }; 60 + ...
+2
Documentation/devicetree/bindings/display/panel/panel-lvds.yaml
··· 59 59 # Jenson Display BL-JT60050-01A 7" WSVGA (1024x600) color TFT LCD LVDS panel 60 60 - jenson,bl-jt60050-01a 61 61 - tbs,a711-panel 62 + # Winstar WF70A8SYJHLNGA 7" WSVGA (1024x600) color TFT LCD LVDS panel 63 + - winstar,wf70a8syjhlnga 62 64 63 65 - const: panel-lvds 64 66
+4 -23
Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
··· 19 19 20 20 If the panel is more advanced a dedicated binding file is required. 21 21 22 + allOf: 23 + - $ref: panel-common.yaml# 24 + 22 25 properties: 23 26 24 27 compatible: ··· 45 42 - kingdisplay,kd097d04 46 43 # LG ACX467AKM-7 4.95" 1080×1920 LCD Panel 47 44 - lg,acx467akm-7 48 - # LG Corporation 7" WXGA TFT LCD panel 49 - - lg,ld070wx3-sl01 50 45 # LG Corporation 5" HD TFT LCD panel 51 46 - lg,lh500wx1-sd03 52 47 # Lincoln LCD197 5" 1080x1920 LCD panel ··· 57 56 - panasonic,vvx10f034n00 58 57 # Samsung s6e3fa7 1080x2220 based AMS559NK06 AMOLED panel 59 58 - samsung,s6e3fa7-ams559nk06 60 - # Samsung sofef00 1080x2280 AMOLED panel 61 - - samsung,sofef00 62 59 # Shangai Top Display Optoelectronics 7" TL070WSH30 1024x600 TFT LCD panel 63 60 - tdo,tl070wsh30 64 61 ··· 69 70 reset-gpios: true 70 71 port: true 71 72 power-supply: true 72 - vddio-supply: true 73 - 74 - allOf: 75 - - $ref: panel-common.yaml# 76 - - if: 77 - properties: 78 - compatible: 79 - enum: 80 - - samsung,sofef00 81 - then: 82 - properties: 83 - power-supply: false 84 - required: 85 - - vddio-supply 86 - else: 87 - properties: 88 - vddio-supply: false 89 - required: 90 - - power-supply 91 73 92 74 additionalProperties: false 93 75 94 76 required: 95 77 - compatible 78 + - power-supply 96 79 - reg 97 80 98 81 examples:
+4
Documentation/devicetree/bindings/display/panel/panel-simple.yaml
··· 270 270 - qiaodian,qd43003c0-40 271 271 # Shenzhen QiShenglong Industrialist Co., Ltd. Gopher 2b 4.3" 480(RGB)x272 TFT LCD panel 272 272 - qishenglong,gopher2b-lcd 273 + # Raystar Optronics, Inc. RFF500F-AWH-DNN 5.0" TFT 840x480 274 + - raystar,rff500f-awh-dnn 273 275 # Rocktech Displays Ltd. RK101II01D-CT 10.1" TFT 1280x800 274 276 - rocktech,rk101ii01d-ct 275 277 # Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel ··· 280 278 - rocktech,rk043fn48h 281 279 # Samsung Electronics 10.1" WXGA (1280x800) TFT LCD panel 282 280 - samsung,ltl101al01 281 + # Samsung Electronics 10.6" FWXGA (1366x768) TFT LCD panel 282 + - samsung,ltl106al01 283 283 # Samsung Electronics 10.1" WSVGA TFT LCD panel 284 284 - samsung,ltn101nt05 285 285 # Satoz SAT050AT40H12R2 5.0" WVGA TFT LCD panel
+5 -9
Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml
··· 9 9 maintainers: 10 10 - Maxime Ripard <mripard@kernel.org> 11 11 12 + allOf: 13 + - $ref: panel-common.yaml# 14 + 12 15 properties: 13 16 compatible: 14 17 const: ronbo,rb070d30 ··· 21 18 22 19 power-gpios: 23 20 description: GPIO used for the power pin 24 - maxItems: 1 25 - 26 - reset-gpios: 27 - description: GPIO used for the reset pin 28 21 maxItems: 1 29 22 30 23 shlr-gpios: ··· 34 35 vcc-lcd-supply: 35 36 description: Power regulator 36 37 37 - backlight: 38 - description: Backlight used by the panel 39 - $ref: /schemas/types.yaml#/definitions/phandle 40 - 41 38 required: 42 39 - compatible 43 40 - power-gpios ··· 42 47 - shlr-gpios 43 48 - updn-gpios 44 49 - vcc-lcd-supply 50 + - port 45 51 46 - additionalProperties: false 52 + unevaluatedProperties: false
+79
Documentation/devicetree/bindings/display/panel/samsung,sofef00.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/samsung,sofef00.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Samsung SOFEF00 AMOLED DDIC 8 + 9 + description: The SOFEF00 is display driver IC with connected panel. 10 + 11 + maintainers: 12 + - David Heidelberg <david@ixit.cz> 13 + 14 + allOf: 15 + - $ref: panel-common.yaml# 16 + 17 + properties: 18 + compatible: 19 + items: 20 + - enum: 21 + # Samsung 6.01 inch, 1080x2160 pixels, 18:9 ratio 22 + - samsung,sofef00-ams601nt22 23 + # Samsung 6.28 inch, 1080x2280 pixels, 19:9 ratio 24 + - samsung,sofef00-ams628nw01 25 + - const: samsung,sofef00 26 + 27 + reg: 28 + maxItems: 1 29 + 30 + poc-supply: 31 + description: POC regulator 32 + 33 + vci-supply: 34 + description: VCI regulator 35 + 36 + vddio-supply: 37 + description: VDD regulator 38 + 39 + required: 40 + - compatible 41 + - reset-gpios 42 + - poc-supply 43 + - vci-supply 44 + - vddio-supply 45 + 46 + unevaluatedProperties: false 47 + 48 + examples: 49 + - | 50 + #include <dt-bindings/gpio/gpio.h> 51 + 52 + dsi { 53 + #address-cells = <1>; 54 + #size-cells = <0>; 55 + 56 + panel@0 { 57 + compatible = "samsung,sofef00-ams628nw01", "samsung,sofef00"; 58 + reg = <0>; 59 + 60 + vddio-supply = <&vreg_l14a_1p88>; 61 + vci-supply = <&s2dos05_buck1>; 62 + poc-supply = <&s2dos05_ldo1>; 63 + 64 + te-gpios = <&tlmm 10 GPIO_ACTIVE_HIGH>; 65 + reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>; 66 + 67 + pinctrl-0 = <&panel_active>; 68 + pinctrl-1 = <&panel_suspend>; 69 + pinctrl-names = "default", "sleep"; 70 + 71 + port { 72 + panel_in: endpoint { 73 + remote-endpoint = <&mdss_dsi0_out>; 74 + }; 75 + }; 76 + }; 77 + }; 78 + 79 + ...
+11
Documentation/devicetree/bindings/display/rockchip/rockchip,rk3588-dw-hdmi-qp.yaml
··· 113 113 description: 114 114 Additional HDMI QP related data is accessed through VO GRF regs. 115 115 116 + frl-enable-gpios: 117 + description: 118 + Optional GPIO line to be asserted when operating in HDMI 2.1 FRL mode and 119 + deasserted for HDMI 1.4/2.0 TMDS. It can be used to control external 120 + voltage bias for HDMI data lines. When not present the HDMI encoder will 121 + operate in TMDS mode only. 122 + maxItems: 1 123 + 116 124 required: 117 125 - compatible 118 126 - reg ··· 140 132 examples: 141 133 - | 142 134 #include <dt-bindings/clock/rockchip,rk3588-cru.h> 135 + #include <dt-bindings/gpio/gpio.h> 143 136 #include <dt-bindings/interrupt-controller/arm-gic.h> 144 137 #include <dt-bindings/interrupt-controller/irq.h> 138 + #include <dt-bindings/pinctrl/rockchip.h> 145 139 #include <dt-bindings/power/rk3588-power.h> 146 140 #include <dt-bindings/reset/rockchip,rk3588-cru.h> 147 141 ··· 174 164 rockchip,grf = <&sys_grf>; 175 165 rockchip,vo-grf = <&vo1_grf>; 176 166 #sound-dai-cells = <0>; 167 + frl-enable-gpios = <&gpio4 RK_PB1 GPIO_ACTIVE_LOW>; 177 168 178 169 ports { 179 170 #address-cells = <1>;
+4
Documentation/devicetree/bindings/vendor-prefixes.yaml
··· 176 176 description: All Sensors Corporation 177 177 "^asix,.*": 178 178 description: ASIX Electronics Corporation 179 + "^asl-tek,.*": 180 + description: ASL Xiamen Technology Co., Ltd. 179 181 "^aspeed,.*": 180 182 description: ASPEED Technology Inc. 181 183 "^asrock,.*": ··· 1327 1325 description: Raumfeld GmbH 1328 1326 "^raydium,.*": 1329 1327 description: Raydium Semiconductor Corp. 1328 + "^raystar,.*": 1329 + description: Raystar Optronics, Inc. 1330 1330 "^rda,.*": 1331 1331 description: Unisoc Communications, Inc. 1332 1332 "^realtek,.*":
+15
Documentation/gpu/drm-kms.rst
··· 413 413 .. kernel-doc:: drivers/gpu/drm/drm_panic.c 414 414 :export: 415 415 416 + Colorop Abstraction 417 + =================== 418 + 419 + .. kernel-doc:: drivers/gpu/drm/drm_colorop.c 420 + :doc: overview 421 + 422 + Colorop Functions Reference 423 + --------------------------- 424 + 425 + .. kernel-doc:: include/drm/drm_colorop.h 426 + :internal: 427 + 428 + .. kernel-doc:: drivers/gpu/drm/drm_colorop.c 429 + :export: 430 + 416 431 Display Modes Function Reference 417 432 ================================ 418 433
+378
Documentation/gpu/rfc/color_pipeline.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 2 + 3 + ======================== 4 + Linux Color Pipeline API 5 + ======================== 6 + 7 + What problem are we solving? 8 + ============================ 9 + 10 + We would like to support pre-, and post-blending complex color 11 + transformations in display controller hardware in order to allow for 12 + HW-supported HDR use-cases, as well as to provide support to 13 + color-managed applications, such as video or image editors. 14 + 15 + It is possible to support an HDR output on HW supporting the Colorspace 16 + and HDR Metadata drm_connector properties, but that requires the 17 + compositor or application to render and compose the content into one 18 + final buffer intended for display. Doing so is costly. 19 + 20 + Most modern display HW offers various 1D LUTs, 3D LUTs, matrices, and other 21 + operations to support color transformations. These operations are often 22 + implemented in fixed-function HW and therefore much more power efficient than 23 + performing similar operations via shaders or CPU. 24 + 25 + We would like to make use of this HW functionality to support complex color 26 + transformations with no, or minimal CPU or shader load. The switch between HW 27 + fixed-function blocks and shaders/CPU must be seamless with no visible 28 + difference when fallback to shaders/CPU is necessary at any time. 29 + 30 + 31 + How are other OSes solving this problem? 32 + ======================================== 33 + 34 + The most widely supported use-cases regard HDR content, whether video or 35 + gaming. 36 + 37 + Most OSes will specify the source content format (color gamut, encoding transfer 38 + function, and other metadata, such as max and average light levels) to a driver. 39 + Drivers will then program their fixed-function HW accordingly to map from a 40 + source content buffer's space to a display's space. 
41 + 42 + When fixed-function HW is not available the compositor will assemble a shader to 43 + ask the GPU to perform the transformation from the source content format to the 44 + display's format. 45 + 46 + A compositor's mapping function and a driver's mapping function are usually 47 + entirely separate concepts. On OSes where a HW vendor has no insight into 48 + closed-source compositor code such a vendor will tune their color management 49 + code to visually match the compositor's. On other OSes, where both mapping 50 + functions are open to an implementer they will ensure both mappings match. 51 + 52 + This results in mapping algorithm lock-in, meaning that no-one alone can 53 + experiment with or introduce new mapping algorithms and achieve 54 + consistent results regardless of which implementation path is taken. 55 + 56 + Why is Linux different? 57 + ======================= 58 + 59 + Unlike other OSes, where there is one compositor for one or more drivers, on 60 + Linux we have a many-to-many relationship. Many compositors; many drivers. 61 + In addition each compositor vendor or community has their own view of how 62 + color management should be done. This is what makes Linux so beautiful. 63 + 64 + This means that a HW vendor can now no longer tune their driver to one 65 + compositor, as tuning it to one could make it look fairly different from 66 + another compositor's color mapping. 67 + 68 + We need a better solution. 69 + 70 + 71 + Descriptive API 72 + =============== 73 + 74 + An API that describes the source and destination colorspaces is a descriptive 75 + API. It describes the input and output color spaces but does not describe 76 + how precisely they should be mapped. Such a mapping includes many minute 77 + design decisions that can greatly affect the look of the final result. 78 + 79 + It is not feasible to describe such mapping with enough detail to ensure the 80 + same result from each implementation. 
In fact, these mappings are a very active 81 + research area. 82 + 83 + 84 + Prescriptive API 85 + ================ 86 + 87 + A prescriptive API describes not the source and destination colorspaces. It 88 + instead prescribes a recipe for how to manipulate pixel values to arrive at the 89 + desired outcome. 90 + 91 + This recipe is generally an ordered list of straight-forward operations, 92 + with clear mathematical definitions, such as 1D LUTs, 3D LUTs, matrices, 93 + or other operations that can be described in a precise manner. 94 + 95 + 96 + The Color Pipeline API 97 + ====================== 98 + 99 + HW color management pipelines can significantly differ between HW 100 + vendors in terms of availability, ordering, and capabilities of HW 101 + blocks. This makes a common definition of color management blocks and 102 + their ordering nigh impossible. Instead we are defining an API that 103 + allows user space to discover the HW capabilities in a generic manner, 104 + agnostic of specific drivers and hardware. 105 + 106 + 107 + drm_colorop Object 108 + ================== 109 + 110 + To support the definition of color pipelines we define the DRM core 111 + object type drm_colorop. Individual drm_colorop objects will be chained 112 + via the NEXT property of a drm_colorop to constitute a color pipeline. 113 + Each drm_colorop object is unique, i.e., even if multiple color 114 + pipelines have the same operation they won't share the same drm_colorop 115 + object to describe that operation. 116 + 117 + Note that drivers are not expected to map drm_colorop objects statically 118 + to specific HW blocks. The mapping of drm_colorop objects is entirely a 119 + driver-internal detail and can be as dynamic or static as a driver needs 120 + it to be. See more in the Driver Implementation Guide section below. 
121 + 122 + Each drm_colorop has three core properties: 123 + 124 + TYPE: An enumeration property, defining the type of transformation, such as 125 + * enumerated curve 126 + * custom (uniform) 1D LUT 127 + * 3x3 matrix 128 + * 3x4 matrix 129 + * 3D LUT 130 + * etc. 131 + 132 + Depending on the type of transformation other properties will describe 133 + more details. 134 + 135 + BYPASS: A boolean property that can be used to easily put a block into 136 + bypass mode. The BYPASS property is not mandatory for a colorop, as long 137 + as the entire pipeline can get bypassed by setting the COLOR_PIPELINE on 138 + a plane to '0'. 139 + 140 + NEXT: The ID of the next drm_colorop in a color pipeline, or 0 if this 141 + drm_colorop is the last in the chain. 142 + 143 + An example of a drm_colorop object might look like one of these:: 144 + 145 + /* 1D enumerated curve */ 146 + Color operation 42 147 + ├─ "TYPE": immutable enum {1D enumerated curve, 1D LUT, 3x3 matrix, 3x4 matrix, 3D LUT, etc.} = 1D enumerated curve 148 + ├─ "BYPASS": bool {true, false} 149 + ├─ "CURVE_1D_TYPE": enum {sRGB EOTF, sRGB inverse EOTF, PQ EOTF, PQ inverse EOTF, …} 150 + └─ "NEXT": immutable color operation ID = 43 151 + 152 + /* custom 4k entry 1D LUT */ 153 + Color operation 52 154 + ├─ "TYPE": immutable enum {1D enumerated curve, 1D LUT, 3x3 matrix, 3x4 matrix, 3D LUT, etc.} = 1D LUT 155 + ├─ "BYPASS": bool {true, false} 156 + ├─ "SIZE": immutable range = 4096 157 + ├─ "DATA": blob 158 + └─ "NEXT": immutable color operation ID = 0 159 + 160 + /* 17^3 3D LUT */ 161 + Color operation 72 162 + ├─ "TYPE": immutable enum {1D enumerated curve, 1D LUT, 3x3 matrix, 3x4 matrix, 3D LUT, etc.} = 3D LUT 163 + ├─ "BYPASS": bool {true, false} 164 + ├─ "SIZE": immutable range = 17 165 + ├─ "DATA": blob 166 + └─ "NEXT": immutable color operation ID = 73 167 + 168 + drm_colorop extensibility 169 + ------------------------- 170 + 171 + Unlike existing DRM core objects, like &drm_plane, drm_colorop is not 172 + 
extensible. This simplifies implementations and keeps all functionality 173 + for managing &drm_colorop objects in the DRM core. 174 + 175 + If there is a need one may introduce a simple &drm_colorop_funcs 176 + function table in the future, for example to support an IN_FORMATS 177 + property on a &drm_colorop. 178 + 179 + If a driver requires the ability to create a driver-specific colorop 180 + object they will need to add &drm_colorop func table support with 181 + support for the usual functions, like destroy, atomic_duplicate_state, 182 + and atomic_destroy_state. 183 + 184 + 185 + COLOR_PIPELINE Plane Property 186 + ============================= 187 + 188 + Color Pipelines are created by a driver and advertised via a new 189 + COLOR_PIPELINE enum property on each plane. Values of the property 190 + always include object id 0, which is the default and means all color 191 + processing is disabled. Additional values will be the object IDs of the 192 + first drm_colorop in a pipeline. A driver can create and advertise none, 193 + one, or more possible color pipelines. A DRM client will select a color 194 + pipeline by setting the COLOR PIPELINE to the respective value. 195 + 196 + NOTE: Many DRM clients will set enumeration properties via the string 197 + value, often hard-coding it. Since this enumeration is generated based 198 + on the colorop object IDs it is important to perform the Color Pipeline 199 + Discovery, described below, instead of hard-coding color pipeline 200 + assignment. Drivers might generate the enum strings dynamically. 201 + Hard-coded strings might only work for specific drivers on specific 202 + pieces of HW. Color Pipeline Discovery can work universally, as long as 203 + drivers implement the required color operations. 204 + 205 + The COLOR_PIPELINE property is only exposed when the 206 + DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set. 
Drivers shall ignore any 207 + existing pre-blend color operations when this cap is set, such as 208 + COLOR_RANGE and COLOR_ENCODING. If drivers want to support COLOR_RANGE 209 + or COLOR_ENCODING functionality when the color pipeline client cap is 210 + set, they are expected to expose colorops in the pipeline to allow for 211 + the appropriate color transformation. 212 + 213 + Setting of the COLOR_PIPELINE plane property or drm_colorop properties 214 + is only allowed for userspace that sets this client cap. 215 + 216 + An example of a COLOR_PIPELINE property on a plane might look like this:: 217 + 218 + Plane 10 219 + ├─ "TYPE": immutable enum {Overlay, Primary, Cursor} = Primary 220 + ├─ … 221 + └─ "COLOR_PIPELINE": enum {0, 42, 52} = 0 222 + 223 + 224 + Color Pipeline Discovery 225 + ======================== 226 + 227 + A DRM client wanting color management on a drm_plane will: 228 + 229 + 1. Get the COLOR_PIPELINE property of the plane 230 + 2. iterate all COLOR_PIPELINE enum values 231 + 3. for each enum value walk the color pipeline (via the NEXT pointers) 232 + and see if the available color operations are suitable for the 233 + desired color management operations 234 + 235 + If userspace encounters an unknown or unsuitable color operation during 236 + discovery it does not need to reject the entire color pipeline outright, 237 + as long as the unknown or unsuitable colorop has a "BYPASS" property. 238 + Drivers will ensure that a bypassed block does not have any effect. 
239 + 240 + An example of chained properties to define an AMD pre-blending color 241 + pipeline might look like this:: 242 + 243 + Plane 10 244 + ├─ "TYPE" (immutable) = Primary 245 + └─ "COLOR_PIPELINE": enum {0, 44} = 0 246 + 247 + Color operation 44 248 + ├─ "TYPE" (immutable) = 1D enumerated curve 249 + ├─ "BYPASS": bool 250 + ├─ "CURVE_1D_TYPE": enum {sRGB EOTF, PQ EOTF} = sRGB EOTF 251 + └─ "NEXT" (immutable) = 45 252 + 253 + Color operation 45 254 + ├─ "TYPE" (immutable) = 3x4 Matrix 255 + ├─ "BYPASS": bool 256 + ├─ "DATA": blob 257 + └─ "NEXT" (immutable) = 46 258 + 259 + Color operation 46 260 + ├─ "TYPE" (immutable) = 1D enumerated curve 261 + ├─ "BYPASS": bool 262 + ├─ "CURVE_1D_TYPE": enum {sRGB Inverse EOTF, PQ Inverse EOTF} = sRGB EOTF 263 + └─ "NEXT" (immutable) = 47 264 + 265 + Color operation 47 266 + ├─ "TYPE" (immutable) = 1D LUT 267 + ├─ "SIZE": immutable range = 4096 268 + ├─ "DATA": blob 269 + └─ "NEXT" (immutable) = 48 270 + 271 + Color operation 48 272 + ├─ "TYPE" (immutable) = 3D LUT 273 + ├─ "DATA": blob 274 + └─ "NEXT" (immutable) = 49 275 + 276 + Color operation 49 277 + ├─ "TYPE" (immutable) = 1D enumerated curve 278 + ├─ "BYPASS": bool 279 + ├─ "CURVE_1D_TYPE": enum {sRGB EOTF, PQ EOTF} = sRGB EOTF 280 + └─ "NEXT" (immutable) = 0 281 + 282 + 283 + Color Pipeline Programming 284 + ========================== 285 + 286 + Once a DRM client has found a suitable pipeline it will: 287 + 288 + 1. Set the COLOR_PIPELINE enum value to the one pointing at the first 289 + drm_colorop object of the desired pipeline 290 + 2. Set the properties for all drm_colorop objects in the pipeline to the 291 + desired values, setting BYPASS to true for unused drm_colorop blocks, 292 + and false for enabled drm_colorop blocks 293 + 3. 
Perform (TEST_ONLY or not) atomic commit with all the other KMS 294 + states it wishes to change 295 + 296 + To configure the pipeline for an HDR10 PQ plane and blending in linear 297 + space, a compositor might perform an atomic commit with the following 298 + property values:: 299 + 300 + Plane 10 301 + └─ "COLOR_PIPELINE" = 42 302 + 303 + Color operation 42 304 + └─ "BYPASS" = true 305 + 306 + Color operation 44 307 + └─ "BYPASS" = true 308 + 309 + Color operation 45 310 + └─ "BYPASS" = true 311 + 312 + Color operation 46 313 + └─ "BYPASS" = true 314 + 315 + Color operation 47 316 + ├─ "DATA" = Gamut mapping + tone mapping + night mode 317 + └─ "BYPASS" = false 318 + 319 + Color operation 48 320 + ├─ "CURVE_1D_TYPE" = PQ EOTF 321 + └─ "BYPASS" = false 322 + 323 + 324 + Driver Implementer's Guide 325 + ========================== 326 + 327 + What does this all mean for driver implementations? As noted above the 328 + colorops can map to HW directly but don't need to do so. Here are some 329 + suggestions on how to think about creating your color pipelines: 330 + 331 + - Try to expose pipelines that use already defined colorops, even if 332 + your hardware pipeline is split differently. This allows existing 333 + userspace to immediately take advantage of the hardware. 334 + 335 + - Additionally, try to expose your actual hardware blocks as colorops. 336 + Define new colorop types where you believe it can offer significant 337 + benefits if userspace learns to program them. 338 + 339 + - Avoid defining new colorops for compound operations with very narrow 340 + scope. If you have a hardware block for a special operation that 341 + cannot be split further, you can expose that as a new colorop type. 342 + However, try to not define colorops for "use cases", especially if 343 + they require you to combine multiple hardware blocks. 344 + 345 + - Design new colorops as prescriptive, not descriptive; by the 346 + mathematical formula, not by the assumed input and output. 
347 + 348 + A defined colorop type must be deterministic. The exact behavior of the 349 + colorop must be documented entirely, whether via a mathematical formula 350 + or some other description. Its operation can depend only on its 351 + properties and input and nothing else, allowed error tolerance 352 + notwithstanding. 353 + 354 + 355 + Driver Forward/Backward Compatibility 356 + ===================================== 357 + 358 + As this is uAPI drivers can't regress color pipelines that have been 359 + introduced for a given HW generation. New HW generations are free to 360 + abandon color pipelines advertised for previous generations. 361 + Nevertheless, it can be beneficial to carry support for existing color 362 + pipelines forward as those will likely already have support in DRM 363 + clients. 364 + 365 + Introducing new colorops to a pipeline is fine, as long as they can be 366 + bypassed or are purely informational. DRM clients implementing support 367 + for the pipeline can always skip unknown properties as long as they can 368 + be confident that doing so will not cause unexpected results. 369 + 370 + If a new colorop doesn't fall into one of the above categories 371 + (bypassable or informational) the modified pipeline would be unusable 372 + for user space. In this case a new pipeline should be defined. 373 + 374 + 375 + References 376 + ========== 377 + 378 + 1. https://lore.kernel.org/dri-devel/QMers3awXvNCQlyhWdTtsPwkp5ie9bze_hD5nAccFW7a_RXlWjYB7MoUW_8CKLT2bSQwIXVi5H6VULYIxCdgvryZoAoJnC5lZgyK1QWn488=@emersion.fr/
+3
Documentation/gpu/rfc/index.rst
··· 35 35 .. toctree:: 36 36 37 37 i915_vm_bind.rst 38 + 39 + .. toctree:: 40 + color_pipeline.rst
+7
MAINTAINERS
··· 8088 8088 F: Documentation/devicetree/bindings/display/panel/samsung,s6e3ha8.yaml 8089 8089 F: drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c 8090 8090 8091 + DRM DRIVER FOR SAMSUNG SOFEF00 DDIC 8092 + M: David Heidelberg <david@ixit.cz> 8093 + M: Casey Connolly <casey.connolly@linaro.org> 8094 + S: Maintained 8095 + F: Documentation/devicetree/bindings/display/panel/samsung,sofef00.yaml 8096 + F: drivers/gpu/drm/panel/panel-samsung-sofef00.c 8097 + 8091 8098 DRM DRIVER FOR SHARP MEMORY LCD 8092 8099 M: Alex Lanzano <lanzano.alex@gmail.com> 8093 8100 S: Maintained
+11 -9
drivers/dma-buf/dma-fence.c
··· 997 997 */ 998 998 void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq) 999 999 { 1000 - const char __rcu *timeline; 1001 - const char __rcu *driver; 1000 + const char __rcu *timeline = ""; 1001 + const char __rcu *driver = ""; 1002 + const char *signaled = ""; 1002 1003 1003 1004 rcu_read_lock(); 1004 1005 1005 - timeline = dma_fence_timeline_name(fence); 1006 - driver = dma_fence_driver_name(fence); 1006 + if (!dma_fence_is_signaled(fence)) { 1007 + timeline = dma_fence_timeline_name(fence); 1008 + driver = dma_fence_driver_name(fence); 1009 + signaled = "un"; 1010 + } 1007 1011 1008 - seq_printf(seq, "%s %s seq %llu %ssignalled\n", 1009 - rcu_dereference(driver), 1010 - rcu_dereference(timeline), 1011 - fence->seqno, 1012 - dma_fence_is_signaled(fence) ? "" : "un"); 1012 + seq_printf(seq, "%llu:%llu %s %s %ssignalled\n", 1013 + fence->context, fence->seqno, timeline, driver, 1014 + signaled); 1013 1015 1014 1016 rcu_read_unlock(); 1015 1017 }
+24 -9
drivers/dma-buf/heaps/system_heap.c
··· 186 186 struct system_heap_buffer *buffer = dmabuf->priv; 187 187 struct sg_table *table = &buffer->sg_table; 188 188 unsigned long addr = vma->vm_start; 189 - struct sg_page_iter piter; 190 - int ret; 189 + unsigned long pgoff = vma->vm_pgoff; 190 + struct scatterlist *sg; 191 + int i, ret; 191 192 192 - for_each_sgtable_page(table, &piter, vma->vm_pgoff) { 193 - struct page *page = sg_page_iter_page(&piter); 193 + for_each_sgtable_sg(table, sg, i) { 194 + unsigned long n = sg->length >> PAGE_SHIFT; 194 195 195 - ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, 196 - vma->vm_page_prot); 196 + if (pgoff < n) 197 + break; 198 + pgoff -= n; 199 + } 200 + 201 + for (; sg && addr < vma->vm_end; sg = sg_next(sg)) { 202 + unsigned long n = (sg->length >> PAGE_SHIFT) - pgoff; 203 + struct page *page = sg_page(sg) + pgoff; 204 + unsigned long size = n << PAGE_SHIFT; 205 + 206 + if (addr + size > vma->vm_end) 207 + size = vma->vm_end - addr; 208 + 209 + ret = remap_pfn_range(vma, addr, page_to_pfn(page), 210 + size, vma->vm_page_prot); 197 211 if (ret) 198 212 return ret; 199 - addr += PAGE_SIZE; 200 - if (addr >= vma->vm_end) 201 - return 0; 213 + 214 + addr += size; 215 + pgoff = 0; 202 216 } 217 + 203 218 return 0; 204 219 } 205 220
+4
drivers/dma-buf/sw_sync.c
··· 8 8 #include <linux/file.h> 9 9 #include <linux/fs.h> 10 10 #include <linux/uaccess.h> 11 + #include <linux/panic.h> 11 12 #include <linux/slab.h> 12 13 #include <linux/sync_file.h> 13 14 ··· 349 348 struct sync_pt *pt; 350 349 struct sync_file *sync_file; 351 350 struct sw_sync_create_fence_data data; 351 + 352 + /* SW sync fence are inherently unsafe and can deadlock the kernel */ 353 + add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); 352 354 353 355 if (fd < 0) 354 356 return fd;
+3 -1
drivers/gpu/drm/Makefile
··· 41 41 drm_bridge.o \ 42 42 drm_cache.o \ 43 43 drm_color_mgmt.o \ 44 + drm_colorop.o \ 44 45 drm_connector.o \ 45 46 drm_crtc.o \ 46 47 drm_displayid.o \ ··· 77 76 drm-$(CONFIG_DRM_CLIENT) += \ 78 77 drm_client.o \ 79 78 drm_client_event.o \ 80 - drm_client_modeset.o 79 + drm_client_modeset.o \ 80 + drm_client_sysrq.o 81 81 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o 82 82 drm-$(CONFIG_COMPAT) += drm_ioc32.o 83 83 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 2185 2185 } else { 2186 2186 drm_sched_entity_destroy(&adev->mman.high_pr); 2187 2187 drm_sched_entity_destroy(&adev->mman.low_pr); 2188 - dma_fence_put(man->move); 2189 - man->move = NULL; 2188 + /* Drop all the old fences since re-creating the scheduler entities 2189 + * will allocate new contexts. 2190 + */ 2191 + ttm_resource_manager_cleanup(man); 2190 2192 } 2191 2193 2192 2194 /* this just adjusts TTM size idea, which sets lpfn to the correct value */
+2 -1
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
··· 39 39 amdgpu_dm_psr.o \ 40 40 amdgpu_dm_replay.o \ 41 41 amdgpu_dm_quirks.o \ 42 - amdgpu_dm_wb.o 42 + amdgpu_dm_wb.o \ 43 + amdgpu_dm_colorop.o 43 44 44 45 ifdef CONFIG_DRM_AMD_DC_FP 45 46 AMDGPUDM += dc_fpu.o
+4
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 5888 5888 5889 5889 *color_space = COLOR_SPACE_SRGB; 5890 5890 5891 + /* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */ 5892 + if (plane_state->state && plane_state->state->plane_color_pipeline) 5893 + return 0; 5894 + 5891 5895 /* DRM color properties only affect non-RGB formats. */ 5892 5896 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 5893 5897 return 0;
+748 -20
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
··· 26 26 #include "amdgpu.h" 27 27 #include "amdgpu_mode.h" 28 28 #include "amdgpu_dm.h" 29 + #include "amdgpu_dm_colorop.h" 29 30 #include "dc.h" 30 31 #include "modules/color/color_gamma.h" 31 32 32 33 /** 33 34 * DOC: overview 35 + * 36 + * We have three types of color management in the AMD display driver. 37 + * 1. the legacy &drm_crtc DEGAMMA, CTM, and GAMMA properties 38 + * 2. AMD driver private color management on &drm_plane and &drm_crtc 39 + * 3. AMD plane color pipeline 40 + * 41 + * The CRTC properties are the original color management. When they were 42 + * implemented per-plane color management was not a thing yet. Because 43 + * of that we could get away with plumbing the DEGAMMA and CTM 44 + * properties to pre-blending HW functions. This is incompatible with 45 + * per-plane color management, such as via the AMD private properties or 46 + * the new drm_plane color pipeline. The only compatible CRTC property 47 + * with per-plane color management is the GAMMA property as it is 48 + * applied post-blending. 49 + * 50 + * The AMD driver private color management properties are only exposed 51 + * when the kernel is built explicitly with -DAMD_PRIVATE_COLOR. They 52 + * are temporary building blocks on the path to full-fledged &drm_plane 53 + * and &drm_crtc color pipelines and lay the driver's groundwork for the 54 + * color pipelines. 55 + * 56 + * The AMD plane color pipeline describes AMD's &drm_colorops via the 57 + * &drm_plane's COLOR_PIPELINE property. 58 + * 59 + * drm_crtc Properties 60 + * ------------------- 34 61 * 35 62 * The DC interface to HW gives us the following color management blocks 36 63 * per pipe (surface): ··· 69 42 * - Surface regamma LUT (normalized) 70 43 * - Output CSC (normalized) 71 44 * 72 - * But these aren't a direct mapping to DRM color properties. 
The current DRM 73 - * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware 74 - * is essentially giving: 45 + * But these aren't a direct mapping to DRM color properties. The 46 + * current DRM interface exposes CRTC degamma, CRTC CTM and CRTC regamma 47 + * while our hardware is essentially giving: 75 48 * 76 49 * Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM 77 50 * 78 - * The input gamma LUT block isn't really applicable here since it operates 79 - * on the actual input data itself rather than the HW fp representation. The 80 - * input and output CSC blocks are technically available to use as part of 81 - * the DC interface but are typically used internally by DC for conversions 82 - * between color spaces. These could be blended together with user 83 - * adjustments in the future but for now these should remain untouched. 51 + * The input gamma LUT block isn't really applicable here since it 52 + * operates on the actual input data itself rather than the HW fp 53 + * representation. The input and output CSC blocks are technically 54 + * available to use as part of the DC interface but are typically used 55 + * internally by DC for conversions between color spaces. These could be 56 + * blended together with user adjustments in the future but for now 57 + * these should remain untouched. 84 58 * 85 - * The pipe blending also happens after these blocks so we don't actually 86 - * support any CRTC props with correct blending with multiple planes - but we 87 - * can still support CRTC color management properties in DM in most single 88 - * plane cases correctly with clever management of the DC interface in DM. 
59 + * The pipe blending also happens after these blocks so we don't 60 + * actually support any CRTC props with correct blending with multiple 61 + * planes - but we can still support CRTC color management properties in 62 + * DM in most single plane cases correctly with clever management of the 63 + * DC interface in DM. 89 64 * 90 - * As per DRM documentation, blocks should be in hardware bypass when their 91 - * respective property is set to NULL. A linear DGM/RGM LUT should also 92 - * considered as putting the respective block into bypass mode. 65 + * As per DRM documentation, blocks should be in hardware bypass when 66 + * their respective property is set to NULL. A linear DGM/RGM LUT should 67 + * also considered as putting the respective block into bypass mode. 93 68 * 94 - * This means that the following 95 - * configuration is assumed to be the default: 69 + * This means that the following configuration is assumed to be the 70 + * default: 96 71 * 97 - * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... 98 - * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass 72 + * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... CRTC 73 + * DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass 74 + * 75 + * AMD Private Color Management on drm_plane 76 + * ----------------------------------------- 77 + * 78 + * The AMD private color management properties on a &drm_plane are: 79 + * 80 + * - AMD_PLANE_DEGAMMA_LUT 81 + * - AMD_PLANE_DEGAMMA_LUT_SIZE 82 + * - AMD_PLANE_DEGAMMA_TF 83 + * - AMD_PLANE_HDR_MULT 84 + * - AMD_PLANE_CTM 85 + * - AMD_PLANE_SHAPER_LUT 86 + * - AMD_PLANE_SHAPER_LUT_SIZE 87 + * - AMD_PLANE_SHAPER_TF 88 + * - AMD_PLANE_LUT3D 89 + * - AMD_PLANE_LUT3D_SIZE 90 + * - AMD_PLANE_BLEND_LUT 91 + * - AMD_PLANE_BLEND_LUT_SIZE 92 + * - AMD_PLANE_BLEND_TF 93 + * 94 + * The AMD private color management property on a &drm_crtc is: 95 + * 96 + * - AMD_CRTC_REGAMMA_TF 97 + * 98 + * Use of these properties is discouraged. 
99 + * 100 + * AMD plane color pipeline 101 + * ------------------------ 102 + * 103 + * The AMD &drm_plane color pipeline is advertised for DCN generations 104 + * 3.0 and newer. It exposes these elements in this order: 105 + * 106 + * 1. 1D curve colorop 107 + * 2. Multiplier 108 + * 3. 3x4 CTM 109 + * 4. 1D curve colorop 110 + * 5. 1D LUT 111 + * 6. 3D LUT 112 + * 7. 1D curve colorop 113 + * 8. 1D LUT 114 + * 115 + * The multiplier (#2) is a simple multiplier that is applied to all 116 + * channels. 117 + * 118 + * The 3x4 CTM (#3) is a simple 3x4 matrix. 119 + * 120 + * #1, and #7 are non-linear to linear curves. #4 is a linear to 121 + * non-linear curve. They support sRGB, PQ, and BT.709/BT.2020 EOTFs or 122 + * their inverse. 123 + * 124 + * The 1D LUTs (#5 and #8) are plain 4096 entry LUTs. 125 + * 126 + * The 3DLUT (#6) is a tetrahedrally interpolated 17 cube LUT. 127 + * 99 128 */ 100 129 101 130 #define MAX_DRM_LUT_VALUE 0xFFFF 131 + #define MAX_DRM_LUT32_VALUE 0xFFFFFFFF 102 132 #define SDR_WHITE_LEVEL_INIT_VALUE 80 103 133 104 134 /** ··· 426 342 } 427 343 428 344 /** 345 + * __extract_blob_lut32 - Extracts the DRM lut and lut size from a blob. 346 + * @blob: DRM color mgmt property blob 347 + * @size: lut size 348 + * 349 + * Returns: 350 + * DRM LUT or NULL 351 + */ 352 + static const struct drm_color_lut32 * 353 + __extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size) 354 + { 355 + *size = blob ? drm_color_lut32_size(blob) : 0; 356 + return blob ? 
(struct drm_color_lut32 *)blob->data : NULL; 357 + } 358 + 359 + /** 429 360 * __is_lut_linear - check if the given lut is a linear mapping of values 430 361 * @lut: given lut to check values 431 362 * @size: lut size ··· 510 411 gamma->entries.red[i] = dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE); 511 412 gamma->entries.green[i] = dc_fixpt_from_fraction(g, MAX_DRM_LUT_VALUE); 512 413 gamma->entries.blue[i] = dc_fixpt_from_fraction(b, MAX_DRM_LUT_VALUE); 414 + } 415 + } 416 + 417 + /** 418 + * __drm_lut32_to_dc_gamma - convert the drm_color_lut to dc_gamma. 419 + * @lut: DRM lookup table for color conversion 420 + * @gamma: DC gamma to set entries 421 + * 422 + * The conversion depends on the size of the lut - whether or not it's legacy. 423 + */ 424 + static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma) 425 + { 426 + int i; 427 + 428 + for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) { 429 + gamma->entries.red[i] = dc_fixpt_from_fraction(lut[i].red, MAX_DRM_LUT32_VALUE); 430 + gamma->entries.green[i] = dc_fixpt_from_fraction(lut[i].green, MAX_DRM_LUT32_VALUE); 431 + gamma->entries.blue[i] = dc_fixpt_from_fraction(lut[i].blue, MAX_DRM_LUT32_VALUE); 513 432 } 514 433 } 515 434 ··· 683 566 return res ? 0 : -ENOMEM; 684 567 } 685 568 569 + /** 570 + * __set_output_tf_32 - calculates the output transfer function based on expected input space. 571 + * @func: transfer function 572 + * @lut: lookup table that defines the color space 573 + * @lut_size: size of respective lut 574 + * @has_rom: if ROM can be used for hardcoded curve 575 + * 576 + * Returns: 577 + * 0 in case of success. -ENOMEM if fails. 
578 + */ 579 + static int __set_output_tf_32(struct dc_transfer_func *func, 580 + const struct drm_color_lut32 *lut, uint32_t lut_size, 581 + bool has_rom) 582 + { 583 + struct dc_gamma *gamma = NULL; 584 + struct calculate_buffer cal_buffer = {0}; 585 + bool res; 586 + 587 + cal_buffer.buffer_index = -1; 588 + 589 + if (lut_size) { 590 + gamma = dc_create_gamma(); 591 + if (!gamma) 592 + return -ENOMEM; 593 + 594 + gamma->num_entries = lut_size; 595 + __drm_lut32_to_dc_gamma(lut, gamma); 596 + } 597 + 598 + if (func->tf == TRANSFER_FUNCTION_LINEAR) { 599 + /* 600 + * Color module doesn't like calculating regamma params 601 + * on top of a linear input. But degamma params can be used 602 + * instead to simulate this. 603 + */ 604 + if (gamma) 605 + gamma->type = GAMMA_CUSTOM; 606 + res = mod_color_calculate_degamma_params(NULL, func, 607 + gamma, gamma != NULL); 608 + } else { 609 + /* 610 + * Assume sRGB. The actual mapping will depend on whether the 611 + * input was legacy or not. 612 + */ 613 + if (gamma) 614 + gamma->type = GAMMA_CS_TFM_1D; 615 + res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL, 616 + has_rom, NULL, &cal_buffer); 617 + } 618 + 619 + if (gamma) 620 + dc_gamma_release(&gamma); 621 + 622 + return res ? 0 : -ENOMEM; 623 + } 624 + 625 + 686 626 static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf, 687 627 const struct drm_color_lut *regamma_lut, 688 628 uint32_t regamma_size, bool has_rom, ··· 812 638 return res ? 0 : -ENOMEM; 813 639 } 814 640 641 + /** 642 + * __set_input_tf_32 - calculates the input transfer function based on expected 643 + * input space. 644 + * @caps: dc color capabilities 645 + * @func: transfer function 646 + * @lut: lookup table that defines the color space 647 + * @lut_size: size of respective lut. 648 + * 649 + * Returns: 650 + * 0 in case of success. -ENOMEM if fails. 
651 + */ 652 + static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func, 653 + const struct drm_color_lut32 *lut, uint32_t lut_size) 654 + { 655 + struct dc_gamma *gamma = NULL; 656 + bool res; 657 + 658 + if (lut_size) { 659 + gamma = dc_create_gamma(); 660 + if (!gamma) 661 + return -ENOMEM; 662 + 663 + gamma->type = GAMMA_CUSTOM; 664 + gamma->num_entries = lut_size; 665 + 666 + __drm_lut32_to_dc_gamma(lut, gamma); 667 + } 668 + 669 + res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL); 670 + 671 + if (gamma) 672 + dc_gamma_release(&gamma); 673 + 674 + return res ? 0 : -ENOMEM; 675 + } 676 + 815 677 static enum dc_transfer_func_predefined 816 678 amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf) 817 679 { ··· 874 664 case AMDGPU_TRANSFER_FUNCTION_GAMMA26_EOTF: 875 665 case AMDGPU_TRANSFER_FUNCTION_GAMMA26_INV_EOTF: 876 666 return TRANSFER_FUNCTION_GAMMA26; 667 + } 668 + } 669 + 670 + static enum dc_transfer_func_predefined 671 + amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf) 672 + { 673 + switch (tf) { 674 + case DRM_COLOROP_1D_CURVE_SRGB_EOTF: 675 + case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF: 676 + return TRANSFER_FUNCTION_SRGB; 677 + case DRM_COLOROP_1D_CURVE_PQ_125_EOTF: 678 + case DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF: 679 + return TRANSFER_FUNCTION_PQ; 680 + case DRM_COLOROP_1D_CURVE_BT2020_INV_OETF: 681 + case DRM_COLOROP_1D_CURVE_BT2020_OETF: 682 + return TRANSFER_FUNCTION_BT709; 683 + case DRM_COLOROP_1D_CURVE_GAMMA22: 684 + case DRM_COLOROP_1D_CURVE_GAMMA22_INV: 685 + return TRANSFER_FUNCTION_GAMMA22; 686 + default: 687 + return TRANSFER_FUNCTION_LINEAR; 877 688 } 878 689 } 879 690 ··· 949 718 } 950 719 /* lut0 has 1229 points (lut_size/4 + 1) */ 951 720 __to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth); 721 + } 722 + 723 + static void __to_dc_lut3d_32_color(struct dc_rgb *rgb, 724 + const struct drm_color_lut32 lut, 725 + int bit_precision) 726 + { 727 + rgb->red = 
drm_color_lut32_extract(lut.red, bit_precision); 728 + rgb->green = drm_color_lut32_extract(lut.green, bit_precision); 729 + rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision); 730 + } 731 + 732 + static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut, 733 + uint32_t lut3d_size, 734 + struct tetrahedral_params *params, 735 + bool use_tetrahedral_9, 736 + int bit_depth) 737 + { 738 + struct dc_rgb *lut0; 739 + struct dc_rgb *lut1; 740 + struct dc_rgb *lut2; 741 + struct dc_rgb *lut3; 742 + int lut_i, i; 743 + 744 + 745 + if (use_tetrahedral_9) { 746 + lut0 = params->tetrahedral_9.lut0; 747 + lut1 = params->tetrahedral_9.lut1; 748 + lut2 = params->tetrahedral_9.lut2; 749 + lut3 = params->tetrahedral_9.lut3; 750 + } else { 751 + lut0 = params->tetrahedral_17.lut0; 752 + lut1 = params->tetrahedral_17.lut1; 753 + lut2 = params->tetrahedral_17.lut2; 754 + lut3 = params->tetrahedral_17.lut3; 755 + } 756 + 757 + for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) { 758 + /* 759 + * We should consider the 3D LUT RGB values are distributed 760 + * along four arrays lut0-3 where the first sizes 1229 and the 761 + * other 1228. The bit depth supported for 3dlut channel is 762 + * 12-bit, but DC also supports 10-bit. 763 + * 764 + * TODO: improve color pipeline API to enable the userspace set 765 + * bit depth and 3D LUT size/stride, as specified by VA-API. 
766 + */ 767 + __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth); 768 + __to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth); 769 + __to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth); 770 + __to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth); 771 + } 772 + /* lut0 has 1229 points (lut_size/4 + 1) */ 773 + __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth); 952 774 } 953 775 954 776 /* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream ··· 1462 1178 } 1463 1179 1464 1180 static int 1181 + __set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state, 1182 + struct drm_colorop_state *colorop_state) 1183 + { 1184 + struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func; 1185 + struct drm_colorop *colorop = colorop_state->colorop; 1186 + struct drm_device *drm = colorop->dev; 1187 + 1188 + if (colorop->type != DRM_COLOROP_1D_CURVE) 1189 + return -EINVAL; 1190 + 1191 + if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) 1192 + return -EINVAL; 1193 + 1194 + if (colorop_state->bypass) { 1195 + tf->type = TF_TYPE_BYPASS; 1196 + tf->tf = TRANSFER_FUNCTION_LINEAR; 1197 + return 0; 1198 + } 1199 + 1200 + drm_dbg(drm, "Degamma colorop with ID: %d\n", colorop->base.id); 1201 + 1202 + tf->type = TF_TYPE_PREDEFINED; 1203 + tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); 1204 + 1205 + return 0; 1206 + } 1207 + 1208 + static int 1209 + __set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state, 1210 + struct dc_plane_state *dc_plane_state, 1211 + struct drm_colorop *colorop) 1212 + { 1213 + struct drm_colorop *old_colorop; 1214 + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; 1215 + struct drm_atomic_state *state = plane_state->state; 1216 + int i = 0; 1217 + 1218 + old_colorop = colorop; 1219 + 1220 + /* 1st op: 1d curve - degamma */ 1221 + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { 1222 + if (new_colorop_state->colorop == old_colorop 
&& 1223 + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) { 1224 + colorop_state = new_colorop_state; 1225 + break; 1226 + } 1227 + } 1228 + 1229 + if (!colorop_state) 1230 + return -EINVAL; 1231 + 1232 + return __set_colorop_in_tf_1d_curve(dc_plane_state, colorop_state); 1233 + } 1234 + 1235 + static int 1236 + __set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state, 1237 + struct dc_plane_state *dc_plane_state, 1238 + struct drm_colorop *colorop) 1239 + { 1240 + struct drm_colorop *old_colorop; 1241 + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; 1242 + struct drm_atomic_state *state = plane_state->state; 1243 + const struct drm_device *dev = colorop->dev; 1244 + const struct drm_property_blob *blob; 1245 + struct drm_color_ctm_3x4 *ctm = NULL; 1246 + int i = 0; 1247 + 1248 + /* 3x4 matrix */ 1249 + old_colorop = colorop; 1250 + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { 1251 + if (new_colorop_state->colorop == old_colorop && 1252 + new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) { 1253 + colorop_state = new_colorop_state; 1254 + break; 1255 + } 1256 + } 1257 + 1258 + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) { 1259 + drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id); 1260 + blob = colorop_state->data; 1261 + if (blob->length == sizeof(struct drm_color_ctm_3x4)) { 1262 + ctm = (struct drm_color_ctm_3x4 *) blob->data; 1263 + __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix); 1264 + dc_plane_state->gamut_remap_matrix.enable_remap = true; 1265 + dc_plane_state->input_csc_color_matrix.enable_adjustment = false; 1266 + } else { 1267 + drm_warn(dev, "blob->length (%zu) isn't equal to drm_color_ctm_3x4 (%zu)\n", 1268 + blob->length, sizeof(struct drm_color_ctm_3x4)); 1269 + return -EINVAL; 1270 + } 1271 + } 1272 + 1273 + return 0; 1274 + } 1275 + 1276 + static int 1277 + 
__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state, 1278 + struct dc_plane_state *dc_plane_state, 1279 + struct drm_colorop *colorop) 1280 + { 1281 + struct drm_colorop *old_colorop; 1282 + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; 1283 + struct drm_atomic_state *state = plane_state->state; 1284 + const struct drm_device *dev = colorop->dev; 1285 + int i = 0; 1286 + 1287 + /* Multiplier */ 1288 + old_colorop = colorop; 1289 + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { 1290 + if (new_colorop_state->colorop == old_colorop && 1291 + new_colorop_state->colorop->type == DRM_COLOROP_MULTIPLIER) { 1292 + colorop_state = new_colorop_state; 1293 + break; 1294 + } 1295 + } 1296 + 1297 + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) { 1298 + drm_dbg(dev, "Multiplier colorop with ID: %d\n", colorop->base.id); 1299 + dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(colorop_state->multiplier); 1300 + } 1301 + 1302 + return 0; 1303 + } 1304 + 1305 + static int 1306 + __set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state, 1307 + struct dc_plane_state *dc_plane_state, 1308 + struct drm_colorop *colorop) 1309 + { 1310 + struct drm_colorop *old_colorop; 1311 + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; 1312 + struct drm_atomic_state *state = plane_state->state; 1313 + enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR; 1314 + struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func; 1315 + const struct drm_color_lut32 *shaper_lut; 1316 + struct drm_device *dev = colorop->dev; 1317 + bool enabled = false; 1318 + u32 shaper_size; 1319 + int i = 0, ret = 0; 1320 + 1321 + /* 1D Curve - SHAPER TF */ 1322 + old_colorop = colorop; 1323 + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { 1324 + if (new_colorop_state->colorop == old_colorop && 1325 + (BIT(new_colorop_state->curve_1d_type) & 
amdgpu_dm_supported_shaper_tfs)) { 1326 + colorop_state = new_colorop_state; 1327 + break; 1328 + } 1329 + } 1330 + 1331 + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) { 1332 + drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id); 1333 + tf->type = TF_TYPE_DISTRIBUTED_POINTS; 1334 + tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); 1335 + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; 1336 + ret = __set_output_tf(tf, 0, 0, false); 1337 + if (ret) 1338 + return ret; 1339 + enabled = true; 1340 + } 1341 + 1342 + /* 1D LUT - SHAPER LUT */ 1343 + colorop = old_colorop->next; 1344 + if (!colorop) { 1345 + drm_dbg(dev, "no Shaper LUT colorop found\n"); 1346 + return -EINVAL; 1347 + } 1348 + 1349 + old_colorop = colorop; 1350 + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { 1351 + if (new_colorop_state->colorop == old_colorop && 1352 + new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) { 1353 + colorop_state = new_colorop_state; 1354 + break; 1355 + } 1356 + } 1357 + 1358 + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) { 1359 + drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id); 1360 + tf->type = TF_TYPE_DISTRIBUTED_POINTS; 1361 + tf->tf = default_tf; 1362 + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; 1363 + shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size); 1364 + shaper_size = shaper_lut != NULL ? 
shaper_size : 0; 1365 + 1366 + /* Custom LUT size must be the same as supported size */ 1367 + if (shaper_size == colorop->size) { 1368 + ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false); 1369 + if (ret) 1370 + return ret; 1371 + enabled = true; 1372 + } 1373 + } 1374 + 1375 + if (!enabled) 1376 + tf->type = TF_TYPE_BYPASS; 1377 + 1378 + return 0; 1379 + } 1380 + 1381 + /* __set_colorop_3dlut - set DRM 3D LUT to DC stream 1382 + * @drm_lut3d: user 3D LUT 1383 + * @drm_lut3d_size: size of 3D LUT 1384 + * @lut3d: DC 3D LUT 1385 + * 1386 + * Map user 3D LUT data to DC 3D LUT and all necessary bits to program it 1387 + * on DCN accordingly. 1388 + * 1389 + * Returns: 1390 + * 0 on success. -EINVAL if drm_lut3d_size is zero. 1391 + */ 1392 + static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d, 1393 + uint32_t drm_lut3d_size, 1394 + struct dc_3dlut *lut) 1395 + { 1396 + if (!drm_lut3d_size) { 1397 + lut->state.bits.initialized = 0; 1398 + return -EINVAL; 1399 + } 1400 + 1401 + /* Only supports 17x17x17 3D LUT (12-bit) now */ 1402 + lut->lut_3d.use_12bits = true; 1403 + lut->lut_3d.use_tetrahedral_9 = false; 1404 + 1405 + lut->state.bits.initialized = 1; 1406 + __drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d, 1407 + lut->lut_3d.use_tetrahedral_9, 12); 1408 + 1409 + return 0; 1410 + } 1411 + 1412 + static int 1413 + __set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state, 1414 + struct dc_plane_state *dc_plane_state, 1415 + struct drm_colorop *colorop) 1416 + { 1417 + struct drm_colorop *old_colorop; 1418 + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; 1419 + struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func; 1420 + struct drm_atomic_state *state = plane_state->state; 1421 + const struct amdgpu_device *adev = drm_to_adev(colorop->dev); 1422 + const struct drm_device *dev = colorop->dev; 1423 + const struct drm_color_lut32 *lut3d; 1424 + uint32_t lut3d_size; 1425 + int i = 0, ret = 
0; 1426 + 1427 + /* 3D LUT */ 1428 + old_colorop = colorop; 1429 + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { 1430 + if (new_colorop_state->colorop == old_colorop && 1431 + new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) { 1432 + colorop_state = new_colorop_state; 1433 + break; 1434 + } 1435 + } 1436 + 1437 + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) { 1438 + if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) { 1439 + drm_dbg(dev, "3D LUT is not supported by hardware\n"); 1440 + return -EINVAL; 1441 + } 1442 + 1443 + drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id); 1444 + lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size); 1445 + lut3d_size = lut3d != NULL ? lut3d_size : 0; 1446 + ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func); 1447 + if (ret) { 1448 + drm_dbg(dev, "3D LUT colorop with ID: %d has LUT size = %d\n", 1449 + colorop->base.id, lut3d_size); 1450 + return ret; 1451 + } 1452 + 1453 + /* 3D LUT requires shaper. 
If shaper colorop is bypassed, enable shaper curve 1454 + * with TRANSFER_FUNCTION_LINEAR 1455 + */ 1456 + if (tf->type == TF_TYPE_BYPASS) { 1457 + tf->type = TF_TYPE_DISTRIBUTED_POINTS; 1458 + tf->tf = TRANSFER_FUNCTION_LINEAR; 1459 + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; 1460 + ret = __set_output_tf_32(tf, NULL, 0, false); 1461 + } 1462 + } 1463 + 1464 + return ret; 1465 + } 1466 + 1467 + static int 1468 + __set_dm_plane_colorop_blend(struct drm_plane_state *plane_state, 1469 + struct dc_plane_state *dc_plane_state, 1470 + struct drm_colorop *colorop) 1471 + { 1472 + struct drm_colorop *old_colorop; 1473 + struct drm_colorop_state *colorop_state = NULL, *new_colorop_state; 1474 + struct drm_atomic_state *state = plane_state->state; 1475 + enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR; 1476 + struct dc_transfer_func *tf = &dc_plane_state->blend_tf; 1477 + const struct drm_color_lut32 *blend_lut = NULL; 1478 + struct drm_device *dev = colorop->dev; 1479 + uint32_t blend_size = 0; 1480 + int i = 0; 1481 + 1482 + /* 1D Curve - BLND TF */ 1483 + old_colorop = colorop; 1484 + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { 1485 + if (new_colorop_state->colorop == old_colorop && 1486 + (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { 1487 + colorop_state = new_colorop_state; 1488 + break; 1489 + } 1490 + } 1491 + 1492 + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE && 1493 + (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { 1494 + drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id); 1495 + tf->type = TF_TYPE_DISTRIBUTED_POINTS; 1496 + tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type); 1497 + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; 1498 + __set_input_tf_32(NULL, tf, blend_lut, blend_size); 1499 + } 1500 + 1501 + /* 1D Curve - BLND LUT */ 1502 + colorop = 
old_colorop->next; 1503 + if (!colorop) { 1504 + drm_dbg(dev, "no Blend LUT colorop found\n"); 1505 + return -EINVAL; 1506 + } 1507 + 1508 + old_colorop = colorop; 1509 + for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) { 1510 + if (new_colorop_state->colorop == old_colorop && 1511 + new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) { 1512 + colorop_state = new_colorop_state; 1513 + break; 1514 + } 1515 + } 1516 + 1517 + if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT && 1518 + (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) { 1519 + drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id); 1520 + tf->type = TF_TYPE_DISTRIBUTED_POINTS; 1521 + tf->tf = default_tf; 1522 + tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE; 1523 + blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size); 1524 + blend_size = blend_lut != NULL ? blend_size : 0; 1525 + 1526 + /* Custom LUT size must be the same as supported size */ 1527 + if (blend_size == colorop->size) 1528 + __set_input_tf_32(NULL, tf, blend_lut, blend_size); 1529 + } 1530 + 1531 + return 0; 1532 + } 1533 + 1534 + static int 1465 1535 amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, 1466 1536 struct dc_plane_state *dc_plane_state) 1467 1537 { ··· 1861 1223 1862 1224 return ret; 1863 1225 } 1226 + 1227 + return 0; 1228 + } 1229 + 1230 + static int 1231 + amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state, 1232 + struct dc_plane_state *dc_plane_state) 1233 + { 1234 + struct drm_colorop *colorop = plane_state->color_pipeline; 1235 + struct drm_device *dev = plane_state->plane->dev; 1236 + struct amdgpu_device *adev = drm_to_adev(dev); 1237 + int ret; 1238 + 1239 + /* 1D Curve - DEGAM TF */ 1240 + if (!colorop) 1241 + return -EINVAL; 1242 + 1243 + ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop); 1244 + if (ret) 1245 + return ret; 1246 + 1247 + /* 
Multiplier */ 1248 + colorop = colorop->next; 1249 + if (!colorop) { 1250 + drm_dbg(dev, "no multiplier colorop found\n"); 1251 + return -EINVAL; 1252 + } 1253 + 1254 + ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop); 1255 + if (ret) 1256 + return ret; 1257 + 1258 + /* 3x4 matrix */ 1259 + colorop = colorop->next; 1260 + if (!colorop) { 1261 + drm_dbg(dev, "no 3x4 matrix colorop found\n"); 1262 + return -EINVAL; 1263 + } 1264 + 1265 + ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop); 1266 + if (ret) 1267 + return ret; 1268 + 1269 + if (adev->dm.dc->caps.color.dpp.hw_3d_lut) { 1270 + /* 1D Curve & LUT - SHAPER TF & LUT */ 1271 + colorop = colorop->next; 1272 + if (!colorop) { 1273 + drm_dbg(dev, "no Shaper TF colorop found\n"); 1274 + return -EINVAL; 1275 + } 1276 + 1277 + ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop); 1278 + if (ret) 1279 + return ret; 1280 + 1281 + /* Shaper LUT colorop is already handled, just skip here */ 1282 + colorop = colorop->next; 1283 + if (!colorop) 1284 + return -EINVAL; 1285 + 1286 + /* 3D LUT */ 1287 + colorop = colorop->next; 1288 + if (!colorop) { 1289 + drm_dbg(dev, "no 3D LUT colorop found\n"); 1290 + return -EINVAL; 1291 + } 1292 + 1293 + ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop); 1294 + if (ret) 1295 + return ret; 1296 + } 1297 + 1298 + /* 1D Curve & LUT - BLND TF & LUT */ 1299 + colorop = colorop->next; 1300 + if (!colorop) { 1301 + drm_dbg(dev, "no Blend TF colorop found\n"); 1302 + return -EINVAL; 1303 + } 1304 + 1305 + ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop); 1306 + if (ret) 1307 + return ret; 1308 + 1309 + /* BLND LUT colorop is already handled, just skip here */ 1310 + colorop = colorop->next; 1311 + if (!colorop) 1312 + return -EINVAL; 1864 1313 1865 1314 return 0; 1866 1315 } ··· 2047 1322 dc_plane_state->gamut_remap_matrix.enable_remap = false; 2048 1323 
dc_plane_state->input_csc_color_matrix.enable_adjustment = false; 2049 1324 } 1325 + 1326 + if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state)) 1327 + return 0; 2050 1328 2051 1329 return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state); 2052 1330 }
+209
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2023 Advanced Micro Devices, Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 
22 + * 23 + * Authors: AMD 24 + * 25 + */ 26 + 27 + #include <drm/drm_print.h> 28 + #include <drm/drm_plane.h> 29 + #include <drm/drm_property.h> 30 + #include <drm/drm_colorop.h> 31 + 32 + #include "amdgpu.h" 33 + #include "amdgpu_dm_colorop.h" 34 + #include "dc.h" 35 + 36 + const u64 amdgpu_dm_supported_degam_tfs = 37 + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | 38 + BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | 39 + BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | 40 + BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); 41 + 42 + const u64 amdgpu_dm_supported_shaper_tfs = 43 + BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) | 44 + BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) | 45 + BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) | 46 + BIT(DRM_COLOROP_1D_CURVE_GAMMA22); 47 + 48 + const u64 amdgpu_dm_supported_blnd_tfs = 49 + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | 50 + BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) | 51 + BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) | 52 + BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV); 53 + 54 + #define MAX_COLOR_PIPELINE_OPS 10 55 + 56 + #define LUT3D_SIZE 17 57 + 58 + int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list) 59 + { 60 + struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS]; 61 + struct drm_device *dev = plane->dev; 62 + struct amdgpu_device *adev = drm_to_adev(dev); 63 + int ret; 64 + int i = 0; 65 + 66 + memset(ops, 0, sizeof(ops)); 67 + 68 + /* 1D curve - DEGAM TF */ 69 + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); 70 + if (!ops[i]) { 71 + ret = -ENOMEM; 72 + goto cleanup; 73 + } 74 + 75 + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, 76 + amdgpu_dm_supported_degam_tfs, 77 + DRM_COLOROP_FLAG_ALLOW_BYPASS); 78 + if (ret) 79 + goto cleanup; 80 + 81 + list->type = ops[i]->base.id; 82 + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id); 83 + 84 + i++; 85 + 86 + /* Multiplier */ 87 + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); 88 + if (!ops[i]) { 89 + ret = -ENOMEM; 90 + goto cleanup; 91 + } 92 + 
93 + ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); 94 + if (ret) 95 + goto cleanup; 96 + 97 + drm_colorop_set_next_property(ops[i-1], ops[i]); 98 + 99 + i++; 100 + 101 + /* 3x4 matrix */ 102 + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); 103 + if (!ops[i]) { 104 + ret = -ENOMEM; 105 + goto cleanup; 106 + } 107 + 108 + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); 109 + if (ret) 110 + goto cleanup; 111 + 112 + drm_colorop_set_next_property(ops[i-1], ops[i]); 113 + 114 + i++; 115 + 116 + if (adev->dm.dc->caps.color.dpp.hw_3d_lut) { 117 + /* 1D curve - SHAPER TF */ 118 + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); 119 + if (!ops[i]) { 120 + ret = -ENOMEM; 121 + goto cleanup; 122 + } 123 + 124 + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, 125 + amdgpu_dm_supported_shaper_tfs, 126 + DRM_COLOROP_FLAG_ALLOW_BYPASS); 127 + if (ret) 128 + goto cleanup; 129 + 130 + drm_colorop_set_next_property(ops[i-1], ops[i]); 131 + 132 + i++; 133 + 134 + /* 1D LUT - SHAPER LUT */ 135 + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); 136 + if (!ops[i]) { 137 + ret = -ENOMEM; 138 + goto cleanup; 139 + } 140 + 141 + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, 142 + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, 143 + DRM_COLOROP_FLAG_ALLOW_BYPASS); 144 + if (ret) 145 + goto cleanup; 146 + 147 + drm_colorop_set_next_property(ops[i-1], ops[i]); 148 + 149 + i++; 150 + 151 + /* 3D LUT */ 152 + ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL); 153 + if (!ops[i]) { 154 + ret = -ENOMEM; 155 + goto cleanup; 156 + } 157 + 158 + ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE, 159 + DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, 160 + DRM_COLOROP_FLAG_ALLOW_BYPASS); 161 + if (ret) 162 + goto cleanup; 163 + 164 + drm_colorop_set_next_property(ops[i-1], ops[i]); 165 + 166 + i++; 167 + } 168 + 169 + /* 1D curve - BLND TF */ 170 + ops[i] = 
kzalloc(sizeof(*ops[0]), GFP_KERNEL); 171 + if (!ops[i]) { 172 + ret = -ENOMEM; 173 + goto cleanup; 174 + } 175 + 176 + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, 177 + amdgpu_dm_supported_blnd_tfs, 178 + DRM_COLOROP_FLAG_ALLOW_BYPASS); 179 + if (ret) 180 + goto cleanup; 181 + 182 + drm_colorop_set_next_property(ops[i - 1], ops[i]); 183 + 184 + i++; 185 + 186 + /* 1D LUT - BLND LUT */ 187 + ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL); 188 + if (!ops[i]) { 189 + ret = -ENOMEM; 190 + goto cleanup; 191 + } 192 + 193 + ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES, 194 + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, 195 + DRM_COLOROP_FLAG_ALLOW_BYPASS); 196 + if (ret) 197 + goto cleanup; 198 + 199 + drm_colorop_set_next_property(ops[i-1], ops[i]); 200 + return 0; 201 + 202 + cleanup: 203 + if (ret == -ENOMEM) 204 + drm_err(plane->dev, "KMS: Failed to allocate colorop\n"); 205 + 206 + drm_colorop_pipeline_destroy(dev); 207 + 208 + return ret; 209 + }
+36
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright 2023 Advanced Micro Devices, Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 22 + * 23 + * Authors: AMD 24 + * 25 + */ 26 + 27 + #ifndef __AMDGPU_DM_COLOROP_H__ 28 + #define __AMDGPU_DM_COLOROP_H__ 29 + 30 + extern const u64 amdgpu_dm_supported_degam_tfs; 31 + extern const u64 amdgpu_dm_supported_shaper_tfs; 32 + extern const u64 amdgpu_dm_supported_blnd_tfs; 33 + 34 + int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list); 35 + 36 + #endif /* __AMDGPU_DM_COLOROP_H__*/
+12 -14
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
··· 736 736 { 737 737 struct amdgpu_crtc *acrtc = NULL; 738 738 struct drm_plane *cursor_plane; 739 - bool is_dcn; 739 + bool has_degamma; 740 740 int res = -ENOMEM; 741 741 742 742 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); ··· 775 775 776 776 dm->adev->mode_info.crtcs[crtc_index] = acrtc; 777 777 778 - /* Don't enable DRM CRTC degamma property for DCE since it doesn't 779 - * support programmable degamma anywhere. 778 + /* Don't enable DRM CRTC degamma property for 779 + * 1. Degamma is replaced by color pipeline. 780 + * 2. DCE since it doesn't support programmable degamma anywhere. 781 + * 3. DCN401 since pre-blending degamma LUT doesn't apply to cursor. 780 782 */ 781 - is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; 782 - /* Dont't enable DRM CRTC degamma property for DCN401 since the 783 - * pre-blending degamma LUT doesn't apply to cursor, and therefore 784 - * can't work similar to a post-blending degamma LUT as in other hw 785 - * versions. 786 - * TODO: revisit it once KMS plane color API is merged. 787 - */ 788 - drm_crtc_enable_color_mgmt(&acrtc->base, 789 - (is_dcn && 790 - dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ? 791 - MAX_COLOR_LUT_ENTRIES : 0, 783 + if (plane->color_pipeline_property) 784 + has_degamma = false; 785 + else 786 + has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch && 787 + dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01; 788 + 789 + drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? MAX_COLOR_LUT_ENTRIES : 0, 792 790 true, MAX_COLOR_LUT_ENTRIES); 793 791 794 792 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
+39
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
··· 37 37 #include "amdgpu_display.h" 38 38 #include "amdgpu_dm_trace.h" 39 39 #include "amdgpu_dm_plane.h" 40 + #include "amdgpu_dm_colorop.h" 40 41 #include "gc/gc_11_0_0_offset.h" 41 42 #include "gc/gc_11_0_0_sh_mask.h" 42 43 ··· 1783 1782 1784 1783 return 0; 1785 1784 } 1785 + #else 1786 + 1787 + #define MAX_COLOR_PIPELINES 5 1788 + 1789 + static int 1790 + dm_plane_init_colorops(struct drm_plane *plane) 1791 + { 1792 + struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES]; 1793 + struct drm_device *dev = plane->dev; 1794 + struct amdgpu_device *adev = drm_to_adev(dev); 1795 + struct dc *dc = adev->dm.dc; 1796 + int len = 0; 1797 + int ret; 1798 + 1799 + if (plane->type == DRM_PLANE_TYPE_CURSOR) 1800 + return 0; 1801 + 1802 + /* initialize pipeline */ 1803 + if (dc->ctx->dce_version >= DCN_VERSION_3_0) { 1804 + ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]); 1805 + if (ret) { 1806 + drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n", 1807 + plane->base.id, ret); 1808 + return ret; 1809 + } 1810 + len++; 1811 + 1812 + /* Create COLOR_PIPELINE property and attach */ 1813 + drm_plane_create_color_pipeline_property(plane, pipelines, len); 1814 + } 1815 + 1816 + return 0; 1817 + } 1786 1818 #endif 1787 1819 1788 1820 static const struct drm_plane_funcs dm_plane_funcs = { ··· 1924 1890 1925 1891 #ifdef AMD_PRIVATE_COLOR 1926 1892 dm_atomic_plane_attach_color_mgmt_properties(dm, plane); 1893 + #else 1894 + res = dm_plane_init_colorops(plane); 1895 + if (res) 1896 + return res; 1927 1897 #endif 1898 + 1928 1899 /* Create (reset) the plane state */ 1929 1900 if (plane->funcs->reset) 1930 1901 plane->funcs->reset(plane);
+1 -11
drivers/gpu/drm/armada/armada_fbdev.c
··· 44 44 struct drm_fb_helper_surface_size *sizes) 45 45 { 46 46 struct drm_device *dev = fbh->dev; 47 + struct fb_info *info = fbh->info; 47 48 struct drm_mode_fb_cmd2 mode; 48 49 struct armada_framebuffer *dfb; 49 50 struct armada_gem_object *obj; 50 - struct fb_info *info; 51 51 int size, ret; 52 52 void *ptr; 53 53 ··· 91 91 if (IS_ERR(dfb)) 92 92 return PTR_ERR(dfb); 93 93 94 - info = drm_fb_helper_alloc_info(fbh); 95 - if (IS_ERR(info)) { 96 - ret = PTR_ERR(info); 97 - goto err_fballoc; 98 - } 99 - 100 94 info->fbops = &armada_fb_ops; 101 95 info->fix.smem_start = obj->phys_addr; 102 96 info->fix.smem_len = obj->obj.size; ··· 106 112 (unsigned long long)obj->phys_addr); 107 113 108 114 return 0; 109 - 110 - err_fballoc: 111 - dfb->fb.funcs->destroy(&dfb->fb); 112 - return ret; 113 115 }
+33 -35
drivers/gpu/drm/bridge/ite-it66121.c
··· 287 287 enum chip_id { 288 288 ID_IT6610, 289 289 ID_IT66121, 290 + ID_IT66122, 290 291 }; 291 292 292 293 struct it66121_chip_info { ··· 313 312 u8 swl; 314 313 bool auto_cts; 315 314 } audio; 316 - const struct it66121_chip_info *info; 315 + enum chip_id id; 317 316 }; 318 317 319 318 static const struct regmap_range_cfg it66121_regmap_banks[] = { ··· 403 402 if (ret) 404 403 return ret; 405 404 406 - if (ctx->info->id == ID_IT66121) { 405 + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { 407 406 ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, 408 407 IT66121_AFE_IP_EC1, 0); 409 408 if (ret) ··· 429 428 if (ret) 430 429 return ret; 431 430 432 - if (ctx->info->id == ID_IT66121) { 431 + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { 433 432 ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, 434 433 IT66121_AFE_IP_EC1, 435 434 IT66121_AFE_IP_EC1); ··· 450 449 if (ret) 451 450 return ret; 452 451 453 - if (ctx->info->id == ID_IT6610) { 452 + if (ctx->id == ID_IT6610) { 454 453 ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG, 455 454 IT6610_AFE_XP_BYPASS, 456 455 IT6610_AFE_XP_BYPASS); ··· 600 599 if (ret) 601 600 return ret; 602 601 603 - if (ctx->info->id == ID_IT66121) { 602 + if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) { 604 603 ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, 605 604 IT66121_CLK_BANK_PWROFF_RCLK, 0); 606 605 if (ret) ··· 749 748 { 750 749 struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); 751 750 752 - if (ctx->info->id == ID_IT6610) { 751 + if (ctx->id == ID_IT6610) { 753 752 /* The IT6610 only supports these settings */ 754 753 bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH | 755 754 DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; ··· 803 802 if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI)) 804 803 goto unlock; 805 804 806 - if (ctx->info->id == ID_IT66121 && 805 + if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) && 807 806 
regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, 808 807 IT66121_CLK_BANK_PWROFF_TXCLK, 809 808 IT66121_CLK_BANK_PWROFF_TXCLK)) { ··· 816 815 if (it66121_configure_afe(ctx, adjusted_mode)) 817 816 goto unlock; 818 817 819 - if (ctx->info->id == ID_IT66121 && 818 + if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) && 820 819 regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, 821 820 IT66121_CLK_BANK_PWROFF_TXCLK, 0)) { 822 821 goto unlock; ··· 1385 1384 int ret; 1386 1385 struct it66121_ctx *ctx = dev_get_drvdata(dev); 1387 1386 1388 - dev_dbg(dev, "%s\n", __func__); 1389 - 1390 1387 mutex_lock(&ctx->lock); 1391 1388 ret = it661221_audio_output_enable(ctx, true); 1392 1389 if (ret) ··· 1399 1400 { 1400 1401 int ret; 1401 1402 struct it66121_ctx *ctx = dev_get_drvdata(dev); 1402 - 1403 - dev_dbg(dev, "%s\n", __func__); 1404 1403 1405 1404 mutex_lock(&ctx->lock); 1406 1405 ret = it661221_audio_output_enable(ctx, false); ··· 1476 1479 .no_capture_mute = 1, 1477 1480 }; 1478 1481 1479 - dev_dbg(dev, "%s\n", __func__); 1480 - 1481 1482 if (!of_property_present(dev->of_node, "#sound-dai-cells")) { 1482 1483 dev_info(dev, "No \"#sound-dai-cells\", no audio\n"); 1483 1484 return 0; ··· 1499 1504 "vcn33", "vcn18", "vrf12" 1500 1505 }; 1501 1506 1507 + static const struct it66121_chip_info it66xx_chip_info[] = { 1508 + {.id = ID_IT6610, .vid = 0xca00, .pid = 0x0611 }, 1509 + {.id = ID_IT66121, .vid = 0x4954, .pid = 0x0612 }, 1510 + {.id = ID_IT66122, .vid = 0x4954, .pid = 0x0622 }, 1511 + }; 1512 + 1502 1513 static int it66121_probe(struct i2c_client *client) 1503 1514 { 1504 1515 u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 }; 1505 1516 struct device_node *ep; 1506 - int ret; 1517 + int ret, i; 1507 1518 struct it66121_ctx *ctx; 1508 1519 struct device *dev = &client->dev; 1520 + const struct it66121_chip_info *chip_info; 1509 1521 1510 1522 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { 1511 1523 dev_err(dev, "I2C check functionality 
failed.\n"); ··· 1530 1528 1531 1529 ctx->dev = dev; 1532 1530 ctx->client = client; 1533 - ctx->info = i2c_get_match_data(client); 1534 1531 1535 1532 of_property_read_u32(ep, "bus-width", &ctx->bus_width); 1536 1533 of_node_put(ep); ··· 1575 1574 revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]); 1576 1575 device_ids[1] &= IT66121_DEVICE_ID1_MASK; 1577 1576 1578 - if ((vendor_ids[1] << 8 | vendor_ids[0]) != ctx->info->vid || 1579 - (device_ids[1] << 8 | device_ids[0]) != ctx->info->pid) { 1580 - return -ENODEV; 1577 + for (i = 0; i < ARRAY_SIZE(it66xx_chip_info); i++) { 1578 + chip_info = &it66xx_chip_info[i]; 1579 + if ((vendor_ids[1] << 8 | vendor_ids[0]) == chip_info->vid && 1580 + (device_ids[1] << 8 | device_ids[0]) == chip_info->pid) { 1581 + ctx->id = chip_info->id; 1582 + break; 1583 + } 1581 1584 } 1585 + 1586 + if (i == ARRAY_SIZE(it66xx_chip_info)) 1587 + return -ENODEV; 1582 1588 1583 1589 ctx->bridge.of_node = dev->of_node; 1584 1590 ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA; ··· 1620 1612 mutex_destroy(&ctx->lock); 1621 1613 } 1622 1614 1623 - static const struct it66121_chip_info it66121_chip_info = { 1624 - .id = ID_IT66121, 1625 - .vid = 0x4954, 1626 - .pid = 0x0612, 1627 - }; 1628 - 1629 - static const struct it66121_chip_info it6610_chip_info = { 1630 - .id = ID_IT6610, 1631 - .vid = 0xca00, 1632 - .pid = 0x0611, 1633 - }; 1634 - 1635 1615 static const struct of_device_id it66121_dt_match[] = { 1636 - { .compatible = "ite,it66121", &it66121_chip_info }, 1637 - { .compatible = "ite,it6610", &it6610_chip_info }, 1616 + { .compatible = "ite,it6610" }, 1617 + { .compatible = "ite,it66121" }, 1618 + { .compatible = "ite,it66122" }, 1638 1619 { } 1639 1620 }; 1640 1621 MODULE_DEVICE_TABLE(of, it66121_dt_match); 1641 1622 1642 1623 static const struct i2c_device_id it66121_id[] = { 1643 - { "it66121", (kernel_ulong_t) &it66121_chip_info }, 1644 - { "it6610", (kernel_ulong_t) &it6610_chip_info }, 1624 + { .name = "it6610" }, 1625 + { 
.name = "it66121" }, 1626 + { .name = "it66122" }, 1645 1627 { } 1646 1628 }; 1647 1629 MODULE_DEVICE_TABLE(i2c, it66121_id);
+10
drivers/gpu/drm/bridge/simple-bridge.c
··· 262 262 .connector_type = DRM_MODE_CONNECTOR_VGA, 263 263 }, 264 264 }, { 265 + .compatible = "asl-tek,cs5263", 266 + .data = &(const struct simple_bridge_info) { 267 + .connector_type = DRM_MODE_CONNECTOR_HDMIA, 268 + }, 269 + }, { 270 + .compatible = "parade,ps185hdm", 271 + .data = &(const struct simple_bridge_info) { 272 + .connector_type = DRM_MODE_CONNECTOR_HDMIA, 273 + }, 274 + }, { 265 275 .compatible = "radxa,ra620", 266 276 .data = &(const struct simple_bridge_info) { 267 277 .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+9 -2
drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
··· 868 868 return; 869 869 870 870 if (connector->display_info.is_hdmi) { 871 - dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n", 872 - __func__, conn_state->hdmi.tmds_char_rate); 871 + dev_dbg(hdmi->dev, "%s mode=HDMI %s rate=%llu bpc=%u\n", __func__, 872 + drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format), 873 + conn_state->hdmi.tmds_char_rate, conn_state->hdmi.output_bpc); 873 874 op_mode = 0; 874 875 hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate; 875 876 } else { ··· 1287 1286 hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; 1288 1287 hdmi->bridge.vendor = "Synopsys"; 1289 1288 hdmi->bridge.product = "DW HDMI QP TX"; 1289 + 1290 + if (plat_data->supported_formats) 1291 + hdmi->bridge.supported_formats = plat_data->supported_formats; 1292 + 1293 + if (plat_data->max_bpc) 1294 + hdmi->bridge.max_bpc = plat_data->max_bpc; 1290 1295 1291 1296 hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi); 1292 1297 if (IS_ERR(hdmi->bridge.ddc))
+4 -2
drivers/gpu/drm/clients/drm_fbdev_client.c
··· 38 38 } 39 39 } 40 40 41 - static int drm_fbdev_client_restore(struct drm_client_dev *client) 41 + static int drm_fbdev_client_restore(struct drm_client_dev *client, bool force) 42 42 { 43 - drm_fb_helper_lastclose(client->dev); 43 + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); 44 + 45 + drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); 44 46 45 47 return 0; 46 48 }
+13
drivers/gpu/drm/clients/drm_log.c
··· 315 315 drm_client_release(client); 316 316 } 317 317 318 + static int drm_log_client_restore(struct drm_client_dev *client, bool force) 319 + { 320 + int ret; 321 + 322 + if (force) 323 + ret = drm_client_modeset_commit_locked(client); 324 + else 325 + ret = drm_client_modeset_commit(client); 326 + 327 + return ret; 328 + } 329 + 318 330 static int drm_log_client_hotplug(struct drm_client_dev *client) 319 331 { 320 332 struct drm_log *dlog = client_to_drm_log(client); ··· 360 348 .owner = THIS_MODULE, 361 349 .free = drm_log_client_free, 362 350 .unregister = drm_log_client_unregister, 351 + .restore = drm_log_client_restore, 363 352 .hotplug = drm_log_client_hotplug, 364 353 .suspend = drm_log_client_suspend, 365 354 .resume = drm_log_client_resume,
+166 -1
drivers/gpu/drm/drm_atomic.c
··· 42 42 #include <drm/drm_mode.h> 43 43 #include <drm/drm_print.h> 44 44 #include <drm/drm_writeback.h> 45 + #include <drm/drm_colorop.h> 45 46 46 47 #include "drm_crtc_internal.h" 47 48 #include "drm_internal.h" ··· 108 107 kfree(state->connectors); 109 108 kfree(state->crtcs); 110 109 kfree(state->planes); 110 + kfree(state->colorops); 111 111 kfree(state->private_objs); 112 112 } 113 113 EXPORT_SYMBOL(drm_atomic_state_default_release); ··· 139 137 state->planes = kcalloc(dev->mode_config.num_total_plane, 140 138 sizeof(*state->planes), GFP_KERNEL); 141 139 if (!state->planes) 140 + goto fail; 141 + state->colorops = kcalloc(dev->mode_config.num_colorop, 142 + sizeof(*state->colorops), GFP_KERNEL); 143 + if (!state->colorops) 142 144 goto fail; 143 145 144 146 /* ··· 255 249 state->planes[i].state_to_destroy = NULL; 256 250 state->planes[i].old_state = NULL; 257 251 state->planes[i].new_state = NULL; 252 + } 253 + 254 + for (i = 0; i < config->num_colorop; i++) { 255 + struct drm_colorop *colorop = state->colorops[i].ptr; 256 + 257 + if (!colorop) 258 + continue; 259 + 260 + drm_colorop_atomic_destroy_state(colorop, 261 + state->colorops[i].state); 262 + state->colorops[i].ptr = NULL; 263 + state->colorops[i].state = NULL; 264 + state->colorops[i].old_state = NULL; 265 + state->colorops[i].new_state = NULL; 258 266 } 259 267 260 268 for (i = 0; i < state->num_private_objs; i++) { ··· 592 572 } 593 573 EXPORT_SYMBOL(drm_atomic_get_plane_state); 594 574 575 + /** 576 + * drm_atomic_get_colorop_state - get colorop state 577 + * @state: global atomic state object 578 + * @colorop: colorop to get state object for 579 + * 580 + * This function returns the colorop state for the given colorop, allocating it 581 + * if needed. It will also grab the relevant plane lock to make sure that the 582 + * state is consistent. 583 + * 584 + * Returns: 585 + * 586 + * Either the allocated state or the error code encoded into the pointer. 
When 587 + * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 588 + * entire atomic sequence must be restarted. All other errors are fatal. 589 + */ 590 + struct drm_colorop_state * 591 + drm_atomic_get_colorop_state(struct drm_atomic_state *state, 592 + struct drm_colorop *colorop) 593 + { 594 + int ret, index = drm_colorop_index(colorop); 595 + struct drm_colorop_state *colorop_state; 596 + 597 + WARN_ON(!state->acquire_ctx); 598 + 599 + colorop_state = drm_atomic_get_new_colorop_state(state, colorop); 600 + if (colorop_state) 601 + return colorop_state; 602 + 603 + ret = drm_modeset_lock(&colorop->plane->mutex, state->acquire_ctx); 604 + if (ret) 605 + return ERR_PTR(ret); 606 + 607 + colorop_state = drm_atomic_helper_colorop_duplicate_state(colorop); 608 + if (!colorop_state) 609 + return ERR_PTR(-ENOMEM); 610 + 611 + state->colorops[index].state = colorop_state; 612 + state->colorops[index].ptr = colorop; 613 + state->colorops[index].old_state = colorop->state; 614 + state->colorops[index].new_state = colorop_state; 615 + colorop_state->state = state; 616 + 617 + drm_dbg_atomic(colorop->dev, "Added [COLOROP:%d:%d] %p state to %p\n", 618 + colorop->base.id, colorop->type, colorop_state, state); 619 + 620 + return colorop_state; 621 + } 622 + EXPORT_SYMBOL(drm_atomic_get_colorop_state); 623 + 595 624 static bool 596 625 plane_switching_crtc(const struct drm_plane_state *old_plane_state, 597 626 const struct drm_plane_state *new_plane_state) ··· 780 711 return 0; 781 712 } 782 713 714 + static void drm_atomic_colorop_print_state(struct drm_printer *p, 715 + const struct drm_colorop_state *state) 716 + { 717 + struct drm_colorop *colorop = state->colorop; 718 + 719 + drm_printf(p, "colorop[%u]:\n", colorop->base.id); 720 + drm_printf(p, "\ttype=%s\n", drm_get_colorop_type_name(colorop->type)); 721 + if (colorop->bypass_property) 722 + drm_printf(p, "\tbypass=%u\n", state->bypass); 723 + 724 + switch (colorop->type) { 725 + case 
DRM_COLOROP_1D_CURVE: 726 + drm_printf(p, "\tcurve_1d_type=%s\n", 727 + drm_get_colorop_curve_1d_type_name(state->curve_1d_type)); 728 + break; 729 + case DRM_COLOROP_1D_LUT: 730 + drm_printf(p, "\tsize=%d\n", colorop->size); 731 + drm_printf(p, "\tinterpolation=%s\n", 732 + drm_get_colorop_lut1d_interpolation_name(colorop->lut1d_interpolation)); 733 + drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0); 734 + break; 735 + case DRM_COLOROP_CTM_3X4: 736 + drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0); 737 + break; 738 + case DRM_COLOROP_MULTIPLIER: 739 + drm_printf(p, "\tmultiplier=%llu\n", state->multiplier); 740 + break; 741 + case DRM_COLOROP_3D_LUT: 742 + drm_printf(p, "\tsize=%d\n", colorop->size); 743 + drm_printf(p, "\tinterpolation=%s\n", 744 + drm_get_colorop_lut3d_interpolation_name(colorop->lut3d_interpolation)); 745 + drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0); 746 + break; 747 + default: 748 + break; 749 + } 750 + 751 + drm_printf(p, "\tnext=%d\n", colorop->next ? colorop->next->base.id : 0); 752 + } 753 + 783 754 static void drm_atomic_plane_print_state(struct drm_printer *p, 784 755 const struct drm_plane_state *state) 785 756 { ··· 841 732 drm_printf(p, "\tcolor-range=%s\n", 842 733 drm_get_color_range_name(state->color_range)); 843 734 drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); 844 - 735 + drm_printf(p, "\tcolor-pipeline=%d\n", 736 + state->color_pipeline ? 
state->color_pipeline->base.id : 0); 845 737 if (plane->funcs->atomic_print_state) 846 738 plane->funcs->atomic_print_state(p, state); 847 739 } ··· 1556 1446 EXPORT_SYMBOL(drm_atomic_add_affected_planes); 1557 1447 1558 1448 /** 1449 + * drm_atomic_add_affected_colorops - add colorops for plane 1450 + * @state: atomic state 1451 + * @plane: DRM plane 1452 + * 1453 + * This function walks the current configuration and adds all colorops 1454 + * currently used by @plane to the atomic configuration @state. This is useful 1455 + * when an atomic commit also needs to check all currently enabled colorop on 1456 + * @plane, e.g. when changing the mode. It's also useful when re-enabling a plane 1457 + * to avoid special code to force-enable all colorops. 1458 + * 1459 + * Since acquiring a colorop state will always also acquire the w/w mutex of the 1460 + * current plane for that colorop (if there is any) adding all the colorop states for 1461 + * a plane will not reduce parallelism of atomic updates. 1462 + * 1463 + * Returns: 1464 + * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK 1465 + * then the w/w mutex code has detected a deadlock and the entire atomic 1466 + * sequence must be restarted. All other errors are fatal. 
1467 + */ 1468 + int 1469 + drm_atomic_add_affected_colorops(struct drm_atomic_state *state, 1470 + struct drm_plane *plane) 1471 + { 1472 + struct drm_colorop *colorop; 1473 + struct drm_colorop_state *colorop_state; 1474 + 1475 + WARN_ON(!drm_atomic_get_new_plane_state(state, plane)); 1476 + 1477 + drm_dbg_atomic(plane->dev, 1478 + "Adding all current colorops for [PLANE:%d:%s] to %p\n", 1479 + plane->base.id, plane->name, state); 1480 + 1481 + drm_for_each_colorop(colorop, plane->dev) { 1482 + if (colorop->plane != plane) 1483 + continue; 1484 + 1485 + colorop_state = drm_atomic_get_colorop_state(state, colorop); 1486 + if (IS_ERR(colorop_state)) 1487 + return PTR_ERR(colorop_state); 1488 + } 1489 + 1490 + return 0; 1491 + } 1492 + EXPORT_SYMBOL(drm_atomic_add_affected_colorops); 1493 + 1494 + /** 1559 1495 * drm_atomic_check_only - check whether a given config would work 1560 1496 * @state: atomic configuration to check 1561 1497 * ··· 1999 1843 bool take_locks) 2000 1844 { 2001 1845 struct drm_mode_config *config = &dev->mode_config; 1846 + struct drm_colorop *colorop; 2002 1847 struct drm_plane *plane; 2003 1848 struct drm_crtc *crtc; 2004 1849 struct drm_connector *connector; ··· 2008 1851 2009 1852 if (!drm_drv_uses_atomic_modeset(dev)) 2010 1853 return; 1854 + 1855 + list_for_each_entry(colorop, &config->colorop_list, head) { 1856 + if (take_locks) 1857 + drm_modeset_lock(&colorop->plane->mutex, NULL); 1858 + drm_atomic_colorop_print_state(p, colorop->state); 1859 + if (take_locks) 1860 + drm_modeset_unlock(&colorop->plane->mutex); 1861 + } 2011 1862 2012 1863 list_for_each_entry(plane, &config->plane_list, head) { 2013 1864 if (take_locks)
+12
drivers/gpu/drm/drm_atomic_helper.c
··· 3184 3184 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 3185 3185 struct drm_plane *plane; 3186 3186 struct drm_plane_state *old_plane_state, *new_plane_state; 3187 + struct drm_colorop *colorop; 3188 + struct drm_colorop_state *old_colorop_state, *new_colorop_state; 3187 3189 struct drm_crtc_commit *commit; 3188 3190 struct drm_private_obj *obj; 3189 3191 struct drm_private_state *old_obj_state, *new_obj_state; ··· 3261 3259 3262 3260 new_crtc_state->commit->event = NULL; 3263 3261 } 3262 + } 3263 + 3264 + for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) { 3265 + WARN_ON(colorop->state != old_colorop_state); 3266 + 3267 + old_colorop_state->state = state; 3268 + new_colorop_state->state = NULL; 3269 + 3270 + state->colorops[i].state = old_colorop_state; 3271 + colorop->state = new_colorop_state; 3264 3272 } 3265 3273 3266 3274 drm_panic_lock(state->dev, flags);
+5
drivers/gpu/drm/drm_atomic_state_helper.c
··· 268 268 plane_state->color_range = val; 269 269 } 270 270 271 + if (plane->color_pipeline_property) { 272 + /* default is always NULL, i.e., bypass */ 273 + plane_state->color_pipeline = NULL; 274 + } 275 + 271 276 if (plane->zpos_property) { 272 277 if (!drm_object_property_get_default_value(&plane->base, 273 278 plane->zpos_property,
+156
drivers/gpu/drm/drm_atomic_uapi.c
··· 35 35 #include <drm/drm_drv.h> 36 36 #include <drm/drm_writeback.h> 37 37 #include <drm/drm_vblank.h> 38 + #include <drm/drm_colorop.h> 38 39 39 40 #include <linux/export.h> 40 41 #include <linux/dma-fence.h> ··· 257 256 drm_framebuffer_assign(&plane_state->fb, fb); 258 257 } 259 258 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); 259 + 260 + /** 261 + * drm_atomic_set_colorop_for_plane - set colorop for plane 262 + * @plane_state: atomic state object for the plane 263 + * @colorop: colorop to use for the plane 264 + * 265 + * Helper function to select the color pipeline on a plane by setting 266 + * it to the first drm_colorop element of the pipeline. 267 + */ 268 + void 269 + drm_atomic_set_colorop_for_plane(struct drm_plane_state *plane_state, 270 + struct drm_colorop *colorop) 271 + { 272 + struct drm_plane *plane = plane_state->plane; 273 + 274 + if (colorop) 275 + drm_dbg_atomic(plane->dev, 276 + "Set [COLOROP:%d] for [PLANE:%d:%s] state %p\n", 277 + colorop->base.id, plane->base.id, plane->name, 278 + plane_state); 279 + else 280 + drm_dbg_atomic(plane->dev, 281 + "Set [NOCOLOROP] for [PLANE:%d:%s] state %p\n", 282 + plane->base.id, plane->name, plane_state); 283 + 284 + plane_state->color_pipeline = colorop; 285 + } 286 + EXPORT_SYMBOL(drm_atomic_set_colorop_for_plane); 260 287 261 288 /** 262 289 * drm_atomic_set_crtc_for_connector - set CRTC for connector ··· 573 544 state->color_encoding = val; 574 545 } else if (property == plane->color_range_property) { 575 546 state->color_range = val; 547 + } else if (property == plane->color_pipeline_property) { 548 + /* find DRM colorop object */ 549 + struct drm_colorop *colorop = NULL; 550 + 551 + colorop = drm_colorop_find(dev, file_priv, val); 552 + 553 + if (val && !colorop) 554 + return -EACCES; 555 + 556 + drm_atomic_set_colorop_for_plane(state, colorop); 576 557 } else if (property == config->prop_fb_damage_clips) { 577 558 ret = drm_property_replace_blob_from_id(dev, 578 559 &state->fb_damage_clips, ··· 
665 626 *val = state->color_encoding; 666 627 } else if (property == plane->color_range_property) { 667 628 *val = state->color_range; 629 + } else if (property == plane->color_pipeline_property) { 630 + *val = (state->color_pipeline) ? state->color_pipeline->base.id : 0; 668 631 } else if (property == config->prop_fb_damage_clips) { 669 632 *val = (state->fb_damage_clips) ? 670 633 state->fb_damage_clips->base.id : 0; ··· 685 644 property->base.id, property->name); 686 645 return -EINVAL; 687 646 } 647 + 648 + return 0; 649 + } 650 + 651 + static int drm_atomic_color_set_data_property(struct drm_colorop *colorop, 652 + struct drm_colorop_state *state, 653 + struct drm_property *property, 654 + uint64_t val) 655 + { 656 + ssize_t elem_size = -1; 657 + ssize_t size = -1; 658 + bool replaced = false; 659 + 660 + switch (colorop->type) { 661 + case DRM_COLOROP_1D_LUT: 662 + size = colorop->size * sizeof(struct drm_color_lut32); 663 + break; 664 + case DRM_COLOROP_CTM_3X4: 665 + size = sizeof(struct drm_color_ctm_3x4); 666 + break; 667 + case DRM_COLOROP_3D_LUT: 668 + size = colorop->size * colorop->size * colorop->size * 669 + sizeof(struct drm_color_lut32); 670 + break; 671 + default: 672 + /* should never get here */ 673 + return -EINVAL; 674 + } 675 + 676 + return drm_property_replace_blob_from_id(colorop->dev, 677 + &state->data, 678 + val, 679 + size, 680 + elem_size, 681 + &replaced); 682 + } 683 + 684 + static int drm_atomic_colorop_set_property(struct drm_colorop *colorop, 685 + struct drm_colorop_state *state, 686 + struct drm_file *file_priv, 687 + struct drm_property *property, 688 + uint64_t val) 689 + { 690 + if (property == colorop->bypass_property) { 691 + state->bypass = val; 692 + } else if (property == colorop->lut1d_interpolation_property) { 693 + colorop->lut1d_interpolation = val; 694 + } else if (property == colorop->curve_1d_type_property) { 695 + state->curve_1d_type = val; 696 + } else if (property == colorop->multiplier_property) { 697 + 
state->multiplier = val; 698 + } else if (property == colorop->lut3d_interpolation_property) { 699 + colorop->lut3d_interpolation = val; 700 + } else if (property == colorop->data_property) { 701 + return drm_atomic_color_set_data_property(colorop, state, 702 + property, val); 703 + } else { 704 + drm_dbg_atomic(colorop->dev, 705 + "[COLOROP:%d:%d] unknown property [PROP:%d:%s]\n", 706 + colorop->base.id, colorop->type, 707 + property->base.id, property->name); 708 + return -EINVAL; 709 + } 710 + 711 + return 0; 712 + } 713 + 714 + static int 715 + drm_atomic_colorop_get_property(struct drm_colorop *colorop, 716 + const struct drm_colorop_state *state, 717 + struct drm_property *property, uint64_t *val) 718 + { 719 + if (property == colorop->type_property) 720 + *val = colorop->type; 721 + else if (property == colorop->bypass_property) 722 + *val = state->bypass; 723 + else if (property == colorop->lut1d_interpolation_property) 724 + *val = colorop->lut1d_interpolation; 725 + else if (property == colorop->curve_1d_type_property) 726 + *val = state->curve_1d_type; 727 + else if (property == colorop->multiplier_property) 728 + *val = state->multiplier; 729 + else if (property == colorop->size_property) 730 + *val = colorop->size; 731 + else if (property == colorop->lut3d_interpolation_property) 732 + *val = colorop->lut3d_interpolation; 733 + else if (property == colorop->data_property) 734 + *val = (state->data) ? 
state->data->base.id : 0; 735 + else 736 + return -EINVAL; 688 737 689 738 return 0; 690 739 } ··· 1045 914 plane->state, property, val); 1046 915 break; 1047 916 } 917 + case DRM_MODE_OBJECT_COLOROP: { 918 + struct drm_colorop *colorop = obj_to_colorop(obj); 919 + 920 + if (colorop->plane) 921 + WARN_ON(!drm_modeset_is_locked(&colorop->plane->mutex)); 922 + 923 + ret = drm_atomic_colorop_get_property(colorop, colorop->state, property, val); 924 + break; 925 + } 1048 926 default: 1049 927 drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj->id); 1050 928 ret = -EINVAL; ··· 1251 1111 ret = drm_atomic_plane_set_property(plane, 1252 1112 plane_state, file_priv, 1253 1113 prop, prop_value); 1114 + 1115 + break; 1116 + } 1117 + case DRM_MODE_OBJECT_COLOROP: { 1118 + struct drm_colorop *colorop = obj_to_colorop(obj); 1119 + struct drm_colorop_state *colorop_state; 1120 + 1121 + colorop_state = drm_atomic_get_colorop_state(state, colorop); 1122 + if (IS_ERR(colorop_state)) { 1123 + ret = PTR_ERR(colorop_state); 1124 + break; 1125 + } 1126 + 1127 + ret = drm_atomic_colorop_set_property(colorop, colorop_state, 1128 + file_priv, prop, prop_value); 1254 1129 break; 1255 1130 } 1256 1131 default: ··· 1605 1450 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 1606 1451 state->acquire_ctx = &ctx; 1607 1452 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); 1453 + state->plane_color_pipeline = file_priv->plane_color_pipeline; 1608 1454 1609 1455 retry: 1610 1456 copied_objs = 0;
+1
drivers/gpu/drm/drm_client.c
··· 11 11 #include <linux/slab.h> 12 12 13 13 #include <drm/drm_client.h> 14 + #include <drm/drm_client_event.h> 14 15 #include <drm/drm_device.h> 15 16 #include <drm/drm_drv.h> 16 17 #include <drm/drm_file.h>
+2 -2
drivers/gpu/drm/drm_client_event.c
··· 102 102 } 103 103 EXPORT_SYMBOL(drm_client_dev_hotplug); 104 104 105 - void drm_client_dev_restore(struct drm_device *dev) 105 + void drm_client_dev_restore(struct drm_device *dev, bool force) 106 106 { 107 107 struct drm_client_dev *client; 108 108 int ret; ··· 115 115 if (!client->funcs || !client->funcs->restore) 116 116 continue; 117 117 118 - ret = client->funcs->restore(client); 118 + ret = client->funcs->restore(client, force); 119 119 drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); 120 120 if (!ret) /* The first one to return zero gets the privilege to restore */ 121 121 break;
+65
drivers/gpu/drm/drm_client_sysrq.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 or MIT 2 + 3 + #include <linux/sysrq.h> 4 + 5 + #include <drm/drm_client_event.h> 6 + #include <drm/drm_device.h> 7 + #include <drm/drm_print.h> 8 + 9 + #include "drm_internal.h" 10 + 11 + #ifdef CONFIG_MAGIC_SYSRQ 12 + static LIST_HEAD(drm_client_sysrq_dev_list); 13 + static DEFINE_MUTEX(drm_client_sysrq_dev_lock); 14 + 15 + /* emergency restore, don't bother with error reporting */ 16 + static void drm_client_sysrq_restore_work_fn(struct work_struct *ignored) 17 + { 18 + struct drm_device *dev; 19 + 20 + guard(mutex)(&drm_client_sysrq_dev_lock); 21 + 22 + list_for_each_entry(dev, &drm_client_sysrq_dev_list, client_sysrq_list) { 23 + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 24 + continue; 25 + 26 + drm_client_dev_restore(dev, true); 27 + } 28 + } 29 + 30 + static DECLARE_WORK(drm_client_sysrq_restore_work, drm_client_sysrq_restore_work_fn); 31 + 32 + static void drm_client_sysrq_restore_handler(u8 ignored) 33 + { 34 + schedule_work(&drm_client_sysrq_restore_work); 35 + } 36 + 37 + static const struct sysrq_key_op drm_client_sysrq_restore_op = { 38 + .handler = drm_client_sysrq_restore_handler, 39 + .help_msg = "force-fb(v)", 40 + .action_msg = "Restore framebuffer console", 41 + }; 42 + 43 + void drm_client_sysrq_register(struct drm_device *dev) 44 + { 45 + guard(mutex)(&drm_client_sysrq_dev_lock); 46 + 47 + if (list_empty(&drm_client_sysrq_dev_list)) 48 + register_sysrq_key('v', &drm_client_sysrq_restore_op); 49 + 50 + list_add(&dev->client_sysrq_list, &drm_client_sysrq_dev_list); 51 + } 52 + 53 + void drm_client_sysrq_unregister(struct drm_device *dev) 54 + { 55 + guard(mutex)(&drm_client_sysrq_dev_lock); 56 + 57 + /* remove device from global restore list */ 58 + if (!drm_WARN_ON(dev, list_empty(&dev->client_sysrq_list))) 59 + list_del(&dev->client_sysrq_list); 60 + 61 + /* no devices left; unregister key */ 62 + if (list_empty(&drm_client_sysrq_dev_list)) 63 + unregister_sysrq_key('v', 
&drm_client_sysrq_restore_op); 64 + } 65 + #endif
+43
drivers/gpu/drm/drm_color_mgmt.c
··· 874 874 fill_palette_8(crtc, i, set_palette); 875 875 } 876 876 EXPORT_SYMBOL(drm_crtc_fill_palette_8); 877 + 878 + /** 879 + * drm_color_lut32_check - check validity of extended lookup table 880 + * @lut: property blob containing extended LUT to check 881 + * @tests: bitmask of tests to run 882 + * 883 + * Helper to check whether a userspace-provided extended lookup table is valid and 884 + * satisfies hardware requirements. Drivers pass a bitmask indicating which of 885 + * the tests in &drm_color_lut_tests should be performed. 886 + * 887 + * Returns 0 on success, -EINVAL on failure. 888 + */ 889 + int drm_color_lut32_check(const struct drm_property_blob *lut, u32 tests) 890 + { 891 + const struct drm_color_lut32 *entry; 892 + int i; 893 + 894 + if (!lut || !tests) 895 + return 0; 896 + 897 + entry = lut->data; 898 + for (i = 0; i < drm_color_lut32_size(lut); i++) { 899 + if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) { 900 + if (entry[i].red != entry[i].blue || 901 + entry[i].red != entry[i].green) { 902 + DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n"); 903 + return -EINVAL; 904 + } 905 + } 906 + 907 + if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) { 908 + if (entry[i].red < entry[i - 1].red || 909 + entry[i].green < entry[i - 1].green || 910 + entry[i].blue < entry[i - 1].blue) { 911 + DRM_DEBUG_KMS("LUT entries must never decrease.\n"); 912 + return -EINVAL; 913 + } 914 + } 915 + } 916 + 917 + return 0; 918 + } 919 + EXPORT_SYMBOL(drm_color_lut32_check);
+599
drivers/gpu/drm/drm_colorop.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 22 + * 23 + * Authors: AMD 24 + * 25 + */ 26 + 27 + #include <drm/drm_colorop.h> 28 + #include <drm/drm_print.h> 29 + #include <drm/drm_drv.h> 30 + #include <drm/drm_plane.h> 31 + 32 + #include "drm_crtc_internal.h" 33 + 34 + /** 35 + * DOC: overview 36 + * 37 + * When userspace signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE it 38 + * should use the COLOR_PIPELINE plane property and associated colorops 39 + * for any color operation on the &drm_plane. Setting of all old color 40 + * properties, such as COLOR_ENCODING and COLOR_RANGE, will be rejected 41 + * and the values of the properties will be ignored. 
42 + * 43 + * Colorops are only advertised and valid for atomic drivers and atomic 44 + * userspace that signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE 45 + * client cap. 46 + * 47 + * A colorop represents a single color operation. Colorops are chained 48 + * via the NEXT property and make up color pipelines. Color pipelines 49 + * are advertised and selected via the COLOR_PIPELINE &drm_plane 50 + * property. 51 + * 52 + * A colorop will be of a certain type, advertised by the read-only TYPE 53 + * property. Each type of colorop will advertise a different set of 54 + * properties and is programmed in a different manner. Types can be 55 + * enumerated 1D curves, 1D LUTs, 3D LUTs, matrices, etc. See the 56 + * &drm_colorop_type documentation for information on each type. 57 + * 58 + * If a colorop advertises the BYPASS property it can be bypassed. 59 + * 60 + * Information about colorop and color pipeline design decisions can be 61 + * found at rfc/color_pipeline.rst, but note that this document will 62 + * grow stale over time. 
63 + */ 64 + 65 + static const struct drm_prop_enum_list drm_colorop_type_enum_list[] = { 66 + { DRM_COLOROP_1D_CURVE, "1D Curve" }, 67 + { DRM_COLOROP_1D_LUT, "1D LUT" }, 68 + { DRM_COLOROP_CTM_3X4, "3x4 Matrix"}, 69 + { DRM_COLOROP_MULTIPLIER, "Multiplier"}, 70 + { DRM_COLOROP_3D_LUT, "3D LUT"}, 71 + }; 72 + 73 + static const char * const colorop_curve_1d_type_names[] = { 74 + [DRM_COLOROP_1D_CURVE_SRGB_EOTF] = "sRGB EOTF", 75 + [DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF] = "sRGB Inverse EOTF", 76 + [DRM_COLOROP_1D_CURVE_PQ_125_EOTF] = "PQ 125 EOTF", 77 + [DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF] = "PQ 125 Inverse EOTF", 78 + [DRM_COLOROP_1D_CURVE_BT2020_INV_OETF] = "BT.2020 Inverse OETF", 79 + [DRM_COLOROP_1D_CURVE_BT2020_OETF] = "BT.2020 OETF", 80 + [DRM_COLOROP_1D_CURVE_GAMMA22] = "Gamma 2.2", 81 + [DRM_COLOROP_1D_CURVE_GAMMA22_INV] = "Gamma 2.2 Inverse", 82 + }; 83 + 84 + static const struct drm_prop_enum_list drm_colorop_lut1d_interpolation_list[] = { 85 + { DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, "Linear" }, 86 + }; 87 + 88 + 89 + static const struct drm_prop_enum_list drm_colorop_lut3d_interpolation_list[] = { 90 + { DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, "Tetrahedral" }, 91 + }; 92 + 93 + /* Init Helpers */ 94 + 95 + static int drm_plane_colorop_init(struct drm_device *dev, struct drm_colorop *colorop, 96 + struct drm_plane *plane, enum drm_colorop_type type, 97 + uint32_t flags) 98 + { 99 + struct drm_mode_config *config = &dev->mode_config; 100 + struct drm_property *prop; 101 + int ret = 0; 102 + 103 + ret = drm_mode_object_add(dev, &colorop->base, DRM_MODE_OBJECT_COLOROP); 104 + if (ret) 105 + return ret; 106 + 107 + colorop->base.properties = &colorop->properties; 108 + colorop->dev = dev; 109 + colorop->type = type; 110 + colorop->plane = plane; 111 + colorop->next = NULL; 112 + 113 + list_add_tail(&colorop->head, &config->colorop_list); 114 + colorop->index = config->num_colorop++; 115 + 116 + /* add properties */ 117 + 118 + /* type */ 119 + prop = 
drm_property_create_enum(dev, 120 + DRM_MODE_PROP_IMMUTABLE, 121 + "TYPE", drm_colorop_type_enum_list, 122 + ARRAY_SIZE(drm_colorop_type_enum_list)); 123 + 124 + if (!prop) 125 + return -ENOMEM; 126 + 127 + colorop->type_property = prop; 128 + 129 + drm_object_attach_property(&colorop->base, 130 + colorop->type_property, 131 + colorop->type); 132 + 133 + if (flags & DRM_COLOROP_FLAG_ALLOW_BYPASS) { 134 + /* bypass */ 135 + prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC, 136 + "BYPASS"); 137 + if (!prop) 138 + return -ENOMEM; 139 + 140 + colorop->bypass_property = prop; 141 + drm_object_attach_property(&colorop->base, 142 + colorop->bypass_property, 143 + 1); 144 + } 145 + 146 + /* next */ 147 + prop = drm_property_create_object(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, 148 + "NEXT", DRM_MODE_OBJECT_COLOROP); 149 + if (!prop) 150 + return -ENOMEM; 151 + colorop->next_property = prop; 152 + drm_object_attach_property(&colorop->base, 153 + colorop->next_property, 154 + 0); 155 + 156 + return ret; 157 + } 158 + 159 + /** 160 + * drm_colorop_cleanup - Cleanup a drm_colorop object in color_pipeline 161 + * 162 + * @colorop: The drm_colorop object to be cleaned 163 + */ 164 + void drm_colorop_cleanup(struct drm_colorop *colorop) 165 + { 166 + struct drm_device *dev = colorop->dev; 167 + struct drm_mode_config *config = &dev->mode_config; 168 + 169 + list_del(&colorop->head); 170 + config->num_colorop--; 171 + 172 + if (colorop->state && colorop->state->data) { 173 + drm_property_blob_put(colorop->state->data); 174 + colorop->state->data = NULL; 175 + } 176 + 177 + kfree(colorop->state); 178 + } 179 + EXPORT_SYMBOL(drm_colorop_cleanup); 180 + 181 + /** 182 + * drm_colorop_pipeline_destroy - Helper for color pipeline destruction 183 + * 184 + * @dev: - The drm_device containing the drm_planes with the color_pipelines 185 + * 186 + * Provides a default color pipeline destroy handler for drm_device. 
187 + */ 188 + void drm_colorop_pipeline_destroy(struct drm_device *dev) 189 + { 190 + struct drm_mode_config *config = &dev->mode_config; 191 + struct drm_colorop *colorop, *next; 192 + 193 + list_for_each_entry_safe(colorop, next, &config->colorop_list, head) { 194 + drm_colorop_cleanup(colorop); 195 + kfree(colorop); 196 + } 197 + } 198 + EXPORT_SYMBOL(drm_colorop_pipeline_destroy); 199 + 200 + /** 201 + * drm_plane_colorop_curve_1d_init - Initialize a DRM_COLOROP_1D_CURVE 202 + * 203 + * @dev: DRM device 204 + * @colorop: The drm_colorop object to initialize 205 + * @plane: The associated drm_plane 206 + * @supported_tfs: A bitfield of supported drm_plane_colorop_curve_1d_init enum values, 207 + * created using BIT(curve_type) and combined with the OR '|' 208 + * operator. 209 + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. 210 + * @return zero on success, -E value on failure 211 + */ 212 + int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop, 213 + struct drm_plane *plane, u64 supported_tfs, uint32_t flags) 214 + { 215 + struct drm_prop_enum_list enum_list[DRM_COLOROP_1D_CURVE_COUNT]; 216 + int i, len; 217 + 218 + struct drm_property *prop; 219 + int ret; 220 + 221 + if (!supported_tfs) { 222 + drm_err(dev, 223 + "No supported TFs for new 1D curve colorop on [PLANE:%d:%s]\n", 224 + plane->base.id, plane->name); 225 + return -EINVAL; 226 + } 227 + 228 + if ((supported_tfs & -BIT(DRM_COLOROP_1D_CURVE_COUNT)) != 0) { 229 + drm_err(dev, "Unknown TF provided on [PLANE:%d:%s]\n", 230 + plane->base.id, plane->name); 231 + return -EINVAL; 232 + } 233 + 234 + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_CURVE, flags); 235 + if (ret) 236 + return ret; 237 + 238 + len = 0; 239 + for (i = 0; i < DRM_COLOROP_1D_CURVE_COUNT; i++) { 240 + if ((supported_tfs & BIT(i)) == 0) 241 + continue; 242 + 243 + enum_list[len].type = i; 244 + enum_list[len].name = colorop_curve_1d_type_names[i]; 245 + len++; 246 + } 247 
+ 248 + if (WARN_ON(len <= 0)) 249 + return -EINVAL; 250 + 251 + /* initialize 1D curve only attribute */ 252 + prop = drm_property_create_enum(dev, DRM_MODE_PROP_ATOMIC, "CURVE_1D_TYPE", 253 + enum_list, len); 254 + 255 + if (!prop) 256 + return -ENOMEM; 257 + 258 + colorop->curve_1d_type_property = prop; 259 + drm_object_attach_property(&colorop->base, colorop->curve_1d_type_property, 260 + enum_list[0].type); 261 + drm_colorop_reset(colorop); 262 + 263 + return 0; 264 + } 265 + EXPORT_SYMBOL(drm_plane_colorop_curve_1d_init); 266 + 267 + static int drm_colorop_create_data_prop(struct drm_device *dev, struct drm_colorop *colorop) 268 + { 269 + struct drm_property *prop; 270 + 271 + /* data */ 272 + prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, 273 + "DATA", 0); 274 + if (!prop) 275 + return -ENOMEM; 276 + 277 + colorop->data_property = prop; 278 + drm_object_attach_property(&colorop->base, 279 + colorop->data_property, 280 + 0); 281 + 282 + return 0; 283 + } 284 + 285 + /** 286 + * drm_plane_colorop_curve_1d_lut_init - Initialize a DRM_COLOROP_1D_LUT 287 + * 288 + * @dev: DRM device 289 + * @colorop: The drm_colorop object to initialize 290 + * @plane: The associated drm_plane 291 + * @lut_size: LUT size supported by driver 292 + * @interpolation: 1D LUT interpolation type 293 + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. 
294 + * @return zero on success, -E value on failure 295 + */ 296 + int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop, 297 + struct drm_plane *plane, uint32_t lut_size, 298 + enum drm_colorop_lut1d_interpolation_type interpolation, 299 + uint32_t flags) 300 + { 301 + struct drm_property *prop; 302 + int ret; 303 + 304 + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_LUT, flags); 305 + if (ret) 306 + return ret; 307 + 308 + /* initialize 1D LUT only attribute */ 309 + /* LUT size */ 310 + prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, 311 + "SIZE", 0, UINT_MAX); 312 + if (!prop) 313 + return -ENOMEM; 314 + 315 + colorop->size_property = prop; 316 + drm_object_attach_property(&colorop->base, colorop->size_property, lut_size); 317 + colorop->size = lut_size; 318 + 319 + /* interpolation */ 320 + prop = drm_property_create_enum(dev, 0, "LUT1D_INTERPOLATION", 321 + drm_colorop_lut1d_interpolation_list, 322 + ARRAY_SIZE(drm_colorop_lut1d_interpolation_list)); 323 + if (!prop) 324 + return -ENOMEM; 325 + 326 + colorop->lut1d_interpolation_property = prop; 327 + drm_object_attach_property(&colorop->base, prop, interpolation); 328 + colorop->lut1d_interpolation = interpolation; 329 + 330 + /* data */ 331 + ret = drm_colorop_create_data_prop(dev, colorop); 332 + if (ret) 333 + return ret; 334 + 335 + drm_colorop_reset(colorop); 336 + 337 + return 0; 338 + } 339 + EXPORT_SYMBOL(drm_plane_colorop_curve_1d_lut_init); 340 + 341 + int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop, 342 + struct drm_plane *plane, uint32_t flags) 343 + { 344 + int ret; 345 + 346 + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_CTM_3X4, flags); 347 + if (ret) 348 + return ret; 349 + 350 + ret = drm_colorop_create_data_prop(dev, colorop); 351 + if (ret) 352 + return ret; 353 + 354 + drm_colorop_reset(colorop); 355 + 356 + return 0; 357 + } 358 + 
EXPORT_SYMBOL(drm_plane_colorop_ctm_3x4_init); 359 + 360 + /** 361 + * drm_plane_colorop_mult_init - Initialize a DRM_COLOROP_MULTIPLIER 362 + * 363 + * @dev: DRM device 364 + * @colorop: The drm_colorop object to initialize 365 + * @plane: The associated drm_plane 366 + * @flags: bitmask of misc, see DRM_COLOROP_FLAG_* defines. 367 + * @return zero on success, -E value on failure 368 + */ 369 + int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop, 370 + struct drm_plane *plane, uint32_t flags) 371 + { 372 + struct drm_property *prop; 373 + int ret; 374 + 375 + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_MULTIPLIER, flags); 376 + if (ret) 377 + return ret; 378 + 379 + prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "MULTIPLIER", 0, U64_MAX); 380 + if (!prop) 381 + return -ENOMEM; 382 + 383 + colorop->multiplier_property = prop; 384 + drm_object_attach_property(&colorop->base, colorop->multiplier_property, 0); 385 + 386 + drm_colorop_reset(colorop); 387 + 388 + return 0; 389 + } 390 + EXPORT_SYMBOL(drm_plane_colorop_mult_init); 391 + 392 + int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop, 393 + struct drm_plane *plane, 394 + uint32_t lut_size, 395 + enum drm_colorop_lut3d_interpolation_type interpolation, 396 + uint32_t flags) 397 + { 398 + struct drm_property *prop; 399 + int ret; 400 + 401 + ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_3D_LUT, flags); 402 + if (ret) 403 + return ret; 404 + 405 + /* LUT size */ 406 + prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC, 407 + "SIZE", 0, UINT_MAX); 408 + if (!prop) 409 + return -ENOMEM; 410 + 411 + colorop->size_property = prop; 412 + drm_object_attach_property(&colorop->base, colorop->size_property, lut_size); 413 + colorop->size = lut_size; 414 + 415 + /* interpolation */ 416 + prop = drm_property_create_enum(dev, 0, "LUT3D_INTERPOLATION", 417 + 
drm_colorop_lut3d_interpolation_list, 418 + ARRAY_SIZE(drm_colorop_lut3d_interpolation_list)); 419 + if (!prop) 420 + return -ENOMEM; 421 + 422 + colorop->lut3d_interpolation_property = prop; 423 + drm_object_attach_property(&colorop->base, prop, interpolation); 424 + colorop->lut3d_interpolation = interpolation; 425 + 426 + /* data */ 427 + ret = drm_colorop_create_data_prop(dev, colorop); 428 + if (ret) 429 + return ret; 430 + 431 + drm_colorop_reset(colorop); 432 + 433 + return 0; 434 + } 435 + EXPORT_SYMBOL(drm_plane_colorop_3dlut_init); 436 + 437 + static void __drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop, 438 + struct drm_colorop_state *state) 439 + { 440 + memcpy(state, colorop->state, sizeof(*state)); 441 + 442 + if (state->data) 443 + drm_property_blob_get(state->data); 444 + 445 + state->bypass = true; 446 + } 447 + 448 + struct drm_colorop_state * 449 + drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop) 450 + { 451 + struct drm_colorop_state *state; 452 + 453 + if (WARN_ON(!colorop->state)) 454 + return NULL; 455 + 456 + state = kmalloc(sizeof(*state), GFP_KERNEL); 457 + if (state) 458 + __drm_atomic_helper_colorop_duplicate_state(colorop, state); 459 + 460 + return state; 461 + } 462 + 463 + void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop, 464 + struct drm_colorop_state *state) 465 + { 466 + kfree(state); 467 + } 468 + 469 + /** 470 + * __drm_colorop_state_reset - resets colorop state to default values 471 + * @colorop_state: atomic colorop state, must not be NULL 472 + * @colorop: colorop object, must not be NULL 473 + * 474 + * Initializes the newly allocated @colorop_state with default 475 + * values. This is useful for drivers that subclass the CRTC state. 
476 + */ 477 + static void __drm_colorop_state_reset(struct drm_colorop_state *colorop_state, 478 + struct drm_colorop *colorop) 479 + { 480 + u64 val; 481 + 482 + colorop_state->colorop = colorop; 483 + colorop_state->bypass = true; 484 + 485 + if (colorop->curve_1d_type_property) { 486 + drm_object_property_get_default_value(&colorop->base, 487 + colorop->curve_1d_type_property, 488 + &val); 489 + colorop_state->curve_1d_type = val; 490 + } 491 + } 492 + 493 + /** 494 + * __drm_colorop_reset - reset state on colorop 495 + * @colorop: drm colorop 496 + * @colorop_state: colorop state to assign 497 + * 498 + * Initializes the newly allocated @colorop_state and assigns it to 499 + * the &drm_crtc->state pointer of @colorop, usually required when 500 + * initializing the drivers or when called from the &drm_colorop_funcs.reset 501 + * hook. 502 + * 503 + * This is useful for drivers that subclass the colorop state. 504 + */ 505 + static void __drm_colorop_reset(struct drm_colorop *colorop, 506 + struct drm_colorop_state *colorop_state) 507 + { 508 + if (colorop_state) 509 + __drm_colorop_state_reset(colorop_state, colorop); 510 + 511 + colorop->state = colorop_state; 512 + } 513 + 514 + void drm_colorop_reset(struct drm_colorop *colorop) 515 + { 516 + kfree(colorop->state); 517 + colorop->state = kzalloc(sizeof(*colorop->state), GFP_KERNEL); 518 + 519 + if (colorop->state) 520 + __drm_colorop_reset(colorop, colorop->state); 521 + } 522 + 523 + static const char * const colorop_type_name[] = { 524 + [DRM_COLOROP_1D_CURVE] = "1D Curve", 525 + [DRM_COLOROP_1D_LUT] = "1D LUT", 526 + [DRM_COLOROP_CTM_3X4] = "3x4 Matrix", 527 + [DRM_COLOROP_MULTIPLIER] = "Multiplier", 528 + [DRM_COLOROP_3D_LUT] = "3D LUT", 529 + }; 530 + 531 + static const char * const colorop_lu3d_interpolation_name[] = { 532 + [DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL] = "Tetrahedral", 533 + }; 534 + 535 + static const char * const colorop_lut1d_interpolation_name[] = { 536 + 
[DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR] = "Linear", 537 + }; 538 + 539 + const char *drm_get_colorop_type_name(enum drm_colorop_type type) 540 + { 541 + if (WARN_ON(type >= ARRAY_SIZE(colorop_type_name))) 542 + return "unknown"; 543 + 544 + return colorop_type_name[type]; 545 + } 546 + 547 + const char *drm_get_colorop_curve_1d_type_name(enum drm_colorop_curve_1d_type type) 548 + { 549 + if (WARN_ON(type >= ARRAY_SIZE(colorop_curve_1d_type_names))) 550 + return "unknown"; 551 + 552 + return colorop_curve_1d_type_names[type]; 553 + } 554 + 555 + /** 556 + * drm_get_colorop_lut1d_interpolation_name: return a string for interpolation type 557 + * @type: interpolation type to compute name of 558 + * 559 + * In contrast to the other drm_get_*_name functions this one here returns a 560 + * const pointer and hence is threadsafe. 561 + */ 562 + const char *drm_get_colorop_lut1d_interpolation_name(enum drm_colorop_lut1d_interpolation_type type) 563 + { 564 + if (WARN_ON(type >= ARRAY_SIZE(colorop_lut1d_interpolation_name))) 565 + return "unknown"; 566 + 567 + return colorop_lut1d_interpolation_name[type]; 568 + } 569 + 570 + /** 571 + * drm_get_colorop_lut3d_interpolation_name - return a string for interpolation type 572 + * @type: interpolation type to compute name of 573 + * 574 + * In contrast to the other drm_get_*_name functions this one here returns a 575 + * const pointer and hence is threadsafe. 
576 + */ 577 + const char *drm_get_colorop_lut3d_interpolation_name(enum drm_colorop_lut3d_interpolation_type type) 578 + { 579 + if (WARN_ON(type >= ARRAY_SIZE(colorop_lu3d_interpolation_name))) 580 + return "unknown"; 581 + 582 + return colorop_lu3d_interpolation_name[type]; 583 + } 584 + 585 + /** 586 + * drm_colorop_set_next_property - sets the next pointer 587 + * @colorop: drm colorop 588 + * @next: next colorop 589 + * 590 + * Should be used when constructing the color pipeline 591 + */ 592 + void drm_colorop_set_next_property(struct drm_colorop *colorop, struct drm_colorop *next) 593 + { 594 + drm_object_property_set_value(&colorop->base, 595 + colorop->next_property, 596 + next ? next->base.id : 0); 597 + colorop->next = next; 598 + } 599 + EXPORT_SYMBOL(drm_colorop_set_next_property);
+1
drivers/gpu/drm/drm_connector.c
··· 3439 3439 * properties reflect the latest status. 3440 3440 */ 3441 3441 ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, 3442 + file_priv->plane_color_pipeline, 3442 3443 (uint32_t __user *)(unsigned long)(out_resp->props_ptr), 3443 3444 (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), 3444 3445 &out_resp->count_props);
+1
drivers/gpu/drm/drm_crtc_internal.h
··· 163 163 void drm_mode_object_unregister(struct drm_device *dev, 164 164 struct drm_mode_object *object); 165 165 int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, 166 + bool plane_color_pipeline, 166 167 uint32_t __user *prop_ptr, 167 168 uint64_t __user *prop_values, 168 169 uint32_t *arg_count_props);
+3
drivers/gpu/drm/drm_drv.c
··· 733 733 INIT_LIST_HEAD(&dev->filelist); 734 734 INIT_LIST_HEAD(&dev->filelist_internal); 735 735 INIT_LIST_HEAD(&dev->clientlist); 736 + INIT_LIST_HEAD(&dev->client_sysrq_list); 736 737 INIT_LIST_HEAD(&dev->vblank_event_list); 737 738 738 739 spin_lock_init(&dev->event_lock); ··· 1101 1100 goto err_unload; 1102 1101 } 1103 1102 drm_panic_register(dev); 1103 + drm_client_sysrq_register(dev); 1104 1104 1105 1105 DRM_INFO("Initialized %s %d.%d.%d for %s on minor %d\n", 1106 1106 driver->name, driver->major, driver->minor, ··· 1146 1144 { 1147 1145 dev->registered = false; 1148 1146 1147 + drm_client_sysrq_unregister(dev); 1149 1148 drm_panic_unregister(dev); 1150 1149 1151 1150 drm_client_dev_unregister(dev);
+19 -89
drivers/gpu/drm/drm_fb_helper.c
··· 32 32 #include <linux/console.h> 33 33 #include <linux/export.h> 34 34 #include <linux/pci.h> 35 - #include <linux/sysrq.h> 36 35 #include <linux/vga_switcheroo.h> 37 36 38 37 #include <drm/drm_atomic.h> ··· 254 255 /** 255 256 * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration 256 257 * @fb_helper: driver-allocated fbdev helper, can be NULL 258 + * @force: ignore present DRM master 257 259 * 258 260 * This helper should be called from fbdev emulation's &drm_client_funcs.restore 259 261 * callback. It ensures that the user isn't greeted with a black screen when the ··· 263 263 * Returns: 264 264 * 0 on success, or a negative errno code otherwise. 265 265 */ 266 - int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) 266 + int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, bool force) 267 267 { 268 - return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false); 268 + return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); 269 269 } 270 270 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); 271 - 272 - #ifdef CONFIG_MAGIC_SYSRQ 273 - /* emergency restore, don't bother with error reporting */ 274 - static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) 275 - { 276 - struct drm_fb_helper *helper; 277 - 278 - mutex_lock(&kernel_fb_helper_lock); 279 - list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { 280 - struct drm_device *dev = helper->dev; 281 - 282 - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 283 - continue; 284 - 285 - mutex_lock(&helper->lock); 286 - drm_client_modeset_commit_locked(&helper->client); 287 - mutex_unlock(&helper->lock); 288 - } 289 - mutex_unlock(&kernel_fb_helper_lock); 290 - } 291 - 292 - static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); 293 - 294 - static void drm_fb_helper_sysrq(u8 dummy1) 295 - { 296 - schedule_work(&drm_fb_helper_restore_work); 297 - } 298 - 299 
- static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { 300 - .handler = drm_fb_helper_sysrq, 301 - .help_msg = "force-fb(v)", 302 - .action_msg = "Restore framebuffer console", 303 - }; 304 - #else 305 - static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; 306 - #endif 307 271 308 272 static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) 309 273 { ··· 459 495 } 460 496 EXPORT_SYMBOL(drm_fb_helper_init); 461 497 462 - /** 463 - * drm_fb_helper_alloc_info - allocate fb_info and some of its members 464 - * @fb_helper: driver-allocated fbdev helper 465 - * 466 - * A helper to alloc fb_info and the member cmap. Called by the driver 467 - * within the struct &drm_driver.fbdev_probe callback function. Drivers do 468 - * not need to release the allocated fb_info structure themselves, this is 469 - * automatically done when calling drm_fb_helper_fini(). 470 - * 471 - * RETURNS: 472 - * fb_info pointer if things went okay, pointer containing error code 473 - * otherwise 474 - */ 475 - struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) 498 + static struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) 476 499 { 477 500 struct device *dev = fb_helper->dev->dev; 478 501 struct fb_info *info; ··· 486 535 framebuffer_release(info); 487 536 return ERR_PTR(ret); 488 537 } 489 - EXPORT_SYMBOL(drm_fb_helper_alloc_info); 490 538 491 - /** 492 - * drm_fb_helper_release_info - release fb_info and its members 493 - * @fb_helper: driver-allocated fbdev helper 494 - * 495 - * A helper to release fb_info and the member cmap. Drivers do not 496 - * need to release the allocated fb_info structure themselves, this is 497 - * automatically done when calling drm_fb_helper_fini(). 
498 - */ 499 - void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) 539 + static void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) 500 540 { 501 541 struct fb_info *info = fb_helper->info; 502 542 ··· 500 558 fb_dealloc_cmap(&info->cmap); 501 559 framebuffer_release(info); 502 560 } 503 - EXPORT_SYMBOL(drm_fb_helper_release_info); 504 561 505 562 /** 506 563 * drm_fb_helper_unregister_info - unregister fb_info framebuffer device ··· 542 601 drm_fb_helper_release_info(fb_helper); 543 602 544 603 mutex_lock(&kernel_fb_helper_lock); 545 - if (!list_empty(&fb_helper->kernel_fb_list)) { 604 + if (!list_empty(&fb_helper->kernel_fb_list)) 546 605 list_del(&fb_helper->kernel_fb_list); 547 - if (list_empty(&kernel_fb_helper_list)) 548 - unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 549 - } 550 606 mutex_unlock(&kernel_fb_helper_lock); 551 607 552 608 if (!fb_helper->client.funcs) ··· 1266 1328 * the KDSET IOCTL with KD_TEXT, and only after that drops the master 1267 1329 * status when exiting. 1268 1330 * 1269 - * In the past this was caught by drm_fb_helper_lastclose(), but on 1270 - * modern systems where logind always keeps a drm fd open to orchestrate 1271 - * the vt switching, this doesn't work. 1331 + * In the past this was caught by drm_fb_helper_restore_fbdev_mode_unlocked(), 1332 + * but on modern systems where logind always keeps a drm fd open to 1333 + * orchestrate the vt switching, this doesn't work. 1272 1334 * 1273 1335 * To not break the userspace ABI we have this special case here, which 1274 1336 * is only used for the above case. 
Everything else uses the normal ··· 1747 1809 height = dev->mode_config.max_height; 1748 1810 1749 1811 drm_client_modeset_probe(&fb_helper->client, width, height); 1812 + 1813 + info = drm_fb_helper_alloc_info(fb_helper); 1814 + if (IS_ERR(info)) 1815 + return PTR_ERR(info); 1816 + 1750 1817 ret = drm_fb_helper_single_fb_probe(fb_helper); 1751 1818 if (ret < 0) { 1752 1819 if (ret == -EAGAIN) { ··· 1760 1817 } 1761 1818 mutex_unlock(&fb_helper->lock); 1762 1819 1763 - return ret; 1820 + goto err_drm_fb_helper_release_info; 1764 1821 } 1765 1822 drm_setup_crtcs_fb(fb_helper); 1766 1823 1767 1824 fb_helper->deferred_setup = false; 1768 1825 1769 - info = fb_helper->info; 1770 1826 info->var.pixclock = 0; 1771 1827 1772 1828 /* Need to drop locks to avoid recursive deadlock in ··· 1781 1839 info->node, info->fix.id); 1782 1840 1783 1841 mutex_lock(&kernel_fb_helper_lock); 1784 - if (list_empty(&kernel_fb_helper_list)) 1785 - register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 1786 - 1787 1842 list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); 1788 1843 mutex_unlock(&kernel_fb_helper_lock); 1789 1844 1790 1845 return 0; 1846 + 1847 + err_drm_fb_helper_release_info: 1848 + drm_fb_helper_release_info(fb_helper); 1849 + return ret; 1791 1850 } 1792 1851 1793 1852 /** ··· 1898 1955 return 0; 1899 1956 } 1900 1957 EXPORT_SYMBOL(drm_fb_helper_hotplug_event); 1901 - 1902 - /** 1903 - * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation 1904 - * @dev: DRM device 1905 - * 1906 - * This function is obsolete. Call drm_fb_helper_restore_fbdev_mode_unlocked() 1907 - * instead. 1908 - */ 1909 - void drm_fb_helper_lastclose(struct drm_device *dev) 1910 - { 1911 - drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper); 1912 - } 1913 - EXPORT_SYMBOL(drm_fb_helper_lastclose);
+2 -10
drivers/gpu/drm/drm_fbdev_dma.c
··· 269 269 { 270 270 struct drm_client_dev *client = &fb_helper->client; 271 271 struct drm_device *dev = fb_helper->dev; 272 + struct fb_info *info = fb_helper->info; 272 273 struct drm_client_buffer *buffer; 273 274 struct drm_framebuffer *fb; 274 - struct fb_info *info; 275 275 u32 format; 276 276 struct iosys_map map; 277 277 int ret; ··· 301 301 fb_helper->buffer = buffer; 302 302 fb_helper->fb = fb; 303 303 304 - info = drm_fb_helper_alloc_info(fb_helper); 305 - if (IS_ERR(info)) { 306 - ret = PTR_ERR(info); 307 - goto err_drm_client_buffer_vunmap; 308 - } 309 - 310 304 drm_fb_helper_fill_info(info, fb_helper, sizes); 311 305 312 306 if (fb->funcs->dirty) ··· 308 314 else 309 315 ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes); 310 316 if (ret) 311 - goto err_drm_fb_helper_release_info; 317 + goto err_drm_client_buffer_vunmap; 312 318 313 319 return 0; 314 320 315 - err_drm_fb_helper_release_info: 316 - drm_fb_helper_release_info(fb_helper); 317 321 err_drm_client_buffer_vunmap: 318 322 fb_helper->fb = NULL; 319 323 fb_helper->buffer = NULL;
+2 -10
drivers/gpu/drm/drm_fbdev_shmem.c
··· 135 135 { 136 136 struct drm_client_dev *client = &fb_helper->client; 137 137 struct drm_device *dev = fb_helper->dev; 138 + struct fb_info *info = fb_helper->info; 138 139 struct drm_client_buffer *buffer; 139 140 struct drm_gem_shmem_object *shmem; 140 141 struct drm_framebuffer *fb; 141 - struct fb_info *info; 142 142 u32 format; 143 143 struct iosys_map map; 144 144 int ret; ··· 168 168 fb_helper->buffer = buffer; 169 169 fb_helper->fb = fb; 170 170 171 - info = drm_fb_helper_alloc_info(fb_helper); 172 - if (IS_ERR(info)) { 173 - ret = PTR_ERR(info); 174 - goto err_drm_client_buffer_vunmap; 175 - } 176 - 177 171 drm_fb_helper_fill_info(info, fb_helper, sizes); 178 172 179 173 info->fbops = &drm_fbdev_shmem_fb_ops; ··· 188 194 info->fbdefio = &fb_helper->fbdefio; 189 195 ret = fb_deferred_io_init(info); 190 196 if (ret) 191 - goto err_drm_fb_helper_release_info; 197 + goto err_drm_client_buffer_vunmap; 192 198 193 199 return 0; 194 200 195 - err_drm_fb_helper_release_info: 196 - drm_fb_helper_release_info(fb_helper); 197 201 err_drm_client_buffer_vunmap: 198 202 fb_helper->fb = NULL; 199 203 fb_helper->buffer = NULL;
+2 -10
drivers/gpu/drm/drm_fbdev_ttm.c
··· 174 174 { 175 175 struct drm_client_dev *client = &fb_helper->client; 176 176 struct drm_device *dev = fb_helper->dev; 177 + struct fb_info *info = fb_helper->info; 177 178 struct drm_client_buffer *buffer; 178 - struct fb_info *info; 179 179 size_t screen_size; 180 180 void *screen_buffer; 181 181 u32 format; ··· 203 203 goto err_drm_client_buffer_delete; 204 204 } 205 205 206 - info = drm_fb_helper_alloc_info(fb_helper); 207 - if (IS_ERR(info)) { 208 - ret = PTR_ERR(info); 209 - goto err_vfree; 210 - } 211 - 212 206 drm_fb_helper_fill_info(info, fb_helper, sizes); 213 207 214 208 info->fbops = &drm_fbdev_ttm_fb_ops; ··· 219 225 info->fbdefio = &fb_helper->fbdefio; 220 226 ret = fb_deferred_io_init(info); 221 227 if (ret) 222 - goto err_drm_fb_helper_release_info; 228 + goto err_vfree; 223 229 224 230 return 0; 225 231 226 - err_drm_fb_helper_release_info: 227 - drm_fb_helper_release_info(fb_helper); 228 232 err_vfree: 229 233 vfree(screen_buffer); 230 234 err_drm_client_buffer_delete:
+1 -1
drivers/gpu/drm/drm_file.c
··· 405 405 406 406 static void drm_lastclose(struct drm_device *dev) 407 407 { 408 - drm_client_dev_restore(dev); 408 + drm_client_dev_restore(dev, false); 409 409 410 410 if (dev_is_pci(dev->dev)) 411 411 vga_switcheroo_process_delayed_switch();
+3 -13
drivers/gpu/drm/drm_gem.c
··· 783 783 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, 784 784 int count, struct drm_gem_object ***objs_out) 785 785 { 786 - struct drm_device *dev = filp->minor->dev; 787 786 struct drm_gem_object **objs; 788 787 u32 *handles; 789 788 int ret; ··· 797 798 798 799 *objs_out = objs; 799 800 800 - handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL); 801 - if (!handles) { 802 - ret = -ENOMEM; 803 - goto out; 804 - } 805 - 806 - if (copy_from_user(handles, bo_handles, count * sizeof(u32))) { 807 - ret = -EFAULT; 808 - drm_dbg_core(dev, "Failed to copy in GEM handles\n"); 809 - goto out; 810 - } 801 + handles = vmemdup_array_user(bo_handles, count, sizeof(u32)); 802 + if (IS_ERR(handles)) 803 + return PTR_ERR(handles); 811 804 812 805 ret = objects_lookup(filp, handles, count, objs); 813 - out: 814 806 kvfree(handles); 815 807 return ret; 816 808
+11
drivers/gpu/drm/drm_internal.h
··· 56 56 { } 57 57 #endif 58 58 59 + /* drm_client_sysrq.c */ 60 + #if defined(CONFIG_DRM_CLIENT) && defined(CONFIG_MAGIC_SYSRQ) 61 + void drm_client_sysrq_register(struct drm_device *dev); 62 + void drm_client_sysrq_unregister(struct drm_device *dev); 63 + #else 64 + static inline void drm_client_sysrq_register(struct drm_device *dev) 65 + { } 66 + static inline void drm_client_sysrq_unregister(struct drm_device *dev) 67 + { } 68 + #endif 69 + 59 70 /* drm_file.c */ 60 71 extern struct mutex drm_global_mutex; 61 72 bool drm_dev_needs_global_mutex(struct drm_device *dev);
+7
drivers/gpu/drm/drm_ioctl.c
··· 373 373 return -EINVAL; 374 374 file_priv->supports_virtualized_cursor_plane = req->value; 375 375 break; 376 + case DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE: 377 + if (!file_priv->atomic) 378 + return -EINVAL; 379 + if (req->value > 1) 380 + return -EINVAL; 381 + file_priv->plane_color_pipeline = req->value; 382 + break; 376 383 default: 377 384 return -EINVAL; 378 385 }
+7
drivers/gpu/drm/drm_mode_config.c
··· 30 30 #include <drm/drm_managed.h> 31 31 #include <drm/drm_mode_config.h> 32 32 #include <drm/drm_print.h> 33 + #include <drm/drm_colorop.h> 33 34 #include <linux/dma-resv.h> 34 35 35 36 #include "drm_crtc_internal.h" ··· 193 192 void drm_mode_config_reset(struct drm_device *dev) 194 193 { 195 194 struct drm_crtc *crtc; 195 + struct drm_colorop *colorop; 196 196 struct drm_plane *plane; 197 197 struct drm_encoder *encoder; 198 198 struct drm_connector *connector; 199 199 struct drm_connector_list_iter conn_iter; 200 + 201 + drm_for_each_colorop(colorop, dev) 202 + drm_colorop_reset(colorop); 200 203 201 204 drm_for_each_plane(plane, dev) 202 205 if (plane->funcs->reset) ··· 442 437 INIT_LIST_HEAD(&dev->mode_config.property_list); 443 438 INIT_LIST_HEAD(&dev->mode_config.property_blob_list); 444 439 INIT_LIST_HEAD(&dev->mode_config.plane_list); 440 + INIT_LIST_HEAD(&dev->mode_config.colorop_list); 445 441 INIT_LIST_HEAD(&dev->mode_config.privobj_list); 446 442 idr_init_base(&dev->mode_config.object_idr, 1); 447 443 idr_init_base(&dev->mode_config.tile_idr, 1); ··· 464 458 dev->mode_config.num_crtc = 0; 465 459 dev->mode_config.num_encoder = 0; 466 460 dev->mode_config.num_total_plane = 0; 461 + dev->mode_config.num_colorop = 0; 467 462 468 463 if (IS_ENABLED(CONFIG_LOCKDEP)) { 469 464 struct drm_modeset_acquire_ctx modeset_ctx;
+18
drivers/gpu/drm/drm_mode_object.c
··· 28 28 #include <drm/drm_device.h> 29 29 #include <drm/drm_file.h> 30 30 #include <drm/drm_mode_object.h> 31 + #include <drm/drm_plane.h> 31 32 #include <drm/drm_print.h> 32 33 33 34 #include "drm_crtc_internal.h" ··· 387 386 388 387 /* helper for getconnector and getproperties ioctls */ 389 388 int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, 389 + bool plane_color_pipeline, 390 390 uint32_t __user *prop_ptr, 391 391 uint64_t __user *prop_values, 392 392 uint32_t *arg_count_props) ··· 400 398 401 399 if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic) 402 400 continue; 401 + 402 + if (plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) { 403 + struct drm_plane *plane = obj_to_plane(obj); 404 + 405 + if (prop == plane->color_encoding_property || 406 + prop == plane->color_range_property) 407 + continue; 408 + } 409 + 410 + if (!plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) { 411 + struct drm_plane *plane = obj_to_plane(obj); 412 + 413 + if (prop == plane->color_pipeline_property) 414 + continue; 415 + } 403 416 404 417 if (*arg_count_props > count) { 405 418 ret = __drm_object_property_get_value(obj, prop, &val); ··· 474 457 } 475 458 476 459 ret = drm_mode_object_get_properties(obj, file_priv->atomic, 460 + file_priv->plane_color_pipeline, 477 461 (uint32_t __user *)(unsigned long)(arg->props_ptr), 478 462 (uint64_t __user *)(unsigned long)(arg->prop_values_ptr), 479 463 &arg->count_props);
+59
drivers/gpu/drm/drm_plane.c
··· 1820 1820 return 0; 1821 1821 } 1822 1822 EXPORT_SYMBOL(drm_plane_add_size_hints_property); 1823 + 1824 + /** 1825 + * drm_plane_create_color_pipeline_property - create a new color pipeline 1826 + * property 1827 + * 1828 + * @plane: drm plane 1829 + * @pipelines: list of pipelines 1830 + * @num_pipelines: number of pipelines 1831 + * 1832 + * Create the COLOR_PIPELINE plane property to specific color pipelines on 1833 + * the plane. 1834 + * 1835 + * RETURNS: 1836 + * Zero for success or -errno 1837 + */ 1838 + int drm_plane_create_color_pipeline_property(struct drm_plane *plane, 1839 + const struct drm_prop_enum_list *pipelines, 1840 + int num_pipelines) 1841 + { 1842 + struct drm_prop_enum_list *all_pipelines; 1843 + struct drm_property *prop; 1844 + int len = 0; 1845 + int i; 1846 + 1847 + all_pipelines = kcalloc(num_pipelines + 1, 1848 + sizeof(*all_pipelines), 1849 + GFP_KERNEL); 1850 + 1851 + if (!all_pipelines) { 1852 + drm_err(plane->dev, "failed to allocate color pipeline\n"); 1853 + return -ENOMEM; 1854 + } 1855 + 1856 + /* Create default Bypass color pipeline */ 1857 + all_pipelines[len].type = 0; 1858 + all_pipelines[len].name = "Bypass"; 1859 + len++; 1860 + 1861 + /* Add all other color pipelines */ 1862 + for (i = 0; i < num_pipelines; i++, len++) { 1863 + all_pipelines[len].type = pipelines[i].type; 1864 + all_pipelines[len].name = pipelines[i].name; 1865 + } 1866 + 1867 + prop = drm_property_create_enum(plane->dev, DRM_MODE_PROP_ATOMIC, 1868 + "COLOR_PIPELINE", 1869 + all_pipelines, len); 1870 + if (IS_ERR(prop)) { 1871 + kfree(all_pipelines); 1872 + return PTR_ERR(prop); 1873 + } 1874 + 1875 + drm_object_attach_property(&plane->base, prop, 0); 1876 + plane->color_pipeline_property = prop; 1877 + 1878 + kfree(all_pipelines); 1879 + return 0; 1880 + } 1881 + EXPORT_SYMBOL(drm_plane_create_color_pipeline_property);
+1 -8
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
··· 58 58 struct drm_fb_helper_surface_size *sizes, 59 59 struct exynos_drm_gem *exynos_gem) 60 60 { 61 - struct fb_info *fbi; 61 + struct fb_info *fbi = helper->info; 62 62 struct drm_framebuffer *fb = helper->fb; 63 63 unsigned int size = fb->width * fb->height * fb->format->cpp[0]; 64 64 unsigned long offset; 65 - 66 - fbi = drm_fb_helper_alloc_info(helper); 67 - if (IS_ERR(fbi)) { 68 - DRM_DEV_ERROR(to_dma_dev(helper->dev), 69 - "failed to allocate fb info.\n"); 70 - return PTR_ERR(fbi); 71 - } 72 65 73 66 fbi->fbops = &exynos_drm_fb_ops; 74 67
+1 -11
drivers/gpu/drm/gma500/fbdev.c
··· 108 108 struct drm_device *dev = fb_helper->dev; 109 109 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 110 110 struct pci_dev *pdev = to_pci_dev(dev->dev); 111 - struct fb_info *info; 111 + struct fb_info *info = fb_helper->info; 112 112 struct drm_framebuffer *fb; 113 113 struct drm_mode_fb_cmd2 mode_cmd = { }; 114 114 int size; ··· 167 167 fb_helper->funcs = &psb_fbdev_fb_helper_funcs; 168 168 fb_helper->fb = fb; 169 169 170 - info = drm_fb_helper_alloc_info(fb_helper); 171 - if (IS_ERR(info)) { 172 - ret = PTR_ERR(info); 173 - goto err_drm_framebuffer_unregister_private; 174 - } 175 - 176 170 info->fbops = &psb_fbdev_fb_ops; 177 171 178 172 /* Accessed stolen memory directly */ ··· 190 196 191 197 return 0; 192 198 193 - err_drm_framebuffer_unregister_private: 194 - drm_framebuffer_unregister_private(fb); 195 - drm_framebuffer_cleanup(fb); 196 - kfree(fb); 197 199 err_drm_gem_object_put: 198 200 drm_gem_object_put(obj); 199 201 return ret;
+1 -8
drivers/gpu/drm/i915/display/intel_fbdev.c
··· 267 267 struct intel_display *display = to_intel_display(helper->dev); 268 268 struct intel_fbdev *ifbdev = to_intel_fbdev(helper); 269 269 struct intel_framebuffer *fb = ifbdev->fb; 270 + struct fb_info *info = helper->info; 270 271 struct ref_tracker *wakeref; 271 - struct fb_info *info; 272 272 struct i915_vma *vma; 273 273 unsigned long flags = 0; 274 274 bool prealloc = false; ··· 316 316 if (IS_ERR(vma)) { 317 317 ret = PTR_ERR(vma); 318 318 goto out_unlock; 319 - } 320 - 321 - info = drm_fb_helper_alloc_info(helper); 322 - if (IS_ERR(info)) { 323 - drm_err(display->drm, "Failed to allocate fb_info (%pe)\n", info); 324 - ret = PTR_ERR(info); 325 - goto out_unpin; 326 319 } 327 320 328 321 helper->funcs = &intel_fb_helper_funcs;
+1 -8
drivers/gpu/drm/msm/msm_fbdev.c
··· 91 91 { 92 92 struct drm_device *dev = helper->dev; 93 93 struct msm_drm_private *priv = dev->dev_private; 94 + struct fb_info *fbi = helper->info; 94 95 struct drm_framebuffer *fb = NULL; 95 96 struct drm_gem_object *bo; 96 - struct fb_info *fbi = NULL; 97 97 uint64_t paddr; 98 98 uint32_t format; 99 99 int ret, pitch; ··· 123 123 ret = msm_gem_get_and_pin_iova(bo, priv->kms->vm, &paddr); 124 124 if (ret) { 125 125 DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret); 126 - goto fail; 127 - } 128 - 129 - fbi = drm_fb_helper_alloc_info(helper); 130 - if (IS_ERR(fbi)) { 131 - DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n"); 132 - ret = PTR_ERR(fbi); 133 126 goto fail; 134 127 } 135 128
+1 -8
drivers/gpu/drm/omapdrm/omap_fbdev.c
··· 154 154 struct drm_device *dev = helper->dev; 155 155 struct omap_drm_private *priv = dev->dev_private; 156 156 struct omap_fbdev *fbdev = priv->fbdev; 157 + struct fb_info *fbi = helper->info; 157 158 struct drm_framebuffer *fb = NULL; 158 159 union omap_gem_size gsize; 159 - struct fb_info *fbi = NULL; 160 160 struct drm_mode_fb_cmd2 mode_cmd = {0}; 161 161 struct drm_gem_object *bo; 162 162 dma_addr_t dma_addr; ··· 222 222 if (ret) { 223 223 dev_err(dev->dev, "could not pin framebuffer\n"); 224 224 ret = -ENOMEM; 225 - goto fail; 226 - } 227 - 228 - fbi = drm_fb_helper_alloc_info(helper); 229 - if (IS_ERR(fbi)) { 230 - dev_err(dev->dev, "failed to allocate fb info\n"); 231 - ret = PTR_ERR(fbi); 232 225 goto fail; 233 226 } 234 227
+17 -3
drivers/gpu/drm/panel/Kconfig
··· 408 408 (found on the Gumstix Overo Palo35 board). To compile this driver as 409 409 a module, choose M here. 410 410 411 + config DRM_PANEL_LG_LD070WX3 412 + tristate "LG LD070WX3 MIPI DSI panel" 413 + depends on OF 414 + depends on DRM_MIPI_DSI 415 + depends on BACKLIGHT_CLASS_DEVICE 416 + select VIDEOMODE_HELPERS 417 + help 418 + Say Y here if you want to enable support for the LD070WX3 MIPI DSI 419 + panel found in the NVIDIA Tegra Note 7 tablet. 420 + 421 + To compile this driver as a module, choose M here: the module will 422 + be called panel-lg-ld070wx3. 423 + 411 424 config DRM_PANEL_LG_LG4573 412 425 tristate "LG4573 RGB/SPI panel" 413 426 depends on OF && SPI ··· 894 881 DSI protocol with 4 lanes. 895 882 896 883 config DRM_PANEL_SAMSUNG_SOFEF00 897 - tristate "Samsung sofef00/s6e3fc2x01 OnePlus 6/6T DSI cmd mode panels" 884 + tristate "Samsung SOFEF00 DSI panel controller" 898 885 depends on OF 899 886 depends on DRM_MIPI_DSI 900 887 depends on BACKLIGHT_CLASS_DEVICE 901 888 select VIDEOMODE_HELPERS 902 889 help 903 890 Say Y or M here if you want to enable support for the Samsung AMOLED 904 - command mode panels found in the OnePlus 6/6T smartphones. 891 + panel SOFEF00 DDIC and connected panel. 892 + Currently supported panels: 905 893 906 - The panels are 2280x1080@60Hz and 2340x1080@60Hz respectively 894 + Samsung AMS628NW01 (found in OnePlus 6, 1080x2280@60Hz) 907 895 908 896 config DRM_PANEL_SEIKO_43WVF1G 909 897 tristate "Seiko 43WVF1G panel"
+1
drivers/gpu/drm/panel/Makefile
··· 41 41 obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o 42 42 obj-$(CONFIG_DRM_PANEL_LINCOLNTECH_LCD197) += panel-lincolntech-lcd197.o 43 43 obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o 44 + obj-$(CONFIG_DRM_PANEL_LG_LD070WX3) += panel-lg-ld070wx3.o 44 45 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o 45 46 obj-$(CONFIG_DRM_PANEL_LG_SW43408) += panel-lg-sw43408.o 46 47 obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o
+1
drivers/gpu/drm/panel/panel-edp.c
··· 1965 1965 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80_d50, "NV116WHM-N49"), 1966 1966 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"), 1967 1967 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a6a, &delay_200_500_e80, "NV140WUM-N44"), 1968 + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a84, &delay_200_500_e50, "NV133WUM-T01"), 1968 1969 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"), 1969 1970 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"), 1970 1971 EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"),
+225
drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
··· 820 820 ILI9881C_COMMAND_INSTR(0xd3, 0x39), 821 821 }; 822 822 823 + static const struct ili9881c_instr w552946aaa_init[] = { 824 + ILI9881C_SWITCH_PAGE_INSTR(3), 825 + ILI9881C_COMMAND_INSTR(0x01, 0x00), 826 + ILI9881C_COMMAND_INSTR(0x02, 0x00), 827 + ILI9881C_COMMAND_INSTR(0x03, 0x53), 828 + ILI9881C_COMMAND_INSTR(0x04, 0x53), 829 + ILI9881C_COMMAND_INSTR(0x05, 0x13), 830 + ILI9881C_COMMAND_INSTR(0x06, 0x04), 831 + ILI9881C_COMMAND_INSTR(0x07, 0x02), 832 + ILI9881C_COMMAND_INSTR(0x08, 0x02), 833 + ILI9881C_COMMAND_INSTR(0x09, 0x00), 834 + ILI9881C_COMMAND_INSTR(0x0a, 0x00), 835 + ILI9881C_COMMAND_INSTR(0x0b, 0x00), 836 + ILI9881C_COMMAND_INSTR(0x0c, 0x00), 837 + ILI9881C_COMMAND_INSTR(0x0d, 0x00), 838 + ILI9881C_COMMAND_INSTR(0x0e, 0x00), 839 + ILI9881C_COMMAND_INSTR(0x0f, 0x00), 840 + ILI9881C_COMMAND_INSTR(0x10, 0x00), 841 + ILI9881C_COMMAND_INSTR(0x11, 0x00), 842 + ILI9881C_COMMAND_INSTR(0x12, 0x00), 843 + ILI9881C_COMMAND_INSTR(0x13, 0x00), 844 + ILI9881C_COMMAND_INSTR(0x14, 0x00), 845 + ILI9881C_COMMAND_INSTR(0x15, 0x08), 846 + ILI9881C_COMMAND_INSTR(0x16, 0x10), 847 + ILI9881C_COMMAND_INSTR(0x17, 0x00), 848 + ILI9881C_COMMAND_INSTR(0x18, 0x08), 849 + ILI9881C_COMMAND_INSTR(0x19, 0x00), 850 + ILI9881C_COMMAND_INSTR(0x1a, 0x00), 851 + ILI9881C_COMMAND_INSTR(0x1b, 0x00), 852 + ILI9881C_COMMAND_INSTR(0x1c, 0x00), 853 + ILI9881C_COMMAND_INSTR(0x1d, 0x00), 854 + ILI9881C_COMMAND_INSTR(0x1e, 0xc0), 855 + ILI9881C_COMMAND_INSTR(0x1f, 0x80), 856 + ILI9881C_COMMAND_INSTR(0x20, 0x02), 857 + ILI9881C_COMMAND_INSTR(0x21, 0x09), 858 + ILI9881C_COMMAND_INSTR(0x22, 0x00), 859 + ILI9881C_COMMAND_INSTR(0x23, 0x00), 860 + ILI9881C_COMMAND_INSTR(0x24, 0x00), 861 + ILI9881C_COMMAND_INSTR(0x25, 0x00), 862 + ILI9881C_COMMAND_INSTR(0x26, 0x00), 863 + ILI9881C_COMMAND_INSTR(0x27, 0x00), 864 + ILI9881C_COMMAND_INSTR(0x28, 0x55), 865 + ILI9881C_COMMAND_INSTR(0x29, 0x03), 866 + ILI9881C_COMMAND_INSTR(0x2a, 0x00), 867 + ILI9881C_COMMAND_INSTR(0x2b, 0x00), 868 + 
ILI9881C_COMMAND_INSTR(0x2c, 0x00), 869 + ILI9881C_COMMAND_INSTR(0x2d, 0x00), 870 + ILI9881C_COMMAND_INSTR(0x2e, 0x00), 871 + ILI9881C_COMMAND_INSTR(0x2f, 0x00), 872 + ILI9881C_COMMAND_INSTR(0x30, 0x00), 873 + ILI9881C_COMMAND_INSTR(0x31, 0x00), 874 + ILI9881C_COMMAND_INSTR(0x32, 0x00), 875 + ILI9881C_COMMAND_INSTR(0x33, 0x00), 876 + ILI9881C_COMMAND_INSTR(0x34, 0x04), 877 + ILI9881C_COMMAND_INSTR(0x35, 0x05), 878 + ILI9881C_COMMAND_INSTR(0x36, 0x05), 879 + ILI9881C_COMMAND_INSTR(0x37, 0x00), 880 + ILI9881C_COMMAND_INSTR(0x38, 0x3c), 881 + ILI9881C_COMMAND_INSTR(0x39, 0x35), 882 + ILI9881C_COMMAND_INSTR(0x3a, 0x00), 883 + ILI9881C_COMMAND_INSTR(0x3b, 0x40), 884 + ILI9881C_COMMAND_INSTR(0x3c, 0x00), 885 + ILI9881C_COMMAND_INSTR(0x3d, 0x00), 886 + ILI9881C_COMMAND_INSTR(0x3e, 0x00), 887 + ILI9881C_COMMAND_INSTR(0x3f, 0x00), 888 + ILI9881C_COMMAND_INSTR(0x40, 0x00), 889 + ILI9881C_COMMAND_INSTR(0x41, 0x88), 890 + ILI9881C_COMMAND_INSTR(0x42, 0x00), 891 + ILI9881C_COMMAND_INSTR(0x43, 0x00), 892 + ILI9881C_COMMAND_INSTR(0x44, 0x1f), 893 + ILI9881C_COMMAND_INSTR(0x50, 0x01), 894 + ILI9881C_COMMAND_INSTR(0x51, 0x23), 895 + ILI9881C_COMMAND_INSTR(0x52, 0x45), 896 + ILI9881C_COMMAND_INSTR(0x53, 0x67), 897 + ILI9881C_COMMAND_INSTR(0x54, 0x89), 898 + ILI9881C_COMMAND_INSTR(0x55, 0xab), 899 + ILI9881C_COMMAND_INSTR(0x56, 0x01), 900 + ILI9881C_COMMAND_INSTR(0x57, 0x23), 901 + ILI9881C_COMMAND_INSTR(0x58, 0x45), 902 + ILI9881C_COMMAND_INSTR(0x59, 0x67), 903 + ILI9881C_COMMAND_INSTR(0x5a, 0x89), 904 + ILI9881C_COMMAND_INSTR(0x5b, 0xab), 905 + ILI9881C_COMMAND_INSTR(0x5c, 0xcd), 906 + ILI9881C_COMMAND_INSTR(0x5d, 0xef), 907 + ILI9881C_COMMAND_INSTR(0x5e, 0x03), 908 + ILI9881C_COMMAND_INSTR(0x5f, 0x14), 909 + ILI9881C_COMMAND_INSTR(0x60, 0x15), 910 + ILI9881C_COMMAND_INSTR(0x61, 0x0c), 911 + ILI9881C_COMMAND_INSTR(0x62, 0x0d), 912 + ILI9881C_COMMAND_INSTR(0x63, 0x0e), 913 + ILI9881C_COMMAND_INSTR(0x64, 0x0f), 914 + ILI9881C_COMMAND_INSTR(0x65, 0x10), 915 + 
ILI9881C_COMMAND_INSTR(0x66, 0x11), 916 + ILI9881C_COMMAND_INSTR(0x67, 0x08), 917 + ILI9881C_COMMAND_INSTR(0x68, 0x02), 918 + ILI9881C_COMMAND_INSTR(0x69, 0x0a), 919 + ILI9881C_COMMAND_INSTR(0x6a, 0x02), 920 + ILI9881C_COMMAND_INSTR(0x6b, 0x02), 921 + ILI9881C_COMMAND_INSTR(0x6c, 0x02), 922 + ILI9881C_COMMAND_INSTR(0x6d, 0x02), 923 + ILI9881C_COMMAND_INSTR(0x6e, 0x02), 924 + ILI9881C_COMMAND_INSTR(0x6f, 0x02), 925 + ILI9881C_COMMAND_INSTR(0x70, 0x02), 926 + ILI9881C_COMMAND_INSTR(0x71, 0x02), 927 + ILI9881C_COMMAND_INSTR(0x72, 0x06), 928 + ILI9881C_COMMAND_INSTR(0x73, 0x02), 929 + ILI9881C_COMMAND_INSTR(0x74, 0x02), 930 + ILI9881C_COMMAND_INSTR(0x75, 0x14), 931 + ILI9881C_COMMAND_INSTR(0x76, 0x15), 932 + ILI9881C_COMMAND_INSTR(0x77, 0x0f), 933 + ILI9881C_COMMAND_INSTR(0x78, 0x0e), 934 + ILI9881C_COMMAND_INSTR(0x79, 0x0d), 935 + ILI9881C_COMMAND_INSTR(0x7a, 0x0c), 936 + ILI9881C_COMMAND_INSTR(0x7b, 0x11), 937 + ILI9881C_COMMAND_INSTR(0x7c, 0x10), 938 + ILI9881C_COMMAND_INSTR(0x7d, 0x06), 939 + ILI9881C_COMMAND_INSTR(0x7e, 0x02), 940 + ILI9881C_COMMAND_INSTR(0x7f, 0x0a), 941 + ILI9881C_COMMAND_INSTR(0x80, 0x02), 942 + ILI9881C_COMMAND_INSTR(0x81, 0x02), 943 + ILI9881C_COMMAND_INSTR(0x82, 0x02), 944 + ILI9881C_COMMAND_INSTR(0x83, 0x02), 945 + ILI9881C_COMMAND_INSTR(0x84, 0x02), 946 + ILI9881C_COMMAND_INSTR(0x85, 0x02), 947 + ILI9881C_COMMAND_INSTR(0x86, 0x02), 948 + ILI9881C_COMMAND_INSTR(0x87, 0x02), 949 + ILI9881C_COMMAND_INSTR(0x88, 0x08), 950 + ILI9881C_COMMAND_INSTR(0x89, 0x02), 951 + ILI9881C_COMMAND_INSTR(0x8a, 0x02), 952 + ILI9881C_SWITCH_PAGE_INSTR(4), 953 + ILI9881C_COMMAND_INSTR(0x00, 0x80), 954 + ILI9881C_COMMAND_INSTR(0x70, 0x00), 955 + ILI9881C_COMMAND_INSTR(0x71, 0x00), 956 + ILI9881C_COMMAND_INSTR(0x66, 0xfe), 957 + ILI9881C_COMMAND_INSTR(0x82, 0x15), 958 + ILI9881C_COMMAND_INSTR(0x84, 0x15), 959 + ILI9881C_COMMAND_INSTR(0x85, 0x15), 960 + ILI9881C_COMMAND_INSTR(0x3a, 0x24), 961 + ILI9881C_COMMAND_INSTR(0x32, 0xac), 962 + ILI9881C_COMMAND_INSTR(0x8c, 
0x80), 963 + ILI9881C_COMMAND_INSTR(0x3c, 0xf5), 964 + ILI9881C_COMMAND_INSTR(0x88, 0x33), 965 + ILI9881C_SWITCH_PAGE_INSTR(1), 966 + ILI9881C_COMMAND_INSTR(0x22, 0x0a), 967 + ILI9881C_COMMAND_INSTR(0x31, 0x00), 968 + ILI9881C_COMMAND_INSTR(0x53, 0x78), 969 + ILI9881C_COMMAND_INSTR(0x55, 0x7b), 970 + ILI9881C_COMMAND_INSTR(0x60, 0x20), 971 + ILI9881C_COMMAND_INSTR(0x61, 0x00), 972 + ILI9881C_COMMAND_INSTR(0x62, 0x0d), 973 + ILI9881C_COMMAND_INSTR(0x63, 0x00), 974 + ILI9881C_COMMAND_INSTR(0xa0, 0x00), 975 + ILI9881C_COMMAND_INSTR(0xa1, 0x10), 976 + ILI9881C_COMMAND_INSTR(0xa2, 0x1c), 977 + ILI9881C_COMMAND_INSTR(0xa3, 0x13), 978 + ILI9881C_COMMAND_INSTR(0xa4, 0x15), 979 + ILI9881C_COMMAND_INSTR(0xa5, 0x26), 980 + ILI9881C_COMMAND_INSTR(0xa6, 0x1a), 981 + ILI9881C_COMMAND_INSTR(0xa7, 0x1d), 982 + ILI9881C_COMMAND_INSTR(0xa8, 0x67), 983 + ILI9881C_COMMAND_INSTR(0xa9, 0x1c), 984 + ILI9881C_COMMAND_INSTR(0xaa, 0x29), 985 + ILI9881C_COMMAND_INSTR(0xab, 0x5b), 986 + ILI9881C_COMMAND_INSTR(0xac, 0x26), 987 + ILI9881C_COMMAND_INSTR(0xad, 0x28), 988 + ILI9881C_COMMAND_INSTR(0xae, 0x5c), 989 + ILI9881C_COMMAND_INSTR(0xaf, 0x30), 990 + ILI9881C_COMMAND_INSTR(0xb0, 0x31), 991 + ILI9881C_COMMAND_INSTR(0xb1, 0x32), 992 + ILI9881C_COMMAND_INSTR(0xb2, 0x00), 993 + ILI9881C_COMMAND_INSTR(0xb1, 0x2e), 994 + ILI9881C_COMMAND_INSTR(0xb2, 0x32), 995 + ILI9881C_COMMAND_INSTR(0xb3, 0x00), 996 + ILI9881C_COMMAND_INSTR(0xb6, 0x02), 997 + ILI9881C_COMMAND_INSTR(0xb7, 0x03), 998 + ILI9881C_COMMAND_INSTR(0xc0, 0x00), 999 + ILI9881C_COMMAND_INSTR(0xc1, 0x10), 1000 + ILI9881C_COMMAND_INSTR(0xc2, 0x1c), 1001 + ILI9881C_COMMAND_INSTR(0xc3, 0x13), 1002 + ILI9881C_COMMAND_INSTR(0xc4, 0x15), 1003 + ILI9881C_COMMAND_INSTR(0xc5, 0x26), 1004 + ILI9881C_COMMAND_INSTR(0xc6, 0x1a), 1005 + ILI9881C_COMMAND_INSTR(0xc7, 0x1d), 1006 + ILI9881C_COMMAND_INSTR(0xc8, 0x67), 1007 + ILI9881C_COMMAND_INSTR(0xc9, 0x1c), 1008 + ILI9881C_COMMAND_INSTR(0xca, 0x29), 1009 + ILI9881C_COMMAND_INSTR(0xcb, 0x5b), 1010 + 
ILI9881C_COMMAND_INSTR(0xcc, 0x26), 1011 + ILI9881C_COMMAND_INSTR(0xcd, 0x28), 1012 + ILI9881C_COMMAND_INSTR(0xce, 0x5c), 1013 + ILI9881C_COMMAND_INSTR(0xcf, 0x30), 1014 + ILI9881C_COMMAND_INSTR(0xd0, 0x31), 1015 + ILI9881C_COMMAND_INSTR(0xd1, 0x2e), 1016 + ILI9881C_COMMAND_INSTR(0xd2, 0x32), 1017 + ILI9881C_COMMAND_INSTR(0xd3, 0x00), 1018 + ILI9881C_SWITCH_PAGE_INSTR(0), 1019 + }; 1020 + 823 1021 static const struct ili9881c_instr w552946ab_init[] = { 824 1022 ILI9881C_SWITCH_PAGE_INSTR(3), 825 1023 ILI9881C_COMMAND_INSTR(0x01, 0x00), ··· 2158 1960 .height_mm = 110, 2159 1961 }; 2160 1962 1963 + static const struct drm_display_mode w552946aaa_default_mode = { 1964 + .clock = 65000, 1965 + 1966 + .hdisplay = 720, 1967 + .hsync_start = 720 + 52, 1968 + .hsync_end = 720 + 52 + 8, 1969 + .htotal = 720 + 52 + 8 + 48, 1970 + 1971 + .vdisplay = 1280, 1972 + .vsync_start = 1280 + 16, 1973 + .vsync_end = 1280 + 16 + 6, 1974 + .vtotal = 1280 + 16 + 6 + 15, 1975 + 1976 + .width_mm = 68, 1977 + .height_mm = 121, 1978 + }; 1979 + 2161 1980 static const struct drm_display_mode w552946aba_default_mode = { 2162 1981 .clock = 64000, 2163 1982 ··· 2403 2188 .default_address_mode = 0x03, 2404 2189 }; 2405 2190 2191 + static const struct ili9881c_desc w552946aaa_desc = { 2192 + .init = w552946aaa_init, 2193 + .init_length = ARRAY_SIZE(w552946aaa_init), 2194 + .mode = &w552946aaa_default_mode, 2195 + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | 2196 + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, 2197 + .lanes = 2, 2198 + }; 2199 + 2406 2200 static const struct ili9881c_desc w552946aba_desc = { 2407 2201 .init = w552946ab_init, 2408 2202 .init_length = ARRAY_SIZE(w552946ab_init), ··· 2460 2236 { .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc }, 2461 2237 { .compatible = "startek,kd050hdfia020", .data = &kd050hdfia020_desc }, 2462 2238 { .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc }, 2239 + { .compatible = "wanchanglong,w552946aaa", 
.data = &w552946aaa_desc }, 2463 2240 { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc }, 2464 2241 { .compatible = "ampire,am8001280g", .data = &am8001280g_desc }, 2465 2242 { .compatible = "raspberrypi,dsi-5inch", &rpi_5inch_desc },
+9 -12
drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
··· 1132 1132 dsi->lanes = desc->lanes; 1133 1133 1134 1134 jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 1135 - if (IS_ERR(jadard->reset)) { 1136 - DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n"); 1137 - return PTR_ERR(jadard->reset); 1138 - } 1135 + if (IS_ERR(jadard->reset)) 1136 + return dev_err_probe(&dsi->dev, PTR_ERR(jadard->reset), 1137 + "failed to get our reset GPIO\n"); 1139 1138 1140 1139 jadard->vdd = devm_regulator_get(dev, "vdd"); 1141 - if (IS_ERR(jadard->vdd)) { 1142 - DRM_DEV_ERROR(&dsi->dev, "failed to get vdd regulator\n"); 1143 - return PTR_ERR(jadard->vdd); 1144 - } 1140 + if (IS_ERR(jadard->vdd)) 1141 + return dev_err_probe(&dsi->dev, PTR_ERR(jadard->vdd), 1142 + "failed to get vdd regulator\n"); 1145 1143 1146 1144 jadard->vccio = devm_regulator_get(dev, "vccio"); 1147 - if (IS_ERR(jadard->vccio)) { 1148 - DRM_DEV_ERROR(&dsi->dev, "failed to get vccio regulator\n"); 1149 - return PTR_ERR(jadard->vccio); 1150 - } 1145 + if (IS_ERR(jadard->vccio)) 1146 + return dev_err_probe(&dsi->dev, PTR_ERR(jadard->vccio), 1147 + "failed to get vccio regulator\n"); 1151 1148 1152 1149 ret = of_drm_get_panel_orientation(dev->of_node, &jadard->orientation); 1153 1150 if (ret < 0)
+184
drivers/gpu/drm/panel/panel-lg-ld070wx3.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <linux/array_size.h> 4 + #include <linux/delay.h> 5 + #include <linux/err.h> 6 + #include <linux/gpio/consumer.h> 7 + #include <linux/mod_devicetable.h> 8 + #include <linux/module.h> 9 + #include <linux/property.h> 10 + #include <linux/regulator/consumer.h> 11 + 12 + #include <video/mipi_display.h> 13 + 14 + #include <drm/drm_mipi_dsi.h> 15 + #include <drm/drm_modes.h> 16 + #include <drm/drm_panel.h> 17 + #include <drm/drm_probe_helper.h> 18 + 19 + static const struct regulator_bulk_data lg_ld070wx3_supplies[] = { 20 + { .supply = "vdd" }, { .supply = "vcc" }, 21 + }; 22 + 23 + struct lg_ld070wx3 { 24 + struct drm_panel panel; 25 + struct mipi_dsi_device *dsi; 26 + 27 + struct regulator_bulk_data *supplies; 28 + }; 29 + 30 + static inline struct lg_ld070wx3 *to_lg_ld070wx3(struct drm_panel *panel) 31 + { 32 + return container_of(panel, struct lg_ld070wx3, panel); 33 + } 34 + 35 + static int lg_ld070wx3_prepare(struct drm_panel *panel) 36 + { 37 + struct lg_ld070wx3 *priv = to_lg_ld070wx3(panel); 38 + struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi }; 39 + struct device *dev = panel->dev; 40 + int ret; 41 + 42 + ret = regulator_bulk_enable(ARRAY_SIZE(lg_ld070wx3_supplies), priv->supplies); 43 + if (ret < 0) { 44 + dev_err(dev, "failed to enable power supplies: %d\n", ret); 45 + return ret; 46 + } 47 + 48 + /* 49 + * According to spec delay between enabling supply is 0, 50 + * for regulators to reach required voltage ~5ms needed. 51 + * MIPI interface signal for setup requires additional 52 + * 110ms which in total results in 115ms. 
53 + */ 54 + mdelay(115); 55 + 56 + mipi_dsi_dcs_soft_reset_multi(&ctx); 57 + mipi_dsi_msleep(&ctx, 20); 58 + 59 + /* Differential input impedance selection */ 60 + mipi_dsi_dcs_write_seq_multi(&ctx, 0xae, 0x0b); 61 + 62 + /* Enter test mode 1 and 2*/ 63 + mipi_dsi_dcs_write_seq_multi(&ctx, 0xee, 0xea); 64 + mipi_dsi_dcs_write_seq_multi(&ctx, 0xef, 0x5f); 65 + 66 + /* Increased MIPI CLK driving ability */ 67 + mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x68); 68 + 69 + /* Exit test mode 1 and 2 */ 70 + mipi_dsi_dcs_write_seq_multi(&ctx, 0xee, 0x00); 71 + mipi_dsi_dcs_write_seq_multi(&ctx, 0xef, 0x00); 72 + 73 + return ctx.accum_err; 74 + } 75 + 76 + static int lg_ld070wx3_unprepare(struct drm_panel *panel) 77 + { 78 + struct lg_ld070wx3 *priv = to_lg_ld070wx3(panel); 79 + struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi }; 80 + 81 + mipi_dsi_dcs_enter_sleep_mode_multi(&ctx); 82 + 83 + msleep(50); 84 + 85 + regulator_bulk_disable(ARRAY_SIZE(lg_ld070wx3_supplies), priv->supplies); 86 + 87 + /* power supply must be off for at least 1s after panel disable */ 88 + msleep(1000); 89 + 90 + return 0; 91 + } 92 + 93 + static const struct drm_display_mode lg_ld070wx3_mode = { 94 + .clock = (800 + 32 + 48 + 8) * (1280 + 5 + 3 + 1) * 60 / 1000, 95 + .hdisplay = 800, 96 + .hsync_start = 800 + 32, 97 + .hsync_end = 800 + 32 + 48, 98 + .htotal = 800 + 32 + 48 + 8, 99 + .vdisplay = 1280, 100 + .vsync_start = 1280 + 5, 101 + .vsync_end = 1280 + 5 + 3, 102 + .vtotal = 1280 + 5 + 3 + 1, 103 + .width_mm = 94, 104 + .height_mm = 151, 105 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 106 + }; 107 + 108 + static int lg_ld070wx3_get_modes(struct drm_panel *panel, 109 + struct drm_connector *connector) 110 + { 111 + return drm_connector_helper_get_modes_fixed(connector, &lg_ld070wx3_mode); 112 + } 113 + 114 + static const struct drm_panel_funcs lg_ld070wx3_panel_funcs = { 115 + .prepare = lg_ld070wx3_prepare, 116 + .unprepare = lg_ld070wx3_unprepare, 117 + .get_modes = 
lg_ld070wx3_get_modes, 118 + }; 119 + 120 + static int lg_ld070wx3_probe(struct mipi_dsi_device *dsi) 121 + { 122 + struct device *dev = &dsi->dev; 123 + struct lg_ld070wx3 *priv; 124 + int ret; 125 + 126 + priv = devm_drm_panel_alloc(dev, struct lg_ld070wx3, panel, 127 + &lg_ld070wx3_panel_funcs, 128 + DRM_MODE_CONNECTOR_DSI); 129 + if (IS_ERR(priv)) 130 + return PTR_ERR(priv); 131 + 132 + ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(lg_ld070wx3_supplies), 133 + lg_ld070wx3_supplies, &priv->supplies); 134 + if (ret < 0) 135 + return dev_err_probe(dev, ret, "failed to get supplies\n"); 136 + 137 + priv->dsi = dsi; 138 + mipi_dsi_set_drvdata(dsi, priv); 139 + 140 + dsi->lanes = 4; 141 + dsi->format = MIPI_DSI_FMT_RGB888; 142 + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM; 143 + 144 + ret = drm_panel_of_backlight(&priv->panel); 145 + if (ret < 0) 146 + return dev_err_probe(dev, ret, "failed to get backlight\n"); 147 + 148 + drm_panel_add(&priv->panel); 149 + 150 + ret = devm_mipi_dsi_attach(dev, dsi); 151 + if (ret < 0) { 152 + drm_panel_remove(&priv->panel); 153 + return dev_err_probe(dev, ret, "failed to attach to DSI host\n"); 154 + } 155 + 156 + return 0; 157 + } 158 + 159 + static void lg_ld070wx3_remove(struct mipi_dsi_device *dsi) 160 + { 161 + struct lg_ld070wx3 *priv = mipi_dsi_get_drvdata(dsi); 162 + 163 + drm_panel_remove(&priv->panel); 164 + } 165 + 166 + static const struct of_device_id lg_ld070wx3_of_match[] = { 167 + { .compatible = "lg,ld070wx3-sl01" }, 168 + { /* sentinel */ } 169 + }; 170 + MODULE_DEVICE_TABLE(of, lg_ld070wx3_of_match); 171 + 172 + static struct mipi_dsi_driver lg_ld070wx3_driver = { 173 + .driver = { 174 + .name = "panel-lg-ld070wx3", 175 + .of_match_table = lg_ld070wx3_of_match, 176 + }, 177 + .probe = lg_ld070wx3_probe, 178 + .remove = lg_ld070wx3_remove, 179 + }; 180 + module_mipi_dsi_driver(lg_ld070wx3_driver); 181 + 182 + MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>"); 183 + MODULE_DESCRIPTION("LG 
LD070WX3-SL01 DSI panel driver"); 184 + MODULE_LICENSE("GPL");
+4 -4
drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
··· 54 54 } 55 55 56 56 msleep(20); 57 - gpiod_set_value(ctx->gpios.power, 1); 57 + gpiod_set_value_cansleep(ctx->gpios.power, 1); 58 58 msleep(20); 59 - gpiod_set_value(ctx->gpios.reset, 1); 59 + gpiod_set_value_cansleep(ctx->gpios.reset, 1); 60 60 msleep(20); 61 61 return 0; 62 62 } ··· 65 65 { 66 66 struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel); 67 67 68 - gpiod_set_value(ctx->gpios.reset, 0); 69 - gpiod_set_value(ctx->gpios.power, 0); 68 + gpiod_set_value_cansleep(ctx->gpios.reset, 0); 69 + gpiod_set_value_cansleep(ctx->gpios.power, 0); 70 70 regulator_disable(ctx->supply); 71 71 72 72 return 0;
+67 -38
drivers/gpu/drm/panel/panel-samsung-sofef00.c
··· 16 16 #include <drm/drm_mipi_dsi.h> 17 17 #include <drm/drm_modes.h> 18 18 #include <drm/drm_panel.h> 19 + #include <drm/drm_probe_helper.h> 19 20 20 21 struct sofef00_panel { 21 22 struct drm_panel panel; 22 23 struct mipi_dsi_device *dsi; 23 - struct regulator *supply; 24 + struct regulator_bulk_data *supplies; 24 25 struct gpio_desc *reset_gpio; 26 + }; 27 + 28 + static const struct regulator_bulk_data sofef00_supplies[] = { 29 + { .supply = "vddio" }, 30 + { .supply = "vci" }, 31 + { .supply = "poc" }, 25 32 }; 26 33 27 34 static inline ··· 36 29 { 37 30 return container_of(panel, struct sofef00_panel, panel); 38 31 } 32 + 33 + #define sofef00_test_key_on_lvl2(ctx) \ 34 + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x5a, 0x5a) 35 + #define sofef00_test_key_off_lvl2(ctx) \ 36 + mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0xa5, 0xa5) 39 37 40 38 static void sofef00_panel_reset(struct sofef00_panel *ctx) 41 39 { ··· 62 50 mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); 63 51 mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); 64 52 65 - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); 66 - 53 + sofef00_test_key_on_lvl2(&dsi_ctx); 67 54 mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); 55 + sofef00_test_key_off_lvl2(&dsi_ctx); 68 56 69 - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); 70 - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); 57 + sofef00_test_key_on_lvl2(&dsi_ctx); 71 58 mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07); 72 59 mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x12); 73 - mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); 60 + sofef00_test_key_off_lvl2(&dsi_ctx); 61 + 74 62 mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); 75 63 mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00); 64 + 65 + return dsi_ctx.accum_err; 66 + } 67 + 68 + static int sofef00_enable(struct drm_panel *panel) 69 + { 70 + struct sofef00_panel *ctx = to_sofef00_panel(panel); 
71 + struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi }; 76 72 77 73 mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); 78 74 ··· 91 71 { 92 72 struct mipi_dsi_device *dsi = ctx->dsi; 93 73 struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; 94 - 95 - dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; 96 74 97 75 mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); 98 76 mipi_dsi_msleep(&dsi_ctx, 40); ··· 104 86 static int sofef00_panel_prepare(struct drm_panel *panel) 105 87 { 106 88 struct sofef00_panel *ctx = to_sofef00_panel(panel); 107 - struct device *dev = &ctx->dsi->dev; 108 89 int ret; 109 90 110 - ret = regulator_enable(ctx->supply); 111 - if (ret < 0) { 112 - dev_err(dev, "Failed to enable regulator: %d\n", ret); 91 + ret = regulator_bulk_enable(ARRAY_SIZE(sofef00_supplies), ctx->supplies); 92 + if (ret < 0) 113 93 return ret; 114 - } 115 94 116 95 sofef00_panel_reset(ctx); 117 96 118 97 ret = sofef00_panel_on(ctx); 119 98 if (ret < 0) { 120 99 gpiod_set_value_cansleep(ctx->reset_gpio, 1); 100 + regulator_bulk_disable(ARRAY_SIZE(sofef00_supplies), ctx->supplies); 121 101 return ret; 122 102 } 103 + 104 + return 0; 105 + } 106 + 107 + static int sofef00_disable(struct drm_panel *panel) 108 + { 109 + struct sofef00_panel *ctx = to_sofef00_panel(panel); 110 + 111 + sofef00_panel_off(ctx); 123 112 124 113 return 0; 125 114 } ··· 135 110 { 136 111 struct sofef00_panel *ctx = to_sofef00_panel(panel); 137 112 138 - sofef00_panel_off(ctx); 139 - regulator_disable(ctx->supply); 113 + regulator_bulk_disable(ARRAY_SIZE(sofef00_supplies), ctx->supplies); 140 114 141 115 return 0; 142 116 } 143 117 144 - static const struct drm_display_mode enchilada_panel_mode = { 118 + static const struct drm_display_mode ams628nw01_panel_mode = { 145 119 .clock = (1080 + 112 + 16 + 36) * (2280 + 36 + 8 + 12) * 60 / 1000, 120 + 146 121 .hdisplay = 1080, 147 122 .hsync_start = 1080 + 112, 148 123 .hsync_end = 1080 + 112 + 16, 149 124 .htotal = 1080 + 112 + 16 + 36, 125 + 150 126 .vdisplay = 
2280, 151 127 .vsync_start = 2280 + 36, 152 128 .vsync_end = 2280 + 36 + 8, 153 129 .vtotal = 2280 + 36 + 8 + 12, 130 + 154 131 .width_mm = 68, 155 132 .height_mm = 145, 133 + 134 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 156 135 }; 157 136 158 137 static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) 159 138 { 160 - struct drm_display_mode *mode; 161 - 162 - mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode); 163 - if (!mode) 164 - return -ENOMEM; 165 - 166 - drm_mode_set_name(mode); 167 - 168 - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; 169 - connector->display_info.width_mm = mode->width_mm; 170 - connector->display_info.height_mm = mode->height_mm; 171 - drm_mode_probed_add(connector, mode); 172 - 173 - return 1; 139 + return drm_connector_helper_get_modes_fixed(connector, &ams628nw01_panel_mode); 174 140 } 175 141 176 142 static const struct drm_panel_funcs sofef00_panel_panel_funcs = { 177 143 .prepare = sofef00_panel_prepare, 144 + .enable = sofef00_enable, 145 + .disable = sofef00_disable, 178 146 .unprepare = sofef00_panel_unprepare, 179 147 .get_modes = sofef00_panel_get_modes, 180 148 }; ··· 178 160 int err; 179 161 u16 brightness = (u16)backlight_get_brightness(bl); 180 162 163 + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; 164 + 181 165 err = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); 182 166 if (err < 0) 183 167 return err; 168 + 169 + dsi->mode_flags |= MIPI_DSI_MODE_LPM; 184 170 185 171 return 0; 186 172 } ··· 199 177 struct device *dev = &dsi->dev; 200 178 const struct backlight_properties props = { 201 179 .type = BACKLIGHT_PLATFORM, 202 - .brightness = 1023, 180 + .brightness = 512, 203 181 .max_brightness = 1023, 204 182 }; 205 183 ··· 219 197 if (IS_ERR(ctx)) 220 198 return PTR_ERR(ctx); 221 199 222 - ctx->supply = devm_regulator_get(dev, "vddio"); 223 - if (IS_ERR(ctx->supply)) 224 - return dev_err_probe(dev, PTR_ERR(ctx->supply), 225 - "Failed to 
get vddio regulator\n"); 200 + ret = devm_regulator_bulk_get_const(dev, 201 + ARRAY_SIZE(sofef00_supplies), 202 + sofef00_supplies, 203 + &ctx->supplies); 204 + if (ret) 205 + return dev_err_probe(dev, ret, "Failed to get regulators\n"); 226 206 227 207 ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); 228 208 if (IS_ERR(ctx->reset_gpio)) ··· 236 212 237 213 dsi->lanes = 4; 238 214 dsi->format = MIPI_DSI_FMT_RGB888; 215 + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | 216 + MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; 217 + 218 + ctx->panel.prepare_prev_first = true; 239 219 240 220 ctx->panel.backlight = sofef00_create_backlight(dsi); 241 221 if (IS_ERR(ctx->panel.backlight)) ··· 271 243 } 272 244 273 245 static const struct of_device_id sofef00_panel_of_match[] = { 274 - { .compatible = "samsung,sofef00" }, 246 + { .compatible = "samsung,sofef00" }, /* legacy */ 247 + { .compatible = "samsung,sofef00-ams628nw01" }, 275 248 { /* sentinel */ } 276 249 }; 277 250 MODULE_DEVICE_TABLE(of, sofef00_panel_of_match); ··· 281 252 .probe = sofef00_panel_probe, 282 253 .remove = sofef00_panel_remove, 283 254 .driver = { 284 - .name = "panel-oneplus6", 255 + .name = "panel-samsung-sofef00", 285 256 .of_match_table = sofef00_panel_of_match, 286 257 }, 287 258 }; ··· 289 260 module_mipi_dsi_driver(sofef00_panel_driver); 290 261 291 262 MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>"); 292 - MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones"); 263 + MODULE_DESCRIPTION("DRM driver for Samsung SOFEF00 DDIC"); 293 264 MODULE_LICENSE("GPL v2");
+61 -31
drivers/gpu/drm/panel/panel-simple.c
··· 4106 4106 .connector_type = DRM_MODE_CONNECTOR_DPI, 4107 4107 }; 4108 4108 4109 + static const struct display_timing raystar_rff500f_awh_dnn_timing = { 4110 + .pixelclock = { 23000000, 25000000, 27000000 }, 4111 + .hactive = { 800, 800, 800 }, 4112 + .hback_porch = { 4, 8, 48 }, 4113 + .hfront_porch = { 4, 8, 48 }, 4114 + .hsync_len = { 2, 4, 8 }, 4115 + .vactive = { 480, 480, 480 }, 4116 + .vback_porch = { 4, 8, 12 }, 4117 + .vfront_porch = { 4, 8, 12 }, 4118 + .vsync_len = { 2, 4, 8 }, 4119 + }; 4120 + 4121 + static const struct panel_desc raystar_rff500f_awh_dnn = { 4122 + .timings = &raystar_rff500f_awh_dnn_timing, 4123 + .num_timings = 1, 4124 + .bpc = 8, 4125 + .size = { 4126 + .width = 108, 4127 + .height = 65, 4128 + }, 4129 + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 4130 + .connector_type = DRM_MODE_CONNECTOR_LVDS, 4131 + }; 4132 + 4109 4133 static const struct display_timing rocktech_rk043fn48h_timing = { 4110 4134 .pixelclock = { 6000000, 9000000, 12000000 }, 4111 4135 .hactive = { 480, 480, 480 }, ··· 4242 4218 .enable = 300, 4243 4219 .disable = 200, 4244 4220 .unprepare = 600, 4221 + }, 4222 + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 4223 + .connector_type = DRM_MODE_CONNECTOR_LVDS, 4224 + }; 4225 + 4226 + static const struct display_timing samsung_ltl106al01_timing = { 4227 + .pixelclock = { 71980000, 71980000, 71980000 }, 4228 + .hactive = { 1366, 1366, 1366 }, 4229 + .hfront_porch = { 56, 56, 56 }, 4230 + .hback_porch = { 106, 106, 106 }, 4231 + .hsync_len = { 14, 14, 14 }, 4232 + .vactive = { 768, 768, 768 }, 4233 + .vfront_porch = { 3, 3, 3 }, 4234 + .vback_porch = { 6, 6, 6 }, 4235 + .vsync_len = { 1, 1, 1 }, 4236 + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, 4237 + }; 4238 + 4239 + static const struct panel_desc samsung_ltl106al01 = { 4240 + .timings = &samsung_ltl106al01_timing, 4241 + .num_timings = 1, 4242 + .bpc = 8, 4243 + .size = { 4244 + .width = 235, 4245 + .height = 132, 4246 + }, 4247 + .delay = { 4248 
+ .prepare = 5, 4249 + .enable = 10, 4250 + .disable = 10, 4251 + .unprepare = 5, 4245 4252 }, 4246 4253 .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 4247 4254 .connector_type = DRM_MODE_CONNECTOR_LVDS, ··· 5434 5379 .compatible = "qishenglong,gopher2b-lcd", 5435 5380 .data = &qishenglong_gopher2b_lcd, 5436 5381 }, { 5382 + .compatible = "raystar,rff500f-awh-dnn", 5383 + .data = &raystar_rff500f_awh_dnn, 5384 + }, { 5437 5385 .compatible = "rocktech,rk043fn48h", 5438 5386 .data = &rocktech_rk043fn48h, 5439 5387 }, { ··· 5448 5390 }, { 5449 5391 .compatible = "samsung,ltl101al01", 5450 5392 .data = &samsung_ltl101al01, 5393 + }, { 5394 + .compatible = "samsung,ltl106al01", 5395 + .data = &samsung_ltl106al01, 5451 5396 }, { 5452 5397 .compatible = "samsung,ltn101nt05", 5453 5398 .data = &samsung_ltn101nt05, ··· 5661 5600 .lanes = 4, 5662 5601 }; 5663 5602 5664 - static const struct drm_display_mode lg_ld070wx3_sl01_mode = { 5665 - .clock = 71000, 5666 - .hdisplay = 800, 5667 - .hsync_start = 800 + 32, 5668 - .hsync_end = 800 + 32 + 1, 5669 - .htotal = 800 + 32 + 1 + 57, 5670 - .vdisplay = 1280, 5671 - .vsync_start = 1280 + 28, 5672 - .vsync_end = 1280 + 28 + 1, 5673 - .vtotal = 1280 + 28 + 1 + 14, 5674 - }; 5675 - 5676 - static const struct panel_desc_dsi lg_ld070wx3_sl01 = { 5677 - .desc = { 5678 - .modes = &lg_ld070wx3_sl01_mode, 5679 - .num_modes = 1, 5680 - .bpc = 8, 5681 - .size = { 5682 - .width = 94, 5683 - .height = 151, 5684 - }, 5685 - .connector_type = DRM_MODE_CONNECTOR_DSI, 5686 - }, 5687 - .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS, 5688 - .format = MIPI_DSI_FMT_RGB888, 5689 - .lanes = 4, 5690 - }; 5691 - 5692 5603 static const struct drm_display_mode lg_lh500wx1_sd03_mode = { 5693 5604 .clock = 67000, 5694 5605 .hdisplay = 720, ··· 5784 5751 }, { 5785 5752 .compatible = "boe,tv080wum-nl0", 5786 5753 .data = &boe_tv080wum_nl0 5787 - }, { 5788 - .compatible = "lg,ld070wx3-sl01", 5789 - .data = &lg_ld070wx3_sl01 5790 5754 }, { 5791 
5755 .compatible = "lg,lh500wx1-sd03", 5792 5756 .data = &lg_lh500wx1_sd03
+1
drivers/gpu/drm/panthor/Makefile
··· 10 10 panthor_heap.o \ 11 11 panthor_hw.o \ 12 12 panthor_mmu.o \ 13 + panthor_pwr.o \ 13 14 panthor_sched.o 14 15 15 16 obj-$(CONFIG_DRM_PANTHOR) += panthor.o
+15 -3
drivers/gpu/drm/panthor/panthor_device.c
··· 21 21 #include "panthor_gpu.h" 22 22 #include "panthor_hw.h" 23 23 #include "panthor_mmu.h" 24 + #include "panthor_pwr.h" 24 25 #include "panthor_regs.h" 25 26 #include "panthor_sched.h" 26 27 ··· 114 113 panthor_fw_unplug(ptdev); 115 114 panthor_mmu_unplug(ptdev); 116 115 panthor_gpu_unplug(ptdev); 116 + panthor_pwr_unplug(ptdev); 117 117 118 118 pm_runtime_dont_use_autosuspend(ptdev->base.dev); 119 119 pm_runtime_put_sync_suspend(ptdev->base.dev); ··· 154 152 panthor_sched_pre_reset(ptdev); 155 153 panthor_fw_pre_reset(ptdev, true); 156 154 panthor_mmu_pre_reset(ptdev); 157 - panthor_gpu_soft_reset(ptdev); 158 - panthor_gpu_l2_power_on(ptdev); 155 + panthor_hw_soft_reset(ptdev); 156 + panthor_hw_l2_power_on(ptdev); 159 157 panthor_mmu_post_reset(ptdev); 160 158 ret = panthor_fw_post_reset(ptdev); 161 159 atomic_set(&ptdev->reset.pending, 0); ··· 270 268 if (ret) 271 269 goto err_rpm_put; 272 270 273 - ret = panthor_gpu_init(ptdev); 271 + ret = panthor_pwr_init(ptdev); 274 272 if (ret) 275 273 goto err_rpm_put; 274 + 275 + ret = panthor_gpu_init(ptdev); 276 + if (ret) 277 + goto err_unplug_pwr; 276 278 277 279 ret = panthor_gpu_coherency_init(ptdev); 278 280 if (ret) ··· 317 311 318 312 err_unplug_gpu: 319 313 panthor_gpu_unplug(ptdev); 314 + 315 + err_unplug_pwr: 316 + panthor_pwr_unplug(ptdev); 320 317 321 318 err_rpm_put: 322 319 pm_runtime_put_sync_suspend(ptdev->base.dev); ··· 474 465 { 475 466 int ret; 476 467 468 + panthor_pwr_resume(ptdev); 477 469 panthor_gpu_resume(ptdev); 478 470 panthor_mmu_resume(ptdev); 479 471 ··· 484 474 485 475 panthor_mmu_suspend(ptdev); 486 476 panthor_gpu_suspend(ptdev); 477 + panthor_pwr_suspend(ptdev); 487 478 return ret; 488 479 } 489 480 ··· 598 587 panthor_fw_suspend(ptdev); 599 588 panthor_mmu_suspend(ptdev); 600 589 panthor_gpu_suspend(ptdev); 590 + panthor_pwr_suspend(ptdev); 601 591 drm_dev_exit(cookie); 602 592 } 603 593
+8
drivers/gpu/drm/panthor/panthor_device.h
··· 24 24 struct panthor_gpu; 25 25 struct panthor_group_pool; 26 26 struct panthor_heap_pool; 27 + struct panthor_hw; 27 28 struct panthor_job; 28 29 struct panthor_mmu; 29 30 struct panthor_fw; 30 31 struct panthor_perfcnt; 32 + struct panthor_pwr; 31 33 struct panthor_vm; 32 34 struct panthor_vm_pool; 33 35 ··· 135 133 136 134 /** @csif_info: Command stream interface information. */ 137 135 struct drm_panthor_csif_info csif_info; 136 + 137 + /** @hw: GPU-specific data. */ 138 + struct panthor_hw *hw; 139 + 140 + /** @pwr: Power control management data. */ 141 + struct panthor_pwr *pwr; 138 142 139 143 /** @gpu: GPU management data. */ 140 144 struct panthor_gpu *gpu;
+113 -18
drivers/gpu/drm/panthor/panthor_fw.c
··· 22 22 #include "panthor_fw.h" 23 23 #include "panthor_gem.h" 24 24 #include "panthor_gpu.h" 25 + #include "panthor_hw.h" 25 26 #include "panthor_mmu.h" 26 27 #include "panthor_regs.h" 27 28 #include "panthor_sched.h" ··· 34 33 #define PROGRESS_TIMEOUT_SCALE_SHIFT 10 35 34 #define IDLE_HYSTERESIS_US 800 36 35 #define PWROFF_HYSTERESIS_US 10000 36 + #define MCU_HALT_TIMEOUT_US (1ULL * USEC_PER_SEC) 37 37 38 38 /** 39 39 * struct panthor_fw_binary_hdr - Firmware binary header. ··· 317 315 return NULL; 318 316 319 317 return &ptdev->fw->iface.streams[csg_slot][cs_slot]; 318 + } 319 + 320 + static bool panthor_fw_has_glb_state(struct panthor_device *ptdev) 321 + { 322 + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 323 + 324 + return glb_iface->control->version >= CSF_IFACE_VERSION(4, 1, 0); 325 + } 326 + 327 + static bool panthor_fw_has_64bit_ep_req(struct panthor_device *ptdev) 328 + { 329 + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 330 + 331 + return glb_iface->control->version >= CSF_IFACE_VERSION(4, 0, 0); 332 + } 333 + 334 + u64 panthor_fw_csg_endpoint_req_get(struct panthor_device *ptdev, 335 + struct panthor_fw_csg_iface *csg_iface) 336 + { 337 + if (panthor_fw_has_64bit_ep_req(ptdev)) 338 + return csg_iface->input->endpoint_req2; 339 + else 340 + return csg_iface->input->endpoint_req; 341 + } 342 + 343 + void panthor_fw_csg_endpoint_req_set(struct panthor_device *ptdev, 344 + struct panthor_fw_csg_iface *csg_iface, u64 value) 345 + { 346 + if (panthor_fw_has_64bit_ep_req(ptdev)) 347 + csg_iface->input->endpoint_req2 = value; 348 + else 349 + csg_iface->input->endpoint_req = lower_32_bits(value); 350 + } 351 + 352 + void panthor_fw_csg_endpoint_req_update(struct panthor_device *ptdev, 353 + struct panthor_fw_csg_iface *csg_iface, u64 value, 354 + u64 mask) 355 + { 356 + if (panthor_fw_has_64bit_ep_req(ptdev)) 357 + panthor_fw_update_reqs64(csg_iface, endpoint_req2, value, mask); 358 + else 359 
+ panthor_fw_update_reqs(csg_iface, endpoint_req, lower_32_bits(value), 360 + lower_32_bits(mask)); 320 361 } 321 362 322 363 /** ··· 1041 996 GLB_IDLE_EN | 1042 997 GLB_IDLE; 1043 998 999 + if (panthor_fw_has_glb_state(ptdev)) 1000 + glb_iface->input->ack_irq_mask |= GLB_STATE_MASK; 1001 + 1044 1002 panthor_fw_update_reqs(glb_iface, req, GLB_IDLE_EN, GLB_IDLE_EN); 1045 1003 panthor_fw_toggle_reqs(glb_iface, req, ack, 1046 1004 GLB_CFG_ALLOC_EN | ··· 1117 1069 drm_err(&ptdev->base, "Failed to stop MCU"); 1118 1070 } 1119 1071 1072 + static bool panthor_fw_mcu_halted(struct panthor_device *ptdev) 1073 + { 1074 + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 1075 + bool halted; 1076 + 1077 + halted = gpu_read(ptdev, MCU_STATUS) == MCU_STATUS_HALT; 1078 + 1079 + if (panthor_fw_has_glb_state(ptdev)) 1080 + halted &= (GLB_STATE_GET(glb_iface->output->ack) == GLB_STATE_HALT); 1081 + 1082 + return halted; 1083 + } 1084 + 1085 + static void panthor_fw_halt_mcu(struct panthor_device *ptdev) 1086 + { 1087 + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 1088 + 1089 + if (panthor_fw_has_glb_state(ptdev)) 1090 + panthor_fw_update_reqs(glb_iface, req, GLB_STATE(GLB_STATE_HALT), GLB_STATE_MASK); 1091 + else 1092 + panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT); 1093 + 1094 + gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1); 1095 + } 1096 + 1097 + static bool panthor_fw_wait_mcu_halted(struct panthor_device *ptdev) 1098 + { 1099 + bool halted = false; 1100 + 1101 + if (read_poll_timeout_atomic(panthor_fw_mcu_halted, halted, halted, 10, 1102 + MCU_HALT_TIMEOUT_US, 0, ptdev)) { 1103 + drm_warn(&ptdev->base, "Timed out waiting for MCU to halt"); 1104 + return false; 1105 + } 1106 + 1107 + return true; 1108 + } 1109 + 1110 + static void panthor_fw_mcu_set_active(struct panthor_device *ptdev) 1111 + { 1112 + struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 1113 + 1114 + if 
(panthor_fw_has_glb_state(ptdev)) 1115 + panthor_fw_update_reqs(glb_iface, req, GLB_STATE(GLB_STATE_ACTIVE), GLB_STATE_MASK); 1116 + else 1117 + panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT); 1118 + } 1119 + 1120 1120 /** 1121 1121 * panthor_fw_pre_reset() - Call before a reset. 1122 1122 * @ptdev: Device. ··· 1181 1085 ptdev->reset.fast = false; 1182 1086 1183 1087 if (!on_hang) { 1184 - struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 1185 - u32 status; 1186 - 1187 - panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT); 1188 - gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1); 1189 - if (!gpu_read_poll_timeout(ptdev, MCU_STATUS, status, 1190 - status == MCU_STATUS_HALT, 10, 1191 - 100000)) { 1192 - ptdev->reset.fast = true; 1193 - } else { 1088 + panthor_fw_halt_mcu(ptdev); 1089 + if (!panthor_fw_wait_mcu_halted(ptdev)) 1194 1090 drm_warn(&ptdev->base, "Failed to cleanly suspend MCU"); 1195 - } 1091 + else 1092 + ptdev->reset.fast = true; 1196 1093 } 1094 + panthor_fw_stop(ptdev); 1197 1095 1198 1096 panthor_job_irq_suspend(&ptdev->fw->irq); 1199 1097 panthor_fw_stop(ptdev); ··· 1216 1126 */ 1217 1127 panthor_reload_fw_sections(ptdev, true); 1218 1128 } else { 1219 - /* The FW detects 0 -> 1 transitions. Make sure we reset 1220 - * the HALT bit before the FW is rebooted. 1129 + /* 1130 + * If the FW was previously successfully halted in the pre-reset 1131 + * operation, we need to transition it to active again before 1132 + * the FW is rebooted. 1221 1133 * This is not needed on a slow reset because FW sections are 1222 1134 * re-initialized. 
1223 1135 */ 1224 - struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev); 1225 - 1226 - panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT); 1136 + panthor_fw_mcu_set_active(ptdev); 1227 1137 } 1228 1138 1229 1139 ret = panthor_fw_start(ptdev); ··· 1261 1171 if (ptdev->fw->irq.irq) 1262 1172 panthor_job_irq_suspend(&ptdev->fw->irq); 1263 1173 1174 + panthor_fw_halt_mcu(ptdev); 1175 + if (!panthor_fw_wait_mcu_halted(ptdev)) 1176 + drm_warn(&ptdev->base, "Failed to halt MCU on unplug"); 1177 + 1264 1178 panthor_fw_stop(ptdev); 1265 1179 } 1266 1180 ··· 1280 1186 ptdev->fw->vm = NULL; 1281 1187 1282 1188 if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev)) 1283 - panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000); 1189 + panthor_hw_l2_power_off(ptdev); 1284 1190 } 1285 1191 1286 1192 /** ··· 1459 1365 return ret; 1460 1366 } 1461 1367 1462 - ret = panthor_gpu_l2_power_on(ptdev); 1368 + ret = panthor_hw_l2_power_on(ptdev); 1463 1369 if (ret) 1464 1370 return ret; 1465 1371 ··· 1503 1409 MODULE_FIRMWARE("arm/mali/arch11.8/mali_csffw.bin"); 1504 1410 MODULE_FIRMWARE("arm/mali/arch12.8/mali_csffw.bin"); 1505 1411 MODULE_FIRMWARE("arm/mali/arch13.8/mali_csffw.bin"); 1412 + MODULE_FIRMWARE("arm/mali/arch14.8/mali_csffw.bin");
+30 -2
drivers/gpu/drm/panthor/panthor_fw.h
··· 167 167 #define CSG_EP_REQ_TILER(x) (((x) << 16) & GENMASK(19, 16)) 168 168 #define CSG_EP_REQ_EXCL_COMPUTE BIT(20) 169 169 #define CSG_EP_REQ_EXCL_FRAGMENT BIT(21) 170 - #define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & GENMASK(31, 28)) 171 170 #define CSG_EP_REQ_PRIORITY_MASK GENMASK(31, 28) 171 + #define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & CSG_EP_REQ_PRIORITY_MASK) 172 + #define CSG_EP_REQ_PRIORITY_GET(x) (((x) & CSG_EP_REQ_PRIORITY_MASK) >> 28) 172 173 u32 endpoint_req; 173 - u32 reserved2[2]; 174 + u64 endpoint_req2; 174 175 u64 suspend_buf; 175 176 u64 protm_suspend_buf; 176 177 u32 config; ··· 215 214 #define GLB_FWCFG_UPDATE BIT(9) 216 215 #define GLB_IDLE_EN BIT(10) 217 216 #define GLB_SLEEP BIT(12) 217 + #define GLB_STATE_MASK GENMASK(14, 12) 218 + #define GLB_STATE_ACTIVE 0 219 + #define GLB_STATE_HALT 1 220 + #define GLB_STATE_SLEEP 2 221 + #define GLB_STATE_SUSPEND 3 222 + #define GLB_STATE(x) (((x) << 12) & GLB_STATE_MASK) 223 + #define GLB_STATE_GET(x) (((x) & GLB_STATE_MASK) >> 12) 218 224 #define GLB_INACTIVE_COMPUTE BIT(20) 219 225 #define GLB_INACTIVE_FRAGMENT BIT(21) 220 226 #define GLB_INACTIVE_TILER BIT(22) ··· 465 457 spin_unlock(&(__iface)->lock); \ 466 458 } while (0) 467 459 460 + #define panthor_fw_update_reqs64(__iface, __in_reg, __val, __mask) \ 461 + do { \ 462 + u64 __cur_val, __new_val; \ 463 + spin_lock(&(__iface)->lock); \ 464 + __cur_val = READ_ONCE((__iface)->input->__in_reg); \ 465 + __new_val = (__cur_val & ~(__mask)) | ((__val) & (__mask)); \ 466 + WRITE_ONCE((__iface)->input->__in_reg, __new_val); \ 467 + spin_unlock(&(__iface)->lock); \ 468 + } while (0) 469 + 468 470 struct panthor_fw_global_iface * 469 471 panthor_fw_get_glb_iface(struct panthor_device *ptdev); 470 472 ··· 483 465 484 466 struct panthor_fw_cs_iface * 485 467 panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot); 468 + 469 + u64 panthor_fw_csg_endpoint_req_get(struct panthor_device *ptdev, 470 + struct panthor_fw_csg_iface 
*csg_iface); 471 + 472 + void panthor_fw_csg_endpoint_req_set(struct panthor_device *ptdev, 473 + struct panthor_fw_csg_iface *csg_iface, u64 value); 474 + 475 + void panthor_fw_csg_endpoint_req_update(struct panthor_device *ptdev, 476 + struct panthor_fw_csg_iface *csg_iface, u64 value, 477 + u64 mask); 486 478 487 479 int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_id, u32 req_mask, 488 480 u32 *acked, u32 timeout_ms);
+3 -3
drivers/gpu/drm/panthor/panthor_gem.c
··· 145 145 bo = to_panthor_bo(&obj->base); 146 146 kbo->obj = &obj->base; 147 147 bo->flags = bo_flags; 148 + bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm); 149 + drm_gem_object_get(bo->exclusive_vm_root_gem); 150 + bo->base.base.resv = bo->exclusive_vm_root_gem->resv; 148 151 149 152 if (vm == panthor_fw_vm(ptdev)) 150 153 debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED; ··· 171 168 goto err_free_va; 172 169 173 170 kbo->vm = panthor_vm_get(vm); 174 - bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm); 175 - drm_gem_object_get(bo->exclusive_vm_root_gem); 176 - bo->base.base.resv = bo->exclusive_vm_root_gem->resv; 177 171 return kbo; 178 172 179 173 err_free_va:
+9 -3
drivers/gpu/drm/panthor/panthor_gpu.c
··· 19 19 20 20 #include "panthor_device.h" 21 21 #include "panthor_gpu.h" 22 + #include "panthor_hw.h" 22 23 #include "panthor_regs.h" 23 24 24 25 /** ··· 242 241 return 0; 243 242 } 244 243 244 + void panthor_gpu_l2_power_off(struct panthor_device *ptdev) 245 + { 246 + panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000); 247 + } 248 + 245 249 /** 246 250 * panthor_gpu_l2_power_on() - Power-on the L2-cache 247 251 * @ptdev: Device. ··· 374 368 { 375 369 /* On a fast reset, simply power down the L2. */ 376 370 if (!ptdev->reset.fast) 377 - panthor_gpu_soft_reset(ptdev); 371 + panthor_hw_soft_reset(ptdev); 378 372 else 379 - panthor_gpu_power_off(ptdev, L2, 1, 20000); 373 + panthor_hw_l2_power_off(ptdev); 380 374 381 375 panthor_gpu_irq_suspend(&ptdev->gpu->irq); 382 376 } ··· 391 385 void panthor_gpu_resume(struct panthor_device *ptdev) 392 386 { 393 387 panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK); 394 - panthor_gpu_l2_power_on(ptdev); 388 + panthor_hw_l2_power_on(ptdev); 395 389 } 396 390
+1
drivers/gpu/drm/panthor/panthor_gpu.h
··· 46 46 type ## _PWRTRANS, \ 47 47 mask, timeout_us) 48 48 49 + void panthor_gpu_l2_power_off(struct panthor_device *ptdev); 49 50 int panthor_gpu_l2_power_on(struct panthor_device *ptdev); 50 51 int panthor_gpu_flush_caches(struct panthor_device *ptdev, 51 52 u32 l2, u32 lsc, u32 other);
+102 -5
drivers/gpu/drm/panthor/panthor_hw.c
··· 4 4 #include <drm/drm_print.h> 5 5 6 6 #include "panthor_device.h" 7 + #include "panthor_gpu.h" 7 8 #include "panthor_hw.h" 9 + #include "panthor_pwr.h" 8 10 #include "panthor_regs.h" 9 11 10 12 #define GPU_PROD_ID_MAKE(arch_major, prod_major) \ 11 13 (((arch_major) << 24) | (prod_major)) 14 + 15 + /** struct panthor_hw_entry - HW arch major to panthor_hw binding entry */ 16 + struct panthor_hw_entry { 17 + /** @arch_min: Minimum supported architecture major value (inclusive) */ 18 + u8 arch_min; 19 + 20 + /** @arch_max: Maximum supported architecture major value (inclusive) */ 21 + u8 arch_max; 22 + 23 + /** @hwdev: Pointer to panthor_hw structure */ 24 + struct panthor_hw *hwdev; 25 + }; 26 + 27 + static struct panthor_hw panthor_hw_arch_v10 = { 28 + .ops = { 29 + .soft_reset = panthor_gpu_soft_reset, 30 + .l2_power_off = panthor_gpu_l2_power_off, 31 + .l2_power_on = panthor_gpu_l2_power_on, 32 + }, 33 + }; 34 + 35 + static struct panthor_hw panthor_hw_arch_v14 = { 36 + .ops = { 37 + .soft_reset = panthor_pwr_reset_soft, 38 + .l2_power_off = panthor_pwr_l2_power_off, 39 + .l2_power_on = panthor_pwr_l2_power_on, 40 + }, 41 + }; 42 + 43 + static struct panthor_hw_entry panthor_hw_match[] = { 44 + { 45 + .arch_min = 10, 46 + .arch_max = 13, 47 + .hwdev = &panthor_hw_arch_v10, 48 + }, 49 + { 50 + .arch_min = 14, 51 + .arch_max = 14, 52 + .hwdev = &panthor_hw_arch_v14, 53 + }, 54 + }; 12 55 13 56 static char *get_gpu_model_name(struct panthor_device *ptdev) 14 57 { ··· 98 55 fallthrough; 99 56 case GPU_PROD_ID_MAKE(13, 1): 100 57 return "Mali-G625"; 58 + case GPU_PROD_ID_MAKE(14, 0): 59 + return "Mali-G1-Ultra"; 60 + case GPU_PROD_ID_MAKE(14, 1): 61 + return "Mali-G1-Premium"; 62 + case GPU_PROD_ID_MAKE(14, 3): 63 + return "Mali-G1-Pro"; 101 64 } 102 65 103 66 return "(Unknown Mali GPU)"; ··· 113 64 { 114 65 unsigned int i; 115 66 116 - ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID); 117 67 ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID); 118 68 
ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID); 119 69 ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES); ··· 130 82 131 83 ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT); 132 84 133 - ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT); 134 - ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT); 135 - ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT); 136 - 137 85 /* Introduced in arch 11.x */ 138 86 ptdev->gpu_info.gpu_features = gpu_read64(ptdev, GPU_FEATURES); 87 + 88 + if (panthor_hw_has_pwr_ctrl(ptdev)) { 89 + /* Introduced in arch 14.x */ 90 + ptdev->gpu_info.l2_present = gpu_read64(ptdev, PWR_L2_PRESENT); 91 + ptdev->gpu_info.tiler_present = gpu_read64(ptdev, PWR_TILER_PRESENT); 92 + ptdev->gpu_info.shader_present = gpu_read64(ptdev, PWR_SHADER_PRESENT); 93 + } else { 94 + ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT); 95 + ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT); 96 + ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT); 97 + } 139 98 } 140 99 141 100 static void panthor_hw_info_init(struct panthor_device *ptdev) ··· 174 119 ptdev->gpu_info.tiler_present); 175 120 } 176 121 122 + static int panthor_hw_bind_device(struct panthor_device *ptdev) 123 + { 124 + struct panthor_hw *hdev = NULL; 125 + const u32 arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id); 126 + int i = 0; 127 + 128 + for (i = 0; i < ARRAY_SIZE(panthor_hw_match); i++) { 129 + struct panthor_hw_entry *entry = &panthor_hw_match[i]; 130 + 131 + if (arch_major >= entry->arch_min && arch_major <= entry->arch_max) { 132 + hdev = entry->hwdev; 133 + break; 134 + } 135 + } 136 + 137 + if (!hdev) 138 + return -EOPNOTSUPP; 139 + 140 + ptdev->hw = hdev; 141 + 142 + return 0; 143 + } 144 + 145 + static int panthor_hw_gpu_id_init(struct panthor_device *ptdev) 146 + { 147 + ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID); 148 + if (!ptdev->gpu_info.gpu_id) 
149 + return -ENXIO; 150 + 151 + return 0; 152 + } 153 + 177 154 int panthor_hw_init(struct panthor_device *ptdev) 178 155 { 156 + int ret = 0; 157 + 158 + ret = panthor_hw_gpu_id_init(ptdev); 159 + if (ret) 160 + return ret; 161 + 162 + ret = panthor_hw_bind_device(ptdev); 163 + if (ret) 164 + return ret; 165 + 179 166 panthor_hw_info_init(ptdev); 180 167 181 168 return 0;
+46 -1
drivers/gpu/drm/panthor/panthor_hw.h
··· 4 4 #ifndef __PANTHOR_HW_H__ 5 5 #define __PANTHOR_HW_H__ 6 6 7 - struct panthor_device; 7 + #include "panthor_device.h" 8 + #include "panthor_regs.h" 9 + 10 + /** 11 + * struct panthor_hw_ops - HW operations that are specific to a GPU 12 + */ 13 + struct panthor_hw_ops { 14 + /** @soft_reset: Soft reset function pointer */ 15 + int (*soft_reset)(struct panthor_device *ptdev); 16 + 17 + /** @l2_power_off: L2 power off function pointer */ 18 + void (*l2_power_off)(struct panthor_device *ptdev); 19 + 20 + /** @l2_power_on: L2 power on function pointer */ 21 + int (*l2_power_on)(struct panthor_device *ptdev); 22 + }; 23 + 24 + /** 25 + * struct panthor_hw - GPU specific register mapping and functions 26 + */ 27 + struct panthor_hw { 28 + /** @features: Bitmap containing panthor_hw_feature */ 29 + 30 + /** @ops: Panthor HW specific operations */ 31 + struct panthor_hw_ops ops; 32 + }; 8 33 9 34 int panthor_hw_init(struct panthor_device *ptdev); 35 + 36 + static inline int panthor_hw_soft_reset(struct panthor_device *ptdev) 37 + { 38 + return ptdev->hw->ops.soft_reset(ptdev); 39 + } 40 + 41 + static inline int panthor_hw_l2_power_on(struct panthor_device *ptdev) 42 + { 43 + return ptdev->hw->ops.l2_power_on(ptdev); 44 + } 45 + 46 + static inline void panthor_hw_l2_power_off(struct panthor_device *ptdev) 47 + { 48 + ptdev->hw->ops.l2_power_off(ptdev); 49 + } 50 + 51 + static inline bool panthor_hw_has_pwr_ctrl(struct panthor_device *ptdev) 52 + { 53 + return GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) >= 14; 54 + } 10 55 11 56 #endif /* __PANTHOR_HW_H__ */
+14 -5
drivers/gpu/drm/panthor/panthor_mmu.c
··· 904 904 { 905 905 struct panthor_device *ptdev = vm->ptdev; 906 906 struct io_pgtable_ops *ops = vm->pgtbl_ops; 907 + u64 start_iova = iova; 907 908 u64 offset = 0; 908 - 909 - drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size); 910 909 911 910 while (offset < size) { 912 911 size_t unmapped_sz = 0, pgcount; ··· 921 922 panthor_vm_flush_range(vm, iova, offset + unmapped_sz); 922 923 return -EINVAL; 923 924 } 925 + 926 + drm_dbg(&ptdev->base, 927 + "unmap: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pgcnt=%zu, pgsz=%zu", 928 + vm->as.id, start_iova, size, iova + offset, 929 + unmapped_sz / pgsize, pgsize); 930 + 924 931 offset += unmapped_sz; 925 932 } 926 933 ··· 942 937 struct scatterlist *sgl; 943 938 struct io_pgtable_ops *ops = vm->pgtbl_ops; 944 939 u64 start_iova = iova; 940 + u64 start_size = size; 945 941 int ret; 946 942 947 943 if (!size) ··· 962 956 len = min_t(size_t, len, size); 963 957 size -= len; 964 958 965 - drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx", 966 - vm->as.id, iova, &paddr, len); 967 - 968 959 while (len) { 969 960 size_t pgcount, mapped = 0; 970 961 size_t pgsize = get_pgsize(iova | paddr, len, &pgcount); 971 962 972 963 ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, 973 964 GFP_KERNEL, &mapped); 965 + 966 + drm_dbg(&ptdev->base, 967 + "map: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pa=%pad, pgcnt=%zu, pgsz=%zu", 968 + vm->as.id, start_iova, start_size, iova, &paddr, 969 + mapped / pgsize, pgsize); 970 + 974 971 iova += mapped; 975 972 paddr += mapped; 976 973 len -= mapped;
+549
drivers/gpu/drm/panthor/panthor_pwr.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 or MIT 2 + /* Copyright 2025 ARM Limited. All rights reserved. */ 3 + 4 + #include <linux/platform_device.h> 5 + #include <linux/interrupt.h> 6 + #include <linux/cleanup.h> 7 + #include <linux/iopoll.h> 8 + #include <linux/wait.h> 9 + 10 + #include <drm/drm_managed.h> 11 + #include <drm/drm_print.h> 12 + 13 + #include "panthor_device.h" 14 + #include "panthor_hw.h" 15 + #include "panthor_pwr.h" 16 + #include "panthor_regs.h" 17 + 18 + #define PWR_INTERRUPTS_MASK \ 19 + (PWR_IRQ_POWER_CHANGED_SINGLE | \ 20 + PWR_IRQ_POWER_CHANGED_ALL | \ 21 + PWR_IRQ_DELEGATION_CHANGED | \ 22 + PWR_IRQ_RESET_COMPLETED | \ 23 + PWR_IRQ_RETRACT_COMPLETED | \ 24 + PWR_IRQ_INSPECT_COMPLETED | \ 25 + PWR_IRQ_COMMAND_NOT_ALLOWED | \ 26 + PWR_IRQ_COMMAND_INVALID) 27 + 28 + #define PWR_ALL_CORES_MASK GENMASK_U64(63, 0) 29 + 30 + #define PWR_DOMAIN_MAX_BITS 16 31 + 32 + #define PWR_TRANSITION_TIMEOUT_US (2ULL * USEC_PER_SEC) 33 + 34 + #define PWR_RETRACT_TIMEOUT_US (2ULL * USEC_PER_MSEC) 35 + 36 + #define PWR_RESET_TIMEOUT_MS 500 37 + 38 + /** 39 + * struct panthor_pwr - PWR_CONTROL block management data. 40 + */ 41 + struct panthor_pwr { 42 + /** @irq: PWR irq. */ 43 + struct panthor_irq irq; 44 + 45 + /** @reqs_lock: Lock protecting access to pending_reqs. */ 46 + spinlock_t reqs_lock; 47 + 48 + /** @pending_reqs: Pending PWR requests. */ 49 + u32 pending_reqs; 50 + 51 + /** @reqs_acked: PWR request wait queue. 
*/ 52 + wait_queue_head_t reqs_acked; 53 + }; 54 + 55 + static void panthor_pwr_irq_handler(struct panthor_device *ptdev, u32 status) 56 + { 57 + spin_lock(&ptdev->pwr->reqs_lock); 58 + gpu_write(ptdev, PWR_INT_CLEAR, status); 59 + 60 + if (unlikely(status & PWR_IRQ_COMMAND_NOT_ALLOWED)) 61 + drm_err(&ptdev->base, "PWR_IRQ: COMMAND_NOT_ALLOWED"); 62 + 63 + if (unlikely(status & PWR_IRQ_COMMAND_INVALID)) 64 + drm_err(&ptdev->base, "PWR_IRQ: COMMAND_INVALID"); 65 + 66 + if (status & ptdev->pwr->pending_reqs) { 67 + ptdev->pwr->pending_reqs &= ~status; 68 + wake_up_all(&ptdev->pwr->reqs_acked); 69 + } 70 + spin_unlock(&ptdev->pwr->reqs_lock); 71 + } 72 + PANTHOR_IRQ_HANDLER(pwr, PWR, panthor_pwr_irq_handler); 73 + 74 + static void panthor_pwr_write_command(struct panthor_device *ptdev, u32 command, u64 args) 75 + { 76 + if (args) 77 + gpu_write64(ptdev, PWR_CMDARG, args); 78 + 79 + gpu_write(ptdev, PWR_COMMAND, command); 80 + } 81 + 82 + static bool reset_irq_raised(struct panthor_device *ptdev) 83 + { 84 + return gpu_read(ptdev, PWR_INT_RAWSTAT) & PWR_IRQ_RESET_COMPLETED; 85 + } 86 + 87 + static bool reset_pending(struct panthor_device *ptdev) 88 + { 89 + return (ptdev->pwr->pending_reqs & PWR_IRQ_RESET_COMPLETED); 90 + } 91 + 92 + static int panthor_pwr_reset(struct panthor_device *ptdev, u32 reset_cmd) 93 + { 94 + scoped_guard(spinlock_irqsave, &ptdev->pwr->reqs_lock) { 95 + if (reset_pending(ptdev)) { 96 + drm_WARN(&ptdev->base, 1, "Reset already pending"); 97 + } else { 98 + ptdev->pwr->pending_reqs |= PWR_IRQ_RESET_COMPLETED; 99 + gpu_write(ptdev, PWR_INT_CLEAR, PWR_IRQ_RESET_COMPLETED); 100 + panthor_pwr_write_command(ptdev, reset_cmd, 0); 101 + } 102 + } 103 + 104 + if (!wait_event_timeout(ptdev->pwr->reqs_acked, !reset_pending(ptdev), 105 + msecs_to_jiffies(PWR_RESET_TIMEOUT_MS))) { 106 + guard(spinlock_irqsave)(&ptdev->pwr->reqs_lock); 107 + 108 + if (reset_pending(ptdev) && !reset_irq_raised(ptdev)) { 109 + drm_err(&ptdev->base, "RESET timed out (0x%x)", 
reset_cmd); 110 + return -ETIMEDOUT; 111 + } 112 + 113 + ptdev->pwr->pending_reqs &= ~PWR_IRQ_RESET_COMPLETED; 114 + } 115 + 116 + return 0; 117 + } 118 + 119 + static const char *get_domain_name(u8 domain) 120 + { 121 + switch (domain) { 122 + case PWR_COMMAND_DOMAIN_L2: 123 + return "L2"; 124 + case PWR_COMMAND_DOMAIN_TILER: 125 + return "Tiler"; 126 + case PWR_COMMAND_DOMAIN_SHADER: 127 + return "Shader"; 128 + case PWR_COMMAND_DOMAIN_BASE: 129 + return "Base"; 130 + case PWR_COMMAND_DOMAIN_STACK: 131 + return "Stack"; 132 + } 133 + return "Unknown"; 134 + } 135 + 136 + static u32 get_domain_base(u8 domain) 137 + { 138 + switch (domain) { 139 + case PWR_COMMAND_DOMAIN_L2: 140 + return PWR_L2_PRESENT; 141 + case PWR_COMMAND_DOMAIN_TILER: 142 + return PWR_TILER_PRESENT; 143 + case PWR_COMMAND_DOMAIN_SHADER: 144 + return PWR_SHADER_PRESENT; 145 + case PWR_COMMAND_DOMAIN_BASE: 146 + return PWR_BASE_PRESENT; 147 + case PWR_COMMAND_DOMAIN_STACK: 148 + return PWR_STACK_PRESENT; 149 + } 150 + return 0; 151 + } 152 + 153 + static u32 get_domain_ready_reg(u32 domain) 154 + { 155 + return get_domain_base(domain) + (PWR_L2_READY - PWR_L2_PRESENT); 156 + } 157 + 158 + static u32 get_domain_pwrtrans_reg(u32 domain) 159 + { 160 + return get_domain_base(domain) + (PWR_L2_PWRTRANS - PWR_L2_PRESENT); 161 + } 162 + 163 + static bool is_valid_domain(u32 domain) 164 + { 165 + return get_domain_base(domain) != 0; 166 + } 167 + 168 + static bool has_rtu(struct panthor_device *ptdev) 169 + { 170 + return ptdev->gpu_info.gpu_features & GPU_FEATURES_RAY_TRAVERSAL; 171 + } 172 + 173 + static u8 get_domain_subdomain(struct panthor_device *ptdev, u32 domain) 174 + { 175 + if (domain == PWR_COMMAND_DOMAIN_SHADER && has_rtu(ptdev)) 176 + return PWR_COMMAND_SUBDOMAIN_RTU; 177 + 178 + return 0; 179 + } 180 + 181 + static int panthor_pwr_domain_wait_transition(struct panthor_device *ptdev, u32 domain, 182 + u32 timeout_us) 183 + { 184 + u32 pwrtrans_reg = get_domain_pwrtrans_reg(domain); 185 + 
u64 val; 186 + int ret = 0; 187 + 188 + ret = gpu_read64_poll_timeout(ptdev, pwrtrans_reg, val, !(PWR_ALL_CORES_MASK & val), 100, 189 + timeout_us); 190 + if (ret) { 191 + drm_err(&ptdev->base, "%s domain power in transition, pwrtrans(0x%llx)", 192 + get_domain_name(domain), val); 193 + return ret; 194 + } 195 + 196 + return 0; 197 + } 198 + 199 + static void panthor_pwr_debug_info_show(struct panthor_device *ptdev) 200 + { 201 + drm_info(&ptdev->base, "GPU_FEATURES: 0x%016llx", gpu_read64(ptdev, GPU_FEATURES)); 202 + drm_info(&ptdev->base, "PWR_STATUS: 0x%016llx", gpu_read64(ptdev, PWR_STATUS)); 203 + drm_info(&ptdev->base, "L2_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_L2_PRESENT)); 204 + drm_info(&ptdev->base, "L2_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_L2_PWRTRANS)); 205 + drm_info(&ptdev->base, "L2_READY: 0x%016llx", gpu_read64(ptdev, PWR_L2_READY)); 206 + drm_info(&ptdev->base, "TILER_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_TILER_PRESENT)); 207 + drm_info(&ptdev->base, "TILER_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_TILER_PWRTRANS)); 208 + drm_info(&ptdev->base, "TILER_READY: 0x%016llx", gpu_read64(ptdev, PWR_TILER_READY)); 209 + drm_info(&ptdev->base, "SHADER_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_PRESENT)); 210 + drm_info(&ptdev->base, "SHADER_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_PWRTRANS)); 211 + drm_info(&ptdev->base, "SHADER_READY: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_READY)); 212 + } 213 + 214 + static int panthor_pwr_domain_transition(struct panthor_device *ptdev, u32 cmd, u32 domain, 215 + u64 mask, u32 timeout_us) 216 + { 217 + u32 ready_reg = get_domain_ready_reg(domain); 218 + u32 pwr_cmd = PWR_COMMAND_DEF(cmd, domain, get_domain_subdomain(ptdev, domain)); 219 + u64 expected_val = 0; 220 + u64 val; 221 + int ret = 0; 222 + 223 + if (drm_WARN_ON(&ptdev->base, !is_valid_domain(domain))) 224 + return -EINVAL; 225 + 226 + switch (cmd) { 227 + case PWR_COMMAND_POWER_DOWN: 228 + expected_val = 0; 229 + break; 230 + 
case PWR_COMMAND_POWER_UP: 231 + expected_val = mask; 232 + break; 233 + default: 234 + drm_err(&ptdev->base, "Invalid power domain transition command (0x%x)", cmd); 235 + return -EINVAL; 236 + } 237 + 238 + ret = panthor_pwr_domain_wait_transition(ptdev, domain, timeout_us); 239 + if (ret) 240 + return ret; 241 + 242 + /* domain already in target state, return early */ 243 + if ((gpu_read64(ptdev, ready_reg) & mask) == expected_val) 244 + return 0; 245 + 246 + panthor_pwr_write_command(ptdev, pwr_cmd, mask); 247 + 248 + ret = gpu_read64_poll_timeout(ptdev, ready_reg, val, (mask & val) == expected_val, 100, 249 + timeout_us); 250 + if (ret) { 251 + drm_err(&ptdev->base, 252 + "timeout waiting on %s power domain transition, cmd(0x%x), arg(0x%llx)", 253 + get_domain_name(domain), pwr_cmd, mask); 254 + panthor_pwr_debug_info_show(ptdev); 255 + return ret; 256 + } 257 + 258 + return 0; 259 + } 260 + 261 + #define panthor_pwr_domain_power_off(__ptdev, __domain, __mask, __timeout_us) \ 262 + panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_DOWN, __domain, __mask, \ 263 + __timeout_us) 264 + 265 + #define panthor_pwr_domain_power_on(__ptdev, __domain, __mask, __timeout_us) \ 266 + panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_UP, __domain, __mask, __timeout_us) 267 + 268 + /** 269 + * retract_domain() - Retract control of a domain from MCU 270 + * @ptdev: Device. 271 + * @domain: Domain to retract the control 272 + * 273 + * Retracting L2 domain is not expected since it won't be delegated. 274 + * 275 + * Return: 0 on success or retracted already. 276 + * -EPERM if domain is L2. 277 + * A negative error code otherwise. 
278 + */ 279 + static int retract_domain(struct panthor_device *ptdev, u32 domain) 280 + { 281 + const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_RETRACT, domain, 0); 282 + const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS); 283 + const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain); 284 + const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain); 285 + u64 val; 286 + int ret; 287 + 288 + if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2)) 289 + return -EPERM; 290 + 291 + ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, !(PWR_STATUS_RETRACT_PENDING & val), 292 + 0, PWR_RETRACT_TIMEOUT_US); 293 + if (ret) { 294 + drm_err(&ptdev->base, "%s domain retract pending", get_domain_name(domain)); 295 + return ret; 296 + } 297 + 298 + if (!(pwr_status & delegated_mask)) { 299 + drm_dbg(&ptdev->base, "%s domain already retracted", get_domain_name(domain)); 300 + return 0; 301 + } 302 + 303 + panthor_pwr_write_command(ptdev, pwr_cmd, 0); 304 + 305 + /* 306 + * On successful retraction 307 + * allow-flag will be set with delegated-flag being cleared. 308 + */ 309 + ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, 310 + ((delegated_mask | allow_mask) & val) == allow_mask, 10, 311 + PWR_TRANSITION_TIMEOUT_US); 312 + if (ret) { 313 + drm_err(&ptdev->base, "Retracting %s domain timeout, cmd(0x%x)", 314 + get_domain_name(domain), pwr_cmd); 315 + return ret; 316 + } 317 + 318 + return 0; 319 + } 320 + 321 + /** 322 + * delegate_domain() - Delegate control of a domain to MCU 323 + * @ptdev: Device. 324 + * @domain: Domain to delegate the control 325 + * 326 + * Delegating L2 domain is prohibited. 327 + * 328 + * Return: 329 + * * 0 on success or delegated already. 330 + * * -EPERM if domain is L2. 331 + * * A negative error code otherwise. 
332 + */ 333 + static int delegate_domain(struct panthor_device *ptdev, u32 domain) 334 + { 335 + const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_DELEGATE, domain, 0); 336 + const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS); 337 + const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain); 338 + const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain); 339 + u64 val; 340 + int ret; 341 + 342 + if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2)) 343 + return -EPERM; 344 + 345 + /* Already delegated, exit early */ 346 + if (pwr_status & delegated_mask) 347 + return 0; 348 + 349 + /* Check if the command is allowed before delegating. */ 350 + if (!(pwr_status & allow_mask)) { 351 + drm_warn(&ptdev->base, "Delegating %s domain not allowed", get_domain_name(domain)); 352 + return -EPERM; 353 + } 354 + 355 + ret = panthor_pwr_domain_wait_transition(ptdev, domain, PWR_TRANSITION_TIMEOUT_US); 356 + if (ret) 357 + return ret; 358 + 359 + panthor_pwr_write_command(ptdev, pwr_cmd, 0); 360 + 361 + /* 362 + * On successful delegation 363 + * allow-flag will be cleared with delegated-flag being set. 
364 + */ 365 + ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, 366 + ((delegated_mask | allow_mask) & val) == delegated_mask, 367 + 10, PWR_TRANSITION_TIMEOUT_US); 368 + if (ret) { 369 + drm_err(&ptdev->base, "Delegating %s domain timeout, cmd(0x%x)", 370 + get_domain_name(domain), pwr_cmd); 371 + return ret; 372 + } 373 + 374 + return 0; 375 + } 376 + 377 + static int panthor_pwr_delegate_domains(struct panthor_device *ptdev) 378 + { 379 + int ret; 380 + 381 + if (!ptdev->pwr) 382 + return 0; 383 + 384 + ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER); 385 + if (ret) 386 + return ret; 387 + 388 + ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_TILER); 389 + if (ret) 390 + goto err_retract_shader; 391 + 392 + return 0; 393 + 394 + err_retract_shader: 395 + retract_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER); 396 + 397 + return ret; 398 + } 399 + 400 + /** 401 + * panthor_pwr_domain_force_off - Forcefully power down a domain. 402 + * @ptdev: Device. 403 + * @domain: Domain to forcefully power down. 404 + * 405 + * This function will attempt to retract and power off the requested power 406 + * domain. However, if retraction fails, the operation is aborted. If power off 407 + * fails, the domain will remain retracted and under the host control. 408 + * 409 + * Return: 0 on success or a negative error code on failure. 410 + */ 411 + static int panthor_pwr_domain_force_off(struct panthor_device *ptdev, u32 domain) 412 + { 413 + const u64 domain_ready = gpu_read64(ptdev, get_domain_ready_reg(domain)); 414 + int ret; 415 + 416 + /* Domain already powered down, early exit. */ 417 + if (!domain_ready) 418 + return 0; 419 + 420 + /* Domain has to be in host control to issue power off command. 
*/ 421 + ret = retract_domain(ptdev, domain); 422 + if (ret) 423 + return ret; 424 + 425 + return panthor_pwr_domain_power_off(ptdev, domain, domain_ready, PWR_TRANSITION_TIMEOUT_US); 426 + } 427 + 428 + void panthor_pwr_unplug(struct panthor_device *ptdev) 429 + { 430 + unsigned long flags; 431 + 432 + if (!ptdev->pwr) 433 + return; 434 + 435 + /* Make sure the IRQ handler is not running after that point. */ 436 + panthor_pwr_irq_suspend(&ptdev->pwr->irq); 437 + 438 + /* Wake-up all waiters. */ 439 + spin_lock_irqsave(&ptdev->pwr->reqs_lock, flags); 440 + ptdev->pwr->pending_reqs = 0; 441 + wake_up_all(&ptdev->pwr->reqs_acked); 442 + spin_unlock_irqrestore(&ptdev->pwr->reqs_lock, flags); 443 + } 444 + 445 + int panthor_pwr_init(struct panthor_device *ptdev) 446 + { 447 + struct panthor_pwr *pwr; 448 + int err, irq; 449 + 450 + if (!panthor_hw_has_pwr_ctrl(ptdev)) 451 + return 0; 452 + 453 + pwr = drmm_kzalloc(&ptdev->base, sizeof(*pwr), GFP_KERNEL); 454 + if (!pwr) 455 + return -ENOMEM; 456 + 457 + spin_lock_init(&pwr->reqs_lock); 458 + init_waitqueue_head(&pwr->reqs_acked); 459 + ptdev->pwr = pwr; 460 + 461 + irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu"); 462 + if (irq < 0) 463 + return irq; 464 + 465 + err = panthor_request_pwr_irq(ptdev, &pwr->irq, irq, PWR_INTERRUPTS_MASK); 466 + if (err) 467 + return err; 468 + 469 + return 0; 470 + } 471 + 472 + int panthor_pwr_reset_soft(struct panthor_device *ptdev) 473 + { 474 + if (!(gpu_read64(ptdev, PWR_STATUS) & PWR_STATUS_ALLOW_SOFT_RESET)) { 475 + drm_err(&ptdev->base, "RESET_SOFT not allowed"); 476 + return -EOPNOTSUPP; 477 + } 478 + 479 + return panthor_pwr_reset(ptdev, PWR_COMMAND_RESET_SOFT); 480 + } 481 + 482 + void panthor_pwr_l2_power_off(struct panthor_device *ptdev) 483 + { 484 + const u64 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2); 485 + const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS); 486 + 487 + /* Abort if L2 power off constraints are not satisfied 
*/ 488 + if (!(pwr_status & l2_allow_mask)) { 489 + drm_warn(&ptdev->base, "Power off L2 domain not allowed"); 490 + return; 491 + } 492 + 493 + /* It is expected that when halting the MCU, it would power down its 494 + * delegated domains. However, an unresponsive or hung MCU may not do 495 + * so, which is why we need to check and retract the domains back into 496 + * host control to be powered down in the right order before powering 497 + * down the L2. 498 + */ 499 + if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_TILER)) 500 + return; 501 + 502 + if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_SHADER)) 503 + return; 504 + 505 + panthor_pwr_domain_power_off(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present, 506 + PWR_TRANSITION_TIMEOUT_US); 507 + } 508 + 509 + int panthor_pwr_l2_power_on(struct panthor_device *ptdev) 510 + { 511 + const u32 pwr_status = gpu_read64(ptdev, PWR_STATUS); 512 + const u32 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2); 513 + int ret; 514 + 515 + if ((pwr_status & l2_allow_mask) == 0) { 516 + drm_warn(&ptdev->base, "Power on L2 domain not allowed"); 517 + return -EPERM; 518 + } 519 + 520 + ret = panthor_pwr_domain_power_on(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present, 521 + PWR_TRANSITION_TIMEOUT_US); 522 + if (ret) 523 + return ret; 524 + 525 + /* Delegate control of the shader and tiler power domains to the MCU as 526 + * it can better manage which shader/tiler cores need to be powered up 527 + * or can be powered down based on currently running jobs. 528 + * 529 + * If the shader and tiler domains are already delegated to the MCU, 530 + * this call would just return early. 
531 + */ 532 + return panthor_pwr_delegate_domains(ptdev); 533 + } 534 + 535 + void panthor_pwr_suspend(struct panthor_device *ptdev) 536 + { 537 + if (!ptdev->pwr) 538 + return; 539 + 540 + panthor_pwr_irq_suspend(&ptdev->pwr->irq); 541 + } 542 + 543 + void panthor_pwr_resume(struct panthor_device *ptdev) 544 + { 545 + if (!ptdev->pwr) 546 + return; 547 + 548 + panthor_pwr_irq_resume(&ptdev->pwr->irq, PWR_INTERRUPTS_MASK); 549 + }
+23
drivers/gpu/drm/panthor/panthor_pwr.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 or MIT */ 2 + /* Copyright 2025 ARM Limited. All rights reserved. */ 3 + 4 + #ifndef __PANTHOR_PWR_H__ 5 + #define __PANTHOR_PWR_H__ 6 + 7 + struct panthor_device; 8 + 9 + void panthor_pwr_unplug(struct panthor_device *ptdev); 10 + 11 + int panthor_pwr_init(struct panthor_device *ptdev); 12 + 13 + int panthor_pwr_reset_soft(struct panthor_device *ptdev); 14 + 15 + void panthor_pwr_l2_power_off(struct panthor_device *ptdev); 16 + 17 + int panthor_pwr_l2_power_on(struct panthor_device *ptdev); 18 + 19 + void panthor_pwr_suspend(struct panthor_device *ptdev); 20 + 21 + void panthor_pwr_resume(struct panthor_device *ptdev); 22 + 23 + #endif /* __PANTHOR_PWR_H__ */
+79
drivers/gpu/drm/panthor/panthor_regs.h
··· 74 74 75 75 #define GPU_FEATURES 0x60 76 76 #define GPU_FEATURES_RAY_INTERSECTION BIT(2) 77 + #define GPU_FEATURES_RAY_TRAVERSAL BIT(5) 77 78 78 79 #define GPU_TIMESTAMP_OFFSET 0x88 79 80 #define GPU_CYCLE_COUNT 0x90 ··· 209 208 210 209 #define CSF_DOORBELL(i) (0x80000 + ((i) * 0x10000)) 211 210 #define CSF_GLB_DOORBELL_ID 0 211 + 212 + /* PWR Control registers */ 213 + 214 + #define PWR_CONTROL_BASE 0x800 215 + #define PWR_CTRL_REG(x) (PWR_CONTROL_BASE + (x)) 216 + 217 + #define PWR_INT_RAWSTAT PWR_CTRL_REG(0x0) 218 + #define PWR_INT_CLEAR PWR_CTRL_REG(0x4) 219 + #define PWR_INT_MASK PWR_CTRL_REG(0x8) 220 + #define PWR_INT_STAT PWR_CTRL_REG(0xc) 221 + #define PWR_IRQ_POWER_CHANGED_SINGLE BIT(0) 222 + #define PWR_IRQ_POWER_CHANGED_ALL BIT(1) 223 + #define PWR_IRQ_DELEGATION_CHANGED BIT(2) 224 + #define PWR_IRQ_RESET_COMPLETED BIT(3) 225 + #define PWR_IRQ_RETRACT_COMPLETED BIT(4) 226 + #define PWR_IRQ_INSPECT_COMPLETED BIT(5) 227 + #define PWR_IRQ_COMMAND_NOT_ALLOWED BIT(30) 228 + #define PWR_IRQ_COMMAND_INVALID BIT(31) 229 + 230 + #define PWR_STATUS PWR_CTRL_REG(0x20) 231 + #define PWR_STATUS_ALLOW_L2 BIT_U64(0) 232 + #define PWR_STATUS_ALLOW_TILER BIT_U64(1) 233 + #define PWR_STATUS_ALLOW_SHADER BIT_U64(8) 234 + #define PWR_STATUS_ALLOW_BASE BIT_U64(14) 235 + #define PWR_STATUS_ALLOW_STACK BIT_U64(15) 236 + #define PWR_STATUS_DOMAIN_ALLOWED(x) BIT_U64(x) 237 + #define PWR_STATUS_DELEGATED_L2 BIT_U64(16) 238 + #define PWR_STATUS_DELEGATED_TILER BIT_U64(17) 239 + #define PWR_STATUS_DELEGATED_SHADER BIT_U64(24) 240 + #define PWR_STATUS_DELEGATED_BASE BIT_U64(30) 241 + #define PWR_STATUS_DELEGATED_STACK BIT_U64(31) 242 + #define PWR_STATUS_DELEGATED_SHIFT 16 243 + #define PWR_STATUS_DOMAIN_DELEGATED(x) BIT_U64((x) + PWR_STATUS_DELEGATED_SHIFT) 244 + #define PWR_STATUS_ALLOW_SOFT_RESET BIT_U64(33) 245 + #define PWR_STATUS_ALLOW_FAST_RESET BIT_U64(34) 246 + #define PWR_STATUS_POWER_PENDING BIT_U64(41) 247 + #define PWR_STATUS_RESET_PENDING BIT_U64(42) 248 + #define 
PWR_STATUS_RETRACT_PENDING BIT_U64(43) 249 + #define PWR_STATUS_INSPECT_PENDING BIT_U64(44) 250 + 251 + #define PWR_COMMAND PWR_CTRL_REG(0x28) 252 + #define PWR_COMMAND_POWER_UP 0x10 253 + #define PWR_COMMAND_POWER_DOWN 0x11 254 + #define PWR_COMMAND_DELEGATE 0x20 255 + #define PWR_COMMAND_RETRACT 0x21 256 + #define PWR_COMMAND_RESET_SOFT 0x31 257 + #define PWR_COMMAND_RESET_FAST 0x32 258 + #define PWR_COMMAND_INSPECT 0xF0 259 + #define PWR_COMMAND_DOMAIN_L2 0 260 + #define PWR_COMMAND_DOMAIN_TILER 1 261 + #define PWR_COMMAND_DOMAIN_SHADER 8 262 + #define PWR_COMMAND_DOMAIN_BASE 14 263 + #define PWR_COMMAND_DOMAIN_STACK 15 264 + #define PWR_COMMAND_SUBDOMAIN_RTU BIT(0) 265 + #define PWR_COMMAND_DEF(cmd, domain, subdomain) \ 266 + (((subdomain) << 16) | ((domain) << 8) | (cmd)) 267 + 268 + #define PWR_CMDARG PWR_CTRL_REG(0x30) 269 + 270 + #define PWR_L2_PRESENT PWR_CTRL_REG(0x100) 271 + #define PWR_L2_READY PWR_CTRL_REG(0x108) 272 + #define PWR_L2_PWRTRANS PWR_CTRL_REG(0x110) 273 + #define PWR_L2_PWRACTIVE PWR_CTRL_REG(0x118) 274 + #define PWR_TILER_PRESENT PWR_CTRL_REG(0x140) 275 + #define PWR_TILER_READY PWR_CTRL_REG(0x148) 276 + #define PWR_TILER_PWRTRANS PWR_CTRL_REG(0x150) 277 + #define PWR_TILER_PWRACTIVE PWR_CTRL_REG(0x158) 278 + #define PWR_SHADER_PRESENT PWR_CTRL_REG(0x200) 279 + #define PWR_SHADER_READY PWR_CTRL_REG(0x208) 280 + #define PWR_SHADER_PWRTRANS PWR_CTRL_REG(0x210) 281 + #define PWR_SHADER_PWRACTIVE PWR_CTRL_REG(0x218) 282 + #define PWR_BASE_PRESENT PWR_CTRL_REG(0x380) 283 + #define PWR_BASE_READY PWR_CTRL_REG(0x388) 284 + #define PWR_BASE_PWRTRANS PWR_CTRL_REG(0x390) 285 + #define PWR_BASE_PWRACTIVE PWR_CTRL_REG(0x398) 286 + #define PWR_STACK_PRESENT PWR_CTRL_REG(0x3c0) 287 + #define PWR_STACK_READY PWR_CTRL_REG(0x3c8) 288 + #define PWR_STACK_PWRTRANS PWR_CTRL_REG(0x3d0) 212 289 213 290 #endif
+226 -90
drivers/gpu/drm/panthor/panthor_sched.c
··· 364 364 /** @name: DRM scheduler name for this queue. */ 365 365 char *name; 366 366 367 - /** 368 - * @remaining_time: Time remaining before the job timeout expires. 369 - * 370 - * The job timeout is suspended when the queue is not scheduled by the 371 - * FW. Every time we suspend the timer, we need to save the remaining 372 - * time so we can restore it later on. 373 - */ 374 - unsigned long remaining_time; 367 + /** @timeout: Queue timeout related fields. */ 368 + struct { 369 + /** @timeout.work: Work executed when a queue timeout occurs. */ 370 + struct delayed_work work; 375 371 376 - /** @timeout_suspended: True if the job timeout was suspended. */ 377 - bool timeout_suspended; 372 + /** 373 + * @timeout.remaining: Time remaining before a queue timeout. 374 + * 375 + * When the timer is running, this value is set to MAX_SCHEDULE_TIMEOUT. 376 + * When the timer is suspended, it's set to the time remaining when the 377 + * timer was suspended. 378 + */ 379 + unsigned long remaining; 380 + } timeout; 378 381 379 382 /** 380 383 * @doorbell_id: Doorbell assigned to this queue. ··· 902 899 if (IS_ERR_OR_NULL(queue)) 903 900 return; 904 901 902 + /* This should have been disabled before that point. 
*/ 903 + drm_WARN_ON(&group->ptdev->base, 904 + disable_delayed_work_sync(&queue->timeout.work)); 905 + 905 906 if (queue->entity.fence_context) 906 907 drm_sched_entity_destroy(&queue->entity); 907 908 ··· 1053 1046 return 0; 1054 1047 } 1055 1048 1049 + static bool 1050 + group_is_idle(struct panthor_group *group) 1051 + { 1052 + struct panthor_device *ptdev = group->ptdev; 1053 + u32 inactive_queues; 1054 + 1055 + if (group->csg_id >= 0) 1056 + return ptdev->scheduler->csg_slots[group->csg_id].idle; 1057 + 1058 + inactive_queues = group->idle_queues | group->blocked_queues; 1059 + return hweight32(inactive_queues) == group->queue_count; 1060 + } 1061 + 1062 + static void 1063 + queue_reset_timeout_locked(struct panthor_queue *queue) 1064 + { 1065 + lockdep_assert_held(&queue->fence_ctx.lock); 1066 + 1067 + if (queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT) { 1068 + mod_delayed_work(queue->scheduler.timeout_wq, 1069 + &queue->timeout.work, 1070 + msecs_to_jiffies(JOB_TIMEOUT_MS)); 1071 + } 1072 + } 1073 + 1074 + static bool 1075 + group_can_run(struct panthor_group *group) 1076 + { 1077 + return group->state != PANTHOR_CS_GROUP_TERMINATED && 1078 + group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE && 1079 + !group->destroyed && group->fatal_queues == 0 && 1080 + !group->timedout; 1081 + } 1082 + 1083 + static bool 1084 + queue_timeout_is_suspended(struct panthor_queue *queue) 1085 + { 1086 + /* When running, the remaining time is set to MAX_SCHEDULE_TIMEOUT. */ 1087 + return queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT; 1088 + } 1089 + 1090 + static void 1091 + queue_suspend_timeout_locked(struct panthor_queue *queue) 1092 + { 1093 + unsigned long qtimeout, now; 1094 + struct panthor_group *group; 1095 + struct panthor_job *job; 1096 + bool timer_was_active; 1097 + 1098 + lockdep_assert_held(&queue->fence_ctx.lock); 1099 + 1100 + /* Already suspended, nothing to do. 
*/ 1101 + if (queue_timeout_is_suspended(queue)) 1102 + return; 1103 + 1104 + job = list_first_entry_or_null(&queue->fence_ctx.in_flight_jobs, 1105 + struct panthor_job, node); 1106 + group = job ? job->group : NULL; 1107 + 1108 + /* If the queue is blocked and the group is idle, we want the timer to 1109 + * keep running because the group can't be unblocked by other queues, 1110 + * so it has to come from an external source, and we want to timebox 1111 + * this external signalling. 1112 + */ 1113 + if (group && group_can_run(group) && 1114 + (group->blocked_queues & BIT(job->queue_idx)) && 1115 + group_is_idle(group)) 1116 + return; 1117 + 1118 + now = jiffies; 1119 + qtimeout = queue->timeout.work.timer.expires; 1120 + 1121 + /* Cancel the timer. */ 1122 + timer_was_active = cancel_delayed_work(&queue->timeout.work); 1123 + if (!timer_was_active || !job) 1124 + queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS); 1125 + else if (time_after(qtimeout, now)) 1126 + queue->timeout.remaining = qtimeout - now; 1127 + else 1128 + queue->timeout.remaining = 0; 1129 + 1130 + if (WARN_ON_ONCE(queue->timeout.remaining > msecs_to_jiffies(JOB_TIMEOUT_MS))) 1131 + queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS); 1132 + } 1133 + 1134 + static void 1135 + queue_suspend_timeout(struct panthor_queue *queue) 1136 + { 1137 + spin_lock(&queue->fence_ctx.lock); 1138 + queue_suspend_timeout_locked(queue); 1139 + spin_unlock(&queue->fence_ctx.lock); 1140 + } 1141 + 1142 + static void 1143 + queue_resume_timeout(struct panthor_queue *queue) 1144 + { 1145 + spin_lock(&queue->fence_ctx.lock); 1146 + 1147 + if (queue_timeout_is_suspended(queue)) { 1148 + mod_delayed_work(queue->scheduler.timeout_wq, 1149 + &queue->timeout.work, 1150 + queue->timeout.remaining); 1151 + 1152 + queue->timeout.remaining = MAX_SCHEDULE_TIMEOUT; 1153 + } 1154 + 1155 + spin_unlock(&queue->fence_ctx.lock); 1156 + } 1157 + 1056 1158 /** 1057 1159 * cs_slot_prog_locked() - Program a queue slot 
1058 1160 * @ptdev: Device. ··· 1200 1084 CS_IDLE_EMPTY | 1201 1085 CS_STATE_MASK | 1202 1086 CS_EXTRACT_EVENT); 1203 - if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) { 1204 - drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time); 1205 - queue->timeout_suspended = false; 1206 - } 1087 + if (queue->iface.input->insert != queue->iface.input->extract) 1088 + queue_resume_timeout(queue); 1207 1089 } 1208 1090 1209 1091 /** ··· 1228 1114 CS_STATE_STOP, 1229 1115 CS_STATE_MASK); 1230 1116 1231 - /* If the queue is blocked, we want to keep the timeout running, so 1232 - * we can detect unbounded waits and kill the group when that happens. 1233 - */ 1234 - if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) { 1235 - queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler); 1236 - queue->timeout_suspended = true; 1237 - WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS)); 1238 - } 1117 + queue_suspend_timeout(queue); 1239 1118 1240 1119 return 0; 1241 1120 } ··· 1247 1140 { 1248 1141 struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id]; 1249 1142 struct panthor_fw_csg_iface *csg_iface; 1143 + u64 endpoint_req; 1250 1144 1251 1145 lockdep_assert_held(&ptdev->scheduler->lock); 1252 1146 1253 1147 csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id); 1254 - csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28; 1148 + endpoint_req = panthor_fw_csg_endpoint_req_get(ptdev, csg_iface); 1149 + csg_slot->priority = CSG_EP_REQ_PRIORITY_GET(endpoint_req); 1255 1150 } 1256 1151 1257 1152 /** ··· 1413 1304 struct panthor_csg_slot *csg_slot; 1414 1305 struct panthor_group *group; 1415 1306 u32 queue_mask = 0, i; 1307 + u64 endpoint_req; 1416 1308 1417 1309 lockdep_assert_held(&ptdev->scheduler->lock); 1418 1310 ··· 1440 1330 csg_iface->input->allow_compute = group->compute_core_mask; 1441 1331 csg_iface->input->allow_fragment = 
group->fragment_core_mask; 1442 1332 csg_iface->input->allow_other = group->tiler_core_mask; 1443 - csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) | 1444 - CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) | 1445 - CSG_EP_REQ_TILER(group->max_tiler_cores) | 1446 - CSG_EP_REQ_PRIORITY(priority); 1333 + endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) | 1334 + CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) | 1335 + CSG_EP_REQ_TILER(group->max_tiler_cores) | 1336 + CSG_EP_REQ_PRIORITY(priority); 1337 + panthor_fw_csg_endpoint_req_set(ptdev, csg_iface, endpoint_req); 1338 + 1447 1339 csg_iface->input->config = panthor_vm_as(group->vm); 1448 1340 1449 1341 if (group->suspend_buf) ··· 2028 1916 return ctx->group_count == sched->csg_slot_count; 2029 1917 } 2030 1918 2031 - static bool 2032 - group_is_idle(struct panthor_group *group) 2033 - { 2034 - struct panthor_device *ptdev = group->ptdev; 2035 - u32 inactive_queues; 2036 - 2037 - if (group->csg_id >= 0) 2038 - return ptdev->scheduler->csg_slots[group->csg_id].idle; 2039 - 2040 - inactive_queues = group->idle_queues | group->blocked_queues; 2041 - return hweight32(inactive_queues) == group->queue_count; 2042 - } 2043 - 2044 - static bool 2045 - group_can_run(struct panthor_group *group) 2046 - { 2047 - return group->state != PANTHOR_CS_GROUP_TERMINATED && 2048 - group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE && 2049 - !group->destroyed && group->fatal_queues == 0 && 2050 - !group->timedout; 2051 - } 2052 - 2053 1919 static void 2054 1920 tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched, 2055 1921 struct panthor_sched_tick_ctx *ctx, ··· 2321 2231 continue; 2322 2232 } 2323 2233 2324 - panthor_fw_update_reqs(csg_iface, endpoint_req, 2325 - CSG_EP_REQ_PRIORITY(new_csg_prio), 2326 - CSG_EP_REQ_PRIORITY_MASK); 2234 + panthor_fw_csg_endpoint_req_update(ptdev, csg_iface, 2235 + CSG_EP_REQ_PRIORITY(new_csg_prio), 2236 + CSG_EP_REQ_PRIORITY_MASK); 2327 2237 
csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2328 2238 csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG, 2329 2239 CSG_ENDPOINT_CONFIG); ··· 2709 2619 static void queue_stop(struct panthor_queue *queue, 2710 2620 struct panthor_job *bad_job) 2711 2621 { 2622 + disable_delayed_work_sync(&queue->timeout.work); 2712 2623 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); 2713 2624 } 2714 2625 ··· 2721 2630 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) 2722 2631 job->base.s_fence->parent = dma_fence_get(job->done_fence); 2723 2632 2633 + enable_delayed_work(&queue->timeout.work); 2724 2634 drm_sched_start(&queue->scheduler, 0); 2725 2635 } 2726 2636 ··· 2788 2696 { 2789 2697 struct panthor_scheduler *sched = ptdev->scheduler; 2790 2698 struct panthor_csg_slots_upd_ctx upd_ctx; 2791 - struct panthor_group *group; 2792 2699 u32 suspended_slots; 2793 2700 u32 i; 2794 2701 ··· 2841 2750 while (slot_mask) { 2842 2751 u32 csg_id = ffs(slot_mask) - 1; 2843 2752 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 2753 + struct panthor_group *group = csg_slot->group; 2844 2754 2845 2755 /* Terminate command timedout, but the soft-reset will 2846 2756 * automatically terminate all active groups, so let's 2847 2757 * force the state to halted here. 2848 2758 */ 2849 - if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED) 2850 - csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED; 2759 + if (group->state != PANTHOR_CS_GROUP_TERMINATED) { 2760 + group->state = PANTHOR_CS_GROUP_TERMINATED; 2761 + 2762 + /* Reset the queue slots manually if the termination 2763 + * request failed. 
2764 + */ 2765 + for (i = 0; i < group->queue_count; i++) { 2766 + if (group->queues[i]) 2767 + cs_slot_reset_locked(ptdev, csg_id, i); 2768 + } 2769 + } 2851 2770 slot_mask &= ~BIT(csg_id); 2852 2771 } 2853 2772 } ··· 2887 2786 2888 2787 for (i = 0; i < sched->csg_slot_count; i++) { 2889 2788 struct panthor_csg_slot *csg_slot = &sched->csg_slots[i]; 2789 + struct panthor_group *group = csg_slot->group; 2890 2790 2891 - group = csg_slot->group; 2892 2791 if (!group) 2893 2792 continue; 2894 2793 ··· 3017 2916 xa_unlock(&gpool->xa); 3018 2917 } 3019 2918 3020 - static void group_sync_upd_work(struct work_struct *work) 2919 + static bool queue_check_job_completion(struct panthor_queue *queue) 3021 2920 { 3022 - struct panthor_group *group = 3023 - container_of(work, struct panthor_group, sync_upd_work); 2921 + struct panthor_syncobj_64b *syncobj = NULL; 3024 2922 struct panthor_job *job, *job_tmp; 2923 + bool cookie, progress = false; 3025 2924 LIST_HEAD(done_jobs); 3026 - u32 queue_idx; 3027 - bool cookie; 3028 2925 3029 2926 cookie = dma_fence_begin_signalling(); 3030 - for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) { 3031 - struct panthor_queue *queue = group->queues[queue_idx]; 3032 - struct panthor_syncobj_64b *syncobj; 2927 + spin_lock(&queue->fence_ctx.lock); 2928 + list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { 2929 + if (!syncobj) { 2930 + struct panthor_group *group = job->group; 3033 2931 3034 - if (!queue) 3035 - continue; 3036 - 3037 - syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj)); 3038 - 3039 - spin_lock(&queue->fence_ctx.lock); 3040 - list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) { 3041 - if (syncobj->seqno < job->done_fence->seqno) 3042 - break; 3043 - 3044 - list_move_tail(&job->node, &done_jobs); 3045 - dma_fence_signal_locked(job->done_fence); 2932 + syncobj = group->syncobjs->kmap + 2933 + (job->queue_idx * sizeof(*syncobj)); 3046 2934 } 3047 - 
spin_unlock(&queue->fence_ctx.lock); 2935 + 2936 + if (syncobj->seqno < job->done_fence->seqno) 2937 + break; 2938 + 2939 + list_move_tail(&job->node, &done_jobs); 2940 + dma_fence_signal_locked(job->done_fence); 3048 2941 } 2942 + 2943 + if (list_empty(&queue->fence_ctx.in_flight_jobs)) { 2944 + /* If we have no job left, we cancel the timer, and reset remaining 2945 + * time to its default so it can be restarted next time 2946 + * queue_resume_timeout() is called. 2947 + */ 2948 + queue_suspend_timeout_locked(queue); 2949 + 2950 + /* If there's no job pending, we consider it progress to avoid a 2951 + * spurious timeout if the timeout handler and the sync update 2952 + * handler raced. 2953 + */ 2954 + progress = true; 2955 + } else if (!list_empty(&done_jobs)) { 2956 + queue_reset_timeout_locked(queue); 2957 + progress = true; 2958 + } 2959 + spin_unlock(&queue->fence_ctx.lock); 3049 2960 dma_fence_end_signalling(cookie); 3050 2961 3051 2962 list_for_each_entry_safe(job, job_tmp, &done_jobs, node) { ··· 3066 2953 list_del_init(&job->node); 3067 2954 panthor_job_put(&job->base); 3068 2955 } 2956 + 2957 + return progress; 2958 + } 2959 + 2960 + static void group_sync_upd_work(struct work_struct *work) 2961 + { 2962 + struct panthor_group *group = 2963 + container_of(work, struct panthor_group, sync_upd_work); 2964 + u32 queue_idx; 2965 + bool cookie; 2966 + 2967 + cookie = dma_fence_begin_signalling(); 2968 + for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) { 2969 + struct panthor_queue *queue = group->queues[queue_idx]; 2970 + 2971 + if (!queue) 2972 + continue; 2973 + 2974 + queue_check_job_completion(queue); 2975 + } 2976 + dma_fence_end_signalling(cookie); 3069 2977 3070 2978 group_put(group); 3071 2979 } ··· 3335 3201 queue->iface.input->insert = job->ringbuf.end; 3336 3202 3337 3203 if (group->csg_id < 0) { 3338 - /* If the queue is blocked, we want to keep the timeout running, so we 3339 - * can detect unbounded waits and kill the group when 
that happens. 3340 - * Otherwise, we suspend the timeout so the time we spend waiting for 3341 - * a CSG slot is not counted. 3342 - */ 3343 - if (!(group->blocked_queues & BIT(job->queue_idx)) && 3344 - !queue->timeout_suspended) { 3345 - queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler); 3346 - queue->timeout_suspended = true; 3347 - } 3348 - 3349 3204 group_schedule_locked(group, BIT(job->queue_idx)); 3350 3205 } else { 3351 3206 gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1); ··· 3343 3220 pm_runtime_get(ptdev->base.dev); 3344 3221 sched->pm.has_ref = true; 3345 3222 } 3223 + queue_resume_timeout(queue); 3346 3224 panthor_devfreq_record_busy(sched->ptdev); 3347 3225 } 3348 3226 ··· 3393 3269 mutex_unlock(&sched->lock); 3394 3270 3395 3271 queue_start(queue); 3396 - 3397 3272 return DRM_GPU_SCHED_STAT_RESET; 3398 3273 } 3399 3274 ··· 3435 3312 return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64)); 3436 3313 } 3437 3314 3315 + static void queue_timeout_work(struct work_struct *work) 3316 + { 3317 + struct panthor_queue *queue = container_of(work, struct panthor_queue, 3318 + timeout.work.work); 3319 + bool progress; 3320 + 3321 + progress = queue_check_job_completion(queue); 3322 + if (!progress) 3323 + drm_sched_fault(&queue->scheduler); 3324 + } 3325 + 3438 3326 static struct panthor_queue * 3439 3327 group_create_queue(struct panthor_group *group, 3440 3328 const struct drm_panthor_queue_create *args, ··· 3462 3328 * their profiling status. 
3463 3329 */ 3464 3330 .credit_limit = args->ringbuf_size / sizeof(u64), 3465 - .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), 3331 + .timeout = MAX_SCHEDULE_TIMEOUT, 3466 3332 .timeout_wq = group->ptdev->reset.wq, 3467 3333 .dev = group->ptdev->base.dev, 3468 3334 }; ··· 3484 3350 if (!queue) 3485 3351 return ERR_PTR(-ENOMEM); 3486 3352 3353 + queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS); 3354 + INIT_DELAYED_WORK(&queue->timeout.work, queue_timeout_work); 3487 3355 queue->fence_ctx.id = dma_fence_context_alloc(1); 3488 3356 spin_lock_init(&queue->fence_ctx.lock); 3489 3357 INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
+1 -12
drivers/gpu/drm/radeon/radeon_fbdev.c
··· 202 202 struct radeon_device *rdev = fb_helper->dev->dev_private; 203 203 const struct drm_format_info *format_info; 204 204 struct drm_mode_fb_cmd2 mode_cmd = { }; 205 - struct fb_info *info; 205 + struct fb_info *info = fb_helper->info; 206 206 struct drm_gem_object *gobj; 207 207 struct radeon_bo *rbo; 208 208 struct drm_framebuffer *fb; ··· 243 243 fb_helper->funcs = &radeon_fbdev_fb_helper_funcs; 244 244 fb_helper->fb = fb; 245 245 246 - /* okay we have an object now allocate the framebuffer */ 247 - info = drm_fb_helper_alloc_info(fb_helper); 248 - if (IS_ERR(info)) { 249 - ret = PTR_ERR(info); 250 - goto err_drm_framebuffer_unregister_private; 251 - } 252 - 253 246 info->fbops = &radeon_fbdev_fb_ops; 254 247 255 248 /* radeon resume is fragile and needs a vt switch to help it along */ ··· 268 275 269 276 return 0; 270 277 271 - err_drm_framebuffer_unregister_private: 272 - fb_helper->fb = NULL; 273 - drm_framebuffer_unregister_private(fb); 274 - drm_framebuffer_cleanup(fb); 275 278 err_kfree: 276 279 kfree(fb); 277 280 err_radeon_fbdev_destroy_pinned_object:
+14 -17
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
··· 331 331 struct device_node *np = dev->of_node; 332 332 333 333 dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); 334 - if (IS_ERR(dp->grf)) { 335 - DRM_DEV_ERROR(dev, "failed to get rockchip,grf property\n"); 336 - return PTR_ERR(dp->grf); 337 - } 334 + if (IS_ERR(dp->grf)) 335 + return dev_err_probe(dev, PTR_ERR(dp->grf), 336 + "failed to get rockchip,grf property\n"); 338 337 339 338 dp->grfclk = devm_clk_get_optional(dev, "grf"); 340 339 if (IS_ERR(dp->grfclk)) 341 - return dev_err_probe(dev, PTR_ERR(dp->grfclk), "failed to get grf clock\n"); 340 + return dev_err_probe(dev, PTR_ERR(dp->grfclk), 341 + "failed to get grf clock\n"); 342 342 343 343 dp->pclk = devm_clk_get(dev, "pclk"); 344 - if (IS_ERR(dp->pclk)) { 345 - DRM_DEV_ERROR(dev, "failed to get pclk property\n"); 346 - return PTR_ERR(dp->pclk); 347 - } 344 + if (IS_ERR(dp->pclk)) 345 + return dev_err_probe(dev, PTR_ERR(dp->pclk), 346 + "failed to get pclk property\n"); 348 347 349 348 dp->rst = devm_reset_control_get(dev, "dp"); 350 - if (IS_ERR(dp->rst)) { 351 - DRM_DEV_ERROR(dev, "failed to get dp reset control\n"); 352 - return PTR_ERR(dp->rst); 353 - } 349 + if (IS_ERR(dp->rst)) 350 + return dev_err_probe(dev, PTR_ERR(dp->rst), 351 + "failed to get dp reset control\n"); 354 352 355 353 dp->apbrst = devm_reset_control_get_optional(dev, "apb"); 356 - if (IS_ERR(dp->apbrst)) { 357 - DRM_DEV_ERROR(dev, "failed to get apb reset control\n"); 358 - return PTR_ERR(dp->apbrst); 359 - } 354 + if (IS_ERR(dp->apbrst)) 355 + return dev_err_probe(dev, PTR_ERR(dp->apbrst), 356 + "failed to get apb reset control\n"); 360 357 361 358 return 0; 362 359 }
+82 -38
drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
··· 14 14 #include <linux/module.h> 15 15 #include <linux/platform_device.h> 16 16 #include <linux/phy/phy.h> 17 + #include <linux/phy/phy-hdmi.h> 17 18 #include <linux/regmap.h> 18 19 #include <linux/workqueue.h> 19 20 ··· 39 38 #define RK3576_HDMI_HDCP14_MEM_EN BIT(15) 40 39 41 40 #define RK3576_VO0_GRF_SOC_CON8 0x0020 42 - #define RK3576_COLOR_FORMAT_MASK (0xf << 4) 43 - #define RK3576_COLOR_DEPTH_MASK (0xf << 8) 44 - #define RK3576_RGB (0 << 4) 45 - #define RK3576_YUV422 (0x1 << 4) 46 - #define RK3576_YUV444 (0x2 << 4) 47 - #define RK3576_YUV420 (0x3 << 4) 48 - #define RK3576_8BPC (0x0 << 8) 49 - #define RK3576_10BPC (0x6 << 8) 41 + #define RK3576_COLOR_DEPTH_MASK GENMASK(11, 8) 42 + #define RK3576_8BPC 0x0 43 + #define RK3576_10BPC 0x6 44 + #define RK3576_COLOR_FORMAT_MASK GENMASK(7, 4) 45 + #define RK3576_RGB 0x9 46 + #define RK3576_YUV422 0x1 47 + #define RK3576_YUV444 0x2 48 + #define RK3576_YUV420 0x3 50 49 #define RK3576_CECIN_MASK BIT(3) 51 - 52 - #define RK3576_VO0_GRF_SOC_CON12 0x0030 53 - #define RK3576_GRF_OSDA_DLYN (0xf << 12) 54 - #define RK3576_GRF_OSDA_DIV (0x7f << 1) 55 - #define RK3576_GRF_OSDA_DLY_EN BIT(0) 56 50 57 51 #define RK3576_VO0_GRF_SOC_CON14 0x0038 58 52 #define RK3576_I2S_SEL_MASK BIT(0) ··· 70 74 #define RK3588_HDMI1_LEVEL_INT BIT(24) 71 75 #define RK3588_GRF_VO1_CON3 0x000c 72 76 #define RK3588_GRF_VO1_CON6 0x0018 77 + #define RK3588_COLOR_DEPTH_MASK GENMASK(7, 4) 78 + #define RK3588_8BPC 0x0 79 + #define RK3588_10BPC 0x6 80 + #define RK3588_COLOR_FORMAT_MASK GENMASK(3, 0) 81 + #define RK3588_RGB 0x0 82 + #define RK3588_YUV420 0x3 73 83 #define RK3588_SCLIN_MASK BIT(9) 74 84 #define RK3588_SDAIN_MASK BIT(10) 75 85 #define RK3588_MODE_MASK BIT(11) ··· 94 92 struct rockchip_encoder encoder; 95 93 struct dw_hdmi_qp *hdmi; 96 94 struct phy *phy; 97 - struct gpio_desc *enable_gpio; 95 + struct gpio_desc *frl_enable_gpio; 98 96 struct delayed_work hpd_work; 99 97 int port_id; 100 98 const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops; 99 + 
unsigned long long tmds_char_rate; 101 100 }; 102 101 103 102 struct rockchip_hdmi_qp_ctrl_ops { 104 103 void (*io_init)(struct rockchip_hdmi_qp *hdmi); 104 + void (*enc_init)(struct rockchip_hdmi_qp *hdmi, struct rockchip_crtc_state *state); 105 105 irqreturn_t (*irq_callback)(int irq, void *dev_id); 106 106 irqreturn_t (*hardirq_callback)(int irq, void *dev_id); 107 107 }; ··· 119 115 { 120 116 struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder); 121 117 struct drm_crtc *crtc = encoder->crtc; 122 - unsigned long long rate; 123 118 124 119 /* Unconditionally switch to TMDS as FRL is not yet supported */ 125 - gpiod_set_value(hdmi->enable_gpio, 1); 120 + gpiod_set_value(hdmi->frl_enable_gpio, 0); 126 121 127 - if (crtc && crtc->state) { 128 - rate = drm_hdmi_compute_mode_clock(&crtc->state->adjusted_mode, 129 - 8, HDMI_COLORSPACE_RGB); 130 - /* 131 - * FIXME: Temporary workaround to pass pixel clock rate 132 - * to the PHY driver until phy_configure_opts_hdmi 133 - * becomes available in the PHY API. 
See also the related 134 - * comment in rk_hdptx_phy_power_on() from 135 - * drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c 136 - */ 137 - phy_set_bus_width(hdmi->phy, div_u64(rate, 100)); 138 - } 122 + if (!crtc || !crtc->state) 123 + return; 124 + 125 + if (hdmi->ctrl_ops->enc_init) 126 + hdmi->ctrl_ops->enc_init(hdmi, to_rockchip_crtc_state(crtc->state)); 139 127 } 140 128 141 129 static int ··· 135 139 struct drm_crtc_state *crtc_state, 136 140 struct drm_connector_state *conn_state) 137 141 { 142 + struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder); 138 143 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); 144 + union phy_configure_opts phy_cfg = {}; 145 + int ret; 139 146 140 - s->output_mode = ROCKCHIP_OUT_MODE_AAAA; 141 - s->output_type = DRM_MODE_CONNECTOR_HDMIA; 147 + if (hdmi->tmds_char_rate == conn_state->hdmi.tmds_char_rate && 148 + s->output_bpc == conn_state->hdmi.output_bpc) 149 + return 0; 142 150 143 - return 0; 151 + phy_cfg.hdmi.tmds_char_rate = conn_state->hdmi.tmds_char_rate; 152 + phy_cfg.hdmi.bpc = conn_state->hdmi.output_bpc; 153 + 154 + ret = phy_configure(hdmi->phy, &phy_cfg); 155 + if (!ret) { 156 + hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate; 157 + s->output_mode = ROCKCHIP_OUT_MODE_AAAA; 158 + s->output_type = DRM_MODE_CONNECTOR_HDMIA; 159 + s->output_bpc = conn_state->hdmi.output_bpc; 160 + } else { 161 + dev_err(hdmi->dev, "Failed to configure phy: %d\n", ret); 162 + } 163 + 164 + return ret; 144 165 } 145 166 146 167 static const struct ··· 388 375 regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val); 389 376 } 390 377 378 + static void dw_hdmi_qp_rk3576_enc_init(struct rockchip_hdmi_qp *hdmi, 379 + struct rockchip_crtc_state *state) 380 + { 381 + u32 val; 382 + 383 + if (state->output_bpc == 10) 384 + val = FIELD_PREP_WM16(RK3576_COLOR_DEPTH_MASK, RK3576_10BPC); 385 + else 386 + val = FIELD_PREP_WM16(RK3576_COLOR_DEPTH_MASK, RK3576_8BPC); 387 + 388 + regmap_write(hdmi->vo_regmap, 
RK3576_VO0_GRF_SOC_CON8, val); 389 + } 390 + 391 + static void dw_hdmi_qp_rk3588_enc_init(struct rockchip_hdmi_qp *hdmi, 392 + struct rockchip_crtc_state *state) 393 + { 394 + u32 val; 395 + 396 + if (state->output_bpc == 10) 397 + val = FIELD_PREP_WM16(RK3588_COLOR_DEPTH_MASK, RK3588_10BPC); 398 + else 399 + val = FIELD_PREP_WM16(RK3588_COLOR_DEPTH_MASK, RK3588_8BPC); 400 + 401 + regmap_write(hdmi->vo_regmap, 402 + hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3, 403 + val); 404 + } 405 + 391 406 static const struct rockchip_hdmi_qp_ctrl_ops rk3576_hdmi_ctrl_ops = { 392 407 .io_init = dw_hdmi_qp_rk3576_io_init, 393 - .irq_callback = dw_hdmi_qp_rk3576_irq, 408 + .enc_init = dw_hdmi_qp_rk3576_enc_init, 409 + .irq_callback = dw_hdmi_qp_rk3576_irq, 394 410 .hardirq_callback = dw_hdmi_qp_rk3576_hardirq, 395 411 }; 396 412 397 413 static const struct rockchip_hdmi_qp_ctrl_ops rk3588_hdmi_ctrl_ops = { 398 414 .io_init = dw_hdmi_qp_rk3588_io_init, 399 - .irq_callback = dw_hdmi_qp_rk3588_irq, 415 + .enc_init = dw_hdmi_qp_rk3588_enc_init, 416 + .irq_callback = dw_hdmi_qp_rk3588_irq, 400 417 .hardirq_callback = dw_hdmi_qp_rk3588_hardirq, 401 418 }; 402 419 ··· 519 476 520 477 plat_data.phy_ops = cfg->phy_ops; 521 478 plat_data.phy_data = hdmi; 479 + plat_data.max_bpc = 10; 522 480 523 481 encoder = &hdmi->encoder.encoder; 524 482 encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); ··· 559 515 plat_data.ref_clk_rate = clk_get_rate(ref_clk); 560 516 clk_put(ref_clk); 561 517 562 - hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable", 563 - GPIOD_OUT_HIGH); 564 - if (IS_ERR(hdmi->enable_gpio)) 565 - return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->enable_gpio), 566 - "Failed to request enable GPIO\n"); 518 + hdmi->frl_enable_gpio = devm_gpiod_get_optional(hdmi->dev, "frl-enable", 519 + GPIOD_OUT_LOW); 520 + if (IS_ERR(hdmi->frl_enable_gpio)) 521 + return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->frl_enable_gpio), 522 + "Failed to request FRL 
enable GPIO\n"); 567 523 568 524 hdmi->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0); 569 525 if (IS_ERR(hdmi->phy))
+3
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
··· 97 97 private->iommu_dev = ERR_PTR(-ENODEV); 98 98 else if (!private->iommu_dev) 99 99 private->iommu_dev = dev; 100 + 101 + if (!IS_ERR(private->iommu_dev)) 102 + drm_dev_set_dma_dev(drm_dev, private->iommu_dev); 100 103 } 101 104 102 105 static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
+31 -25
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
··· 102 102 VOP2_AFBC_FMT_INVALID = -1, 103 103 }; 104 104 105 - #define VOP2_MAX_DCLK_RATE 600000000 105 + #define VOP2_MAX_DCLK_RATE 600000000UL 106 106 107 107 /* 108 108 * bus-format types. ··· 1743 1743 * Switch to HDMI PHY PLL as DCLK source for display modes up 1744 1744 * to 4K@60Hz, if available, otherwise keep using the system CRU. 1745 1745 */ 1746 - if ((vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) && clock <= VOP2_MAX_DCLK_RATE) { 1747 - drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) { 1748 - struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); 1746 + if (vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) { 1747 + unsigned long max_dclk = DIV_ROUND_CLOSEST_ULL(VOP2_MAX_DCLK_RATE * 8, 1748 + vcstate->output_bpc); 1749 + if (clock <= max_dclk) { 1750 + drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) { 1751 + struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder); 1749 1752 1750 - if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) { 1751 - if (!vop2->pll_hdmiphy0) 1753 + if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) { 1754 + if (!vop2->pll_hdmiphy0) 1755 + break; 1756 + 1757 + if (!vp->dclk_src) 1758 + vp->dclk_src = clk_get_parent(vp->dclk); 1759 + 1760 + ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0); 1761 + if (ret < 0) 1762 + drm_warn(vop2->drm, 1763 + "Could not switch to HDMI0 PHY PLL: %d\n", 1764 + ret); 1752 1765 break; 1766 + } 1753 1767 1754 - if (!vp->dclk_src) 1755 - vp->dclk_src = clk_get_parent(vp->dclk); 1768 + if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) { 1769 + if (!vop2->pll_hdmiphy1) 1770 + break; 1756 1771 1757 - ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0); 1758 - if (ret < 0) 1759 - drm_warn(vop2->drm, 1760 - "Could not switch to HDMI0 PHY PLL: %d\n", ret); 1761 - break; 1762 - } 1772 + if (!vp->dclk_src) 1773 + vp->dclk_src = clk_get_parent(vp->dclk); 1763 1774 1764 - if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) 
{ 1765 - if (!vop2->pll_hdmiphy1) 1775 + ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1); 1776 + if (ret < 0) 1777 + drm_warn(vop2->drm, 1778 + "Could not switch to HDMI1 PHY PLL: %d\n", 1779 + ret); 1766 1780 break; 1767 - 1768 - if (!vp->dclk_src) 1769 - vp->dclk_src = clk_get_parent(vp->dclk); 1770 - 1771 - ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1); 1772 - if (ret < 0) 1773 - drm_warn(vop2->drm, 1774 - "Could not switch to HDMI1 PHY PLL: %d\n", ret); 1775 - break; 1781 + } 1776 1782 } 1777 1783 } 1778 1784 }
+42 -7
drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
··· 1369 1369 }, 1370 1370 }; 1371 1371 1372 + /* 1373 + * phys_id is used to identify a main window(Cluster Win/Smart Win, not 1374 + * include the sub win of a cluster or the multi area) that can do overlay 1375 + * in main overlay stage. 1376 + */ 1377 + static struct vop2_win *vop2_find_win_by_phys_id(struct vop2 *vop2, uint8_t phys_id) 1378 + { 1379 + struct vop2_win *win; 1380 + int i; 1381 + 1382 + for (i = 0; i < vop2->data->win_size; i++) { 1383 + win = &vop2->win[i]; 1384 + if (win->data->phys_id == phys_id) 1385 + return win; 1386 + } 1387 + 1388 + return NULL; 1389 + } 1390 + 1372 1391 static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags) 1373 1392 { 1374 1393 struct vop2 *vop2 = vp->vop2; ··· 1861 1842 alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE; 1862 1843 } 1863 1844 1864 - static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id) 1845 + static int vop2_find_start_mixer_id_for_vp(struct vop2_video_port *vp) 1865 1846 { 1866 - struct vop2_video_port *vp; 1867 - int used_layer = 0; 1847 + struct vop2 *vop2 = vp->vop2; 1848 + struct vop2_win *win; 1849 + u32 layer_sel = vop2->old_layer_sel; 1850 + u32 used_layer = 0; 1851 + unsigned long win_mask = vp->win_mask; 1852 + unsigned long phys_id; 1853 + bool match; 1868 1854 int i; 1869 1855 1870 - for (i = 0; i < port_id; i++) { 1871 - vp = &vop2->vps[i]; 1872 - used_layer += hweight32(vp->win_mask); 1856 + for (i = 0; i < 31; i += 4) { 1857 + match = false; 1858 + for_each_set_bit(phys_id, &win_mask, ROCKCHIP_VOP2_ESMART3) { 1859 + win = vop2_find_win_by_phys_id(vop2, phys_id); 1860 + if (win->data->layer_sel_id[vp->id] == ((layer_sel >> i) & 0xf)) { 1861 + match = true; 1862 + break; 1863 + } 1864 + } 1865 + 1866 + if (!match) 1867 + used_layer += 1; 1868 + else 1869 + break; 1873 1870 } 1874 1871 1875 1872 return used_layer; ··· 1970 1935 u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE; 1971 1936 1972 1937 if (vop2->version <= 
VOP_VERSION_RK3588) 1973 - mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id); 1938 + mixer_id = vop2_find_start_mixer_id_for_vp(vp); 1974 1939 else 1975 1940 mixer_id = 0; 1976 1941
+1
drivers/gpu/drm/tegra/Makefile
··· 25 25 falcon.o \ 26 26 vic.o \ 27 27 nvdec.o \ 28 + nvjpg.o \ 28 29 riscv.o 29 30 30 31 tegra-drm-y += trace.o
+2
drivers/gpu/drm/tegra/drm.c
··· 1384 1384 { .compatible = "nvidia,tegra210-sor1", }, 1385 1385 { .compatible = "nvidia,tegra210-vic", }, 1386 1386 { .compatible = "nvidia,tegra210-nvdec", }, 1387 + { .compatible = "nvidia,tegra210-nvjpg", }, 1387 1388 { .compatible = "nvidia,tegra186-display", }, 1388 1389 { .compatible = "nvidia,tegra186-dc", }, 1389 1390 { .compatible = "nvidia,tegra186-sor", }, ··· 1423 1422 &tegra_gr3d_driver, 1424 1423 &tegra_vic_driver, 1425 1424 &tegra_nvdec_driver, 1425 + &tegra_nvjpg_driver, 1426 1426 }; 1427 1427 1428 1428 static int __init host1x_drm_init(void)
+1
drivers/gpu/drm/tegra/drm.h
··· 214 214 extern struct platform_driver tegra_gr3d_driver; 215 215 extern struct platform_driver tegra_vic_driver; 216 216 extern struct platform_driver tegra_nvdec_driver; 217 + extern struct platform_driver tegra_nvjpg_driver; 217 218 218 219 #endif /* HOST1X_DRM_H */
+30 -29
drivers/gpu/drm/tegra/dsi.c
··· 546 546 /* horizontal back porch */ 547 547 hbp = (mode->htotal - mode->hsync_end) * mul / div; 548 548 549 - if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0) 550 - hbp += hsw; 551 - 552 549 /* horizontal front porch */ 553 550 hfp = (mode->hsync_start - mode->hdisplay) * mul / div; 551 + 552 + if (dsi->master || dsi->slave) { 553 + hact /= 2; 554 + hsw /= 2; 555 + hbp /= 2; 556 + hfp /= 2; 557 + } 558 + 559 + if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0) 560 + hbp += hsw; 554 561 555 562 /* subtract packet overhead */ 556 563 hsw -= 10; ··· 568 561 tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3); 569 562 tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5); 570 563 tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7); 571 - 572 - /* set SOL delay (for non-burst mode only) */ 573 - tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY); 574 - 575 - /* TODO: implement ganged mode */ 576 564 } else { 577 565 u16 bytes; 578 566 ··· 589 587 value = MIPI_DCS_WRITE_MEMORY_START << 8 | 590 588 MIPI_DCS_WRITE_MEMORY_CONTINUE; 591 589 tegra_dsi_writel(dsi, value, DSI_DCS_CMDS); 592 - 593 - /* set SOL delay */ 594 - if (dsi->master || dsi->slave) { 595 - unsigned long delay, bclk, bclk_ganged; 596 - unsigned int lanes = state->lanes; 597 - 598 - /* SOL to valid, valid to FIFO and FIFO write delay */ 599 - delay = 4 + 4 + 2; 600 - delay = DIV_ROUND_UP(delay * mul, div * lanes); 601 - /* FIFO read delay */ 602 - delay = delay + 6; 603 - 604 - bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes); 605 - bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes); 606 - value = bclk - bclk_ganged + delay + 20; 607 - } else { 608 - /* TODO: revisit for non-ganged mode */ 609 - value = 8 * mul / div; 610 - } 611 - 612 - tegra_dsi_writel(dsi, value, DSI_SOL_DELAY); 613 590 } 591 + 592 + /* set SOL delay */ 593 + if (dsi->master || dsi->slave) { 594 + unsigned long delay, bclk, bclk_ganged; 595 + unsigned int lanes = state->lanes; 596 + 597 + /* SOL to valid, valid to FIFO 
and FIFO write delay */ 598 + delay = 4 + 4 + 2; 599 + delay = DIV_ROUND_UP(delay * mul, div * lanes); 600 + /* FIFO read delay */ 601 + delay = delay + 6; 602 + 603 + bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes); 604 + bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes); 605 + value = bclk - bclk_ganged + delay + 20; 606 + } else { 607 + value = 8 * mul / div; 608 + } 609 + 610 + tegra_dsi_writel(dsi, value, DSI_SOL_DELAY); 614 611 615 612 if (dsi->slave) { 616 613 tegra_dsi_configure(dsi->slave, pipe, mode);
+1 -8
drivers/gpu/drm/tegra/fbdev.c
··· 73 73 struct tegra_drm *tegra = helper->dev->dev_private; 74 74 struct drm_device *drm = helper->dev; 75 75 struct drm_mode_fb_cmd2 cmd = { 0 }; 76 + struct fb_info *info = helper->info; 76 77 unsigned int bytes_per_pixel; 77 78 struct drm_framebuffer *fb; 78 79 unsigned long offset; 79 - struct fb_info *info; 80 80 struct tegra_bo *bo; 81 81 size_t size; 82 82 int err; ··· 96 96 bo = tegra_bo_create(drm, size, 0); 97 97 if (IS_ERR(bo)) 98 98 return PTR_ERR(bo); 99 - 100 - info = drm_fb_helper_alloc_info(helper); 101 - if (IS_ERR(info)) { 102 - dev_err(drm->dev, "failed to allocate framebuffer info\n"); 103 - drm_gem_object_put(&bo->gem); 104 - return PTR_ERR(info); 105 - } 106 99 107 100 fb = tegra_fb_alloc(drm, 108 101 drm_get_format_info(drm, cmd.pixel_format, cmd.modifier[0]),
+330
drivers/gpu/drm/tegra/nvjpg.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <linux/clk.h> 4 + #include <linux/delay.h> 5 + #include <linux/dma-mapping.h> 6 + #include <linux/host1x.h> 7 + #include <linux/iommu.h> 8 + #include <linux/module.h> 9 + #include <linux/of.h> 10 + #include <linux/platform_device.h> 11 + #include <linux/pm_runtime.h> 12 + 13 + #include "drm.h" 14 + #include "falcon.h" 15 + 16 + struct nvjpg_config { 17 + const char *firmware; 18 + unsigned int version; 19 + }; 20 + 21 + struct nvjpg { 22 + struct falcon falcon; 23 + 24 + void __iomem *regs; 25 + struct tegra_drm_client client; 26 + struct device *dev; 27 + struct clk *clk; 28 + 29 + /* Platform configuration */ 30 + const struct nvjpg_config *config; 31 + }; 32 + 33 + static inline struct nvjpg *to_nvjpg(struct tegra_drm_client *client) 34 + { 35 + return container_of(client, struct nvjpg, client); 36 + } 37 + 38 + static int nvjpg_init(struct host1x_client *client) 39 + { 40 + struct tegra_drm_client *drm = host1x_to_drm_client(client); 41 + struct drm_device *dev = dev_get_drvdata(client->host); 42 + struct tegra_drm *tegra = dev->dev_private; 43 + struct nvjpg *nvjpg = to_nvjpg(drm); 44 + int err; 45 + 46 + err = host1x_client_iommu_attach(client); 47 + if (err < 0 && err != -ENODEV) { 48 + dev_err(nvjpg->dev, "failed to attach to domain: %d\n", err); 49 + return err; 50 + } 51 + 52 + err = tegra_drm_register_client(tegra, drm); 53 + if (err < 0) 54 + goto detach; 55 + 56 + /* 57 + * Inherit the DMA parameters (such as maximum segment size) from the 58 + * parent host1x device. 
59 + */ 60 + client->dev->dma_parms = client->host->dma_parms; 61 + 62 + return 0; 63 + 64 + detach: 65 + host1x_client_iommu_detach(client); 66 + 67 + return err; 68 + } 69 + 70 + static int nvjpg_exit(struct host1x_client *client) 71 + { 72 + struct tegra_drm_client *drm = host1x_to_drm_client(client); 73 + struct drm_device *dev = dev_get_drvdata(client->host); 74 + struct tegra_drm *tegra = dev->dev_private; 75 + struct nvjpg *nvjpg = to_nvjpg(drm); 76 + int err; 77 + 78 + /* avoid a dangling pointer just in case this disappears */ 79 + client->dev->dma_parms = NULL; 80 + 81 + err = tegra_drm_unregister_client(tegra, drm); 82 + if (err < 0) 83 + return err; 84 + 85 + pm_runtime_dont_use_autosuspend(client->dev); 86 + pm_runtime_force_suspend(client->dev); 87 + 88 + host1x_client_iommu_detach(client); 89 + 90 + if (client->group) { 91 + dma_unmap_single(nvjpg->dev, nvjpg->falcon.firmware.phys, 92 + nvjpg->falcon.firmware.size, DMA_TO_DEVICE); 93 + tegra_drm_free(tegra, nvjpg->falcon.firmware.size, 94 + nvjpg->falcon.firmware.virt, 95 + nvjpg->falcon.firmware.iova); 96 + } else { 97 + dma_free_coherent(nvjpg->dev, nvjpg->falcon.firmware.size, 98 + nvjpg->falcon.firmware.virt, 99 + nvjpg->falcon.firmware.iova); 100 + } 101 + 102 + return 0; 103 + } 104 + 105 + static const struct host1x_client_ops nvjpg_client_ops = { 106 + .init = nvjpg_init, 107 + .exit = nvjpg_exit, 108 + }; 109 + 110 + static int nvjpg_load_falcon_firmware(struct nvjpg *nvjpg) 111 + { 112 + struct host1x_client *client = &nvjpg->client.base; 113 + struct tegra_drm *tegra = nvjpg->client.drm; 114 + dma_addr_t iova; 115 + size_t size; 116 + void *virt; 117 + int err; 118 + 119 + if (nvjpg->falcon.firmware.virt) 120 + return 0; 121 + 122 + err = falcon_read_firmware(&nvjpg->falcon, nvjpg->config->firmware); 123 + if (err < 0) 124 + return err; 125 + 126 + size = nvjpg->falcon.firmware.size; 127 + 128 + if (!client->group) { 129 + virt = dma_alloc_coherent(nvjpg->dev, size, &iova, GFP_KERNEL); 130 
+ if (!virt) 131 + return -ENOMEM; 132 + } else { 133 + virt = tegra_drm_alloc(tegra, size, &iova); 134 + if (IS_ERR(virt)) 135 + return PTR_ERR(virt); 136 + } 137 + 138 + nvjpg->falcon.firmware.virt = virt; 139 + nvjpg->falcon.firmware.iova = iova; 140 + 141 + err = falcon_load_firmware(&nvjpg->falcon); 142 + if (err < 0) 143 + goto cleanup; 144 + 145 + /* 146 + * In this case we have received an IOVA from the shared domain, so we 147 + * need to make sure to get the physical address so that the DMA API 148 + * knows what memory pages to flush the cache for. 149 + */ 150 + if (client->group) { 151 + dma_addr_t phys; 152 + 153 + phys = dma_map_single(nvjpg->dev, virt, size, DMA_TO_DEVICE); 154 + 155 + err = dma_mapping_error(nvjpg->dev, phys); 156 + if (err < 0) 157 + goto cleanup; 158 + 159 + nvjpg->falcon.firmware.phys = phys; 160 + } 161 + 162 + return 0; 163 + 164 + cleanup: 165 + if (!client->group) 166 + dma_free_coherent(nvjpg->dev, size, virt, iova); 167 + else 168 + tegra_drm_free(tegra, size, virt, iova); 169 + 170 + return err; 171 + } 172 + 173 + static __maybe_unused int nvjpg_runtime_resume(struct device *dev) 174 + { 175 + struct nvjpg *nvjpg = dev_get_drvdata(dev); 176 + int err; 177 + 178 + err = clk_prepare_enable(nvjpg->clk); 179 + if (err < 0) 180 + return err; 181 + 182 + usleep_range(20, 30); 183 + 184 + err = nvjpg_load_falcon_firmware(nvjpg); 185 + if (err < 0) 186 + goto disable_clk; 187 + 188 + err = falcon_boot(&nvjpg->falcon); 189 + if (err < 0) 190 + goto disable_clk; 191 + 192 + return 0; 193 + 194 + disable_clk: 195 + clk_disable_unprepare(nvjpg->clk); 196 + return err; 197 + } 198 + 199 + static __maybe_unused int nvjpg_runtime_suspend(struct device *dev) 200 + { 201 + struct nvjpg *nvjpg = dev_get_drvdata(dev); 202 + 203 + clk_disable_unprepare(nvjpg->clk); 204 + 205 + return 0; 206 + } 207 + 208 + static int nvjpg_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported) 209 + { 210 + *supported = false; 211 + 212 + 
return 0; 213 + } 214 + 215 + static const struct tegra_drm_client_ops nvjpg_ops = { 216 + .get_streamid_offset = NULL, 217 + .can_use_memory_ctx = nvjpg_can_use_memory_ctx, 218 + }; 219 + 220 + #define NVIDIA_TEGRA_210_NVJPG_FIRMWARE "nvidia/tegra210/nvjpg.bin" 221 + 222 + static const struct nvjpg_config tegra210_nvjpg_config = { 223 + .firmware = NVIDIA_TEGRA_210_NVJPG_FIRMWARE, 224 + .version = 0x21, 225 + }; 226 + 227 + static const struct of_device_id tegra_nvjpg_of_match[] = { 228 + { .compatible = "nvidia,tegra210-nvjpg", .data = &tegra210_nvjpg_config }, 229 + { }, 230 + }; 231 + MODULE_DEVICE_TABLE(of, tegra_nvjpg_of_match); 232 + 233 + static int nvjpg_probe(struct platform_device *pdev) 234 + { 235 + struct device *dev = &pdev->dev; 236 + struct nvjpg *nvjpg; 237 + int err; 238 + 239 + /* inherit DMA mask from host1x parent */ 240 + err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask); 241 + if (err < 0) { 242 + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); 243 + return err; 244 + } 245 + 246 + nvjpg = devm_kzalloc(dev, sizeof(*nvjpg), GFP_KERNEL); 247 + if (!nvjpg) 248 + return -ENOMEM; 249 + 250 + nvjpg->config = of_device_get_match_data(dev); 251 + 252 + nvjpg->regs = devm_platform_ioremap_resource(pdev, 0); 253 + if (IS_ERR(nvjpg->regs)) 254 + return PTR_ERR(nvjpg->regs); 255 + 256 + nvjpg->clk = devm_clk_get(dev, "nvjpg"); 257 + if (IS_ERR(nvjpg->clk)) { 258 + dev_err(&pdev->dev, "failed to get clock\n"); 259 + return PTR_ERR(nvjpg->clk); 260 + } 261 + 262 + err = clk_set_rate(nvjpg->clk, ULONG_MAX); 263 + if (err < 0) { 264 + dev_err(&pdev->dev, "failed to set clock rate\n"); 265 + return err; 266 + } 267 + 268 + nvjpg->falcon.dev = dev; 269 + nvjpg->falcon.regs = nvjpg->regs; 270 + 271 + err = falcon_init(&nvjpg->falcon); 272 + if (err < 0) 273 + return err; 274 + 275 + platform_set_drvdata(pdev, nvjpg); 276 + 277 + INIT_LIST_HEAD(&nvjpg->client.base.list); 278 + nvjpg->client.base.ops = &nvjpg_client_ops; 279 + 
nvjpg->client.base.dev = dev; 280 + nvjpg->client.base.class = HOST1X_CLASS_NVJPG; 281 + nvjpg->dev = dev; 282 + 283 + INIT_LIST_HEAD(&nvjpg->client.list); 284 + nvjpg->client.version = nvjpg->config->version; 285 + nvjpg->client.ops = &nvjpg_ops; 286 + 287 + err = host1x_client_register(&nvjpg->client.base); 288 + if (err < 0) { 289 + dev_err(dev, "failed to register host1x client: %d\n", err); 290 + goto exit_falcon; 291 + } 292 + 293 + pm_runtime_use_autosuspend(dev); 294 + pm_runtime_set_autosuspend_delay(dev, 500); 295 + devm_pm_runtime_enable(dev); 296 + 297 + return 0; 298 + 299 + exit_falcon: 300 + falcon_exit(&nvjpg->falcon); 301 + 302 + return err; 303 + } 304 + 305 + static void nvjpg_remove(struct platform_device *pdev) 306 + { 307 + struct nvjpg *nvjpg = platform_get_drvdata(pdev); 308 + 309 + host1x_client_unregister(&nvjpg->client.base); 310 + falcon_exit(&nvjpg->falcon); 311 + } 312 + 313 + static const struct dev_pm_ops nvjpg_pm_ops = { 314 + RUNTIME_PM_OPS(nvjpg_runtime_suspend, nvjpg_runtime_resume, NULL) 315 + SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) 316 + }; 317 + 318 + struct platform_driver tegra_nvjpg_driver = { 319 + .driver = { 320 + .name = "tegra-nvjpg", 321 + .of_match_table = tegra_nvjpg_of_match, 322 + .pm = &nvjpg_pm_ops 323 + }, 324 + .probe = nvjpg_probe, 325 + .remove = nvjpg_remove, 326 + }; 327 + 328 + #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) 329 + MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVJPG_FIRMWARE); 330 + #endif
+2 -1
drivers/gpu/drm/tests/Makefile
··· 24 24 drm_plane_helper_test.o \ 25 25 drm_probe_helper_test.o \ 26 26 drm_rect_test.o \ 27 - drm_sysfb_modeset_test.o 27 + drm_sysfb_modeset_test.o \ 28 + drm_fixp_test.o 28 29 29 30 CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
+71
drivers/gpu/drm/tests/drm_fixp_test.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2022 Advanced Micro Devices, Inc. 4 + */ 5 + 6 + #include <kunit/test.h> 7 + #include <drm/drm_fixed.h> 8 + 9 + static void drm_test_sm2fixp(struct kunit *test) 10 + { 11 + KUNIT_EXPECT_EQ(test, 0x7fffffffffffffffll, ((1ull << 63) - 1)); 12 + 13 + /* 1 */ 14 + KUNIT_EXPECT_EQ(test, drm_int2fixp(1), drm_sm2fixp(1ull << DRM_FIXED_POINT)); 15 + 16 + /* -1 */ 17 + KUNIT_EXPECT_EQ(test, drm_int2fixp(-1), 18 + drm_sm2fixp((1ull << 63) | (1ull << DRM_FIXED_POINT))); 19 + 20 + /* 0.5 */ 21 + KUNIT_EXPECT_EQ(test, drm_fixp_from_fraction(1, 2), 22 + drm_sm2fixp(1ull << (DRM_FIXED_POINT - 1))); 23 + 24 + /* -0.5 */ 25 + KUNIT_EXPECT_EQ(test, drm_fixp_from_fraction(-1, 2), 26 + drm_sm2fixp((1ull << 63) | (1ull << (DRM_FIXED_POINT - 1)))); 27 + } 28 + 29 + static void drm_test_int2fixp(struct kunit *test) 30 + { 31 + /* 1 */ 32 + KUNIT_EXPECT_EQ(test, 1ll << 32, drm_int2fixp(1)); 33 + 34 + /* -1 */ 35 + KUNIT_EXPECT_EQ(test, -(1ll << 32), drm_int2fixp(-1)); 36 + 37 + /* 1 + (-1) = 0 */ 38 + KUNIT_EXPECT_EQ(test, 0, drm_int2fixp(1) + drm_int2fixp(-1)); 39 + 40 + /* 1 / 2 */ 41 + KUNIT_EXPECT_EQ(test, 1ll << 31, drm_fixp_from_fraction(1, 2)); 42 + 43 + /* -0.5 */ 44 + KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(-1, 2)); 45 + 46 + /* (1 / 2) + (-1) = 0.5 */ 47 + KUNIT_EXPECT_EQ(test, 1ll << 31, drm_fixp_from_fraction(-1, 2) + drm_int2fixp(1)); 48 + 49 + /* (1 / 2) - 1) = 0.5 */ 50 + KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(1, 2) + drm_int2fixp(-1)); 51 + 52 + /* (1 / 2) - 1) = 0.5 */ 53 + KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(1, 2) - drm_int2fixp(1)); 54 + } 55 + 56 + static struct kunit_case drm_fixp_tests[] = { 57 + KUNIT_CASE(drm_test_int2fixp), 58 + KUNIT_CASE(drm_test_sm2fixp), 59 + { } 60 + }; 61 + 62 + static struct kunit_suite drm_fixp_test_suite = { 63 + .name = "drm_fixp", 64 + .test_cases = drm_fixp_tests, 65 + }; 66 + 67 + 
kunit_test_suite(drm_fixp_test_suite); 68 + 69 + MODULE_AUTHOR("AMD"); 70 + MODULE_LICENSE("Dual MIT/GPL"); 71 + MODULE_DESCRIPTION("Unit tests for drm_fixed.h");
+6 -5
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
··· 652 652 int err; 653 653 654 654 man = ttm_manager_type(priv->ttm_dev, mem_type); 655 - man->move = dma_fence_get_stub(); 655 + man->eviction_fences[0] = dma_fence_get_stub(); 656 656 657 657 bo = ttm_bo_kunit_init(test, test->priv, size, NULL); 658 658 bo->type = bo_type; ··· 669 669 KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size); 670 670 671 671 ttm_bo_fini(bo); 672 - dma_fence_put(man->move); 672 + dma_fence_put(man->eviction_fences[0]); 673 673 } 674 674 675 675 static const struct ttm_bo_validate_test_case ttm_bo_validate_wait_cases[] = { ··· 733 733 734 734 spin_lock_init(&fence_lock); 735 735 man = ttm_manager_type(priv->ttm_dev, fst_mem); 736 - man->move = alloc_mock_fence(test); 736 + man->eviction_fences[0] = alloc_mock_fence(test); 737 737 738 - task = kthread_create(threaded_fence_signal, man->move, "move-fence-signal"); 738 + task = kthread_create(threaded_fence_signal, man->eviction_fences[0], "move-fence-signal"); 739 739 if (IS_ERR(task)) 740 740 KUNIT_FAIL(test, "Couldn't create move fence signal task\n"); 741 741 ··· 743 743 err = ttm_bo_validate(bo, placement_val, &ctx_val); 744 744 dma_resv_unlock(bo->base.resv); 745 745 746 - dma_fence_wait_timeout(man->move, false, MAX_SCHEDULE_TIMEOUT); 746 + dma_fence_wait_timeout(man->eviction_fences[0], false, MAX_SCHEDULE_TIMEOUT); 747 + man->eviction_fences[0] = NULL; 747 748 748 749 KUNIT_EXPECT_EQ(test, err, 0); 749 750 KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size);
+3 -2
drivers/gpu/drm/ttm/tests/ttm_resource_test.c
··· 207 207 struct ttm_resource_test_priv *priv = test->priv; 208 208 struct ttm_resource_manager *man; 209 209 size_t size = SZ_16K; 210 + int i; 210 211 211 212 man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL); 212 213 KUNIT_ASSERT_NOT_NULL(test, man); ··· 217 216 KUNIT_ASSERT_PTR_EQ(test, man->bdev, priv->devs->ttm_dev); 218 217 KUNIT_ASSERT_EQ(test, man->size, size); 219 218 KUNIT_ASSERT_EQ(test, man->usage, 0); 220 - KUNIT_ASSERT_NULL(test, man->move); 221 - KUNIT_ASSERT_NOT_NULL(test, &man->move_lock); 219 + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) 220 + KUNIT_ASSERT_NULL(test, man->eviction_fences[i]); 222 221 223 222 for (int i = 0; i < TTM_MAX_BO_PRIORITY; ++i) 224 223 KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[i]));
+24 -23
drivers/gpu/drm/ttm/ttm_bo.c
··· 659 659 EXPORT_SYMBOL(ttm_bo_unpin); 660 660 661 661 /* 662 - * Add the last move fence to the BO as kernel dependency and reserve a new 663 - * fence slot. 662 + * Add the pipelined eviction fencesto the BO as kernel dependency and reserve new 663 + * fence slots. 664 664 */ 665 - static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, 666 - struct ttm_resource_manager *man, 667 - bool no_wait_gpu) 665 + static int ttm_bo_add_pipelined_eviction_fences(struct ttm_buffer_object *bo, 666 + struct ttm_resource_manager *man, 667 + bool no_wait_gpu) 668 668 { 669 669 struct dma_fence *fence; 670 - int ret; 670 + int i; 671 671 672 - spin_lock(&man->move_lock); 673 - fence = dma_fence_get(man->move); 674 - spin_unlock(&man->move_lock); 672 + spin_lock(&man->eviction_lock); 673 + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) { 674 + fence = man->eviction_fences[i]; 675 + if (!fence) 676 + continue; 675 677 676 - if (!fence) 677 - return 0; 678 - 679 - if (no_wait_gpu) { 680 - ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY; 681 - dma_fence_put(fence); 682 - return ret; 678 + if (no_wait_gpu) { 679 + if (!dma_fence_is_signaled(fence)) { 680 + spin_unlock(&man->eviction_lock); 681 + return -EBUSY; 682 + } 683 + } else { 684 + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); 685 + } 683 686 } 687 + spin_unlock(&man->eviction_lock); 684 688 685 - dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL); 686 - 687 - ret = dma_resv_reserve_fences(bo->base.resv, 1); 688 - dma_fence_put(fence); 689 - return ret; 689 + /* TODO: this call should be removed. 
*/ 690 + return dma_resv_reserve_fences(bo->base.resv, 1); 690 691 } 691 692 692 693 /** ··· 720 719 int i, ret; 721 720 722 721 ticket = dma_resv_locking_ctx(bo->base.resv); 723 - ret = dma_resv_reserve_fences(bo->base.resv, 1); 722 + ret = dma_resv_reserve_fences(bo->base.resv, TTM_NUM_MOVE_FENCES); 724 723 if (unlikely(ret)) 725 724 return ret; 726 725 ··· 759 758 return ret; 760 759 } 761 760 762 - ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu); 761 + ret = ttm_bo_add_pipelined_eviction_fences(bo, man, ctx->no_wait_gpu); 763 762 if (unlikely(ret)) { 764 763 ttm_resource_free(bo, res); 765 764 if (ret == -EBUSY)
+31 -7
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 258 258 ret = dma_resv_trylock(&fbo->base.base._resv); 259 259 WARN_ON(!ret); 260 260 261 - ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); 261 + ret = dma_resv_reserve_fences(&fbo->base.base._resv, TTM_NUM_MOVE_FENCES); 262 262 if (ret) { 263 263 dma_resv_unlock(&fbo->base.base._resv); 264 264 kfree(fbo); ··· 646 646 { 647 647 struct ttm_device *bdev = bo->bdev; 648 648 struct ttm_resource_manager *from; 649 + struct dma_fence *tmp; 650 + int i; 649 651 650 652 from = ttm_manager_type(bdev, bo->resource->mem_type); 651 653 652 654 /** 653 655 * BO doesn't have a TTM we need to bind/unbind. Just remember 654 - * this eviction and free up the allocation 656 + * this eviction and free up the allocation. 657 + * The fence will be saved in the first free slot or in the slot 658 + * already used to store a fence from the same context. Since 659 + * drivers can't use more than TTM_NUM_MOVE_FENCES contexts for 660 + * evictions we should always find a slot to use. 655 661 */ 656 - spin_lock(&from->move_lock); 657 - if (!from->move || dma_fence_is_later(fence, from->move)) { 658 - dma_fence_put(from->move); 659 - from->move = dma_fence_get(fence); 662 + spin_lock(&from->eviction_lock); 663 + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) { 664 + tmp = from->eviction_fences[i]; 665 + if (!tmp) 666 + break; 667 + if (fence->context != tmp->context) 668 + continue; 669 + if (dma_fence_is_later(fence, tmp)) { 670 + dma_fence_put(tmp); 671 + break; 672 + } 673 + goto unlock; 660 674 } 661 - spin_unlock(&from->move_lock); 675 + if (i < TTM_NUM_MOVE_FENCES) { 676 + from->eviction_fences[i] = dma_fence_get(fence); 677 + } else { 678 + WARN(1, "not enough fence slots for all fence contexts"); 679 + spin_unlock(&from->eviction_lock); 680 + dma_fence_wait(fence, false); 681 + goto end; 682 + } 662 683 684 + unlock: 685 + spin_unlock(&from->eviction_lock); 686 + end: 663 687 ttm_resource_free(bo, &bo->resource); 664 688 } 665 689
+19 -12
drivers/gpu/drm/ttm/ttm_resource.c
··· 524 524 { 525 525 unsigned i; 526 526 527 - spin_lock_init(&man->move_lock); 528 527 man->bdev = bdev; 529 528 man->size = size; 530 529 man->usage = 0; 531 530 532 531 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) 533 532 INIT_LIST_HEAD(&man->lru[i]); 534 - man->move = NULL; 533 + spin_lock_init(&man->eviction_lock); 534 + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) 535 + man->eviction_fences[i] = NULL; 535 536 } 536 537 EXPORT_SYMBOL(ttm_resource_manager_init); 537 538 ··· 553 552 .no_wait_gpu = false, 554 553 }; 555 554 struct dma_fence *fence; 556 - int ret; 555 + int ret, i; 557 556 558 557 do { 559 558 ret = ttm_bo_evict_first(bdev, man, &ctx); ··· 563 562 if (ret && ret != -ENOENT) 564 563 return ret; 565 564 566 - spin_lock(&man->move_lock); 567 - fence = dma_fence_get(man->move); 568 - spin_unlock(&man->move_lock); 565 + ret = 0; 569 566 570 - if (fence) { 571 - ret = dma_fence_wait(fence, false); 572 - dma_fence_put(fence); 573 - if (ret) 574 - return ret; 567 + spin_lock(&man->eviction_lock); 568 + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) { 569 + fence = man->eviction_fences[i]; 570 + if (fence && !dma_fence_is_signaled(fence)) { 571 + dma_fence_get(fence); 572 + spin_unlock(&man->eviction_lock); 573 + ret = dma_fence_wait(fence, false); 574 + dma_fence_put(fence); 575 + if (ret) 576 + return ret; 577 + spin_lock(&man->eviction_lock); 578 + } 575 579 } 580 + spin_unlock(&man->eviction_lock); 576 581 577 - return 0; 582 + return ret; 578 583 } 579 584 EXPORT_SYMBOL(ttm_resource_manager_evict_all); 580 585
+3 -1
drivers/gpu/drm/vkms/Makefile
··· 9 9 vkms_writeback.o \ 10 10 vkms_connector.o \ 11 11 vkms_config.o \ 12 - vkms_configfs.o 12 + vkms_configfs.o \ 13 + vkms_colorop.o \ 14 + vkms_luts.o 13 15 14 16 obj-$(CONFIG_DRM_VKMS) += vkms.o 15 17 obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/
+2 -1
drivers/gpu/drm/vkms/tests/Makefile
··· 2 2 3 3 vkms-kunit-tests-y := \ 4 4 vkms_config_test.o \ 5 - vkms_format_test.o 5 + vkms_format_test.o \ 6 + vkms_color_test.o 6 7 7 8 obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms-kunit-tests.o
+414
drivers/gpu/drm/vkms/tests/vkms_color_test.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + 3 + #include <kunit/test.h> 4 + 5 + #include <drm/drm_fixed.h> 6 + #include <drm/drm_mode.h> 7 + #include "../vkms_composer.h" 8 + #include "../vkms_drv.h" 9 + #include "../vkms_luts.h" 10 + 11 + #define TEST_LUT_SIZE 16 12 + 13 + MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); 14 + 15 + static struct drm_color_lut test_linear_array[TEST_LUT_SIZE] = { 16 + { 0x0, 0x0, 0x0, 0 }, 17 + { 0x1111, 0x1111, 0x1111, 0 }, 18 + { 0x2222, 0x2222, 0x2222, 0 }, 19 + { 0x3333, 0x3333, 0x3333, 0 }, 20 + { 0x4444, 0x4444, 0x4444, 0 }, 21 + { 0x5555, 0x5555, 0x5555, 0 }, 22 + { 0x6666, 0x6666, 0x6666, 0 }, 23 + { 0x7777, 0x7777, 0x7777, 0 }, 24 + { 0x8888, 0x8888, 0x8888, 0 }, 25 + { 0x9999, 0x9999, 0x9999, 0 }, 26 + { 0xaaaa, 0xaaaa, 0xaaaa, 0 }, 27 + { 0xbbbb, 0xbbbb, 0xbbbb, 0 }, 28 + { 0xcccc, 0xcccc, 0xcccc, 0 }, 29 + { 0xdddd, 0xdddd, 0xdddd, 0 }, 30 + { 0xeeee, 0xeeee, 0xeeee, 0 }, 31 + { 0xffff, 0xffff, 0xffff, 0 }, 32 + }; 33 + 34 + /* lerp test parameters */ 35 + struct vkms_color_test_lerp_params { 36 + s64 t; 37 + __u16 a; 38 + __u16 b; 39 + __u16 expected; 40 + }; 41 + 42 + /* lerp test cases */ 43 + static const struct vkms_color_test_lerp_params color_test_lerp_cases[] = { 44 + /* Half-way round down */ 45 + { 0x80000000 - 1, 0x0, 0x10, 0x8 }, 46 + { 0x80000000 - 1, 0x1, 0x10, 0x8 }, /* Odd a */ 47 + { 0x80000000 - 1, 0x1, 0xf, 0x8 }, /* Odd b */ 48 + { 0x80000000 - 1, 0x10, 0x10, 0x10 }, /* b = a */ 49 + { 0x80000000 - 1, 0x10, 0x11, 0x10 }, /* b = a + 1*/ 50 + /* Half-way round up */ 51 + { 0x80000000, 0x0, 0x10, 0x8 }, 52 + { 0x80000000, 0x1, 0x10, 0x9 }, /* Odd a */ 53 + { 0x80000000, 0x1, 0xf, 0x8 }, /* Odd b */ 54 + { 0x80000000, 0x10, 0x10, 0x10 }, /* b = a */ 55 + { 0x80000000, 0x10, 0x11, 0x11 }, /* b = a + 1*/ 56 + /* t = 0.0 */ 57 + { 0x0, 0x0, 0x10, 0x0 }, 58 + { 0x0, 0x1, 0x10, 0x1 }, /* Odd a */ 59 + { 0x0, 0x1, 0xf, 0x1 }, /* Odd b */ 60 + { 0x0, 0x10, 0x10, 0x10 }, /* b = a */ 61 + { 0x0, 0x10, 0x11, 
0x10 }, /* b = a + 1*/ 62 + /* t = 1.0 */ 63 + { 0x100000000, 0x0, 0x10, 0x10 }, 64 + { 0x100000000, 0x1, 0x10, 0x10 }, /* Odd a */ 65 + { 0x100000000, 0x1, 0xf, 0xf }, /* Odd b */ 66 + { 0x100000000, 0x10, 0x10, 0x10 }, /* b = a */ 67 + { 0x100000000, 0x10, 0x11, 0x11 }, /* b = a + 1*/ 68 + /* t = 0.0 + 1 */ 69 + { 0x0 + 1, 0x0, 0x10, 0x0 }, 70 + { 0x0 + 1, 0x1, 0x10, 0x1 }, /* Odd a */ 71 + { 0x0 + 1, 0x1, 0xf, 0x1 }, /* Odd b */ 72 + { 0x0 + 1, 0x10, 0x10, 0x10 }, /* b = a */ 73 + { 0x0 + 1, 0x10, 0x11, 0x10 }, /* b = a + 1*/ 74 + /* t = 1.0 - 1 */ 75 + { 0x100000000 - 1, 0x0, 0x10, 0x10 }, 76 + { 0x100000000 - 1, 0x1, 0x10, 0x10 }, /* Odd a */ 77 + { 0x100000000 - 1, 0x1, 0xf, 0xf }, /* Odd b */ 78 + { 0x100000000 - 1, 0x10, 0x10, 0x10 }, /* b = a */ 79 + { 0x100000000 - 1, 0x10, 0x11, 0x11 }, /* b = a + 1*/ 80 + /* t chosen to verify the flipping point of result a (or b) to a+1 (or b-1) */ 81 + { 0x80000000 - 1, 0x0, 0x1, 0x0 }, 82 + { 0x80000000, 0x0, 0x1, 0x1 }, 83 + }; 84 + 85 + static const struct vkms_color_lut test_linear_lut = { 86 + .base = test_linear_array, 87 + .lut_length = TEST_LUT_SIZE, 88 + .channel_value2index_ratio = 0xf000fll 89 + }; 90 + 91 + static void vkms_color_test_get_lut_index(struct kunit *test) 92 + { 93 + s64 lut_index; 94 + int i; 95 + 96 + lut_index = get_lut_index(&test_linear_lut, test_linear_array[0].red); 97 + KUNIT_EXPECT_EQ(test, drm_fixp2int(lut_index), 0); 98 + 99 + for (i = 0; i < TEST_LUT_SIZE; i++) { 100 + lut_index = get_lut_index(&test_linear_lut, test_linear_array[i].red); 101 + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(lut_index), i); 102 + } 103 + 104 + KUNIT_EXPECT_EQ(test, drm_fixp2int(get_lut_index(&srgb_eotf, 0x0)), 0x0); 105 + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0x0)), 0x0); 106 + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0x101)), 0x1); 107 + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0x202)), 0x2); 108 + 109 + KUNIT_EXPECT_EQ(test, 
drm_fixp2int(get_lut_index(&srgb_inv_eotf, 0x0)), 0x0); 110 + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x0)), 0x0); 111 + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x101)), 0x1); 112 + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x202)), 0x2); 113 + 114 + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0xfefe)), 0xfe); 115 + KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0xffff)), 0xff); 116 + } 117 + 118 + static void vkms_color_test_lerp(struct kunit *test) 119 + { 120 + int i; 121 + 122 + for (i = 0; i < ARRAY_SIZE(color_test_lerp_cases); i++) { 123 + const struct vkms_color_test_lerp_params *params = &color_test_lerp_cases[i]; 124 + 125 + KUNIT_EXPECT_EQ(test, lerp_u16(params->a, params->b, params->t), params->expected); 126 + } 127 + } 128 + 129 + static void vkms_color_test_linear(struct kunit *test) 130 + { 131 + for (int i = 0; i < LUT_SIZE; i++) { 132 + int linear = apply_lut_to_channel_value(&linear_eotf, i * 0x101, LUT_RED); 133 + 134 + KUNIT_EXPECT_EQ(test, DIV_ROUND_CLOSEST(linear, 0x101), i); 135 + } 136 + } 137 + 138 + static void vkms_color_srgb_inv_srgb(struct kunit *test) 139 + { 140 + u16 srgb, final; 141 + 142 + for (int i = 0; i < LUT_SIZE; i++) { 143 + srgb = apply_lut_to_channel_value(&srgb_eotf, i * 0x101, LUT_RED); 144 + final = apply_lut_to_channel_value(&srgb_inv_eotf, srgb, LUT_RED); 145 + 146 + KUNIT_EXPECT_GE(test, final / 0x101, i - 1); 147 + KUNIT_EXPECT_LE(test, final / 0x101, i + 1); 148 + } 149 + } 150 + 151 + #define FIXPT_HALF (DRM_FIXED_ONE >> 1) 152 + #define FIXPT_QUARTER (DRM_FIXED_ONE >> 2) 153 + 154 + static const struct drm_color_ctm_3x4 test_matrix_3x4_50_desat = { { 155 + FIXPT_HALF, FIXPT_QUARTER, FIXPT_QUARTER, 0, 156 + FIXPT_QUARTER, FIXPT_HALF, FIXPT_QUARTER, 0, 157 + FIXPT_QUARTER, FIXPT_QUARTER, FIXPT_HALF, 0 158 + } }; 159 + 160 + static void vkms_color_ctm_3x4_50_desat(struct kunit *test) 161 + 
{ 162 + struct pixel_argb_s32 ref, out; 163 + 164 + /* full white */ 165 + ref.a = 0xffff; 166 + ref.r = 0xffff; 167 + ref.g = 0xffff; 168 + ref.b = 0xffff; 169 + 170 + memcpy(&out, &ref, sizeof(out)); 171 + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); 172 + 173 + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); 174 + 175 + /* full black */ 176 + ref.a = 0xffff; 177 + ref.r = 0x0; 178 + ref.g = 0x0; 179 + ref.b = 0x0; 180 + 181 + memcpy(&out, &ref, sizeof(out)); 182 + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); 183 + 184 + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); 185 + 186 + /* 50% grey */ 187 + ref.a = 0xffff; 188 + ref.r = 0x8000; 189 + ref.g = 0x8000; 190 + ref.b = 0x8000; 191 + 192 + memcpy(&out, &ref, sizeof(out)); 193 + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); 194 + 195 + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); 196 + 197 + /* full red to 50% desat */ 198 + ref.a = 0xffff; 199 + ref.r = 0x8000; 200 + ref.g = 0x4000; 201 + ref.b = 0x4000; 202 + 203 + out.a = 0xffff; 204 + out.r = 0xffff; 205 + out.g = 0x0; 206 + out.b = 0x0; 207 + 208 + apply_3x4_matrix(&out, &test_matrix_3x4_50_desat); 209 + 210 + KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out)); 211 + } 212 + 213 + /* 214 + * BT.709 encoding matrix 215 + * 216 + * Values printed from within IGT when converting 217 + * igt_matrix_3x4_bt709_enc to the fixed-point format expected 218 + * by DRM/KMS. 
219 + */ 220 + static const struct drm_color_ctm_3x4 test_matrix_3x4_bt709_enc = { { 221 + 0x00000000366cf400ull, 0x00000000b7175900ull, 0x0000000127bb300ull, 0, 222 + 0x800000001993b3a0ull, 0x800000005609fe80ull, 0x000000006f9db200ull, 0, 223 + 0x000000009d70a400ull, 0x800000008f011100ull, 0x800000000e6f9330ull, 0 224 + } }; 225 + 226 + static void vkms_color_ctm_3x4_bt709(struct kunit *test) 227 + { 228 + struct pixel_argb_s32 out; 229 + 230 + /* full white to bt709 */ 231 + out.a = 0xffff; 232 + out.r = 0xffff; 233 + out.g = 0xffff; 234 + out.b = 0xffff; 235 + 236 + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); 237 + 238 + /* Y 255 */ 239 + KUNIT_EXPECT_GT(test, out.r, 0xfe00); 240 + KUNIT_EXPECT_LT(test, out.r, 0x10000); 241 + 242 + /* U 0 */ 243 + KUNIT_EXPECT_LT(test, out.g, 0x0100); 244 + 245 + /* V 0 */ 246 + KUNIT_EXPECT_LT(test, out.b, 0x0100); 247 + 248 + /* full black to bt709 */ 249 + out.a = 0xffff; 250 + out.r = 0x0; 251 + out.g = 0x0; 252 + out.b = 0x0; 253 + 254 + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); 255 + 256 + /* Y 0 */ 257 + KUNIT_EXPECT_LT(test, out.r, 0x100); 258 + 259 + /* U 0 */ 260 + KUNIT_EXPECT_LT(test, out.g, 0x0100); 261 + 262 + /* V 0 */ 263 + KUNIT_EXPECT_LT(test, out.b, 0x0100); 264 + 265 + /* gray to bt709 */ 266 + out.a = 0xffff; 267 + out.r = 0x7fff; 268 + out.g = 0x7fff; 269 + out.b = 0x7fff; 270 + 271 + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); 272 + 273 + /* Y 127 */ 274 + KUNIT_EXPECT_GT(test, out.r, 0x7e00); 275 + KUNIT_EXPECT_LT(test, out.r, 0x8000); 276 + 277 + /* U 0 */ 278 + KUNIT_EXPECT_LT(test, out.g, 0x0100); 279 + 280 + /* V 0 */ 281 + KUNIT_EXPECT_LT(test, out.b, 0x0100); 282 + 283 + /* == red 255 - bt709 enc == */ 284 + out.a = 0xffff; 285 + out.r = 0xffff; 286 + out.g = 0x0; 287 + out.b = 0x0; 288 + 289 + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); 290 + 291 + /* Y 54 */ 292 + KUNIT_EXPECT_GT(test, out.r, 0x3500); 293 + KUNIT_EXPECT_LT(test, out.r, 0x3700); 294 + 295 + /* U 0 
*/ 296 + KUNIT_EXPECT_LT(test, out.g, 0x0100); 297 + 298 + /* V 157 */ 299 + KUNIT_EXPECT_GT(test, out.b, 0x9C00); 300 + KUNIT_EXPECT_LT(test, out.b, 0x9E00); 301 + 302 + /* == green 255 - bt709 enc == */ 303 + out.a = 0xffff; 304 + out.r = 0x0; 305 + out.g = 0xffff; 306 + out.b = 0x0; 307 + 308 + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); 309 + 310 + /* Y 182 */ 311 + KUNIT_EXPECT_GT(test, out.r, 0xB500); 312 + KUNIT_EXPECT_LT(test, out.r, 0xB780); /* laxed by half*/ 313 + 314 + /* U 0 */ 315 + KUNIT_EXPECT_LT(test, out.g, 0x0100); 316 + 317 + /* V 0 */ 318 + KUNIT_EXPECT_LT(test, out.b, 0x0100); 319 + 320 + /* == blue 255 - bt709 enc == */ 321 + out.a = 0xffff; 322 + out.r = 0x0; 323 + out.g = 0x0; 324 + out.b = 0xffff; 325 + 326 + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); 327 + 328 + /* Y 18 */ 329 + KUNIT_EXPECT_GT(test, out.r, 0x1100); 330 + KUNIT_EXPECT_LT(test, out.r, 0x1300); 331 + 332 + /* U 111 */ 333 + KUNIT_EXPECT_GT(test, out.g, 0x6E00); 334 + KUNIT_EXPECT_LT(test, out.g, 0x7000); 335 + 336 + /* V 0 */ 337 + KUNIT_EXPECT_LT(test, out.b, 0x0100); 338 + 339 + /* == red 140 - bt709 enc == */ 340 + out.a = 0xffff; 341 + out.r = 0x8c8c; 342 + out.g = 0x0; 343 + out.b = 0x0; 344 + 345 + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); 346 + 347 + /* Y 30 */ 348 + KUNIT_EXPECT_GT(test, out.r, 0x1D00); 349 + KUNIT_EXPECT_LT(test, out.r, 0x1F00); 350 + 351 + /* U 0 */ 352 + KUNIT_EXPECT_LT(test, out.g, 0x100); 353 + 354 + /* V 87 */ 355 + KUNIT_EXPECT_GT(test, out.b, 0x5600); 356 + KUNIT_EXPECT_LT(test, out.b, 0x5800); 357 + 358 + /* == green 140 - bt709 enc == */ 359 + out.a = 0xffff; 360 + out.r = 0x0; 361 + out.g = 0x8c8c; 362 + out.b = 0x0; 363 + 364 + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); 365 + 366 + /* Y 30 */ 367 + KUNIT_EXPECT_GT(test, out.r, 0x6400); 368 + KUNIT_EXPECT_LT(test, out.r, 0x6600); 369 + 370 + /* U 0 */ 371 + KUNIT_EXPECT_LT(test, out.g, 0x100); 372 + 373 + /* V 0 */ 374 + KUNIT_EXPECT_LT(test, out.b, 
0x100); 375 + 376 + /* == blue 140 - bt709 enc == */ 377 + out.a = 0xffff; 378 + out.r = 0x0; 379 + out.g = 0x0; 380 + out.b = 0x8c8c; 381 + 382 + apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc); 383 + 384 + /* Y 30 */ 385 + KUNIT_EXPECT_GT(test, out.r, 0x900); 386 + KUNIT_EXPECT_LT(test, out.r, 0xB00); 387 + 388 + /* U 61 */ 389 + KUNIT_EXPECT_GT(test, out.g, 0x3C00); 390 + KUNIT_EXPECT_LT(test, out.g, 0x3E00); 391 + 392 + /* V 0 */ 393 + KUNIT_EXPECT_LT(test, out.b, 0x100); 394 + } 395 + 396 + static struct kunit_case vkms_color_test_cases[] = { 397 + KUNIT_CASE(vkms_color_test_get_lut_index), 398 + KUNIT_CASE(vkms_color_test_lerp), 399 + KUNIT_CASE(vkms_color_test_linear), 400 + KUNIT_CASE(vkms_color_srgb_inv_srgb), 401 + KUNIT_CASE(vkms_color_ctm_3x4_50_desat), 402 + KUNIT_CASE(vkms_color_ctm_3x4_bt709), 403 + {} 404 + }; 405 + 406 + static struct kunit_suite vkms_color_test_suite = { 407 + .name = "vkms-color", 408 + .test_cases = vkms_color_test_cases, 409 + }; 410 + 411 + kunit_test_suite(vkms_color_test_suite); 412 + 413 + MODULE_DESCRIPTION("Kunit test for VKMS LUT handling"); 414 + MODULE_LICENSE("GPL");
+30 -17
drivers/gpu/drm/vkms/tests/vkms_config_test.c
··· 83 83 bool enable_cursor; 84 84 bool enable_writeback; 85 85 bool enable_overlay; 86 + bool enable_plane_pipeline; 86 87 }; 87 88 88 89 static void vkms_config_test_empty_config(struct kunit *test) ··· 109 108 } 110 109 111 110 static struct default_config_case default_config_cases[] = { 112 - { false, false, false }, 113 - { true, false, false }, 114 - { true, true, false }, 115 - { true, false, true }, 116 - { false, true, false }, 117 - { false, true, true }, 118 - { false, false, true }, 119 - { true, true, true }, 111 + { false, false, false, false }, 112 + { true, false, false, false }, 113 + { true, true, false, false }, 114 + { true, false, true, false }, 115 + { false, true, false, false }, 116 + { false, true, true, false }, 117 + { false, false, true, false }, 118 + { true, true, true, false }, 119 + { false, false, false, true }, 120 + { true, false, false, true }, 121 + { true, true, false, true }, 122 + { true, false, true, true }, 123 + { false, true, false, true }, 124 + { false, true, true, true }, 125 + { false, false, true, true }, 126 + { true, true, true, true }, 120 127 }; 121 128 122 129 KUNIT_ARRAY_PARAM(default_config, default_config_cases, NULL); ··· 141 132 142 133 config = vkms_config_default_create(params->enable_cursor, 143 134 params->enable_writeback, 144 - params->enable_overlay); 135 + params->enable_overlay, 136 + params->enable_plane_pipeline); 145 137 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); 146 138 147 139 /* Planes */ 148 140 vkms_config_for_each_plane(config, plane_cfg) { 141 + KUNIT_EXPECT_EQ(test, 142 + vkms_config_plane_get_default_pipeline(plane_cfg), 143 + params->enable_plane_pipeline); 149 144 switch (vkms_config_plane_get_type(plane_cfg)) { 150 145 case DRM_PLANE_TYPE_PRIMARY: 151 146 n_primaries++; ··· 381 368 struct vkms_config_plane *plane_cfg; 382 369 int n; 383 370 384 - config = vkms_config_default_create(false, false, false); 371 + config = vkms_config_default_create(false, false, false, false); 385 372 
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); 386 373 387 374 /* Invalid: No planes */ ··· 406 393 struct vkms_config_encoder *encoder_cfg; 407 394 int err; 408 395 409 - config = vkms_config_default_create(false, false, false); 396 + config = vkms_config_default_create(false, false, false, false); 410 397 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); 411 398 412 399 plane_cfg = get_first_plane(config); ··· 487 474 struct vkms_config_plane *plane_cfg; 488 475 struct vkms_config_crtc *crtc_cfg; 489 476 490 - config = vkms_config_default_create(false, false, false); 477 + config = vkms_config_default_create(false, false, false, false); 491 478 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); 492 479 493 480 plane_cfg = get_first_plane(config); ··· 506 493 struct vkms_config_crtc *crtc_cfg; 507 494 int n; 508 495 509 - config = vkms_config_default_create(false, false, false); 496 + config = vkms_config_default_create(false, false, false, false); 510 497 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); 511 498 512 499 /* Invalid: No CRTCs */ ··· 529 516 struct vkms_config_encoder *encoder_cfg; 530 517 int n; 531 518 532 - config = vkms_config_default_create(false, false, false); 519 + config = vkms_config_default_create(false, false, false, false); 533 520 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); 534 521 535 522 /* Invalid: No encoders */ ··· 554 541 struct vkms_config_encoder *encoder_cfg; 555 542 int err; 556 543 557 - config = vkms_config_default_create(false, false, false); 544 + config = vkms_config_default_create(false, false, false, false); 558 545 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); 559 546 560 547 crtc_cfg1 = get_first_crtc(config); ··· 600 587 struct vkms_config_connector *connector_cfg; 601 588 int n; 602 589 603 - config = vkms_config_default_create(false, false, false); 590 + config = vkms_config_default_create(false, false, false, false); 604 591 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); 605 592 606 593 /* Invalid: No connectors */ ··· 623 610 struct 
vkms_config_encoder *encoder_cfg; 624 611 struct vkms_config_connector *connector_cfg; 625 612 626 - config = vkms_config_default_create(false, false, false); 613 + config = vkms_config_default_create(false, false, false, false); 627 614 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); 628 615 629 616 encoder_cfg = get_first_encoder(config);
+120
drivers/gpu/drm/vkms/vkms_colorop.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + 3 + #include <linux/slab.h> 4 + #include <drm/drm_colorop.h> 5 + #include <drm/drm_print.h> 6 + #include <drm/drm_property.h> 7 + #include <drm/drm_plane.h> 8 + 9 + #include "vkms_drv.h" 10 + 11 + static const u64 supported_tfs = 12 + BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) | 13 + BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF); 14 + 15 + #define MAX_COLOR_PIPELINE_OPS 4 16 + 17 + static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list) 18 + { 19 + struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS]; 20 + struct drm_device *dev = plane->dev; 21 + int ret; 22 + int i = 0, j = 0; 23 + 24 + memset(ops, 0, sizeof(ops)); 25 + 26 + /* 1st op: 1d curve */ 27 + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); 28 + if (!ops[i]) { 29 + drm_err(dev, "KMS: Failed to allocate colorop\n"); 30 + ret = -ENOMEM; 31 + goto cleanup; 32 + } 33 + 34 + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, supported_tfs, 35 + DRM_COLOROP_FLAG_ALLOW_BYPASS); 36 + if (ret) 37 + goto cleanup; 38 + 39 + list->type = ops[i]->base.id; 40 + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id); 41 + 42 + i++; 43 + 44 + /* 2nd op: 3x4 matrix */ 45 + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); 46 + if (!ops[i]) { 47 + drm_err(dev, "KMS: Failed to allocate colorop\n"); 48 + ret = -ENOMEM; 49 + goto cleanup; 50 + } 51 + 52 + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); 53 + if (ret) 54 + goto cleanup; 55 + 56 + drm_colorop_set_next_property(ops[i - 1], ops[i]); 57 + 58 + i++; 59 + 60 + /* 3rd op: 3x4 matrix */ 61 + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); 62 + if (!ops[i]) { 63 + drm_err(dev, "KMS: Failed to allocate colorop\n"); 64 + ret = -ENOMEM; 65 + goto cleanup; 66 + } 67 + 68 + ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS); 69 + if (ret) 70 + goto cleanup; 71 + 72 + drm_colorop_set_next_property(ops[i - 
1], ops[i]); 73 + 74 + i++; 75 + 76 + /* 4th op: 1d curve */ 77 + ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL); 78 + if (!ops[i]) { 79 + drm_err(dev, "KMS: Failed to allocate colorop\n"); 80 + ret = -ENOMEM; 81 + goto cleanup; 82 + } 83 + 84 + ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, supported_tfs, 85 + DRM_COLOROP_FLAG_ALLOW_BYPASS); 86 + if (ret) 87 + goto cleanup; 88 + 89 + drm_colorop_set_next_property(ops[i - 1], ops[i]); 90 + 91 + return 0; 92 + 93 + cleanup: 94 + for (j = 0; j < i; j++) { 95 + if (ops[j]) { 96 + drm_colorop_cleanup(ops[j]); 97 + kfree(ops[j]); 98 + } 99 + } 100 + 101 + return ret; 102 + } 103 + 104 + int vkms_initialize_colorops(struct drm_plane *plane) 105 + { 106 + struct drm_prop_enum_list pipeline; 107 + int ret; 108 + 109 + /* Add color pipeline */ 110 + ret = vkms_initialize_color_pipeline(plane, &pipeline); 111 + if (ret) 112 + return ret; 113 + 114 + /* Create COLOR_PIPELINE property and attach */ 115 + ret = drm_plane_create_color_pipeline_property(plane, &pipeline, 1); 116 + if (ret) 117 + return ret; 118 + 119 + return 0; 120 + }
+118 -17
drivers/gpu/drm/vkms/vkms_composer.c
··· 11 11 #include <drm/drm_print.h> 12 12 #include <drm/drm_vblank.h> 13 13 #include <linux/minmax.h> 14 + #include <kunit/visibility.h> 14 15 15 - #include "vkms_drv.h" 16 + #include "vkms_composer.h" 17 + #include "vkms_luts.h" 16 18 17 19 static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha) 18 20 { ··· 63 61 } 64 62 65 63 // lerp(a, b, t) = a + (b - a) * t 66 - static u16 lerp_u16(u16 a, u16 b, s64 t) 64 + VISIBLE_IF_KUNIT u16 lerp_u16(u16 a, u16 b, s64 t) 67 65 { 68 66 s64 a_fp = drm_int2fixp(a); 69 67 s64 b_fp = drm_int2fixp(b); ··· 72 70 73 71 return drm_fixp2int_round(a_fp + delta); 74 72 } 73 + EXPORT_SYMBOL_IF_KUNIT(lerp_u16); 75 74 76 - static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value) 75 + VISIBLE_IF_KUNIT s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value) 77 76 { 78 77 s64 color_channel_fp = drm_int2fixp(channel_value); 79 78 80 79 return drm_fixp_mul(color_channel_fp, lut->channel_value2index_ratio); 81 80 } 81 + EXPORT_SYMBOL_IF_KUNIT(get_lut_index); 82 82 83 - /* 84 - * This enum is related to the positions of the variables inside 85 - * `struct drm_color_lut`, so the order of both needs to be the same. 
86 - */ 87 - enum lut_channel { 88 - LUT_RED = 0, 89 - LUT_GREEN, 90 - LUT_BLUE, 91 - LUT_RESERVED 92 - }; 93 - 94 - static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value, 95 - enum lut_channel channel) 83 + VISIBLE_IF_KUNIT u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value, 84 + enum lut_channel channel) 96 85 { 97 86 s64 lut_index = get_lut_index(lut, channel_value); 98 87 u16 *floor_lut_value, *ceil_lut_value; ··· 108 115 return lerp_u16(floor_channel_value, ceil_channel_value, 109 116 lut_index & DRM_FIXED_DECIMAL_MASK); 110 117 } 118 + EXPORT_SYMBOL_IF_KUNIT(apply_lut_to_channel_value); 119 + 111 120 112 121 static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buffer *output_buffer) 113 122 { ··· 125 130 pixel->r = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->r, LUT_RED); 126 131 pixel->g = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->g, LUT_GREEN); 127 132 pixel->b = apply_lut_to_channel_value(&crtc_state->gamma_lut, pixel->b, LUT_BLUE); 133 + } 134 + } 135 + 136 + VISIBLE_IF_KUNIT void apply_3x4_matrix(struct pixel_argb_s32 *pixel, 137 + const struct drm_color_ctm_3x4 *matrix) 138 + { 139 + s64 rf, gf, bf; 140 + s64 r, g, b; 141 + 142 + r = drm_int2fixp(pixel->r); 143 + g = drm_int2fixp(pixel->g); 144 + b = drm_int2fixp(pixel->b); 145 + 146 + rf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[0]), r) + 147 + drm_fixp_mul(drm_sm2fixp(matrix->matrix[1]), g) + 148 + drm_fixp_mul(drm_sm2fixp(matrix->matrix[2]), b) + 149 + drm_sm2fixp(matrix->matrix[3]); 150 + 151 + gf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[4]), r) + 152 + drm_fixp_mul(drm_sm2fixp(matrix->matrix[5]), g) + 153 + drm_fixp_mul(drm_sm2fixp(matrix->matrix[6]), b) + 154 + drm_sm2fixp(matrix->matrix[7]); 155 + 156 + bf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[8]), r) + 157 + drm_fixp_mul(drm_sm2fixp(matrix->matrix[9]), g) + 158 + drm_fixp_mul(drm_sm2fixp(matrix->matrix[10]), b) + 159 + 
drm_sm2fixp(matrix->matrix[11]); 160 + 161 + pixel->r = drm_fixp2int_round(rf); 162 + pixel->g = drm_fixp2int_round(gf); 163 + pixel->b = drm_fixp2int_round(bf); 164 + } 165 + EXPORT_SYMBOL_IF_KUNIT(apply_3x4_matrix); 166 + 167 + static void apply_colorop(struct pixel_argb_s32 *pixel, struct drm_colorop *colorop) 168 + { 169 + struct drm_colorop_state *colorop_state = colorop->state; 170 + struct drm_device *dev = colorop->dev; 171 + 172 + if (colorop->type == DRM_COLOROP_1D_CURVE) { 173 + switch (colorop_state->curve_1d_type) { 174 + case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF: 175 + pixel->r = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->r, LUT_RED); 176 + pixel->g = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->g, LUT_GREEN); 177 + pixel->b = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->b, LUT_BLUE); 178 + break; 179 + case DRM_COLOROP_1D_CURVE_SRGB_EOTF: 180 + pixel->r = apply_lut_to_channel_value(&srgb_eotf, pixel->r, LUT_RED); 181 + pixel->g = apply_lut_to_channel_value(&srgb_eotf, pixel->g, LUT_GREEN); 182 + pixel->b = apply_lut_to_channel_value(&srgb_eotf, pixel->b, LUT_BLUE); 183 + break; 184 + default: 185 + drm_WARN_ONCE(dev, true, 186 + "unknown colorop 1D curve type %d\n", 187 + colorop_state->curve_1d_type); 188 + break; 189 + } 190 + } else if (colorop->type == DRM_COLOROP_CTM_3X4) { 191 + if (colorop_state->data) 192 + apply_3x4_matrix(pixel, 193 + (struct drm_color_ctm_3x4 *)colorop_state->data->data); 194 + } 195 + } 196 + 197 + static void pre_blend_color_transform(const struct vkms_plane_state *plane_state, 198 + struct line_buffer *output_buffer) 199 + { 200 + struct pixel_argb_s32 pixel; 201 + 202 + for (size_t x = 0; x < output_buffer->n_pixels; x++) { 203 + struct drm_colorop *colorop = plane_state->base.base.color_pipeline; 204 + 205 + /* 206 + * Some operations, such as applying a BT709 encoding matrix, 207 + * followed by a decoding matrix, require that we preserve 208 + * values above 1.0 and below 0.0 until the end of the 
pipeline. 209 + * 210 + * Pack the 16-bit UNORM values into s32 to give us head-room to 211 + * avoid clipping until we're at the end of the pipeline. Clip 212 + * intentionally at the end of the pipeline before packing 213 + * UNORM values back into u16. 214 + */ 215 + pixel.a = output_buffer->pixels[x].a; 216 + pixel.r = output_buffer->pixels[x].r; 217 + pixel.g = output_buffer->pixels[x].g; 218 + pixel.b = output_buffer->pixels[x].b; 219 + 220 + while (colorop) { 221 + struct drm_colorop_state *colorop_state; 222 + 223 + colorop_state = colorop->state; 224 + 225 + if (!colorop_state) 226 + return; 227 + 228 + if (!colorop_state->bypass) 229 + apply_colorop(&pixel, colorop); 230 + 231 + colorop = colorop->next; 232 + } 233 + 234 + /* clamp values */ 235 + output_buffer->pixels[x].a = clamp_val(pixel.a, 0, 0xffff); 236 + output_buffer->pixels[x].r = clamp_val(pixel.r, 0, 0xffff); 237 + output_buffer->pixels[x].g = clamp_val(pixel.g, 0, 0xffff); 238 + output_buffer->pixels[x].b = clamp_val(pixel.b, 0, 0xffff); 128 239 } 129 240 } 130 241 ··· 449 348 */ 450 349 current_plane->pixel_read_line(current_plane, src_x_start, src_y_start, direction, 451 350 pixel_count, &stage_buffer->pixels[dst_x_start]); 452 - 351 + pre_blend_color_transform(current_plane, stage_buffer); 453 352 pre_mul_alpha_blend(stage_buffer, output_buffer, 454 353 dst_x_start, pixel_count); 455 354 }
+28
drivers/gpu/drm/vkms/vkms_composer.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + 3 + #ifndef _VKMS_COMPOSER_H_ 4 + #define _VKMS_COMPOSER_H_ 5 + 6 + #include <kunit/visibility.h> 7 + #include "vkms_drv.h" 8 + 9 + /* 10 + * This enum is related to the positions of the variables inside 11 + * `struct drm_color_lut`, so the order of both needs to be the same. 12 + */ 13 + enum lut_channel { 14 + LUT_RED = 0, 15 + LUT_GREEN, 16 + LUT_BLUE, 17 + LUT_RESERVED 18 + }; 19 + 20 + #if IS_ENABLED(CONFIG_KUNIT) 21 + u16 lerp_u16(u16 a, u16 b, s64 t); 22 + s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value); 23 + u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value, 24 + enum lut_channel channel); 25 + void apply_3x4_matrix(struct pixel_argb_s32 *pixel, const struct drm_color_ctm_3x4 *matrix); 26 + #endif 27 + 28 + #endif /* _VKMS_COMPOSER_H_ */
+6 -1
drivers/gpu/drm/vkms/vkms_config.c
··· 33 33 34 34 struct vkms_config *vkms_config_default_create(bool enable_cursor, 35 35 bool enable_writeback, 36 - bool enable_overlay) 36 + bool enable_overlay, 37 + bool enable_plane_pipeline) 37 38 { 38 39 struct vkms_config *config; 39 40 struct vkms_config_plane *plane_cfg; ··· 59 58 60 59 if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) 61 60 goto err_alloc; 61 + vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline); 62 62 63 63 if (enable_overlay) { 64 64 for (n = 0; n < NUM_OVERLAY_PLANES; n++) { ··· 69 67 70 68 vkms_config_plane_set_type(plane_cfg, 71 69 DRM_PLANE_TYPE_OVERLAY); 70 + vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline); 72 71 73 72 if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) 74 73 goto err_alloc; ··· 82 79 goto err_alloc; 83 80 84 81 vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); 82 + vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline); 85 83 86 84 if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) 87 85 goto err_alloc; ··· 393 389 return ERR_PTR(-ENOMEM); 394 390 395 391 plane_cfg->config = config; 392 + plane_cfg->default_pipeline = false; 396 393 vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); 397 394 xa_init_flags(&plane_cfg->possible_crtcs, XA_FLAGS_ALLOC); 398 395
+27 -1
drivers/gpu/drm/vkms/vkms_config.h
··· 49 49 50 50 enum drm_plane_type type; 51 51 struct xarray possible_crtcs; 52 + bool default_pipeline; 52 53 53 54 /* Internal usage */ 54 55 struct vkms_plane *plane; ··· 204 203 */ 205 204 struct vkms_config *vkms_config_default_create(bool enable_cursor, 206 205 bool enable_writeback, 207 - bool enable_overlay); 206 + bool enable_overlay, 207 + bool enable_plane_pipeline); 208 208 209 209 /** 210 210 * vkms_config_destroy() - Free a VKMS configuration ··· 288 286 enum drm_plane_type type) 289 287 { 290 288 plane_cfg->type = type; 289 + } 290 + 291 + /** 292 + * vkms_config_plane_get_default_pipeline() - Return if the plane will 293 + * be created with the default pipeline 294 + * @plane_cfg: Plane to get the information from 295 + */ 296 + static inline bool 297 + vkms_config_plane_get_default_pipeline(struct vkms_config_plane *plane_cfg) 298 + { 299 + return plane_cfg->default_pipeline; 300 + } 301 + 302 + /** 303 + * vkms_config_plane_set_default_pipeline() - Set if the plane will 304 + * be created with the default pipeline 305 + * @plane_cfg: Plane to configure the pipeline 306 + * @default_pipeline: New default pipeline value 307 + */ 308 + static inline void 309 + vkms_config_plane_set_default_pipeline(struct vkms_config_plane *plane_cfg, 310 + bool default_pipeline) 311 + { 312 + plane_cfg->default_pipeline = default_pipeline; 291 313 } 292 314 293 315 /**
+7 -1
drivers/gpu/drm/vkms/vkms_drv.c
··· 51 51 module_param_named(enable_overlay, enable_overlay, bool, 0444); 52 52 MODULE_PARM_DESC(enable_overlay, "Enable/Disable overlay support"); 53 53 54 + static bool enable_plane_pipeline; 55 + module_param_named(enable_plane_pipeline, enable_plane_pipeline, bool, 0444); 56 + MODULE_PARM_DESC(enable_plane_pipeline, "Enable/Disable plane pipeline support"); 57 + 54 58 static bool create_default_dev = true; 55 59 module_param_named(create_default_dev, create_default_dev, bool, 0444); 56 60 MODULE_PARM_DESC(create_default_dev, "Create or not the default VKMS device"); ··· 231 227 if (!create_default_dev) 232 228 return 0; 233 229 234 - config = vkms_config_default_create(enable_cursor, enable_writeback, enable_overlay); 230 + config = vkms_config_default_create(enable_cursor, enable_writeback, 231 + enable_overlay, enable_plane_pipeline); 235 232 if (IS_ERR(config)) 236 233 return PTR_ERR(config); 237 234 ··· 258 253 259 254 fdev = config->dev->faux_dev; 260 255 256 + drm_colorop_pipeline_destroy(&config->dev->drm); 261 257 drm_dev_unregister(&config->dev->drm); 262 258 drm_atomic_helper_shutdown(&config->dev->drm); 263 259 devres_release_group(&fdev->dev, NULL);
+10 -2
drivers/gpu/drm/vkms/vkms_drv.h
··· 45 45 unsigned int rotation; 46 46 }; 47 47 48 + struct pixel_argb_s32 { 49 + s32 a, r, g, b; 50 + }; 51 + 48 52 /** 49 53 * struct pixel_argb_u16 - Internal representation of a pixel color. 50 54 * @a: Alpha component value, stored in 16 bits, without padding, using ··· 229 225 }; 230 226 231 227 struct vkms_config; 228 + struct vkms_config_plane; 232 229 233 230 /** 234 231 * struct vkms_device - Description of a VKMS device ··· 303 298 * vkms_plane_init() - Initialize a plane 304 299 * 305 300 * @vkmsdev: VKMS device containing the plane 306 - * @type: type of plane to initialize 301 + * @plane_cfg: plane configuration 307 302 */ 308 303 struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev, 309 - enum drm_plane_type type); 304 + struct vkms_config_plane *plane_cfg); 310 305 311 306 /* CRC Support */ 312 307 const char *const *vkms_get_crc_sources(struct drm_crtc *crtc, ··· 322 317 323 318 /* Writeback */ 324 319 int vkms_enable_writeback_connector(struct vkms_device *vkmsdev, struct vkms_output *vkms_out); 320 + 321 + /* Colorops */ 322 + int vkms_initialize_colorops(struct drm_plane *plane); 325 323 326 324 #endif /* _VKMS_DRV_H_ */
+811
drivers/gpu/drm/vkms/vkms_luts.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + 3 + #include <drm/drm_mode.h> 4 + 5 + #include "vkms_drv.h" 6 + #include "vkms_luts.h" 7 + 8 + /* 9 + * These luts were generated with a LUT generated based on 10 + * skia's transfer function code. The LUT generator can be 11 + * found at 12 + * https://gitlab.freedesktop.org/hwentland/lutgen 13 + */ 14 + 15 + static struct drm_color_lut linear_array[LUT_SIZE] = { 16 + { 0x0, 0x0, 0x0, 0 }, 17 + { 0x101, 0x101, 0x101, 0 }, 18 + { 0x202, 0x202, 0x202, 0 }, 19 + { 0x303, 0x303, 0x303, 0 }, 20 + { 0x404, 0x404, 0x404, 0 }, 21 + { 0x505, 0x505, 0x505, 0 }, 22 + { 0x606, 0x606, 0x606, 0 }, 23 + { 0x707, 0x707, 0x707, 0 }, 24 + { 0x808, 0x808, 0x808, 0 }, 25 + { 0x909, 0x909, 0x909, 0 }, 26 + { 0xa0a, 0xa0a, 0xa0a, 0 }, 27 + { 0xb0b, 0xb0b, 0xb0b, 0 }, 28 + { 0xc0c, 0xc0c, 0xc0c, 0 }, 29 + { 0xd0d, 0xd0d, 0xd0d, 0 }, 30 + { 0xe0e, 0xe0e, 0xe0e, 0 }, 31 + { 0xf0f, 0xf0f, 0xf0f, 0 }, 32 + { 0x1010, 0x1010, 0x1010, 0 }, 33 + { 0x1111, 0x1111, 0x1111, 0 }, 34 + { 0x1212, 0x1212, 0x1212, 0 }, 35 + { 0x1313, 0x1313, 0x1313, 0 }, 36 + { 0x1414, 0x1414, 0x1414, 0 }, 37 + { 0x1515, 0x1515, 0x1515, 0 }, 38 + { 0x1616, 0x1616, 0x1616, 0 }, 39 + { 0x1717, 0x1717, 0x1717, 0 }, 40 + { 0x1818, 0x1818, 0x1818, 0 }, 41 + { 0x1919, 0x1919, 0x1919, 0 }, 42 + { 0x1a1a, 0x1a1a, 0x1a1a, 0 }, 43 + { 0x1b1b, 0x1b1b, 0x1b1b, 0 }, 44 + { 0x1c1c, 0x1c1c, 0x1c1c, 0 }, 45 + { 0x1d1d, 0x1d1d, 0x1d1d, 0 }, 46 + { 0x1e1e, 0x1e1e, 0x1e1e, 0 }, 47 + { 0x1f1f, 0x1f1f, 0x1f1f, 0 }, 48 + { 0x2020, 0x2020, 0x2020, 0 }, 49 + { 0x2121, 0x2121, 0x2121, 0 }, 50 + { 0x2222, 0x2222, 0x2222, 0 }, 51 + { 0x2323, 0x2323, 0x2323, 0 }, 52 + { 0x2424, 0x2424, 0x2424, 0 }, 53 + { 0x2525, 0x2525, 0x2525, 0 }, 54 + { 0x2626, 0x2626, 0x2626, 0 }, 55 + { 0x2727, 0x2727, 0x2727, 0 }, 56 + { 0x2828, 0x2828, 0x2828, 0 }, 57 + { 0x2929, 0x2929, 0x2929, 0 }, 58 + { 0x2a2a, 0x2a2a, 0x2a2a, 0 }, 59 + { 0x2b2b, 0x2b2b, 0x2b2b, 0 }, 60 + { 0x2c2c, 0x2c2c, 0x2c2c, 0 }, 61 + { 0x2d2d, 
0x2d2d, 0x2d2d, 0 }, 62 + { 0x2e2e, 0x2e2e, 0x2e2e, 0 }, 63 + { 0x2f2f, 0x2f2f, 0x2f2f, 0 }, 64 + { 0x3030, 0x3030, 0x3030, 0 }, 65 + { 0x3131, 0x3131, 0x3131, 0 }, 66 + { 0x3232, 0x3232, 0x3232, 0 }, 67 + { 0x3333, 0x3333, 0x3333, 0 }, 68 + { 0x3434, 0x3434, 0x3434, 0 }, 69 + { 0x3535, 0x3535, 0x3535, 0 }, 70 + { 0x3636, 0x3636, 0x3636, 0 }, 71 + { 0x3737, 0x3737, 0x3737, 0 }, 72 + { 0x3838, 0x3838, 0x3838, 0 }, 73 + { 0x3939, 0x3939, 0x3939, 0 }, 74 + { 0x3a3a, 0x3a3a, 0x3a3a, 0 }, 75 + { 0x3b3b, 0x3b3b, 0x3b3b, 0 }, 76 + { 0x3c3c, 0x3c3c, 0x3c3c, 0 }, 77 + { 0x3d3d, 0x3d3d, 0x3d3d, 0 }, 78 + { 0x3e3e, 0x3e3e, 0x3e3e, 0 }, 79 + { 0x3f3f, 0x3f3f, 0x3f3f, 0 }, 80 + { 0x4040, 0x4040, 0x4040, 0 }, 81 + { 0x4141, 0x4141, 0x4141, 0 }, 82 + { 0x4242, 0x4242, 0x4242, 0 }, 83 + { 0x4343, 0x4343, 0x4343, 0 }, 84 + { 0x4444, 0x4444, 0x4444, 0 }, 85 + { 0x4545, 0x4545, 0x4545, 0 }, 86 + { 0x4646, 0x4646, 0x4646, 0 }, 87 + { 0x4747, 0x4747, 0x4747, 0 }, 88 + { 0x4848, 0x4848, 0x4848, 0 }, 89 + { 0x4949, 0x4949, 0x4949, 0 }, 90 + { 0x4a4a, 0x4a4a, 0x4a4a, 0 }, 91 + { 0x4b4b, 0x4b4b, 0x4b4b, 0 }, 92 + { 0x4c4c, 0x4c4c, 0x4c4c, 0 }, 93 + { 0x4d4d, 0x4d4d, 0x4d4d, 0 }, 94 + { 0x4e4e, 0x4e4e, 0x4e4e, 0 }, 95 + { 0x4f4f, 0x4f4f, 0x4f4f, 0 }, 96 + { 0x5050, 0x5050, 0x5050, 0 }, 97 + { 0x5151, 0x5151, 0x5151, 0 }, 98 + { 0x5252, 0x5252, 0x5252, 0 }, 99 + { 0x5353, 0x5353, 0x5353, 0 }, 100 + { 0x5454, 0x5454, 0x5454, 0 }, 101 + { 0x5555, 0x5555, 0x5555, 0 }, 102 + { 0x5656, 0x5656, 0x5656, 0 }, 103 + { 0x5757, 0x5757, 0x5757, 0 }, 104 + { 0x5858, 0x5858, 0x5858, 0 }, 105 + { 0x5959, 0x5959, 0x5959, 0 }, 106 + { 0x5a5a, 0x5a5a, 0x5a5a, 0 }, 107 + { 0x5b5b, 0x5b5b, 0x5b5b, 0 }, 108 + { 0x5c5c, 0x5c5c, 0x5c5c, 0 }, 109 + { 0x5d5d, 0x5d5d, 0x5d5d, 0 }, 110 + { 0x5e5e, 0x5e5e, 0x5e5e, 0 }, 111 + { 0x5f5f, 0x5f5f, 0x5f5f, 0 }, 112 + { 0x6060, 0x6060, 0x6060, 0 }, 113 + { 0x6161, 0x6161, 0x6161, 0 }, 114 + { 0x6262, 0x6262, 0x6262, 0 }, 115 + { 0x6363, 0x6363, 0x6363, 0 }, 116 + { 0x6464, 
0x6464, 0x6464, 0 }, 117 + { 0x6565, 0x6565, 0x6565, 0 }, 118 + { 0x6666, 0x6666, 0x6666, 0 }, 119 + { 0x6767, 0x6767, 0x6767, 0 }, 120 + { 0x6868, 0x6868, 0x6868, 0 }, 121 + { 0x6969, 0x6969, 0x6969, 0 }, 122 + { 0x6a6a, 0x6a6a, 0x6a6a, 0 }, 123 + { 0x6b6b, 0x6b6b, 0x6b6b, 0 }, 124 + { 0x6c6c, 0x6c6c, 0x6c6c, 0 }, 125 + { 0x6d6d, 0x6d6d, 0x6d6d, 0 }, 126 + { 0x6e6e, 0x6e6e, 0x6e6e, 0 }, 127 + { 0x6f6f, 0x6f6f, 0x6f6f, 0 }, 128 + { 0x7070, 0x7070, 0x7070, 0 }, 129 + { 0x7171, 0x7171, 0x7171, 0 }, 130 + { 0x7272, 0x7272, 0x7272, 0 }, 131 + { 0x7373, 0x7373, 0x7373, 0 }, 132 + { 0x7474, 0x7474, 0x7474, 0 }, 133 + { 0x7575, 0x7575, 0x7575, 0 }, 134 + { 0x7676, 0x7676, 0x7676, 0 }, 135 + { 0x7777, 0x7777, 0x7777, 0 }, 136 + { 0x7878, 0x7878, 0x7878, 0 }, 137 + { 0x7979, 0x7979, 0x7979, 0 }, 138 + { 0x7a7a, 0x7a7a, 0x7a7a, 0 }, 139 + { 0x7b7b, 0x7b7b, 0x7b7b, 0 }, 140 + { 0x7c7c, 0x7c7c, 0x7c7c, 0 }, 141 + { 0x7d7d, 0x7d7d, 0x7d7d, 0 }, 142 + { 0x7e7e, 0x7e7e, 0x7e7e, 0 }, 143 + { 0x7f7f, 0x7f7f, 0x7f7f, 0 }, 144 + { 0x8080, 0x8080, 0x8080, 0 }, 145 + { 0x8181, 0x8181, 0x8181, 0 }, 146 + { 0x8282, 0x8282, 0x8282, 0 }, 147 + { 0x8383, 0x8383, 0x8383, 0 }, 148 + { 0x8484, 0x8484, 0x8484, 0 }, 149 + { 0x8585, 0x8585, 0x8585, 0 }, 150 + { 0x8686, 0x8686, 0x8686, 0 }, 151 + { 0x8787, 0x8787, 0x8787, 0 }, 152 + { 0x8888, 0x8888, 0x8888, 0 }, 153 + { 0x8989, 0x8989, 0x8989, 0 }, 154 + { 0x8a8a, 0x8a8a, 0x8a8a, 0 }, 155 + { 0x8b8b, 0x8b8b, 0x8b8b, 0 }, 156 + { 0x8c8c, 0x8c8c, 0x8c8c, 0 }, 157 + { 0x8d8d, 0x8d8d, 0x8d8d, 0 }, 158 + { 0x8e8e, 0x8e8e, 0x8e8e, 0 }, 159 + { 0x8f8f, 0x8f8f, 0x8f8f, 0 }, 160 + { 0x9090, 0x9090, 0x9090, 0 }, 161 + { 0x9191, 0x9191, 0x9191, 0 }, 162 + { 0x9292, 0x9292, 0x9292, 0 }, 163 + { 0x9393, 0x9393, 0x9393, 0 }, 164 + { 0x9494, 0x9494, 0x9494, 0 }, 165 + { 0x9595, 0x9595, 0x9595, 0 }, 166 + { 0x9696, 0x9696, 0x9696, 0 }, 167 + { 0x9797, 0x9797, 0x9797, 0 }, 168 + { 0x9898, 0x9898, 0x9898, 0 }, 169 + { 0x9999, 0x9999, 0x9999, 0 }, 170 + { 0x9a9a, 
0x9a9a, 0x9a9a, 0 }, 171 + { 0x9b9b, 0x9b9b, 0x9b9b, 0 }, 172 + { 0x9c9c, 0x9c9c, 0x9c9c, 0 }, 173 + { 0x9d9d, 0x9d9d, 0x9d9d, 0 }, 174 + { 0x9e9e, 0x9e9e, 0x9e9e, 0 }, 175 + { 0x9f9f, 0x9f9f, 0x9f9f, 0 }, 176 + { 0xa0a0, 0xa0a0, 0xa0a0, 0 }, 177 + { 0xa1a1, 0xa1a1, 0xa1a1, 0 }, 178 + { 0xa2a2, 0xa2a2, 0xa2a2, 0 }, 179 + { 0xa3a3, 0xa3a3, 0xa3a3, 0 }, 180 + { 0xa4a4, 0xa4a4, 0xa4a4, 0 }, 181 + { 0xa5a5, 0xa5a5, 0xa5a5, 0 }, 182 + { 0xa6a6, 0xa6a6, 0xa6a6, 0 }, 183 + { 0xa7a7, 0xa7a7, 0xa7a7, 0 }, 184 + { 0xa8a8, 0xa8a8, 0xa8a8, 0 }, 185 + { 0xa9a9, 0xa9a9, 0xa9a9, 0 }, 186 + { 0xaaaa, 0xaaaa, 0xaaaa, 0 }, 187 + { 0xabab, 0xabab, 0xabab, 0 }, 188 + { 0xacac, 0xacac, 0xacac, 0 }, 189 + { 0xadad, 0xadad, 0xadad, 0 }, 190 + { 0xaeae, 0xaeae, 0xaeae, 0 }, 191 + { 0xafaf, 0xafaf, 0xafaf, 0 }, 192 + { 0xb0b0, 0xb0b0, 0xb0b0, 0 }, 193 + { 0xb1b1, 0xb1b1, 0xb1b1, 0 }, 194 + { 0xb2b2, 0xb2b2, 0xb2b2, 0 }, 195 + { 0xb3b3, 0xb3b3, 0xb3b3, 0 }, 196 + { 0xb4b4, 0xb4b4, 0xb4b4, 0 }, 197 + { 0xb5b5, 0xb5b5, 0xb5b5, 0 }, 198 + { 0xb6b6, 0xb6b6, 0xb6b6, 0 }, 199 + { 0xb7b7, 0xb7b7, 0xb7b7, 0 }, 200 + { 0xb8b8, 0xb8b8, 0xb8b8, 0 }, 201 + { 0xb9b9, 0xb9b9, 0xb9b9, 0 }, 202 + { 0xbaba, 0xbaba, 0xbaba, 0 }, 203 + { 0xbbbb, 0xbbbb, 0xbbbb, 0 }, 204 + { 0xbcbc, 0xbcbc, 0xbcbc, 0 }, 205 + { 0xbdbd, 0xbdbd, 0xbdbd, 0 }, 206 + { 0xbebe, 0xbebe, 0xbebe, 0 }, 207 + { 0xbfbf, 0xbfbf, 0xbfbf, 0 }, 208 + { 0xc0c0, 0xc0c0, 0xc0c0, 0 }, 209 + { 0xc1c1, 0xc1c1, 0xc1c1, 0 }, 210 + { 0xc2c2, 0xc2c2, 0xc2c2, 0 }, 211 + { 0xc3c3, 0xc3c3, 0xc3c3, 0 }, 212 + { 0xc4c4, 0xc4c4, 0xc4c4, 0 }, 213 + { 0xc5c5, 0xc5c5, 0xc5c5, 0 }, 214 + { 0xc6c6, 0xc6c6, 0xc6c6, 0 }, 215 + { 0xc7c7, 0xc7c7, 0xc7c7, 0 }, 216 + { 0xc8c8, 0xc8c8, 0xc8c8, 0 }, 217 + { 0xc9c9, 0xc9c9, 0xc9c9, 0 }, 218 + { 0xcaca, 0xcaca, 0xcaca, 0 }, 219 + { 0xcbcb, 0xcbcb, 0xcbcb, 0 }, 220 + { 0xcccc, 0xcccc, 0xcccc, 0 }, 221 + { 0xcdcd, 0xcdcd, 0xcdcd, 0 }, 222 + { 0xcece, 0xcece, 0xcece, 0 }, 223 + { 0xcfcf, 0xcfcf, 0xcfcf, 0 }, 224 + { 0xd0d0, 
0xd0d0, 0xd0d0, 0 }, 225 + { 0xd1d1, 0xd1d1, 0xd1d1, 0 }, 226 + { 0xd2d2, 0xd2d2, 0xd2d2, 0 }, 227 + { 0xd3d3, 0xd3d3, 0xd3d3, 0 }, 228 + { 0xd4d4, 0xd4d4, 0xd4d4, 0 }, 229 + { 0xd5d5, 0xd5d5, 0xd5d5, 0 }, 230 + { 0xd6d6, 0xd6d6, 0xd6d6, 0 }, 231 + { 0xd7d7, 0xd7d7, 0xd7d7, 0 }, 232 + { 0xd8d8, 0xd8d8, 0xd8d8, 0 }, 233 + { 0xd9d9, 0xd9d9, 0xd9d9, 0 }, 234 + { 0xdada, 0xdada, 0xdada, 0 }, 235 + { 0xdbdb, 0xdbdb, 0xdbdb, 0 }, 236 + { 0xdcdc, 0xdcdc, 0xdcdc, 0 }, 237 + { 0xdddd, 0xdddd, 0xdddd, 0 }, 238 + { 0xdede, 0xdede, 0xdede, 0 }, 239 + { 0xdfdf, 0xdfdf, 0xdfdf, 0 }, 240 + { 0xe0e0, 0xe0e0, 0xe0e0, 0 }, 241 + { 0xe1e1, 0xe1e1, 0xe1e1, 0 }, 242 + { 0xe2e2, 0xe2e2, 0xe2e2, 0 }, 243 + { 0xe3e3, 0xe3e3, 0xe3e3, 0 }, 244 + { 0xe4e4, 0xe4e4, 0xe4e4, 0 }, 245 + { 0xe5e5, 0xe5e5, 0xe5e5, 0 }, 246 + { 0xe6e6, 0xe6e6, 0xe6e6, 0 }, 247 + { 0xe7e7, 0xe7e7, 0xe7e7, 0 }, 248 + { 0xe8e8, 0xe8e8, 0xe8e8, 0 }, 249 + { 0xe9e9, 0xe9e9, 0xe9e9, 0 }, 250 + { 0xeaea, 0xeaea, 0xeaea, 0 }, 251 + { 0xebeb, 0xebeb, 0xebeb, 0 }, 252 + { 0xecec, 0xecec, 0xecec, 0 }, 253 + { 0xeded, 0xeded, 0xeded, 0 }, 254 + { 0xeeee, 0xeeee, 0xeeee, 0 }, 255 + { 0xefef, 0xefef, 0xefef, 0 }, 256 + { 0xf0f0, 0xf0f0, 0xf0f0, 0 }, 257 + { 0xf1f1, 0xf1f1, 0xf1f1, 0 }, 258 + { 0xf2f2, 0xf2f2, 0xf2f2, 0 }, 259 + { 0xf3f3, 0xf3f3, 0xf3f3, 0 }, 260 + { 0xf4f4, 0xf4f4, 0xf4f4, 0 }, 261 + { 0xf5f5, 0xf5f5, 0xf5f5, 0 }, 262 + { 0xf6f6, 0xf6f6, 0xf6f6, 0 }, 263 + { 0xf7f7, 0xf7f7, 0xf7f7, 0 }, 264 + { 0xf8f8, 0xf8f8, 0xf8f8, 0 }, 265 + { 0xf9f9, 0xf9f9, 0xf9f9, 0 }, 266 + { 0xfafa, 0xfafa, 0xfafa, 0 }, 267 + { 0xfbfb, 0xfbfb, 0xfbfb, 0 }, 268 + { 0xfcfc, 0xfcfc, 0xfcfc, 0 }, 269 + { 0xfdfd, 0xfdfd, 0xfdfd, 0 }, 270 + { 0xfefe, 0xfefe, 0xfefe, 0 }, 271 + { 0xffff, 0xffff, 0xffff, 0 }, 272 + }; 273 + 274 + const struct vkms_color_lut linear_eotf = { 275 + .base = linear_array, 276 + .lut_length = LUT_SIZE, 277 + .channel_value2index_ratio = 0xff00ffll 278 + }; 279 + EXPORT_SYMBOL(linear_eotf); 280 + 281 + static struct 
drm_color_lut srgb_array[LUT_SIZE] = { 282 + { 0x0, 0x0, 0x0, 0 }, 283 + { 0x13, 0x13, 0x13, 0 }, 284 + { 0x27, 0x27, 0x27, 0 }, 285 + { 0x3b, 0x3b, 0x3b, 0 }, 286 + { 0x4f, 0x4f, 0x4f, 0 }, 287 + { 0x63, 0x63, 0x63, 0 }, 288 + { 0x77, 0x77, 0x77, 0 }, 289 + { 0x8b, 0x8b, 0x8b, 0 }, 290 + { 0x9f, 0x9f, 0x9f, 0 }, 291 + { 0xb3, 0xb3, 0xb3, 0 }, 292 + { 0xc6, 0xc6, 0xc6, 0 }, 293 + { 0xdb, 0xdb, 0xdb, 0 }, 294 + { 0xf0, 0xf0, 0xf0, 0 }, 295 + { 0x107, 0x107, 0x107, 0 }, 296 + { 0x11f, 0x11f, 0x11f, 0 }, 297 + { 0x139, 0x139, 0x139, 0 }, 298 + { 0x153, 0x153, 0x153, 0 }, 299 + { 0x16f, 0x16f, 0x16f, 0 }, 300 + { 0x18c, 0x18c, 0x18c, 0 }, 301 + { 0x1aa, 0x1aa, 0x1aa, 0 }, 302 + { 0x1ca, 0x1ca, 0x1ca, 0 }, 303 + { 0x1eb, 0x1eb, 0x1eb, 0 }, 304 + { 0x20d, 0x20d, 0x20d, 0 }, 305 + { 0x231, 0x231, 0x231, 0 }, 306 + { 0x256, 0x256, 0x256, 0 }, 307 + { 0x27d, 0x27d, 0x27d, 0 }, 308 + { 0x2a4, 0x2a4, 0x2a4, 0 }, 309 + { 0x2ce, 0x2ce, 0x2ce, 0 }, 310 + { 0x2f9, 0x2f9, 0x2f9, 0 }, 311 + { 0x325, 0x325, 0x325, 0 }, 312 + { 0x352, 0x352, 0x352, 0 }, 313 + { 0x381, 0x381, 0x381, 0 }, 314 + { 0x3b2, 0x3b2, 0x3b2, 0 }, 315 + { 0x3e4, 0x3e4, 0x3e4, 0 }, 316 + { 0x418, 0x418, 0x418, 0 }, 317 + { 0x44d, 0x44d, 0x44d, 0 }, 318 + { 0x484, 0x484, 0x484, 0 }, 319 + { 0x4bc, 0x4bc, 0x4bc, 0 }, 320 + { 0x4f6, 0x4f6, 0x4f6, 0 }, 321 + { 0x531, 0x531, 0x531, 0 }, 322 + { 0x56e, 0x56e, 0x56e, 0 }, 323 + { 0x5ad, 0x5ad, 0x5ad, 0 }, 324 + { 0x5ed, 0x5ed, 0x5ed, 0 }, 325 + { 0x62f, 0x62f, 0x62f, 0 }, 326 + { 0x672, 0x672, 0x672, 0 }, 327 + { 0x6b7, 0x6b7, 0x6b7, 0 }, 328 + { 0x6fe, 0x6fe, 0x6fe, 0 }, 329 + { 0x746, 0x746, 0x746, 0 }, 330 + { 0x791, 0x791, 0x791, 0 }, 331 + { 0x7dc, 0x7dc, 0x7dc, 0 }, 332 + { 0x82a, 0x82a, 0x82a, 0 }, 333 + { 0x879, 0x879, 0x879, 0 }, 334 + { 0x8ca, 0x8ca, 0x8ca, 0 }, 335 + { 0x91d, 0x91d, 0x91d, 0 }, 336 + { 0x971, 0x971, 0x971, 0 }, 337 + { 0x9c7, 0x9c7, 0x9c7, 0 }, 338 + { 0xa1f, 0xa1f, 0xa1f, 0 }, 339 + { 0xa79, 0xa79, 0xa79, 0 }, 340 + { 0xad4, 0xad4, 0xad4, 0 
}, 341 + { 0xb32, 0xb32, 0xb32, 0 }, 342 + { 0xb91, 0xb91, 0xb91, 0 }, 343 + { 0xbf2, 0xbf2, 0xbf2, 0 }, 344 + { 0xc54, 0xc54, 0xc54, 0 }, 345 + { 0xcb9, 0xcb9, 0xcb9, 0 }, 346 + { 0xd1f, 0xd1f, 0xd1f, 0 }, 347 + { 0xd88, 0xd88, 0xd88, 0 }, 348 + { 0xdf2, 0xdf2, 0xdf2, 0 }, 349 + { 0xe5e, 0xe5e, 0xe5e, 0 }, 350 + { 0xecc, 0xecc, 0xecc, 0 }, 351 + { 0xf3c, 0xf3c, 0xf3c, 0 }, 352 + { 0xfad, 0xfad, 0xfad, 0 }, 353 + { 0x1021, 0x1021, 0x1021, 0 }, 354 + { 0x1096, 0x1096, 0x1096, 0 }, 355 + { 0x110e, 0x110e, 0x110e, 0 }, 356 + { 0x1187, 0x1187, 0x1187, 0 }, 357 + { 0x1203, 0x1203, 0x1203, 0 }, 358 + { 0x1280, 0x1280, 0x1280, 0 }, 359 + { 0x12ff, 0x12ff, 0x12ff, 0 }, 360 + { 0x1380, 0x1380, 0x1380, 0 }, 361 + { 0x1404, 0x1404, 0x1404, 0 }, 362 + { 0x1489, 0x1489, 0x1489, 0 }, 363 + { 0x1510, 0x1510, 0x1510, 0 }, 364 + { 0x1599, 0x1599, 0x1599, 0 }, 365 + { 0x1624, 0x1624, 0x1624, 0 }, 366 + { 0x16b2, 0x16b2, 0x16b2, 0 }, 367 + { 0x1741, 0x1741, 0x1741, 0 }, 368 + { 0x17d2, 0x17d2, 0x17d2, 0 }, 369 + { 0x1865, 0x1865, 0x1865, 0 }, 370 + { 0x18fb, 0x18fb, 0x18fb, 0 }, 371 + { 0x1992, 0x1992, 0x1992, 0 }, 372 + { 0x1a2c, 0x1a2c, 0x1a2c, 0 }, 373 + { 0x1ac8, 0x1ac8, 0x1ac8, 0 }, 374 + { 0x1b65, 0x1b65, 0x1b65, 0 }, 375 + { 0x1c05, 0x1c05, 0x1c05, 0 }, 376 + { 0x1ca7, 0x1ca7, 0x1ca7, 0 }, 377 + { 0x1d4b, 0x1d4b, 0x1d4b, 0 }, 378 + { 0x1df1, 0x1df1, 0x1df1, 0 }, 379 + { 0x1e99, 0x1e99, 0x1e99, 0 }, 380 + { 0x1f44, 0x1f44, 0x1f44, 0 }, 381 + { 0x1ff0, 0x1ff0, 0x1ff0, 0 }, 382 + { 0x209f, 0x209f, 0x209f, 0 }, 383 + { 0x2150, 0x2150, 0x2150, 0 }, 384 + { 0x2203, 0x2203, 0x2203, 0 }, 385 + { 0x22b8, 0x22b8, 0x22b8, 0 }, 386 + { 0x2370, 0x2370, 0x2370, 0 }, 387 + { 0x2429, 0x2429, 0x2429, 0 }, 388 + { 0x24e5, 0x24e5, 0x24e5, 0 }, 389 + { 0x25a3, 0x25a3, 0x25a3, 0 }, 390 + { 0x2663, 0x2663, 0x2663, 0 }, 391 + { 0x2726, 0x2726, 0x2726, 0 }, 392 + { 0x27ea, 0x27ea, 0x27ea, 0 }, 393 + { 0x28b1, 0x28b1, 0x28b1, 0 }, 394 + { 0x297a, 0x297a, 0x297a, 0 }, 395 + { 0x2a45, 0x2a45, 0x2a45, 0 
}, 396 + { 0x2b13, 0x2b13, 0x2b13, 0 }, 397 + { 0x2be3, 0x2be3, 0x2be3, 0 }, 398 + { 0x2cb5, 0x2cb5, 0x2cb5, 0 }, 399 + { 0x2d89, 0x2d89, 0x2d89, 0 }, 400 + { 0x2e60, 0x2e60, 0x2e60, 0 }, 401 + { 0x2f39, 0x2f39, 0x2f39, 0 }, 402 + { 0x3014, 0x3014, 0x3014, 0 }, 403 + { 0x30f2, 0x30f2, 0x30f2, 0 }, 404 + { 0x31d2, 0x31d2, 0x31d2, 0 }, 405 + { 0x32b4, 0x32b4, 0x32b4, 0 }, 406 + { 0x3398, 0x3398, 0x3398, 0 }, 407 + { 0x347f, 0x347f, 0x347f, 0 }, 408 + { 0x3569, 0x3569, 0x3569, 0 }, 409 + { 0x3654, 0x3654, 0x3654, 0 }, 410 + { 0x3742, 0x3742, 0x3742, 0 }, 411 + { 0x3832, 0x3832, 0x3832, 0 }, 412 + { 0x3925, 0x3925, 0x3925, 0 }, 413 + { 0x3a1a, 0x3a1a, 0x3a1a, 0 }, 414 + { 0x3b11, 0x3b11, 0x3b11, 0 }, 415 + { 0x3c0b, 0x3c0b, 0x3c0b, 0 }, 416 + { 0x3d07, 0x3d07, 0x3d07, 0 }, 417 + { 0x3e05, 0x3e05, 0x3e05, 0 }, 418 + { 0x3f06, 0x3f06, 0x3f06, 0 }, 419 + { 0x400a, 0x400a, 0x400a, 0 }, 420 + { 0x410f, 0x410f, 0x410f, 0 }, 421 + { 0x4218, 0x4218, 0x4218, 0 }, 422 + { 0x4322, 0x4322, 0x4322, 0 }, 423 + { 0x442f, 0x442f, 0x442f, 0 }, 424 + { 0x453f, 0x453f, 0x453f, 0 }, 425 + { 0x4650, 0x4650, 0x4650, 0 }, 426 + { 0x4765, 0x4765, 0x4765, 0 }, 427 + { 0x487c, 0x487c, 0x487c, 0 }, 428 + { 0x4995, 0x4995, 0x4995, 0 }, 429 + { 0x4ab1, 0x4ab1, 0x4ab1, 0 }, 430 + { 0x4bcf, 0x4bcf, 0x4bcf, 0 }, 431 + { 0x4cf0, 0x4cf0, 0x4cf0, 0 }, 432 + { 0x4e13, 0x4e13, 0x4e13, 0 }, 433 + { 0x4f39, 0x4f39, 0x4f39, 0 }, 434 + { 0x5061, 0x5061, 0x5061, 0 }, 435 + { 0x518b, 0x518b, 0x518b, 0 }, 436 + { 0x52b9, 0x52b9, 0x52b9, 0 }, 437 + { 0x53e8, 0x53e8, 0x53e8, 0 }, 438 + { 0x551b, 0x551b, 0x551b, 0 }, 439 + { 0x5650, 0x5650, 0x5650, 0 }, 440 + { 0x5787, 0x5787, 0x5787, 0 }, 441 + { 0x58c1, 0x58c1, 0x58c1, 0 }, 442 + { 0x59fd, 0x59fd, 0x59fd, 0 }, 443 + { 0x5b3c, 0x5b3c, 0x5b3c, 0 }, 444 + { 0x5c7e, 0x5c7e, 0x5c7e, 0 }, 445 + { 0x5dc2, 0x5dc2, 0x5dc2, 0 }, 446 + { 0x5f09, 0x5f09, 0x5f09, 0 }, 447 + { 0x6052, 0x6052, 0x6052, 0 }, 448 + { 0x619e, 0x619e, 0x619e, 0 }, 449 + { 0x62ec, 0x62ec, 0x62ec, 0 
}, 450 + { 0x643d, 0x643d, 0x643d, 0 }, 451 + { 0x6591, 0x6591, 0x6591, 0 }, 452 + { 0x66e7, 0x66e7, 0x66e7, 0 }, 453 + { 0x6840, 0x6840, 0x6840, 0 }, 454 + { 0x699b, 0x699b, 0x699b, 0 }, 455 + { 0x6afa, 0x6afa, 0x6afa, 0 }, 456 + { 0x6c5a, 0x6c5a, 0x6c5a, 0 }, 457 + { 0x6dbe, 0x6dbe, 0x6dbe, 0 }, 458 + { 0x6f24, 0x6f24, 0x6f24, 0 }, 459 + { 0x708c, 0x708c, 0x708c, 0 }, 460 + { 0x71f8, 0x71f8, 0x71f8, 0 }, 461 + { 0x7366, 0x7366, 0x7366, 0 }, 462 + { 0x74d6, 0x74d6, 0x74d6, 0 }, 463 + { 0x764a, 0x764a, 0x764a, 0 }, 464 + { 0x77c0, 0x77c0, 0x77c0, 0 }, 465 + { 0x7938, 0x7938, 0x7938, 0 }, 466 + { 0x7ab4, 0x7ab4, 0x7ab4, 0 }, 467 + { 0x7c32, 0x7c32, 0x7c32, 0 }, 468 + { 0x7db3, 0x7db3, 0x7db3, 0 }, 469 + { 0x7f36, 0x7f36, 0x7f36, 0 }, 470 + { 0x80bc, 0x80bc, 0x80bc, 0 }, 471 + { 0x8245, 0x8245, 0x8245, 0 }, 472 + { 0x83d1, 0x83d1, 0x83d1, 0 }, 473 + { 0x855f, 0x855f, 0x855f, 0 }, 474 + { 0x86f0, 0x86f0, 0x86f0, 0 }, 475 + { 0x8884, 0x8884, 0x8884, 0 }, 476 + { 0x8a1a, 0x8a1a, 0x8a1a, 0 }, 477 + { 0x8bb4, 0x8bb4, 0x8bb4, 0 }, 478 + { 0x8d50, 0x8d50, 0x8d50, 0 }, 479 + { 0x8eee, 0x8eee, 0x8eee, 0 }, 480 + { 0x9090, 0x9090, 0x9090, 0 }, 481 + { 0x9234, 0x9234, 0x9234, 0 }, 482 + { 0x93db, 0x93db, 0x93db, 0 }, 483 + { 0x9585, 0x9585, 0x9585, 0 }, 484 + { 0x9732, 0x9732, 0x9732, 0 }, 485 + { 0x98e1, 0x98e1, 0x98e1, 0 }, 486 + { 0x9a93, 0x9a93, 0x9a93, 0 }, 487 + { 0x9c48, 0x9c48, 0x9c48, 0 }, 488 + { 0x9e00, 0x9e00, 0x9e00, 0 }, 489 + { 0x9fbb, 0x9fbb, 0x9fbb, 0 }, 490 + { 0xa178, 0xa178, 0xa178, 0 }, 491 + { 0xa338, 0xa338, 0xa338, 0 }, 492 + { 0xa4fb, 0xa4fb, 0xa4fb, 0 }, 493 + { 0xa6c1, 0xa6c1, 0xa6c1, 0 }, 494 + { 0xa88a, 0xa88a, 0xa88a, 0 }, 495 + { 0xaa56, 0xaa56, 0xaa56, 0 }, 496 + { 0xac24, 0xac24, 0xac24, 0 }, 497 + { 0xadf5, 0xadf5, 0xadf5, 0 }, 498 + { 0xafc9, 0xafc9, 0xafc9, 0 }, 499 + { 0xb1a0, 0xb1a0, 0xb1a0, 0 }, 500 + { 0xb37a, 0xb37a, 0xb37a, 0 }, 501 + { 0xb557, 0xb557, 0xb557, 0 }, 502 + { 0xb736, 0xb736, 0xb736, 0 }, 503 + { 0xb919, 0xb919, 0xb919, 0 
}, 504 + { 0xbafe, 0xbafe, 0xbafe, 0 }, 505 + { 0xbce6, 0xbce6, 0xbce6, 0 }, 506 + { 0xbed2, 0xbed2, 0xbed2, 0 }, 507 + { 0xc0c0, 0xc0c0, 0xc0c0, 0 }, 508 + { 0xc2b0, 0xc2b0, 0xc2b0, 0 }, 509 + { 0xc4a4, 0xc4a4, 0xc4a4, 0 }, 510 + { 0xc69b, 0xc69b, 0xc69b, 0 }, 511 + { 0xc895, 0xc895, 0xc895, 0 }, 512 + { 0xca91, 0xca91, 0xca91, 0 }, 513 + { 0xcc91, 0xcc91, 0xcc91, 0 }, 514 + { 0xce93, 0xce93, 0xce93, 0 }, 515 + { 0xd098, 0xd098, 0xd098, 0 }, 516 + { 0xd2a1, 0xd2a1, 0xd2a1, 0 }, 517 + { 0xd4ac, 0xd4ac, 0xd4ac, 0 }, 518 + { 0xd6ba, 0xd6ba, 0xd6ba, 0 }, 519 + { 0xd8cb, 0xd8cb, 0xd8cb, 0 }, 520 + { 0xdadf, 0xdadf, 0xdadf, 0 }, 521 + { 0xdcf7, 0xdcf7, 0xdcf7, 0 }, 522 + { 0xdf11, 0xdf11, 0xdf11, 0 }, 523 + { 0xe12e, 0xe12e, 0xe12e, 0 }, 524 + { 0xe34e, 0xe34e, 0xe34e, 0 }, 525 + { 0xe571, 0xe571, 0xe571, 0 }, 526 + { 0xe796, 0xe796, 0xe796, 0 }, 527 + { 0xe9bf, 0xe9bf, 0xe9bf, 0 }, 528 + { 0xebeb, 0xebeb, 0xebeb, 0 }, 529 + { 0xee1a, 0xee1a, 0xee1a, 0 }, 530 + { 0xf04c, 0xf04c, 0xf04c, 0 }, 531 + { 0xf281, 0xf281, 0xf281, 0 }, 532 + { 0xf4b9, 0xf4b9, 0xf4b9, 0 }, 533 + { 0xf6f4, 0xf6f4, 0xf6f4, 0 }, 534 + { 0xf932, 0xf932, 0xf932, 0 }, 535 + { 0xfb73, 0xfb73, 0xfb73, 0 }, 536 + { 0xfdb7, 0xfdb7, 0xfdb7, 0 }, 537 + { 0xffff, 0xffff, 0xffff, 0 }, 538 + }; 539 + 540 + const struct vkms_color_lut srgb_eotf = { 541 + .base = srgb_array, 542 + .lut_length = LUT_SIZE, 543 + .channel_value2index_ratio = 0xff00ffll 544 + }; 545 + EXPORT_SYMBOL(srgb_eotf); 546 + 547 + static struct drm_color_lut srgb_inv_array[LUT_SIZE] = { 548 + { 0x0, 0x0, 0x0, 0 }, 549 + { 0xcc2, 0xcc2, 0xcc2, 0 }, 550 + { 0x15be, 0x15be, 0x15be, 0 }, 551 + { 0x1c56, 0x1c56, 0x1c56, 0 }, 552 + { 0x21bd, 0x21bd, 0x21bd, 0 }, 553 + { 0x2666, 0x2666, 0x2666, 0 }, 554 + { 0x2a8a, 0x2a8a, 0x2a8a, 0 }, 555 + { 0x2e4c, 0x2e4c, 0x2e4c, 0 }, 556 + { 0x31c0, 0x31c0, 0x31c0, 0 }, 557 + { 0x34f6, 0x34f6, 0x34f6, 0 }, 558 + { 0x37f9, 0x37f9, 0x37f9, 0 }, 559 + { 0x3acf, 0x3acf, 0x3acf, 0 }, 560 + { 0x3d80, 0x3d80, 0x3d80, 
0 }, 561 + { 0x4010, 0x4010, 0x4010, 0 }, 562 + { 0x4284, 0x4284, 0x4284, 0 }, 563 + { 0x44dd, 0x44dd, 0x44dd, 0 }, 564 + { 0x4720, 0x4720, 0x4720, 0 }, 565 + { 0x494e, 0x494e, 0x494e, 0 }, 566 + { 0x4b69, 0x4b69, 0x4b69, 0 }, 567 + { 0x4d73, 0x4d73, 0x4d73, 0 }, 568 + { 0x4f6e, 0x4f6e, 0x4f6e, 0 }, 569 + { 0x5159, 0x5159, 0x5159, 0 }, 570 + { 0x5337, 0x5337, 0x5337, 0 }, 571 + { 0x5509, 0x5509, 0x5509, 0 }, 572 + { 0x56cf, 0x56cf, 0x56cf, 0 }, 573 + { 0x588a, 0x588a, 0x588a, 0 }, 574 + { 0x5a3b, 0x5a3b, 0x5a3b, 0 }, 575 + { 0x5be2, 0x5be2, 0x5be2, 0 }, 576 + { 0x5d80, 0x5d80, 0x5d80, 0 }, 577 + { 0x5f16, 0x5f16, 0x5f16, 0 }, 578 + { 0x60a4, 0x60a4, 0x60a4, 0 }, 579 + { 0x6229, 0x6229, 0x6229, 0 }, 580 + { 0x63a8, 0x63a8, 0x63a8, 0 }, 581 + { 0x6520, 0x6520, 0x6520, 0 }, 582 + { 0x6691, 0x6691, 0x6691, 0 }, 583 + { 0x67fc, 0x67fc, 0x67fc, 0 }, 584 + { 0x6961, 0x6961, 0x6961, 0 }, 585 + { 0x6ac0, 0x6ac0, 0x6ac0, 0 }, 586 + { 0x6c19, 0x6c19, 0x6c19, 0 }, 587 + { 0x6d6e, 0x6d6e, 0x6d6e, 0 }, 588 + { 0x6ebd, 0x6ebd, 0x6ebd, 0 }, 589 + { 0x7008, 0x7008, 0x7008, 0 }, 590 + { 0x714d, 0x714d, 0x714d, 0 }, 591 + { 0x728f, 0x728f, 0x728f, 0 }, 592 + { 0x73cc, 0x73cc, 0x73cc, 0 }, 593 + { 0x7504, 0x7504, 0x7504, 0 }, 594 + { 0x7639, 0x7639, 0x7639, 0 }, 595 + { 0x776a, 0x776a, 0x776a, 0 }, 596 + { 0x7897, 0x7897, 0x7897, 0 }, 597 + { 0x79c1, 0x79c1, 0x79c1, 0 }, 598 + { 0x7ae7, 0x7ae7, 0x7ae7, 0 }, 599 + { 0x7c09, 0x7c09, 0x7c09, 0 }, 600 + { 0x7d28, 0x7d28, 0x7d28, 0 }, 601 + { 0x7e44, 0x7e44, 0x7e44, 0 }, 602 + { 0x7f5d, 0x7f5d, 0x7f5d, 0 }, 603 + { 0x8073, 0x8073, 0x8073, 0 }, 604 + { 0x8186, 0x8186, 0x8186, 0 }, 605 + { 0x8296, 0x8296, 0x8296, 0 }, 606 + { 0x83a4, 0x83a4, 0x83a4, 0 }, 607 + { 0x84ae, 0x84ae, 0x84ae, 0 }, 608 + { 0x85b6, 0x85b6, 0x85b6, 0 }, 609 + { 0x86bc, 0x86bc, 0x86bc, 0 }, 610 + { 0x87bf, 0x87bf, 0x87bf, 0 }, 611 + { 0x88bf, 0x88bf, 0x88bf, 0 }, 612 + { 0x89be, 0x89be, 0x89be, 0 }, 613 + { 0x8ab9, 0x8ab9, 0x8ab9, 0 }, 614 + { 0x8bb3, 0x8bb3, 0x8bb3, 0 
}, 615 + { 0x8cab, 0x8cab, 0x8cab, 0 }, 616 + { 0x8da0, 0x8da0, 0x8da0, 0 }, 617 + { 0x8e93, 0x8e93, 0x8e93, 0 }, 618 + { 0x8f84, 0x8f84, 0x8f84, 0 }, 619 + { 0x9073, 0x9073, 0x9073, 0 }, 620 + { 0x9161, 0x9161, 0x9161, 0 }, 621 + { 0x924c, 0x924c, 0x924c, 0 }, 622 + { 0x9335, 0x9335, 0x9335, 0 }, 623 + { 0x941d, 0x941d, 0x941d, 0 }, 624 + { 0x9503, 0x9503, 0x9503, 0 }, 625 + { 0x95e7, 0x95e7, 0x95e7, 0 }, 626 + { 0x96c9, 0x96c9, 0x96c9, 0 }, 627 + { 0x97aa, 0x97aa, 0x97aa, 0 }, 628 + { 0x9889, 0x9889, 0x9889, 0 }, 629 + { 0x9966, 0x9966, 0x9966, 0 }, 630 + { 0x9a42, 0x9a42, 0x9a42, 0 }, 631 + { 0x9b1c, 0x9b1c, 0x9b1c, 0 }, 632 + { 0x9bf5, 0x9bf5, 0x9bf5, 0 }, 633 + { 0x9ccc, 0x9ccc, 0x9ccc, 0 }, 634 + { 0x9da1, 0x9da1, 0x9da1, 0 }, 635 + { 0x9e76, 0x9e76, 0x9e76, 0 }, 636 + { 0x9f49, 0x9f49, 0x9f49, 0 }, 637 + { 0xa01a, 0xa01a, 0xa01a, 0 }, 638 + { 0xa0ea, 0xa0ea, 0xa0ea, 0 }, 639 + { 0xa1b9, 0xa1b9, 0xa1b9, 0 }, 640 + { 0xa286, 0xa286, 0xa286, 0 }, 641 + { 0xa352, 0xa352, 0xa352, 0 }, 642 + { 0xa41d, 0xa41d, 0xa41d, 0 }, 643 + { 0xa4e7, 0xa4e7, 0xa4e7, 0 }, 644 + { 0xa5af, 0xa5af, 0xa5af, 0 }, 645 + { 0xa676, 0xa676, 0xa676, 0 }, 646 + { 0xa73c, 0xa73c, 0xa73c, 0 }, 647 + { 0xa801, 0xa801, 0xa801, 0 }, 648 + { 0xa8c5, 0xa8c5, 0xa8c5, 0 }, 649 + { 0xa987, 0xa987, 0xa987, 0 }, 650 + { 0xaa48, 0xaa48, 0xaa48, 0 }, 651 + { 0xab09, 0xab09, 0xab09, 0 }, 652 + { 0xabc8, 0xabc8, 0xabc8, 0 }, 653 + { 0xac86, 0xac86, 0xac86, 0 }, 654 + { 0xad43, 0xad43, 0xad43, 0 }, 655 + { 0xadff, 0xadff, 0xadff, 0 }, 656 + { 0xaeba, 0xaeba, 0xaeba, 0 }, 657 + { 0xaf74, 0xaf74, 0xaf74, 0 }, 658 + { 0xb02d, 0xb02d, 0xb02d, 0 }, 659 + { 0xb0e5, 0xb0e5, 0xb0e5, 0 }, 660 + { 0xb19c, 0xb19c, 0xb19c, 0 }, 661 + { 0xb252, 0xb252, 0xb252, 0 }, 662 + { 0xb307, 0xb307, 0xb307, 0 }, 663 + { 0xb3bb, 0xb3bb, 0xb3bb, 0 }, 664 + { 0xb46f, 0xb46f, 0xb46f, 0 }, 665 + { 0xb521, 0xb521, 0xb521, 0 }, 666 + { 0xb5d3, 0xb5d3, 0xb5d3, 0 }, 667 + { 0xb683, 0xb683, 0xb683, 0 }, 668 + { 0xb733, 0xb733, 0xb733, 0 
}, 669 + { 0xb7e2, 0xb7e2, 0xb7e2, 0 }, 670 + { 0xb890, 0xb890, 0xb890, 0 }, 671 + { 0xb93d, 0xb93d, 0xb93d, 0 }, 672 + { 0xb9ea, 0xb9ea, 0xb9ea, 0 }, 673 + { 0xba96, 0xba96, 0xba96, 0 }, 674 + { 0xbb40, 0xbb40, 0xbb40, 0 }, 675 + { 0xbbea, 0xbbea, 0xbbea, 0 }, 676 + { 0xbc94, 0xbc94, 0xbc94, 0 }, 677 + { 0xbd3c, 0xbd3c, 0xbd3c, 0 }, 678 + { 0xbde4, 0xbde4, 0xbde4, 0 }, 679 + { 0xbe8b, 0xbe8b, 0xbe8b, 0 }, 680 + { 0xbf31, 0xbf31, 0xbf31, 0 }, 681 + { 0xbfd7, 0xbfd7, 0xbfd7, 0 }, 682 + { 0xc07b, 0xc07b, 0xc07b, 0 }, 683 + { 0xc120, 0xc120, 0xc120, 0 }, 684 + { 0xc1c3, 0xc1c3, 0xc1c3, 0 }, 685 + { 0xc266, 0xc266, 0xc266, 0 }, 686 + { 0xc308, 0xc308, 0xc308, 0 }, 687 + { 0xc3a9, 0xc3a9, 0xc3a9, 0 }, 688 + { 0xc449, 0xc449, 0xc449, 0 }, 689 + { 0xc4e9, 0xc4e9, 0xc4e9, 0 }, 690 + { 0xc589, 0xc589, 0xc589, 0 }, 691 + { 0xc627, 0xc627, 0xc627, 0 }, 692 + { 0xc6c5, 0xc6c5, 0xc6c5, 0 }, 693 + { 0xc763, 0xc763, 0xc763, 0 }, 694 + { 0xc7ff, 0xc7ff, 0xc7ff, 0 }, 695 + { 0xc89b, 0xc89b, 0xc89b, 0 }, 696 + { 0xc937, 0xc937, 0xc937, 0 }, 697 + { 0xc9d2, 0xc9d2, 0xc9d2, 0 }, 698 + { 0xca6c, 0xca6c, 0xca6c, 0 }, 699 + { 0xcb06, 0xcb06, 0xcb06, 0 }, 700 + { 0xcb9f, 0xcb9f, 0xcb9f, 0 }, 701 + { 0xcc37, 0xcc37, 0xcc37, 0 }, 702 + { 0xcccf, 0xcccf, 0xcccf, 0 }, 703 + { 0xcd66, 0xcd66, 0xcd66, 0 }, 704 + { 0xcdfd, 0xcdfd, 0xcdfd, 0 }, 705 + { 0xce93, 0xce93, 0xce93, 0 }, 706 + { 0xcf29, 0xcf29, 0xcf29, 0 }, 707 + { 0xcfbe, 0xcfbe, 0xcfbe, 0 }, 708 + { 0xd053, 0xd053, 0xd053, 0 }, 709 + { 0xd0e7, 0xd0e7, 0xd0e7, 0 }, 710 + { 0xd17a, 0xd17a, 0xd17a, 0 }, 711 + { 0xd20d, 0xd20d, 0xd20d, 0 }, 712 + { 0xd2a0, 0xd2a0, 0xd2a0, 0 }, 713 + { 0xd331, 0xd331, 0xd331, 0 }, 714 + { 0xd3c3, 0xd3c3, 0xd3c3, 0 }, 715 + { 0xd454, 0xd454, 0xd454, 0 }, 716 + { 0xd4e4, 0xd4e4, 0xd4e4, 0 }, 717 + { 0xd574, 0xd574, 0xd574, 0 }, 718 + { 0xd603, 0xd603, 0xd603, 0 }, 719 + { 0xd692, 0xd692, 0xd692, 0 }, 720 + { 0xd720, 0xd720, 0xd720, 0 }, 721 + { 0xd7ae, 0xd7ae, 0xd7ae, 0 }, 722 + { 0xd83c, 0xd83c, 0xd83c, 0 
}, 723 + { 0xd8c9, 0xd8c9, 0xd8c9, 0 }, 724 + { 0xd955, 0xd955, 0xd955, 0 }, 725 + { 0xd9e1, 0xd9e1, 0xd9e1, 0 }, 726 + { 0xda6d, 0xda6d, 0xda6d, 0 }, 727 + { 0xdaf8, 0xdaf8, 0xdaf8, 0 }, 728 + { 0xdb83, 0xdb83, 0xdb83, 0 }, 729 + { 0xdc0d, 0xdc0d, 0xdc0d, 0 }, 730 + { 0xdc97, 0xdc97, 0xdc97, 0 }, 731 + { 0xdd20, 0xdd20, 0xdd20, 0 }, 732 + { 0xdda9, 0xdda9, 0xdda9, 0 }, 733 + { 0xde31, 0xde31, 0xde31, 0 }, 734 + { 0xdeb9, 0xdeb9, 0xdeb9, 0 }, 735 + { 0xdf41, 0xdf41, 0xdf41, 0 }, 736 + { 0xdfc8, 0xdfc8, 0xdfc8, 0 }, 737 + { 0xe04f, 0xe04f, 0xe04f, 0 }, 738 + { 0xe0d5, 0xe0d5, 0xe0d5, 0 }, 739 + { 0xe15b, 0xe15b, 0xe15b, 0 }, 740 + { 0xe1e0, 0xe1e0, 0xe1e0, 0 }, 741 + { 0xe266, 0xe266, 0xe266, 0 }, 742 + { 0xe2ea, 0xe2ea, 0xe2ea, 0 }, 743 + { 0xe36f, 0xe36f, 0xe36f, 0 }, 744 + { 0xe3f3, 0xe3f3, 0xe3f3, 0 }, 745 + { 0xe476, 0xe476, 0xe476, 0 }, 746 + { 0xe4f9, 0xe4f9, 0xe4f9, 0 }, 747 + { 0xe57c, 0xe57c, 0xe57c, 0 }, 748 + { 0xe5fe, 0xe5fe, 0xe5fe, 0 }, 749 + { 0xe680, 0xe680, 0xe680, 0 }, 750 + { 0xe702, 0xe702, 0xe702, 0 }, 751 + { 0xe783, 0xe783, 0xe783, 0 }, 752 + { 0xe804, 0xe804, 0xe804, 0 }, 753 + { 0xe884, 0xe884, 0xe884, 0 }, 754 + { 0xe905, 0xe905, 0xe905, 0 }, 755 + { 0xe984, 0xe984, 0xe984, 0 }, 756 + { 0xea04, 0xea04, 0xea04, 0 }, 757 + { 0xea83, 0xea83, 0xea83, 0 }, 758 + { 0xeb02, 0xeb02, 0xeb02, 0 }, 759 + { 0xeb80, 0xeb80, 0xeb80, 0 }, 760 + { 0xebfe, 0xebfe, 0xebfe, 0 }, 761 + { 0xec7b, 0xec7b, 0xec7b, 0 }, 762 + { 0xecf9, 0xecf9, 0xecf9, 0 }, 763 + { 0xed76, 0xed76, 0xed76, 0 }, 764 + { 0xedf2, 0xedf2, 0xedf2, 0 }, 765 + { 0xee6f, 0xee6f, 0xee6f, 0 }, 766 + { 0xeeeb, 0xeeeb, 0xeeeb, 0 }, 767 + { 0xef66, 0xef66, 0xef66, 0 }, 768 + { 0xefe2, 0xefe2, 0xefe2, 0 }, 769 + { 0xf05d, 0xf05d, 0xf05d, 0 }, 770 + { 0xf0d7, 0xf0d7, 0xf0d7, 0 }, 771 + { 0xf152, 0xf152, 0xf152, 0 }, 772 + { 0xf1cc, 0xf1cc, 0xf1cc, 0 }, 773 + { 0xf245, 0xf245, 0xf245, 0 }, 774 + { 0xf2bf, 0xf2bf, 0xf2bf, 0 }, 775 + { 0xf338, 0xf338, 0xf338, 0 }, 776 + { 0xf3b0, 0xf3b0, 0xf3b0, 0 
}, 777 + { 0xf429, 0xf429, 0xf429, 0 }, 778 + { 0xf4a1, 0xf4a1, 0xf4a1, 0 }, 779 + { 0xf519, 0xf519, 0xf519, 0 }, 780 + { 0xf590, 0xf590, 0xf590, 0 }, 781 + { 0xf608, 0xf608, 0xf608, 0 }, 782 + { 0xf67e, 0xf67e, 0xf67e, 0 }, 783 + { 0xf6f5, 0xf6f5, 0xf6f5, 0 }, 784 + { 0xf76b, 0xf76b, 0xf76b, 0 }, 785 + { 0xf7e1, 0xf7e1, 0xf7e1, 0 }, 786 + { 0xf857, 0xf857, 0xf857, 0 }, 787 + { 0xf8cd, 0xf8cd, 0xf8cd, 0 }, 788 + { 0xf942, 0xf942, 0xf942, 0 }, 789 + { 0xf9b7, 0xf9b7, 0xf9b7, 0 }, 790 + { 0xfa2b, 0xfa2b, 0xfa2b, 0 }, 791 + { 0xfaa0, 0xfaa0, 0xfaa0, 0 }, 792 + { 0xfb14, 0xfb14, 0xfb14, 0 }, 793 + { 0xfb88, 0xfb88, 0xfb88, 0 }, 794 + { 0xfbfb, 0xfbfb, 0xfbfb, 0 }, 795 + { 0xfc6e, 0xfc6e, 0xfc6e, 0 }, 796 + { 0xfce1, 0xfce1, 0xfce1, 0 }, 797 + { 0xfd54, 0xfd54, 0xfd54, 0 }, 798 + { 0xfdc6, 0xfdc6, 0xfdc6, 0 }, 799 + { 0xfe39, 0xfe39, 0xfe39, 0 }, 800 + { 0xfeaa, 0xfeaa, 0xfeaa, 0 }, 801 + { 0xff1c, 0xff1c, 0xff1c, 0 }, 802 + { 0xff8d, 0xff8d, 0xff8d, 0 }, 803 + { 0xffff, 0xffff, 0xffff, 0 }, 804 + }; 805 + 806 + const struct vkms_color_lut srgb_inv_eotf = { 807 + .base = srgb_inv_array, 808 + .lut_length = LUT_SIZE, 809 + .channel_value2index_ratio = 0xff00ffll 810 + }; 811 + EXPORT_SYMBOL(srgb_inv_eotf);
+12
drivers/gpu/drm/vkms/vkms_luts.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + 3 + #ifndef _VKMS_LUTS_H_ 4 + #define _VKMS_LUTS_H_ 5 + 6 + #define LUT_SIZE 256 7 + 8 + extern const struct vkms_color_lut linear_eotf; 9 + extern const struct vkms_color_lut srgb_eotf; 10 + extern const struct vkms_color_lut srgb_inv_eotf; 11 + 12 + #endif /* _VKMS_LUTS_H_ */
+1 -5
drivers/gpu/drm/vkms/vkms_output.c
··· 20 20 return -EINVAL; 21 21 22 22 vkms_config_for_each_plane(vkmsdev->config, plane_cfg) { 23 - enum drm_plane_type type; 24 - 25 - type = vkms_config_plane_get_type(plane_cfg); 26 - 27 - plane_cfg->plane = vkms_plane_init(vkmsdev, type); 23 + plane_cfg->plane = vkms_plane_init(vkmsdev, plane_cfg); 28 24 if (IS_ERR(plane_cfg->plane)) { 29 25 DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n"); 30 26 return PTR_ERR(plane_cfg->plane);
+7 -2
drivers/gpu/drm/vkms/vkms_plane.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 3 + #include "vkms_config.h" 3 4 #include <linux/iosys-map.h> 4 5 5 6 #include <drm/drm_atomic.h> ··· 219 218 }; 220 219 221 220 struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev, 222 - enum drm_plane_type type) 221 + struct vkms_config_plane *plane_cfg) 223 222 { 224 223 struct drm_device *dev = &vkmsdev->drm; 225 224 struct vkms_plane *plane; ··· 227 226 plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 0, 228 227 &vkms_plane_funcs, 229 228 vkms_formats, ARRAY_SIZE(vkms_formats), 230 - NULL, type, NULL); 229 + NULL, vkms_config_plane_get_type(plane_cfg), 230 + NULL); 231 231 if (IS_ERR(plane)) 232 232 return plane; 233 233 ··· 245 243 BIT(DRM_COLOR_YCBCR_FULL_RANGE), 246 244 DRM_COLOR_YCBCR_BT601, 247 245 DRM_COLOR_YCBCR_FULL_RANGE); 246 + 247 + if (vkms_config_plane_get_default_pipeline(plane_cfg)) 248 + vkms_initialize_colorops(&plane->base); 248 249 249 250 return plane; 250 251 }
+9
drivers/gpu/host1x/dev.c
··· 71 71 return readl(sync_regs + r); 72 72 } 73 73 74 + #ifdef CONFIG_64BIT 75 + u64 host1x_sync_readq(struct host1x *host1x, u32 r) 76 + { 77 + void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset; 78 + 79 + return readq(sync_regs + r); 80 + } 81 + #endif 82 + 74 83 void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r) 75 84 { 76 85 writel(v, ch->regs + r);
+3
drivers/gpu/host1x/dev.h
··· 179 179 u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r); 180 180 void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r); 181 181 u32 host1x_sync_readl(struct host1x *host1x, u32 r); 182 + #ifdef CONFIG_64BIT 183 + u64 host1x_sync_readq(struct host1x *host1x, u32 r); 184 + #endif 182 185 void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r); 183 186 u32 host1x_ch_readl(struct host1x_channel *ch, u32 r); 184 187
+47 -9
drivers/gpu/host1x/hw/intr_hw.c
··· 11 11 #include "../intr.h" 12 12 #include "../dev.h" 13 13 14 + static void process_32_syncpts(struct host1x *host, unsigned long val, u32 reg_offset) 15 + { 16 + unsigned int id; 17 + 18 + if (!val) 19 + return; 20 + 21 + host1x_sync_writel(host, val, HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(reg_offset)); 22 + host1x_sync_writel(host, val, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(reg_offset)); 23 + 24 + for_each_set_bit(id, &val, 32) 25 + host1x_intr_handle_interrupt(host, reg_offset * 32 + id); 26 + } 27 + 14 28 static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id) 15 29 { 16 30 struct host1x_intr_irq_data *irq_data = dev_id; 17 31 struct host1x *host = irq_data->host; 18 32 unsigned long reg; 19 - unsigned int i, id; 33 + unsigned int i; 20 34 35 + #if !defined(CONFIG_64BIT) 21 36 for (i = irq_data->offset; i < DIV_ROUND_UP(host->info->nb_pts, 32); 22 37 i += host->num_syncpt_irqs) { 23 38 reg = host1x_sync_readl(host, 24 39 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i)); 25 40 26 - host1x_sync_writel(host, reg, 27 - HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i)); 28 - host1x_sync_writel(host, reg, 41 + process_32_syncpts(host, reg, i); 42 + } 43 + #elif HOST1X_HW == 6 || HOST1X_HW == 7 44 + /* 45 + * Tegra186 and Tegra194 have the first INT_STATUS register not 64-bit aligned, 46 + * and only have one interrupt line. 
47 + */ 48 + reg = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(0)); 49 + process_32_syncpts(host, reg, 0); 50 + 51 + for (i = 1; i < (host->info->nb_pts / 32) - 1; i += 2) { 52 + reg = host1x_sync_readq(host, 29 53 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i)); 30 54 31 - for_each_set_bit(id, &reg, 32) 32 - host1x_intr_handle_interrupt(host, i * 32 + id); 55 + process_32_syncpts(host, lower_32_bits(reg), i); 56 + process_32_syncpts(host, upper_32_bits(reg), i + 1); 33 57 } 58 + 59 + reg = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i)); 60 + process_32_syncpts(host, reg, i); 61 + #else 62 + /* All 64-bit capable SoCs have number of syncpoints divisible by 64 */ 63 + for (i = irq_data->offset; i < DIV_ROUND_UP(host->info->nb_pts, 64); 64 + i += host->num_syncpt_irqs) { 65 + reg = host1x_sync_readq(host, 66 + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i * 2)); 67 + 68 + process_32_syncpts(host, lower_32_bits(reg), i * 2 + 0); 69 + process_32_syncpts(host, upper_32_bits(reg), i * 2 + 1); 70 + } 71 + #endif 34 72 35 73 return IRQ_HANDLED; 36 74 } ··· 106 68 107 69 /* 108 70 * Program threshold interrupt destination among 8 lines per VM, 109 - * per syncpoint. For each group of 32 syncpoints (corresponding to one 110 - * interrupt status register), direct to one interrupt line, going 71 + * per syncpoint. For each group of 64 syncpoints (corresponding to two 72 + * interrupt status registers), direct to one interrupt line, going 111 73 * around in a round robin fashion. 112 74 */ 113 75 for (id = 0; id < host->info->nb_pts; id++) { 114 - u32 reg_offset = id / 32; 76 + u32 reg_offset = id / 64; 115 77 u32 irq_index = reg_offset % host->num_syncpt_irqs; 116 78 117 79 host1x_sync_writel(host, irq_index, HOST1X_SYNC_SYNCPT_INTR_DEST(id));
+4
include/drm/bridge/dw_hdmi_qp.h
··· 25 25 int main_irq; 26 26 int cec_irq; 27 27 unsigned long ref_clk_rate; 28 + /* Supported output formats: bitmask of @hdmi_colorspace */ 29 + unsigned int supported_formats; 30 + /* Maximum bits per color channel: 8, 10 or 12 */ 31 + unsigned int max_bpc; 28 32 }; 29 33 30 34 struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
+111
include/drm/drm_atomic.h
··· 30 30 31 31 #include <drm/drm_crtc.h> 32 32 #include <drm/drm_util.h> 33 + #include <drm/drm_colorop.h> 33 34 34 35 /** 35 36 * struct drm_crtc_commit - track modeset commits on a CRTC ··· 156 155 * used by the free code to remove the second reference if commit fails. 157 156 */ 158 157 bool abort_completion; 158 + }; 159 + 160 + struct __drm_colorops_state { 161 + struct drm_colorop *ptr; 162 + struct drm_colorop_state *state, *old_state, *new_state; 159 163 }; 160 164 161 165 struct __drm_planes_state { ··· 538 532 bool checked : 1; 539 533 540 534 /** 535 + * @plane_color_pipeline: 536 + * 537 + * Indicates whether this atomic state originated with a client that 538 + * set the DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE. 539 + * 540 + * Drivers and helper functions should use this to ignore legacy 541 + * properties that are incompatible with the drm_plane COLOR_PIPELINE 542 + * behavior, such as: 543 + * 544 + * - COLOR_RANGE 545 + * - COLOR_ENCODING 546 + * 547 + * or any other driver-specific properties that might affect pixel 548 + * values. 549 + */ 550 + bool plane_color_pipeline : 1; 551 + 552 + /** 553 + * @colorops: 554 + * 555 + * Pointer to array of @drm_colorop and @drm_colorop_state part of this 556 + * update. 
557 + */ 558 + struct __drm_colorops_state *colorops; 559 + 560 + /** 541 561 * @planes: 542 562 * 543 563 * Pointer to array of @drm_plane and @drm_plane_state part of this ··· 704 672 struct drm_plane_state * __must_check 705 673 drm_atomic_get_plane_state(struct drm_atomic_state *state, 706 674 struct drm_plane *plane); 675 + struct drm_colorop_state * 676 + drm_atomic_get_colorop_state(struct drm_atomic_state *state, 677 + struct drm_colorop *colorop); 707 678 struct drm_connector_state * __must_check 708 679 drm_atomic_get_connector_state(struct drm_atomic_state *state, 709 680 struct drm_connector *connector); ··· 804 769 } 805 770 806 771 /** 772 + * drm_atomic_get_old_colorop_state - get colorop state, if it exists 773 + * @state: global atomic state object 774 + * @colorop: colorop to grab 775 + * 776 + * This function returns the old colorop state for the given colorop, or 777 + * NULL if the colorop is not part of the global atomic state. 778 + */ 779 + static inline struct drm_colorop_state * 780 + drm_atomic_get_old_colorop_state(struct drm_atomic_state *state, 781 + struct drm_colorop *colorop) 782 + { 783 + return state->colorops[drm_colorop_index(colorop)].old_state; 784 + } 785 + 786 + /** 787 + * drm_atomic_get_new_colorop_state - get colorop state, if it exists 788 + * @state: global atomic state object 789 + * @colorop: colorop to grab 790 + * 791 + * This function returns the new colorop state for the given colorop, or 792 + * NULL if the colorop is not part of the global atomic state. 
793 + */ 794 + static inline struct drm_colorop_state * 795 + drm_atomic_get_new_colorop_state(struct drm_atomic_state *state, 796 + struct drm_colorop *colorop) 797 + { 798 + return state->colorops[drm_colorop_index(colorop)].new_state; 799 + } 800 + 801 + /** 807 802 * drm_atomic_get_old_connector_state - get connector state, if it exists 808 803 * @state: global atomic state object 809 804 * @connector: connector to grab ··· 924 859 int __must_check 925 860 drm_atomic_add_affected_planes(struct drm_atomic_state *state, 926 861 struct drm_crtc *crtc); 862 + int __must_check 863 + drm_atomic_add_affected_colorops(struct drm_atomic_state *state, 864 + struct drm_plane *plane); 927 865 928 866 int __must_check drm_atomic_check_only(struct drm_atomic_state *state); 929 867 int __must_check drm_atomic_commit(struct drm_atomic_state *state); ··· 1065 997 (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \ 1066 998 (new_crtc_state) = (__state)->crtcs[__i].new_state, \ 1067 999 (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1)) 1000 + 1001 + /** 1002 + * for_each_oldnew_colorop_in_state - iterate over all colorops in an atomic update 1003 + * @__state: &struct drm_atomic_state pointer 1004 + * @colorop: &struct drm_colorop iteration cursor 1005 + * @old_colorop_state: &struct drm_colorop_state iteration cursor for the old state 1006 + * @new_colorop_state: &struct drm_colorop_state iteration cursor for the new state 1007 + * @__i: int iteration cursor, for macro-internal use 1008 + * 1009 + * This iterates over all colorops in an atomic update, tracking both old and 1010 + * new state. This is useful in places where the state delta needs to be 1011 + * considered, for example in atomic check functions. 
1012 + */ 1013 + #define for_each_oldnew_colorop_in_state(__state, colorop, old_colorop_state, \ 1014 + new_colorop_state, __i) \ 1015 + for ((__i) = 0; \ 1016 + (__i) < (__state)->dev->mode_config.num_colorop; \ 1017 + (__i)++) \ 1018 + for_each_if ((__state)->colorops[__i].ptr && \ 1019 + ((colorop) = (__state)->colorops[__i].ptr, \ 1020 + (void)(colorop) /* Only to avoid unused-but-set-variable warning */, \ 1021 + (old_colorop_state) = (__state)->colorops[__i].old_state,\ 1022 + (new_colorop_state) = (__state)->colorops[__i].new_state, 1)) 1023 + 1024 + /** 1025 + * for_each_new_colorop_in_state - iterate over all colorops in an atomic update 1026 + * @__state: &struct drm_atomic_state pointer 1027 + * @colorop: &struct drm_colorop iteration cursor 1028 + * @new_colorop_state: &struct drm_colorop_state iteration cursor for the new state 1029 + * @__i: int iteration cursor, for macro-internal use 1030 + * 1031 + * This iterates over all colorops in an atomic update, tracking new state. This is 1032 + * useful in places where the state delta needs to be considered, for example in 1033 + * atomic check functions. 1034 + */ 1035 + #define for_each_new_colorop_in_state(__state, colorop, new_colorop_state, __i) \ 1036 + for ((__i) = 0; \ 1037 + (__i) < (__state)->dev->mode_config.num_colorop; \ 1038 + (__i)++) \ 1039 + for_each_if ((__state)->colorops[__i].ptr && \ 1040 + ((colorop) = (__state)->colorops[__i].ptr, \ 1041 + (void)(colorop) /* Only to avoid unused-but-set-variable warning */, \ 1042 + (new_colorop_state) = (__state)->colorops[__i].new_state, 1)) 1068 1043 1069 1044 /** 1070 1045 * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update
+3
include/drm/drm_atomic_uapi.h
··· 37 37 struct drm_connector_state; 38 38 struct dma_fence; 39 39 struct drm_framebuffer; 40 + struct drm_colorop; 40 41 41 42 int __must_check 42 43 drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, ··· 50 49 struct drm_crtc *crtc); 51 50 void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, 52 51 struct drm_framebuffer *fb); 52 + void drm_atomic_set_colorop_for_plane(struct drm_plane_state *plane_state, 53 + struct drm_colorop *colorop); 53 54 int __must_check 54 55 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, 55 56 struct drm_crtc *crtc);
+5 -3
include/drm/drm_client.h
··· 57 57 * 58 58 * Note that the core does not guarantee exclusion against concurrent 59 59 * drm_open(). Clients need to ensure this themselves, for example by 60 - * using drm_master_internal_acquire() and 61 - * drm_master_internal_release(). 60 + * using drm_master_internal_acquire() and drm_master_internal_release(). 61 + * 62 + * If the caller passes force, the client should ignore any present DRM 63 + * master and restore the display anyway. 62 64 * 63 65 * This callback is optional. 64 66 */ 65 - int (*restore)(struct drm_client_dev *client); 67 + int (*restore)(struct drm_client_dev *client, bool force); 66 68 67 69 /** 68 70 * @hotplug:
+2 -2
include/drm/drm_client_event.h
··· 10 10 #if defined(CONFIG_DRM_CLIENT) 11 11 void drm_client_dev_unregister(struct drm_device *dev); 12 12 void drm_client_dev_hotplug(struct drm_device *dev); 13 - void drm_client_dev_restore(struct drm_device *dev); 13 + void drm_client_dev_restore(struct drm_device *dev, bool force); 14 14 void drm_client_dev_suspend(struct drm_device *dev); 15 15 void drm_client_dev_resume(struct drm_device *dev); 16 16 #else ··· 18 18 { } 19 19 static inline void drm_client_dev_hotplug(struct drm_device *dev) 20 20 { } 21 - static inline void drm_client_dev_restore(struct drm_device *dev) 21 + static inline void drm_client_dev_restore(struct drm_device *dev, bool force) 22 22 { } 23 23 static inline void drm_client_dev_suspend(struct drm_device *dev) 24 24 { }
+29
include/drm/drm_color_mgmt.h
··· 50 50 (1 << 16) - 1); 51 51 } 52 52 53 + /** 54 + * drm_color_lut32_extract - clamp and round LUT entries 55 + * @user_input: input value 56 + * @bit_precision: number of bits the hw LUT supports 57 + * 58 + * Extract U0.bit_precision from a U0.32 LUT value. 59 + * 60 + */ 61 + static inline u32 drm_color_lut32_extract(u32 user_input, int bit_precision) 62 + { 63 + u64 max = (bit_precision >= 64) ? ~0ULL : (1ULL << bit_precision) - 1; 64 + 65 + return DIV_ROUND_CLOSEST_ULL((u64)user_input * max, 66 + (1ULL << 32) - 1); 67 + } 68 + 53 69 u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n); 54 70 55 71 void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, ··· 86 70 static inline int drm_color_lut_size(const struct drm_property_blob *blob) 87 71 { 88 72 return blob->length / sizeof(struct drm_color_lut); 73 + } 74 + 75 + /** 76 + * drm_color_lut32_size - calculate the number of entries in the extended LUT 77 + * @blob: blob containing the LUT 78 + * 79 + * Returns: 80 + * The number of entries in the color LUT stored in @blob. 81 + */ 82 + static inline int drm_color_lut32_size(const struct drm_property_blob *blob) 83 + { 84 + return blob->length / sizeof(struct drm_color_lut32); 89 85 } 90 86 91 87 enum drm_color_encoding { ··· 174 146 void drm_crtc_fill_palette_332(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette); 175 147 void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette); 176 148 149 + int drm_color_lut32_check(const struct drm_property_blob *lut, u32 tests); 177 150 #endif
+464
include/drm/drm_colorop.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 22 + * 23 + * Authors: AMD 24 + * 25 + */ 26 + 27 + #ifndef __DRM_COLOROP_H__ 28 + #define __DRM_COLOROP_H__ 29 + 30 + #include <drm/drm_mode_object.h> 31 + #include <drm/drm_mode.h> 32 + #include <drm/drm_property.h> 33 + 34 + /* DRM colorop flags */ 35 + #define DRM_COLOROP_FLAG_ALLOW_BYPASS (1<<0) /* Allow bypass on the drm_colorop */ 36 + 37 + /** 38 + * enum drm_colorop_curve_1d_type - type of 1D curve 39 + * 40 + * Describes a 1D curve to be applied by the DRM_COLOROP_1D_CURVE colorop. 41 + */ 42 + enum drm_colorop_curve_1d_type { 43 + /** 44 + * @DRM_COLOROP_1D_CURVE_SRGB_EOTF: 45 + * 46 + * enum string "sRGB EOTF" 47 + * 48 + * sRGB piece-wise electro-optical transfer function. 
Transfer 49 + * characteristics as defined by IEC 61966-2-1 sRGB. Equivalent 50 + * to H.273 TransferCharacteristics code point 13 with 51 + * MatrixCoefficients set to 0. 52 + */ 53 + DRM_COLOROP_1D_CURVE_SRGB_EOTF, 54 + 55 + /** 56 + * @DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF: 57 + * 58 + * enum string "sRGB Inverse EOTF" 59 + * 60 + * The inverse of &DRM_COLOROP_1D_CURVE_SRGB_EOTF 61 + */ 62 + DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF, 63 + 64 + /** 65 + * @DRM_COLOROP_1D_CURVE_PQ_125_EOTF: 66 + * 67 + * enum string "PQ 125 EOTF" 68 + * 69 + * The PQ transfer function, scaled by 125.0f, so that 10,000 70 + * nits correspond to 125.0f. 71 + * 72 + * Transfer characteristics of the PQ function as defined by 73 + * SMPTE ST 2084 (2014) for 10-, 12-, 14-, and 16-bit systems 74 + * and Rec. ITU-R BT.2100-2 perceptual quantization (PQ) system, 75 + * represented by H.273 TransferCharacteristics code point 16. 76 + */ 77 + DRM_COLOROP_1D_CURVE_PQ_125_EOTF, 78 + 79 + /** 80 + * @DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF: 81 + * 82 + * enum string "PQ 125 Inverse EOTF" 83 + * 84 + * The inverse of DRM_COLOROP_1D_CURVE_PQ_125_EOTF. 85 + */ 86 + DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF, 87 + 88 + /** 89 + * @DRM_COLOROP_1D_CURVE_BT2020_INV_OETF: 90 + * 91 + * enum string "BT.2020 Inverse OETF" 92 + * 93 + * The inverse of &DRM_COLOROP_1D_CURVE_BT2020_OETF 94 + */ 95 + DRM_COLOROP_1D_CURVE_BT2020_INV_OETF, 96 + 97 + /** 98 + * @DRM_COLOROP_1D_CURVE_BT2020_OETF: 99 + * 100 + * enum string "BT.2020 OETF" 101 + * 102 + * The BT.2020/BT.709 transfer function. The BT.709 and BT.2020 103 + * transfer functions are the same, the only difference is that 104 + * BT.2020 is defined with more precision for 10 and 12-bit 105 + * encodings. 106 + * 107 + * 108 + */ 109 + DRM_COLOROP_1D_CURVE_BT2020_OETF, 110 + 111 + /** 112 + * @DRM_COLOROP_1D_CURVE_GAMMA22: 113 + * 114 + * enum string "Gamma 2.2" 115 + * 116 + * A gamma 2.2 power function. 
This applies a power curve with 117 + * gamma value of 2.2 to the input values. 118 + */ 119 + DRM_COLOROP_1D_CURVE_GAMMA22, 120 + 121 + /** 122 + * @DRM_COLOROP_1D_CURVE_GAMMA22_INV: 123 + * 124 + * enum string "Gamma 2.2 Inverse" 125 + * 126 + * The inverse of &DRM_COLOROP_1D_CURVE_GAMMA22 127 + */ 128 + DRM_COLOROP_1D_CURVE_GAMMA22_INV, 129 + /** 130 + * @DRM_COLOROP_1D_CURVE_COUNT: 131 + * 132 + * enum value denoting the size of the enum 133 + */ 134 + DRM_COLOROP_1D_CURVE_COUNT 135 + }; 136 + 137 + /** 138 + * struct drm_colorop_state - mutable colorop state 139 + */ 140 + struct drm_colorop_state { 141 + /** @colorop: backpointer to the colorop */ 142 + struct drm_colorop *colorop; 143 + 144 + /* 145 + * Color properties 146 + * 147 + * The following fields are not always valid, their usage depends 148 + * on the colorop type. See their associated comment for more 149 + * information. 150 + */ 151 + 152 + /** 153 + * @bypass: 154 + * 155 + * When the property BYPASS exists on this colorop, this stores 156 + * the requested bypass state: true if colorop shall be bypassed, 157 + * false if colorop is enabled. 158 + */ 159 + bool bypass; 160 + 161 + /** 162 + * @curve_1d_type: 163 + * 164 + * Type of 1D curve. 165 + */ 166 + enum drm_colorop_curve_1d_type curve_1d_type; 167 + 168 + /** 169 + * @multiplier: 170 + * 171 + * Multiplier to 'gain' the plane. Format is S31.32 sign-magnitude. 172 + */ 173 + uint64_t multiplier; 174 + 175 + /** 176 + * @data: 177 + * 178 + * Data blob for any TYPE that requires such a blob. The 179 + * interpretation of the blob is TYPE-specific. 180 + * 181 + * See the &drm_colorop_type documentation for how blob is laid 182 + * out. 183 + */ 184 + struct drm_property_blob *data; 185 + 186 + /** @state: backpointer to global drm_atomic_state */ 187 + struct drm_atomic_state *state; 188 + }; 189 + 190 + /** 191 + * struct drm_colorop - DRM color operation control structure 192 + * 193 + * A colorop represents one color operation. 
They can be chained via 194 + * the 'next' pointer to build a color pipeline. 195 + * 196 + * Since colorops cannot stand-alone and are used to describe colorop 197 + * operations on a plane they don't have their own locking mechanism but 198 + * are locked and programmed along with their associated &drm_plane. 199 + * 200 + */ 201 + struct drm_colorop { 202 + /** @dev: parent DRM device */ 203 + struct drm_device *dev; 204 + 205 + /** 206 + * @head: 207 + * 208 + * List of all colorops on @dev, linked from &drm_mode_config.colorop_list. 209 + * Invariant over the lifetime of @dev and therefore does not need 210 + * locking. 211 + */ 212 + struct list_head head; 213 + 214 + /** 215 + * @index: Position inside the mode_config.list, can be used as an array 216 + * index. It is invariant over the lifetime of the colorop. 217 + */ 218 + unsigned int index; 219 + 220 + /** @base: base mode object */ 221 + struct drm_mode_object base; 222 + 223 + /** 224 + * @plane: 225 + * 226 + * The plane on which the colorop sits. A drm_colorop is always unique 227 + * to a plane. 228 + */ 229 + struct drm_plane *plane; 230 + 231 + /** 232 + * @state: 233 + * 234 + * Current atomic state for this colorop. 235 + * 236 + * This is protected by @mutex. Note that nonblocking atomic commits 237 + * access the current colorop state without taking locks. 238 + */ 239 + struct drm_colorop_state *state; 240 + 241 + /* 242 + * Color properties 243 + * 244 + * The following fields are not always valid, their usage depends 245 + * on the colorop type. See their associated comment for more 246 + * information. 
247 + */ 248 + 249 + /** @properties: property tracking for this colorop */ 250 + struct drm_object_properties properties; 251 + 252 + /** 253 + * @type: 254 + * 255 + * Read-only 256 + * Type of color operation 257 + */ 258 + enum drm_colorop_type type; 259 + 260 + /** 261 + * @next: 262 + * 263 + * Read-only 264 + * Pointer to next drm_colorop in pipeline 265 + */ 266 + struct drm_colorop *next; 267 + 268 + /** 269 + * @type_property: 270 + * 271 + * Read-only "TYPE" property for specifying the type of 272 + * this color operation. The type is enum drm_colorop_type. 273 + */ 274 + struct drm_property *type_property; 275 + 276 + /** 277 + * @bypass_property: 278 + * 279 + * Boolean property to control enablement of the color 280 + * operation. Only present if DRM_COLOROP_FLAG_ALLOW_BYPASS 281 + * flag is set. When present, setting bypass to "true" shall 282 + * always be supported to allow compositors to quickly fall 283 + * back to alternate methods of color processing. This is 284 + * important since setting color operations can fail due to 285 + * unique HW constraints. 286 + */ 287 + struct drm_property *bypass_property; 288 + 289 + /** 290 + * @size: 291 + * 292 + * Number of entries of the custom LUT. This should be read-only. 293 + */ 294 + uint32_t size; 295 + 296 + /** 297 + * @lut1d_interpolation: 298 + * 299 + * Read-only 300 + * Interpolation for DRM_COLOROP_1D_LUT 301 + */ 302 + enum drm_colorop_lut1d_interpolation_type lut1d_interpolation; 303 + 304 + /** 305 + * @lut3d_interpolation: 306 + * 307 + * Read-only 308 + * Interpolation for DRM_COLOROP_3D_LUT 309 + */ 310 + enum drm_colorop_lut3d_interpolation_type lut3d_interpolation; 311 + 312 + /** 313 + * @lut1d_interpolation_property: 314 + * 315 + * Read-only property for DRM_COLOROP_1D_LUT interpolation 316 + */ 317 + struct drm_property *lut1d_interpolation_property; 318 + 319 + /** 320 + * @curve_1d_type_property: 321 + * 322 + * Sub-type for DRM_COLOROP_1D_CURVE type. 
323 + */ 324 + struct drm_property *curve_1d_type_property; 325 + 326 + /** 327 + * @multiplier_property: 328 + * 329 + * Multiplier property for plane gain 330 + */ 331 + struct drm_property *multiplier_property; 332 + 333 + /** 334 + * @size_property: 335 + * 336 + * Size property for custom LUT from userspace. 337 + */ 338 + struct drm_property *size_property; 339 + 340 + /** 341 + * @lut3d_interpolation_property: 342 + * 343 + * Read-only property for DRM_COLOROP_3D_LUT interpolation 344 + */ 345 + struct drm_property *lut3d_interpolation_property; 346 + 347 + /** 348 + * @data_property: 349 + * 350 + * blob property for any TYPE that requires a blob of data, 351 + * such as 1DLUT, CTM, 3DLUT, etc. 352 + * 353 + * The way this blob is interpreted depends on the TYPE of 354 + * this colorop. 355 + */ 356 + struct drm_property *data_property; 357 + 358 + /** 359 + * @next_property: 360 + * 361 + * Read-only property to next colorop in the pipeline 362 + */ 363 + struct drm_property *next_property; 364 + 365 + }; 366 + 367 + #define obj_to_colorop(x) container_of(x, struct drm_colorop, base) 368 + 369 + /** 370 + * drm_colorop_find - look up a Colorop object from its ID 371 + * @dev: DRM device 372 + * @file_priv: drm file to check for lease against. 373 + * @id: &drm_mode_object ID 374 + * 375 + * This can be used to look up a Colorop from its userspace ID. Only used by 376 + * drivers for legacy IOCTLs and interface, nowadays extensions to the KMS 377 + * userspace interface should be done using &drm_property. 378 + */ 379 + static inline struct drm_colorop *drm_colorop_find(struct drm_device *dev, 380 + struct drm_file *file_priv, 381 + uint32_t id) 382 + { 383 + struct drm_mode_object *mo; 384 + 385 + mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_COLOROP); 386 + return mo ? 
obj_to_colorop(mo) : NULL; 387 + } 388 + 389 + void drm_colorop_pipeline_destroy(struct drm_device *dev); 390 + void drm_colorop_cleanup(struct drm_colorop *colorop); 391 + 392 + int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop, 393 + struct drm_plane *plane, u64 supported_tfs, uint32_t flags); 394 + int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop, 395 + struct drm_plane *plane, uint32_t lut_size, 396 + enum drm_colorop_lut1d_interpolation_type interpolation, 397 + uint32_t flags); 398 + int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop, 399 + struct drm_plane *plane, uint32_t flags); 400 + int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop, 401 + struct drm_plane *plane, uint32_t flags); 402 + int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop, 403 + struct drm_plane *plane, 404 + uint32_t lut_size, 405 + enum drm_colorop_lut3d_interpolation_type interpolation, 406 + uint32_t flags); 407 + 408 + struct drm_colorop_state * 409 + drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop); 410 + 411 + void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop, 412 + struct drm_colorop_state *state); 413 + 414 + /** 415 + * drm_colorop_reset - reset colorop atomic state 416 + * @colorop: drm colorop 417 + * 418 + * Resets the atomic state for @colorop by freeing the state pointer (which might 419 + * be NULL, e.g. at driver load time) and allocating a new empty state object. 420 + */ 421 + void drm_colorop_reset(struct drm_colorop *colorop); 422 + 423 + /** 424 + * drm_colorop_index - find the index of a registered colorop 425 + * @colorop: colorop to find index for 426 + * 427 + * Given a registered colorop, return the index of that colorop within a DRM 428 + * device's list of colorops. 
429 + */ 430 + static inline unsigned int drm_colorop_index(const struct drm_colorop *colorop) 431 + { 432 + return colorop->index; 433 + } 434 + 435 + #define drm_for_each_colorop(colorop, dev) \ 436 + list_for_each_entry(colorop, &(dev)->mode_config.colorop_list, head) 437 + 438 + /** 439 + * drm_get_colorop_type_name - return a string for colorop type 440 + * @type: colorop type to compute name of 441 + * 442 + * In contrast to the other drm_get_*_name functions this one here returns a 443 + * const pointer and hence is threadsafe. 444 + */ 445 + const char *drm_get_colorop_type_name(enum drm_colorop_type type); 446 + 447 + /** 448 + * drm_get_colorop_curve_1d_type_name - return a string for 1D curve type 449 + * @type: 1d curve type to compute name of 450 + * 451 + * In contrast to the other drm_get_*_name functions this one here returns a 452 + * const pointer and hence is threadsafe. 453 + */ 454 + const char *drm_get_colorop_curve_1d_type_name(enum drm_colorop_curve_1d_type type); 455 + 456 + const char * 457 + drm_get_colorop_lut1d_interpolation_name(enum drm_colorop_lut1d_interpolation_type type); 458 + 459 + const char * 460 + drm_get_colorop_lut3d_interpolation_name(enum drm_colorop_lut3d_interpolation_type type); 461 + 462 + void drm_colorop_set_next_property(struct drm_colorop *colorop, struct drm_colorop *next); 463 + 464 + #endif /* __DRM_COLOROP_H__ */
+8
include/drm/drm_device.h
··· 239 239 struct list_head clientlist; 240 240 241 241 /** 242 + * @client_sysrq_list: 243 + * 244 + * Entry into list of devices registered for sysrq. Allows in-kernel 245 + * clients on this device to handle sysrq keys. 246 + */ 247 + struct list_head client_sysrq_list; 248 + 249 + /** 242 250 * @vblank_disable_immediate: 243 251 * 244 252 * If true, vblank interrupt will be disabled immediately when the
+2 -18
include/drm/drm_fb_helper.h
··· 254 254 int drm_fb_helper_check_var(struct fb_var_screeninfo *var, 255 255 struct fb_info *info); 256 256 257 - int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); 257 + int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, 258 + bool force); 258 259 259 - struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper); 260 - void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper); 261 260 void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper); 262 261 void drm_fb_helper_fill_info(struct fb_info *info, 263 262 struct drm_fb_helper *fb_helper, ··· 282 283 int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper); 283 284 int drm_fb_helper_debug_enter(struct fb_info *info); 284 285 int drm_fb_helper_debug_leave(struct fb_info *info); 285 - void drm_fb_helper_lastclose(struct drm_device *dev); 286 286 #else 287 287 static inline void drm_fb_helper_prepare(struct drm_device *dev, 288 288 struct drm_fb_helper *helper, ··· 336 338 drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) 337 339 { 338 340 return 0; 339 - } 340 - 341 - static inline struct fb_info * 342 - drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) 343 - { 344 - return NULL; 345 - } 346 - 347 - static inline void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) 348 - { 349 341 } 350 342 351 343 static inline void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper) ··· 396 408 static inline int drm_fb_helper_debug_leave(struct fb_info *info) 397 409 { 398 410 return 0; 399 - } 400 - 401 - static inline void drm_fb_helper_lastclose(struct drm_device *dev) 402 - { 403 411 } 404 412 #endif 405 413
+7
include/drm/drm_file.h
··· 207 207 bool writeback_connectors; 208 208 209 209 /** 210 + * @plane_color_pipeline: 211 + * 212 + * True if client understands plane color pipelines 213 + */ 214 + bool plane_color_pipeline; 215 + 216 + /** 210 217 * @was_master: 211 218 * 212 219 * This client has or had, master capability. Protected by struct
+17
include/drm/drm_fixed.h
··· 78 78 #define DRM_FIXED_EPSILON 1LL 79 79 #define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON) 80 80 81 + /** 82 + * drm_sm2fixp - convert signed-magnitude fixed point to 2s-complement 83 + * 84 + * Convert an S31.32 signed-magnitude fixed point to 32.32 85 + * 2s-complement fixed point 86 + * 87 + * Return: s64 2s-complement fixed point 88 + */ 89 + static inline s64 drm_sm2fixp(__u64 a) 90 + { 91 + if ((a & (1LL << 63))) { 92 + return -(a & 0x7fffffffffffffffll); 93 + } else { 94 + return a; 95 + } 96 + } 97 + 81 98 static inline s64 drm_int2fixp(int a) 82 99 { 83 100 return ((s64)a) << DRM_FIXED_POINT;
+18
include/drm/drm_mode_config.h
··· 501 501 struct raw_spinlock panic_lock; 502 502 503 503 /** 504 + * @num_colorop: 505 + * 506 + * Number of colorop objects on this device. 507 + * This is invariant over the lifetime of a device and hence doesn't 508 + * need any locks. 509 + */ 510 + int num_colorop; 511 + 512 + /** 513 + * @colorop_list: 514 + * 515 + * List of colorop objects linked with &drm_colorop.head. This is 516 + * invariant over the lifetime of a device and hence doesn't need any 517 + * locks. 518 + */ 519 + struct list_head colorop_list; 520 + 521 + /** 504 522 * @num_crtc: 505 523 * 506 524 * Number of CRTCs on this device linked with &drm_crtc.head. This is invariant over the lifetime
+19
include/drm/drm_plane.h
··· 244 244 enum drm_scaling_filter scaling_filter; 245 245 246 246 /** 247 + * @color_pipeline: 248 + * 249 + * The first colorop of the active color pipeline, or NULL, if no 250 + * color pipeline is active. 251 + */ 252 + struct drm_colorop *color_pipeline; 253 + 254 + /** 247 255 * @commit: Tracks the pending commit to prevent use-after-free conditions, 248 256 * and for async plane updates. 249 257 * ··· 792 784 struct drm_property *color_range_property; 793 785 794 786 /** 787 + * @color_pipeline_property: 788 + * 789 + * Optional "COLOR_PIPELINE" enum property for specifying 790 + * a color pipeline to use on the plane. 791 + */ 792 + struct drm_property *color_pipeline_property; 793 + 794 + /** 795 795 * @scaling_filter_property: property to apply a particular filter while 796 796 * scaling. 797 797 */ ··· 1022 1006 const struct drm_plane_size_hint *hints, 1023 1007 int num_hints); 1024 1008 1009 + int drm_plane_create_color_pipeline_property(struct drm_plane *plane, 1010 + const struct drm_prop_enum_list *pipelines, 1011 + int num_pipelines); 1025 1012 #endif
+21 -8
include/drm/ttm/ttm_resource.h
··· 52 52 struct scatterlist; 53 53 54 54 /** 55 + * define TTM_NUM_MOVE_FENCES - How many entities can be used for evictions 56 + * 57 + * Pipelined evictions can be spread on multiple entities. This 58 + * is the max number of entities that can be used by the driver 59 + * for that purpose. 60 + */ 61 + #define TTM_NUM_MOVE_FENCES 8 62 + 63 + /** 55 64 * enum ttm_lru_item_type - enumerate ttm_lru_item subclasses 56 65 */ 57 66 enum ttm_lru_item_type { ··· 190 181 * @size: Size of the managed region. 191 182 * @bdev: ttm device this manager belongs to 192 183 * @func: structure pointer implementing the range manager. See above 193 - * @move_lock: lock for move fence 194 - * @move: The fence of the last pipelined move operation. 184 + * @eviction_lock: lock for eviction fences 185 + * @eviction_fences: The fences of the last pipelined move operation. 195 186 * @lru: The lru list for this memory type. 196 187 * 197 188 * This structure is used to identify and manage memory types for a device. ··· 205 196 struct ttm_device *bdev; 206 197 uint64_t size; 207 198 const struct ttm_resource_manager_func *func; 208 - spinlock_t move_lock; 209 199 210 - /* 211 - * Protected by @move_lock. 200 + /* This is very similar to a dma_resv object, but locking rules make 201 + * it difficult to use one in this context. 212 202 */ 213 - struct dma_fence *move; 203 + spinlock_t eviction_lock; 204 + struct dma_fence *eviction_fences[TTM_NUM_MOVE_FENCES]; 214 205 215 206 /* 216 207 * Protected by the bdev->lru_lock. ··· 431 422 static inline void 432 423 ttm_resource_manager_cleanup(struct ttm_resource_manager *man) 433 424 { 434 - dma_fence_put(man->move); 435 - man->move = NULL; 425 + int i; 426 + 427 + for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) { 428 + dma_fence_put(man->eviction_fences[i]); 429 + man->eviction_fences[i] = NULL; 430 + } 436 431 } 437 432 438 433 void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
-9
include/uapi/drm/amdgpu_drm.h
··· 1656 1656 #define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */ 1657 1657 #define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */ 1658 1658 1659 - /* FIXME wrong namespace! */ 1660 - struct drm_color_ctm_3x4 { 1661 - /* 1662 - * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude 1663 - * (not two's complement!) format. 1664 - */ 1665 - __u64 matrix[12]; 1666 - }; 1667 - 1668 1659 #if defined(__cplusplus) 1669 1660 } 1670 1661 #endif
+15
include/uapi/drm/drm.h
··· 906 906 */ 907 907 #define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6 908 908 909 + /** 910 + * DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE 911 + * 912 + * If set to 1 the DRM core will allow setting the COLOR_PIPELINE 913 + * property on a &drm_plane, as well as drm_colorop properties. 914 + * 915 + * Setting of these plane properties will be rejected when this client 916 + * cap is set: 917 + * - COLOR_ENCODING 918 + * - COLOR_RANGE 919 + * 920 + * The client must enable &DRM_CLIENT_CAP_ATOMIC first. 921 + */ 922 + #define DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE 7 923 + 909 924 /* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ 910 925 struct drm_set_client_cap { 911 926 __u64 capability;
+134
include/uapi/drm/drm_mode.h
··· 629 629 #define DRM_MODE_OBJECT_FB 0xfbfbfbfb 630 630 #define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb 631 631 #define DRM_MODE_OBJECT_PLANE 0xeeeeeeee 632 + #define DRM_MODE_OBJECT_COLOROP 0xfafafafa 632 633 #define DRM_MODE_OBJECT_ANY 0 633 634 634 635 struct drm_mode_obj_get_properties { ··· 847 846 __u64 matrix[9]; 848 847 }; 849 848 849 + struct drm_color_ctm_3x4 { 850 + /* 851 + * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude 852 + * (not two's complement!) format. 853 + * 854 + * out matrix in 855 + * |R| |0 1 2 3 | | R | 856 + * |G| = |4 5 6 7 | x | G | 857 + * |B| |8 9 10 11| | B | 858 + * |1.0| 859 + */ 860 + __u64 matrix[12]; 861 + }; 862 + 850 863 struct drm_color_lut { 851 864 /* 852 865 * Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and ··· 870 855 __u16 green; 871 856 __u16 blue; 872 857 __u16 reserved; 858 + }; 859 + 860 + /* 861 + * struct drm_color_lut32 862 + * 863 + * 32-bit per channel color LUT entry, similar to drm_color_lut. 864 + */ 865 + struct drm_color_lut32 { 866 + __u32 red; 867 + __u32 green; 868 + __u32 blue; 869 + __u32 reserved; 870 + }; 871 + 872 + /** 873 + * enum drm_colorop_type - Type of color operation 874 + * 875 + * drm_colorops can be of many different types. Each type behaves differently 876 + * and defines a different set of properties. This enum defines all types and 877 + * gives a high-level description. 878 + */ 879 + enum drm_colorop_type { 880 + /** 881 + * @DRM_COLOROP_1D_CURVE: 882 + * 883 + * enum string "1D Curve" 884 + * 885 + * A 1D curve that is being applied to all color channels. The 886 + * curve is specified via the CURVE_1D_TYPE colorop property. 887 + */ 888 + DRM_COLOROP_1D_CURVE, 889 + 890 + /** 891 + * @DRM_COLOROP_1D_LUT: 892 + * 893 + * enum string "1D LUT" 894 + * 895 + * A simple 1D LUT of uniformly spaced &drm_color_lut32 entries, 896 + * packed into a blob via the DATA property. The driver's 897 + * expected LUT size is advertised via the SIZE property. 
898 + * 899 + * The DATA blob is an array of struct drm_color_lut32 with size 900 + * of "size". 901 + */ 902 + DRM_COLOROP_1D_LUT, 903 + 904 + /** 905 + * @DRM_COLOROP_CTM_3X4: 906 + * 907 + * enum string "3x4 Matrix" 908 + * 909 + * A 3x4 matrix. Its values are specified via the 910 + * &drm_color_ctm_3x4 struct provided via the DATA property. 911 + * 912 + * The DATA blob is a struct &drm_color_ctm_3x4 (twelve S31.32 values): 913 + * out matrix in 914 + * | R | | 0 1 2 3 | | R | 915 + * | G | = | 4 5 6 7 | x | G | 916 + * | B | | 8 9 10 11 | | B | 917 + */ 918 + DRM_COLOROP_CTM_3X4, 919 + 920 + /** 921 + * @DRM_COLOROP_MULTIPLIER: 922 + * 923 + * enum string "Multiplier" 924 + * 925 + * A simple multiplier, applied to all color values. The 926 + * multiplier is specified as a S31.32 via the MULTIPLIER 927 + * property. 928 + */ 929 + DRM_COLOROP_MULTIPLIER, 930 + 931 + /** 932 + * @DRM_COLOROP_3D_LUT: 933 + * 934 + * enum string "3D LUT" 935 + * 936 + * A 3D LUT of &drm_color_lut32 entries, 937 + * packed into a blob via the DATA property. The driver's expected 938 + * LUT size is advertised via the SIZE property, i.e., a 3D LUT with 939 + * 17x17x17 entries will have SIZE set to 17. 940 + * 941 + * The DATA blob is a 3D array of struct drm_color_lut32 with dimension 942 + * length of "size". 
943 + * The LUT elements are traversed like so: 944 + * 945 + * for B in range 0..n 946 + * for G in range 0..n 947 + * for R in range 0..n 948 + * index = R + n * (G + n * B) 949 + * color = lut3d[index] 950 + */ 951 + DRM_COLOROP_3D_LUT, 952 + }; 953 + 954 + /** 955 + * enum drm_colorop_lut3d_interpolation_type - type of 3DLUT interpolation 956 + */ 957 + enum drm_colorop_lut3d_interpolation_type { 958 + /** 959 + * @DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL: 960 + * 961 + * Tetrahedral 3DLUT interpolation 962 + */ 963 + DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, 964 + }; 965 + 966 + /** 967 + * enum drm_colorop_lut1d_interpolation_type - type of interpolation for 1D LUTs 968 + */ 969 + enum drm_colorop_lut1d_interpolation_type { 970 + /** 971 + * @DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR: 972 + * 973 + * Linear interpolation. Values between points of the LUT will be 974 + * linearly interpolated. 975 + */ 976 + DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, 873 977 }; 874 978 875 979 /**