Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (C) 2015 Free Electrons
4 * Copyright (C) 2015 NextThing Co
5 *
6 * Maxime Ripard <maxime.ripard@free-electrons.com>
7 */
8
9#include <linux/component.h>
10#include <linux/list.h>
11#include <linux/module.h>
12#include <linux/of_device.h>
13#include <linux/of_graph.h>
14#include <linux/dma-mapping.h>
15#include <linux/platform_device.h>
16#include <linux/reset.h>
17
18#include <drm/drm_atomic.h>
19#include <drm/drm_atomic_helper.h>
20#include <drm/drm_blend.h>
21#include <drm/drm_crtc.h>
22#include <drm/drm_fb_dma_helper.h>
23#include <drm/drm_fourcc.h>
24#include <drm/drm_framebuffer.h>
25#include <drm/drm_gem_dma_helper.h>
26#include <drm/drm_print.h>
27#include <drm/drm_probe_helper.h>
28
29#include "sun4i_backend.h"
30#include "sun4i_drv.h"
31#include "sun4i_frontend.h"
32#include "sun4i_layer.h"
33#include "sunxi_engine.h"
34
/*
 * struct sun4i_backend_quirks - per-SoC behaviour differences
 *
 * Looked up via of_device_get_match_data() in sun4i_backend_bind() and
 * cached in backend->quirks.
 */
struct sun4i_backend_quirks {
	/* backend <-> TCON muxing selection done in backend */
	bool needs_output_muxing;

	/* alpha at the lowest z position is not always supported */
	bool supports_lowest_plane_alpha;
};
42
/*
 * RGB to YUV conversion coefficients, written one per OCRCOEF register
 * by sun4i_backend_apply_color_correction().
 * NOTE(review): the values appear to encode a fixed-point 3x4 matrix
 * (presumably BT.601) — confirm against the Allwinner datasheet before
 * reusing them elsewhere.
 */
static const u32 sunxi_rgb2yuv_coef[12] = {
	0x00000107, 0x00000204, 0x00000064, 0x00000108,
	0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
	0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};
48
49static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
50{
51 int i;
52
53 DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");
54
55 /* Set color correction */
56 regmap_write(engine->regs, SUN4I_BACKEND_OCCTL_REG,
57 SUN4I_BACKEND_OCCTL_ENABLE);
58
59 for (i = 0; i < 12; i++)
60 regmap_write(engine->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
61 sunxi_rgb2yuv_coef[i]);
62}
63
/* Turn the output color correction stage back off (clears OCCTL enable). */
static void sun4i_backend_disable_color_correction(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Disabling color correction\n");

	/* Disable color correction */
	regmap_update_bits(engine->regs, SUN4I_BACKEND_OCCTL_REG,
			   SUN4I_BACKEND_OCCTL_ENABLE, 0);
}
72
/*
 * Latch the staged configuration: trigger a manual load of the shadow
 * registers while keeping register autoloading disabled. The hardware
 * picks the new configuration up on the next vblank (see
 * sun4i_backend_atomic_begin(), which waits for LOADCTL to clear).
 */
static void sun4i_backend_commit(struct sunxi_engine *engine,
				 struct drm_crtc *crtc,
				 struct drm_atomic_state *state)
{
	DRM_DEBUG_DRIVER("Committing changes\n");

	regmap_write(engine->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
		     SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
}
83
84void sun4i_backend_layer_enable(struct sun4i_backend *backend,
85 int layer, bool enable)
86{
87 u32 val;
88
89 DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis",
90 layer);
91
92 if (enable)
93 val = SUN4I_BACKEND_MODCTL_LAY_EN(layer);
94 else
95 val = 0;
96
97 regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
98 SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
99}
100
101static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
102{
103 switch (format) {
104 case DRM_FORMAT_ARGB8888:
105 *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
106 break;
107
108 case DRM_FORMAT_ARGB4444:
109 *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
110 break;
111
112 case DRM_FORMAT_ARGB1555:
113 *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
114 break;
115
116 case DRM_FORMAT_RGBA5551:
117 *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
118 break;
119
120 case DRM_FORMAT_RGBA4444:
121 *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
122 break;
123
124 case DRM_FORMAT_XRGB8888:
125 *mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
126 break;
127
128 case DRM_FORMAT_RGB888:
129 *mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
130 break;
131
132 case DRM_FORMAT_RGB565:
133 *mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
134 break;
135
136 default:
137 return -EINVAL;
138 }
139
140 return 0;
141}
142
/*
 * Fourcc codes the backend can scan out on its own (RGB formats plus
 * packed 4:2:2 YUV), checked by sun4i_backend_format_is_supported().
 */
static const uint32_t sun4i_backend_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
};
157
158bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
159{
160 unsigned int i;
161
162 if (modifier != DRM_FORMAT_MOD_LINEAR)
163 return false;
164
165 for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
166 if (sun4i_backend_formats[i] == fmt)
167 return true;
168
169 return false;
170}
171
/*
 * Program a layer's size and position from the plane's CRTC-space
 * coordinates. Always returns 0.
 */
int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
				     int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;

	DRM_DEBUG_DRIVER("Updating layer %d\n", layer);

	/* Set height and width */
	DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
			 state->crtc_w, state->crtc_h);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
		     SUN4I_BACKEND_LAYSIZE(state->crtc_w,
					   state->crtc_h));

	/* Set base coordinates */
	DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
			 state->crtc_x, state->crtc_y);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
		     SUN4I_BACKEND_LAYCOOR(state->crtc_x,
					   state->crtc_y));

	return 0;
}
195
/*
 * Configure the backend's input YUV channel for the given plane:
 * load the BT.601 YUV-to-RGB coefficients, flag the layer as YUV-fed
 * and program the input format/pixel sequence. Always returns 0;
 * unsupported formats are only logged (atomic_check is expected to
 * have filtered them out already).
 */
static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
					   int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	const struct drm_format_info *format = fb->format;
	const uint32_t fmt = format->format;
	u32 val = SUN4I_BACKEND_IYUVCTL_EN;
	int i;

	/* Load the YUV->RGB conversion matrix (defined in sun4i_backend.h) */
	for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
		regmap_write(backend->engine.regs,
			     SUN4I_BACKEND_YGCOEF_REG(i),
			     sunxi_bt601_yuv2rgb_coef[i]);

	/*
	 * We should do that only for a single plane, but the
	 * framebuffer's atomic_check has our back on this.
	 */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);

	/* TODO: Add support for the multi-planar YUV formats */
	if (drm_format_info_is_yuv_packed(format) &&
	    drm_format_info_is_yuv_sampling_422(format))
		val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
	else
		DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);

	/*
	 * Allwinner seems to list the pixel sequence from right to left, while
	 * DRM lists it from left to right.
	 */
	switch (fmt) {
	case DRM_FORMAT_YUYV:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
		break;
	case DRM_FORMAT_YVYU:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
		break;
	case DRM_FORMAT_UYVY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
		break;
	case DRM_FORMAT_VYUY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
				 fmt);
	}

	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);

	return 0;
}
252
/*
 * Program a layer's pixel format and global alpha from the plane state.
 * YUV framebuffers are delegated to sun4i_backend_update_yuv_format().
 *
 * Returns 0 on success, -EINVAL if the RGB format has no hardware
 * equivalent.
 */
int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
				       int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	u32 val;
	int ret;

	/* Clear the YUV mode */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);

	/* 16-bit DRM alpha is scaled down to the 8-bit hardware field */
	val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
		val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
			   val);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_format(backend, layer, plane);

	ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}
289
/*
 * Switch a layer to video (frontend-fed) mode and set the format the
 * frontend will deliver.
 *
 * Returns 0 on success, -EINVAL if @fmt has no hardware equivalent.
 */
int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
					int layer, uint32_t fmt)
{
	u32 val;
	int ret;

	ret = sun4i_backend_drm_format_to_layer(fmt, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	/* Feed this layer from the frontend instead of memory */
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN);

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}
313
/*
 * Program the input YUV channel's buffer address and line width for a
 * packed (single plane) YUV framebuffer. The line width register takes
 * a value in bits, hence the pitch * 8. Always returns 0.
 */
static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
					   struct drm_framebuffer *fb,
					   dma_addr_t paddr)
{
	/* TODO: Add support for the multi-planar YUV formats */
	DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);

	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
		     fb->pitches[0] * 8);

	return 0;
}
328
/*
 * Point a layer at the plane's framebuffer memory. The hardware
 * addresses framebuffers in *bits*, so the byte address is shifted
 * left by 3 and split across the 32-bit low register and the per-layer
 * high-bits field. YUV buffers go through the input YUV channel
 * instead. Always returns 0.
 */
int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
				      int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	u32 lo_paddr, hi_paddr;
	dma_addr_t dma_addr;

	/* Set the line width (in bits, hence pitch * 8) */
	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
		     fb->pitches[0] * 8);

	/* Get the start of the displayed memory */
	dma_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
	DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_buffer(backend, fb, dma_addr);

	/* Write the 32 lower bits of the address (in bits) */
	lo_paddr = dma_addr << 3;
	DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
		     lo_paddr);

	/* And the upper bits (bit address >> 32 == byte address >> 29) */
	hi_paddr = dma_addr >> 29;
	DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
			   SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
			   SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));

	return 0;
}
366
367int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
368 struct drm_plane *plane)
369{
370 struct drm_plane_state *state = plane->state;
371 struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state);
372 unsigned int priority = state->normalized_zpos;
373 unsigned int pipe = p_state->pipe;
374
375 DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n",
376 layer, priority, pipe);
377 regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
378 SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK |
379 SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK,
380 SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) |
381 SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority));
382
383 return 0;
384}
385
/*
 * Reset a layer's video-input and YUV flags so a later reuse of the
 * layer starts from a known state.
 */
void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
				 int layer)
{
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
}
394
395static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
396{
397 u16 src_h = state->src_h >> 16;
398 u16 src_w = state->src_w >> 16;
399
400 DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n",
401 src_w, src_h, state->crtc_w, state->crtc_h);
402
403 if ((state->crtc_h != src_h) || (state->crtc_w != src_w))
404 return true;
405
406 return false;
407}
408
/*
 * Decide whether this plane must be fed through the display frontend:
 * the frontend is used when it supports the format, the backend either
 * cannot scan the format out itself or scaling is required, and a
 * frontend was actually found at bind time.
 */
static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
	struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
	struct sun4i_backend *backend = layer->backend;
	uint32_t format = state->fb->format->format;
	uint64_t modifier = state->fb->modifier;

	/* No frontend was matched at bind time (backend->frontend is ERR_PTR) */
	if (IS_ERR(backend->frontend))
		return false;

	if (!sun4i_frontend_format_is_supported(format, modifier))
		return false;

	/* Format only the frontend understands: no choice but to use it */
	if (!sun4i_backend_format_is_supported(format, modifier))
		return true;

	/*
	 * TODO: The backend alone allows 2x and 4x integer scaling, including
	 * support for an alpha component (which the frontend doesn't support).
	 * Use the backend directly instead of the frontend in this case, with
	 * another test to return false.
	 */

	if (sun4i_backend_plane_uses_scaler(state))
		return true;

	/*
	 * Here the format is supported by both the frontend and the backend
	 * and no frontend scaling is required, so use the backend directly.
	 */
	return false;
}
441
442static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
443 bool *uses_frontend)
444{
445 if (sun4i_backend_plane_uses_frontend(state)) {
446 *uses_frontend = true;
447 return true;
448 }
449
450 *uses_frontend = false;
451
452 /* Scaling is not supported without the frontend. */
453 if (sun4i_backend_plane_uses_scaler(state))
454 return false;
455
456 return true;
457}
458
/*
 * Before staging a new configuration, wait (up to 50 ms, polling every
 * 100 us) for the previous manual register load to complete, i.e. for
 * the LOADCTL bit set by sun4i_backend_commit() to clear.
 */
static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
				       struct drm_crtc_state *old_state)
{
	u32 val;

	WARN_ON(regmap_read_poll_timeout(engine->regs,
					 SUN4I_BACKEND_REGBUFFCTL_REG,
					 val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL),
					 100, 50000));
}
469
/*
 * Validate the set of planes assigned to this CRTC against the
 * backend's composition constraints and assign each plane to one of
 * the two hardware pipes.
 *
 * Rejects (-EINVAL) configurations with too many alpha planes, an
 * alpha plane at the lowest z position on affected hardware, more
 * than one YUV plane, or more frontend planes than the frontend can
 * feed. Returns 0 otherwise.
 */
static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
				      struct drm_crtc_state *crtc_state)
{
	struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_device *drm = state->dev;
	struct drm_plane *plane;
	unsigned int num_planes = 0;
	unsigned int num_alpha_planes = 0;
	unsigned int num_frontend_planes = 0;
	unsigned int num_alpha_planes_max = 1;
	unsigned int num_yuv_planes = 0;
	unsigned int current_pipe = 0;
	unsigned int i;

	DRM_DEBUG_DRIVER("Starting checking our planes\n");

	/* Nothing to re-validate if the plane set didn't change */
	if (!crtc_state->planes_changed)
		return 0;

	drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);

		struct sun4i_layer_state *layer_state =
			state_to_sun4i_layer_state(plane_state);
		struct drm_framebuffer *fb = plane_state->fb;

		if (!sun4i_backend_plane_is_supported(plane_state,
						      &layer_state->uses_frontend))
			return -EINVAL;

		if (layer_state->uses_frontend) {
			DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
					 plane->index);
			num_frontend_planes++;
		} else {
			if (fb->format->is_yuv) {
				DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
				num_yuv_planes++;
			}
		}

		DRM_DEBUG_DRIVER("Plane FB format is %p4cc\n",
				 &fb->format->format);
		/* Per-format alpha or a non-opaque global alpha both count */
		if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			num_alpha_planes++;

		DRM_DEBUG_DRIVER("Plane zpos is %d\n",
				 plane_state->normalized_zpos);

		/* Sort our planes by Zpos */
		plane_states[plane_state->normalized_zpos] = plane_state;

		num_planes++;
	}

	/* All our planes were disabled, bail out */
	if (!num_planes)
		return 0;

	/*
	 * The hardware is a bit unusual here.
	 *
	 * Even though it supports 4 layers, it does the composition
	 * in two separate steps.
	 *
	 * The first one is assigning a layer to one of its two
	 * pipes. If more that 1 layer is assigned to the same pipe,
	 * and if pixels overlaps, the pipe will take the pixel from
	 * the layer with the highest priority.
	 *
	 * The second step is the actual alpha blending, that takes
	 * the two pipes as input, and uses the potential alpha
	 * component to do the transparency between the two.
	 *
	 * This two-step scenario makes us unable to guarantee a
	 * robust alpha blending between the 4 layers in all
	 * situations, since this means that we need to have one layer
	 * with alpha at the lowest position of our two pipes.
	 *
	 * However, we cannot even do that on every platform, since
	 * the hardware has a bug where the lowest plane of the lowest
	 * pipe (pipe 0, priority 0), if it has any alpha, will
	 * discard the pixel data entirely and just display the pixels
	 * in the background color (black by default).
	 *
	 * This means that on the affected platforms, we effectively
	 * have only three valid configurations with alpha, all of
	 * them with the alpha being on pipe1 with the lowest
	 * position, which can be 1, 2 or 3 depending on the number of
	 * planes and their zpos.
	 */

	/* For platforms that are not affected by the issue described above. */
	if (backend->quirks->supports_lowest_plane_alpha)
		num_alpha_planes_max++;

	if (num_alpha_planes > num_alpha_planes_max) {
		DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
		return -EINVAL;
	}

	/* We can't have an alpha plane at the lowest position */
	if (!backend->quirks->supports_lowest_plane_alpha &&
	    (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
		return -EINVAL;

	for (i = 1; i < num_planes; i++) {
		struct drm_plane_state *p_state = plane_states[i];
		struct drm_framebuffer *fb = p_state->fb;
		struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);

		/*
		 * The only alpha position is the lowest plane of the
		 * second pipe.
		 */
		if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			current_pipe++;

		s_state->pipe = current_pipe;
	}

	/* We can only have a single YUV plane at a time */
	if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
		DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
		return -EINVAL;
	}

	if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
		DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
			 num_planes, num_alpha_planes, num_frontend_planes,
			 num_yuv_planes);

	return 0;
}
613
614static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine)
615{
616 struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
617 struct sun4i_frontend *frontend = backend->frontend;
618
619 if (!frontend)
620 return;
621
622 /*
623 * In a teardown scenario with the frontend involved, we have
624 * to keep the frontend enabled until the next vblank, and
625 * only then disable it.
626 *
627 * This is due to the fact that the backend will not take into
628 * account the new configuration (with the plane that used to
629 * be fed by the frontend now disabled) until we write to the
630 * commit bit and the hardware fetches the new configuration
631 * during the next vblank.
632 *
633 * So we keep the frontend around in order to prevent any
634 * visual artifacts.
635 */
636 spin_lock(&backend->frontend_lock);
637 if (backend->frontend_teardown) {
638 sun4i_frontend_exit(frontend);
639 backend->frontend_teardown = false;
640 }
641 spin_unlock(&backend->frontend_lock);
642};
643
/*
 * Program the global output size and interlace mode from the CRTC's
 * display mode.
 */
static void sun4i_backend_mode_set(struct sunxi_engine *engine,
				   const struct drm_display_mode *mode)
{
	bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);

	DRM_DEBUG_DRIVER("Updating global size W: %u H: %u\n",
			 mode->hdisplay, mode->vdisplay);

	regmap_write(engine->regs, SUN4I_BACKEND_DISSIZE_REG,
		     SUN4I_BACKEND_DISSIZE(mode->hdisplay, mode->vdisplay));

	regmap_update_bits(engine->regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_ITLMOD_EN,
			   interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);

	DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
			 interlaced ? "on" : "off");
}
662
663static int sun4i_backend_init_sat(struct device *dev) {
664 struct sun4i_backend *backend = dev_get_drvdata(dev);
665 int ret;
666
667 backend->sat_reset = devm_reset_control_get(dev, "sat");
668 if (IS_ERR(backend->sat_reset)) {
669 dev_err(dev, "Couldn't get the SAT reset line\n");
670 return PTR_ERR(backend->sat_reset);
671 }
672
673 ret = reset_control_deassert(backend->sat_reset);
674 if (ret) {
675 dev_err(dev, "Couldn't deassert the SAT reset line\n");
676 return ret;
677 }
678
679 backend->sat_clk = devm_clk_get(dev, "sat");
680 if (IS_ERR(backend->sat_clk)) {
681 dev_err(dev, "Couldn't get our SAT clock\n");
682 ret = PTR_ERR(backend->sat_clk);
683 goto err_assert_reset;
684 }
685
686 ret = clk_prepare_enable(backend->sat_clk);
687 if (ret) {
688 dev_err(dev, "Couldn't enable the SAT clock\n");
689 return ret;
690 }
691
692 return 0;
693
694err_assert_reset:
695 reset_control_assert(backend->sat_reset);
696 return ret;
697}
698
699static int sun4i_backend_free_sat(struct device *dev) {
700 struct sun4i_backend *backend = dev_get_drvdata(dev);
701
702 clk_disable_unprepare(backend->sat_clk);
703 reset_control_assert(backend->sat_reset);
704
705 return 0;
706}
707
708/*
709 * The display backend can take video output from the display frontend, or
710 * the display enhancement unit on the A80, as input for one it its layers.
711 * This relationship within the display pipeline is encoded in the device
712 * tree with of_graph, and we use it here to figure out which backend, if
713 * there are 2 or more, we are currently probing. The number would be in
714 * the "reg" property of the upstream output port endpoint.
715 */
716static int sun4i_backend_of_get_id(struct device_node *node)
717{
718 struct device_node *ep, *remote;
719 struct of_endpoint of_ep;
720
721 /* Input port is 0, and we want the first endpoint. */
722 ep = of_graph_get_endpoint_by_regs(node, 0, -1);
723 if (!ep)
724 return -EINVAL;
725
726 remote = of_graph_get_remote_endpoint(ep);
727 of_node_put(ep);
728 if (!remote)
729 return -EINVAL;
730
731 of_graph_parse_endpoint(remote, &of_ep);
732 of_node_put(remote);
733 return of_ep.id;
734}
735
736/* TODO: This needs to take multiple pipelines into account */
/*
 * Walk our input port's endpoints and return the registered frontend
 * whose node is on the remote end, or ERR_PTR(-EINVAL) if none match.
 */
static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
							  struct device_node *node)
{
	struct device_node *port, *ep, *remote;
	struct sun4i_frontend *frontend;

	port = of_graph_get_port_by_id(node, 0);
	if (!port)
		return ERR_PTR(-EINVAL);

	for_each_available_child_of_node(port, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote)
			continue;
		/*
		 * The reference is dropped here; below we only compare the
		 * pointer value against frontend->node, never dereference it.
		 */
		of_node_put(remote);

		/* does this node match any registered engines? */
		list_for_each_entry(frontend, &drv->frontend_list, list) {
			if (remote == frontend->node) {
				of_node_put(port);
				of_node_put(ep);
				return frontend;
			}
		}
	}
	of_node_put(port);
	return ERR_PTR(-EINVAL);
}
765
/* sunxi_engine callbacks implemented by the display backend. */
static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
	.atomic_begin			= sun4i_backend_atomic_begin,
	.atomic_check			= sun4i_backend_atomic_check,
	.commit				= sun4i_backend_commit,
	.layers_init			= sun4i_layers_init,
	.apply_color_correction		= sun4i_backend_apply_color_correction,
	.disable_color_correction	= sun4i_backend_disable_color_correction,
	.vblank_quirk			= sun4i_backend_vblank_quirk,
	.mode_set			= sun4i_backend_mode_set,
};
776
/* MMIO regmap layout: 32-bit registers at 4-byte stride, up to 0x5800. */
static const struct regmap_config sun4i_backend_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= 0x5800,
};
783
/*
 * Component bind callback: allocate the backend, bring up its reset
 * line and clocks, create the regmap, zero the layer registers,
 * enable the engine and register it with the driver's engine list.
 *
 * Returns 0 on success or a negative errno, unwinding all acquired
 * resources on failure.
 */
static int sun4i_backend_bind(struct device *dev, struct device *master,
			      void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = data;
	struct sun4i_drv *drv = drm->dev_private;
	struct sun4i_backend *backend;
	const struct sun4i_backend_quirks *quirks;
	void __iomem *regs;
	int i, ret;

	backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
	if (!backend)
		return -ENOMEM;
	dev_set_drvdata(dev, backend);
	spin_lock_init(&backend->frontend_lock);

	/*
	 * This assume we have the same DMA constraints for all our the
	 * devices in our pipeline (all the backends, but also the
	 * frontends). This sounds bad, but it has always been the case
	 * for us, and DRM doesn't do per-device allocation either, so
	 * we would need to fix DRM first...
	 *
	 * Always use the first bound backend as the DMA device. While
	 * our device trees always have all backends enabled, some in
	 * the wild may actually have the first one disabled. If both
	 * are enabled, the order in which they are bound is guaranteed
	 * since the driver adds components in order.
	 */
	if (drm_dev_dma_dev(drm) == drm->dev)
		drm_dev_set_dma_dev(drm, dev);

	backend->engine.node = dev->of_node;
	backend->engine.ops = &sun4i_backend_engine_ops;
	/* NOTE(review): assumes engine.id is a signed type — the < 0 check
	 * below relies on it; verify against sunxi_engine's definition. */
	backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
	if (backend->engine.id < 0)
		return backend->engine.id;

	/* A missing frontend is not fatal, only disables frontend features */
	backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node);
	if (IS_ERR(backend->frontend))
		dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	backend->reset = devm_reset_control_get(dev, NULL);
	if (IS_ERR(backend->reset)) {
		dev_err(dev, "Couldn't get our reset line\n");
		return PTR_ERR(backend->reset);
	}

	ret = reset_control_deassert(backend->reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert our reset line\n");
		return ret;
	}

	backend->bus_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(backend->bus_clk)) {
		dev_err(dev, "Couldn't get the backend bus clock\n");
		ret = PTR_ERR(backend->bus_clk);
		goto err_assert_reset;
	}
	/* NOTE(review): clk_prepare_enable() return values are ignored for
	 * the bus, mod and ram clocks throughout this function — consider
	 * checking them. */
	clk_prepare_enable(backend->bus_clk);

	backend->mod_clk = devm_clk_get(dev, "mod");
	if (IS_ERR(backend->mod_clk)) {
		dev_err(dev, "Couldn't get the backend module clock\n");
		ret = PTR_ERR(backend->mod_clk);
		goto err_disable_bus_clk;
	}

	ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
	if (ret) {
		dev_err(dev, "Couldn't set the module clock frequency\n");
		goto err_disable_bus_clk;
	}

	clk_prepare_enable(backend->mod_clk);

	backend->ram_clk = devm_clk_get(dev, "ram");
	if (IS_ERR(backend->ram_clk)) {
		dev_err(dev, "Couldn't get the backend RAM clock\n");
		ret = PTR_ERR(backend->ram_clk);
		goto err_disable_mod_clk;
	}
	clk_prepare_enable(backend->ram_clk);

	/* Only the A33 backend has the extra SAT block to set up */
	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend")) {
		ret = sun4i_backend_init_sat(dev);
		if (ret) {
			dev_err(dev, "Couldn't init SAT resources\n");
			goto err_disable_ram_clk;
		}
	}

	backend->engine.regs = devm_regmap_init_mmio(dev, regs,
						     &sun4i_backend_regmap_config);
	if (IS_ERR(backend->engine.regs)) {
		dev_err(dev, "Couldn't create the backend regmap\n");
		ret = PTR_ERR(backend->engine.regs);
		goto err_disable_ram_clk;
	}

	list_add_tail(&backend->engine.list, &drv->engine_list);

	/*
	 * Many of the backend's layer configuration registers have
	 * undefined default values. This poses a risk as we use
	 * regmap_update_bits in some places, and don't overwrite
	 * the whole register.
	 *
	 * Clear the registers here to have something predictable.
	 */
	for (i = 0x800; i < 0x1000; i += 4)
		regmap_write(backend->engine.regs, i, 0);

	/* Disable registers autoloading */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);

	/* Enable the backend */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
		     SUN4I_BACKEND_MODCTL_DEBE_EN |
		     SUN4I_BACKEND_MODCTL_START_CTL);

	/* Set output selection if needed */
	quirks = of_device_get_match_data(dev);
	if (quirks->needs_output_muxing) {
		/*
		 * We assume there is no dynamic muxing of backends
		 * and TCONs, so we select the backend with same ID.
		 *
		 * While dynamic selection might be interesting, since
		 * the CRTC is tied to the TCON, while the layers are
		 * tied to the backends, this means, we will need to
		 * switch between groups of layers. There might not be
		 * a way to represent this constraint in DRM.
		 */
		regmap_update_bits(backend->engine.regs,
				   SUN4I_BACKEND_MODCTL_REG,
				   SUN4I_BACKEND_MODCTL_OUT_SEL,
				   (backend->engine.id
				    ? SUN4I_BACKEND_MODCTL_OUT_LCD1
				    : SUN4I_BACKEND_MODCTL_OUT_LCD0));
	}

	backend->quirks = quirks;

	return 0;

err_disable_ram_clk:
	clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
	clk_disable_unprepare(backend->bus_clk);
err_assert_reset:
	reset_control_assert(backend->reset);
	return ret;
}
949
/*
 * Component unbind callback: undo sun4i_backend_bind() in reverse
 * order — drop the engine from the list, free the A33 SAT resources
 * if present, disable the clocks and assert the reset line.
 */
static void sun4i_backend_unbind(struct device *dev, struct device *master,
				 void *data)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	list_del(&backend->engine.list);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend"))
		sun4i_backend_free_sat(dev);

	clk_disable_unprepare(backend->ram_clk);
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
	clk_disable_unprepare(backend->bus_clk);
	reset_control_assert(backend->reset);
}
967
/* Component framework callbacks for this backend device. */
static const struct component_ops sun4i_backend_ops = {
	.bind	= sun4i_backend_bind,
	.unbind	= sun4i_backend_unbind,
};
972
/* Register this device as a component; binding happens later. */
static int sun4i_backend_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &sun4i_backend_ops);
}
977
/* Deregister the component added in sun4i_backend_probe(). */
static void sun4i_backend_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sun4i_backend_ops);
}
982
/* A10: backend <-> TCON muxing is done in the backend. */
static const struct sun4i_backend_quirks sun4i_backend_quirks = {
	.needs_output_muxing = true,
};

/* A13: no quirks. */
static const struct sun4i_backend_quirks sun5i_backend_quirks = {
};

/* A31: no quirks. */
static const struct sun4i_backend_quirks sun6i_backend_quirks = {
};

/* A20: backend <-> TCON muxing is done in the backend. */
static const struct sun4i_backend_quirks sun7i_backend_quirks = {
	.needs_output_muxing = true,
};

/* A23/A33: alpha works on the lowest plane (shared by both SoCs). */
static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
	.supports_lowest_plane_alpha = true,
};

/* A80: no quirks. */
static const struct sun4i_backend_quirks sun9i_backend_quirks = {
};
1003
/* Supported SoC compatibles, each paired with its quirks structure. */
static const struct of_device_id sun4i_backend_of_table[] = {
	{
		.compatible = "allwinner,sun4i-a10-display-backend",
		.data = &sun4i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun5i-a13-display-backend",
		.data = &sun5i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun6i-a31-display-backend",
		.data = &sun6i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun7i-a20-display-backend",
		.data = &sun7i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a23-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a33-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun9i-a80-display-backend",
		.data = &sun9i_backend_quirks,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
1036
/* Platform driver glue; registered via module_platform_driver(). */
static struct platform_driver sun4i_backend_platform_driver = {
	.probe		= sun4i_backend_probe,
	.remove		= sun4i_backend_remove,
	.driver		= {
		.name		= "sun4i-backend",
		.of_match_table	= sun4i_backend_of_table,
	},
};
module_platform_driver(sun4i_backend_platform_driver);

MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
MODULE_LICENSE("GPL");