Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2012 Texas Instruments
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
6
7#include <linux/delay.h>
8#include <linux/dma-mapping.h>
9#include <linux/of_graph.h>
10#include <linux/pm_runtime.h>
11
12#include <drm/drm_atomic.h>
13#include <drm/drm_atomic_helper.h>
14#include <drm/drm_crtc.h>
15#include <drm/drm_fb_dma_helper.h>
16#include <drm/drm_fourcc.h>
17#include <drm/drm_framebuffer.h>
18#include <drm/drm_gem_dma_helper.h>
19#include <drm/drm_managed.h>
20#include <drm/drm_modeset_helper_vtables.h>
21#include <drm/drm_print.h>
22#include <drm/drm_vblank.h>
23
24#include "tilcdc_drv.h"
25#include "tilcdc_regs.h"
26
/* Minimum margin before the next vblank to still update the scanout safely */
#define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000
/* Size in bytes of the dummy palette that must be loaded before rastering */
#define TILCDC_PALETTE_SIZE 32
/* Mandatory value of the first 16-bit palette entry */
#define TILCDC_PALETTE_FIRST_ENTRY 0x4000

/* Driver state for the single LCDC CRTC. */
struct tilcdc_crtc {
	struct drm_crtc base;

	struct tilcdc_plane *primary;		/* the CRTC's primary plane */
	struct drm_pending_vblank_event *event;	/* pending page-flip event */
	struct mutex enable_lock;	/* serializes enable/disable/shutdown */
	bool enabled;			/* raster is currently running */
	bool shutdown;			/* set on shutdown; blocks re-enable */
	wait_queue_head_t frame_done_wq; /* woken by the FRAME_DONE irq */
	bool frame_done;		/* wait condition for frame_done_wq */
	spinlock_t irq_lock;		/* protects scanout/vblank irq state */

	unsigned int lcd_fck_rate;	/* cached LCDC functional clock rate (Hz) */

	ktime_t last_vblank;		/* time of last vblank or raster enable */
	unsigned int hvtotal_us;	/* nominal frame period in microseconds */

	struct drm_framebuffer *next_fb; /* fb to latch at next end-of-frame */

	int sync_lost_count;		/* consecutive SYNC_LOST irqs (rev 2) */
	bool frame_intact;		/* no sync loss since last end-of-frame */
	struct work_struct recover_work; /* disable/enable recovery cycle */

	/* Dummy palette (see tilcdc_crtc_load_palette()). */
	dma_addr_t palette_dma_handle;
	u16 *palette_base;
	struct completion palette_loaded;
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
59
/*
 * set_scanout() - Point the LCDC DMA engine at a framebuffer.
 *
 * Computes the DMA start address from the GEM object's base plus the fb
 * offset and the current x/y panning, and the ceiling from the visible
 * height. Callers either hold tilcdc_crtc->irq_lock or run before the
 * raster is enabled.
 */
static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);
	struct drm_gem_dma_object *gem;
	dma_addr_t start, end;
	u64 dma_base_and_ceiling;

	gem = drm_fb_dma_get_gem_obj(fb, 0);

	/* First pixel of the (panned) scanout region. */
	start = gem->dma_addr + fb->offsets[0] +
		crtc->y * fb->pitches[0] +
		crtc->x * fb->format->cpp[0];

	end = start + (crtc->mode.vdisplay * fb->pitches[0]);

	/* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
	 * with a single instruction, if available. This should make it more
	 * unlikely that LCDC would fetch the DMA addresses in the middle of
	 * an update.
	 */
	if (priv->rev == 1)
		end -= 1;	/* rev 1 programs the ceiling as end - 1 */

	dma_base_and_ceiling = (u64)end << 32 | start;
	tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
}
87
/*
 * The driver currently only supports only true color formats. For
 * true color the palette block is bypassed, but a 32 byte palette
 * should still be loaded. The first 16-bit entry must be 0x4000 while
 * all other entries must be zeroed.
 *
 * The sequence below points the DMA engine at the palette buffer, runs
 * one palette-only DMA pass with the raster enabled, waits (up to 50 ms)
 * for the palette-loaded irq, then disables the raster again. Called
 * from tilcdc_crtc_set_mode() before the raster is started for scanout.
 */
static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);
	int ret;

	reinit_completion(&tilcdc_crtc->palette_loaded);

	/* Tell the LCDC where the palette is located. */
	tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
		     tilcdc_crtc->palette_dma_handle);
	tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
		     (u32) tilcdc_crtc->palette_dma_handle +
		     TILCDC_PALETTE_SIZE - 1);

	/* Set dma load mode for palette loading only. */
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* Enable DMA Palette Loaded Interrupt */
	if (priv->rev == 1)
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);

	/* Enable LCDC DMA and wait for palette to be loaded. */
	tilcdc_clear_irqstatus(dev, 0xffffffff);
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
					  msecs_to_jiffies(50));
	if (ret == 0)
		drm_err(dev, "%s: Palette loading timeout", __func__);

	/* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	if (priv->rev == 1)
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
}
137
/*
 * tilcdc_crtc_enable_irqs() - Enable the sync-lost, frame-done and FIFO
 * underflow interrupts, using the enable register layout of the LCDC
 * revision at hand. Stale status bits are cleared first.
 */
static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);

	tilcdc_clear_irqstatus(dev, 0xffffffff);

	if (priv->rev == 1) {
		/* Rev 1 keeps the irq enable bits in the raster control reg. */
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			LCDC_V1_UNDERFLOW_INT_ENA);
	} else {
		/* Rev 2 has a dedicated set/clear enable register pair. */
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
			LCDC_V2_UNDERFLOW_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}
154
/*
 * tilcdc_crtc_disable_irqs() - Mask every interrupt this driver may have
 * enabled, including the palette-load and end-of-frame interrupts.
 */
static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);

	/* disable irqs that we might have enabled: */
	if (priv->rev == 1) {
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
		tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
			LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}
173
/*
 * reset() - Pulse the LCDC main software reset.
 *
 * Only rev 2 has a software reset; on other revisions this is a no-op.
 */
static void reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);

	if (priv->rev != 2)
		return;

	tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
	usleep_range(250, 1000);	/* let the reset propagate */
	tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}
186
/*
 * Calculate the percentage difference between the requested pixel clock rate
 * and the effective rate resulting from calculating the clock divider value.
 *
 * Returns the (truncated) absolute difference as a percentage of the
 * requested rate. A requested rate below 100 Hz would make the divisor
 * round down to zero; report that degenerate case as a 100% difference
 * instead of dividing by zero.
 */
static unsigned int tilcdc_pclk_diff(unsigned long rate,
				     unsigned long real_rate)
{
	int r = rate / 100, rr = real_rate / 100;

	/* Guard against division by zero for a (bogus) tiny requested rate. */
	if (r == 0)
		return 100;

	return (unsigned int)(abs(((rr - r) * 100) / r));
}
198
/*
 * tilcdc_crtc_set_clk() - Program the pixel clock for the current mode.
 *
 * First tries to set the functional clock to pixel clock * 2. If that
 * fails or lands more than 5% off, falls back to recomputing the divider
 * from the rate the clock actually runs at. Finally programs the divisor
 * register and, on rev 2, enables the core/DMA/LIDD clocks.
 */
static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long clk_rate, real_pclk_rate, pclk_rate;
	unsigned int clkdiv;
	int ret;

	clkdiv = 2; /* first try using a standard divider of 2 */

	/* mode.clock is in KHz, set_rate wants parameter in Hz */
	pclk_rate = crtc->mode.clock * 1000;

	ret = clk_set_rate(priv->clk, pclk_rate * clkdiv);
	clk_rate = clk_get_rate(priv->clk);
	real_pclk_rate = clk_rate / clkdiv;
	if (ret < 0 || tilcdc_pclk_diff(pclk_rate, real_pclk_rate) > 5) {
		/*
		 * If we fail to set the clock rate (some architectures don't
		 * use the common clock framework yet and may not implement
		 * all the clk API calls for every clock), try the next best
		 * thing: adjusting the clock divider, unless clk_get_rate()
		 * failed as well.
		 */
		if (!clk_rate) {
			/* Nothing more we can do. Just bail out. */
			drm_err(dev,
				"failed to set the pixel clock - unable to read current lcdc clock rate\n");
			return;
		}

		clkdiv = DIV_ROUND_CLOSEST(clk_rate, pclk_rate);

		/*
		 * Emit a warning if the real clock rate resulting from the
		 * calculated divider differs much from the requested rate.
		 *
		 * 5% is an arbitrary value - LCDs are usually quite tolerant
		 * about pixel clock rates.
		 */
		real_pclk_rate = clk_rate / clkdiv;

		if (tilcdc_pclk_diff(pclk_rate, real_pclk_rate) > 5) {
			drm_warn(dev,
				 "effective pixel clock rate (%luHz) differs from the requested rate (%luHz)\n",
				 real_pclk_rate, pclk_rate);
		}
	}

	/* Cache the rate for change detection in tilcdc_crtc_update_clk(). */
	tilcdc_crtc->lcd_fck_rate = clk_rate;

	DBG("lcd_clk=%u, mode clock=%d, div=%u",
	    tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);

	/* Configure the LCD clock divisor. */
	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
		     LCDC_RASTER_MODE);

	if (priv->rev == 2)
		tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
			   LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
			   LCDC_V2_CORE_CLK_EN);
}
263
264static uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
265{
266 return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
267 mode->clock);
268}
269
/*
 * tilcdc_crtc_set_mode() - Program the LCDC for the CRTC's adjusted mode.
 *
 * Configures DMA burst/FIFO settings, horizontal and vertical timings,
 * the display/pixel format, clock polarity bits and the pixel clock,
 * loads the mandatory palette, then points the DMA at the primary
 * plane's framebuffer. Called from tilcdc_crtc_enable() with the raster
 * still disabled.
 */
static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);
	uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct drm_framebuffer *fb = crtc->primary->state->fb;

	if (WARN_ON(!fb))
		return;

	/* Configure the Burst Size and fifo threshold of DMA: */
	reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
	/* Use 16 bit DMA burst size by default */
	reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);

	if (priv->fifo_th) {
		/* DT-provided threshold; the register field is log2-encoded */
		int fifo_th_val = ilog2(priv->fifo_th) - 3;

		reg |= (fifo_th_val << 8);
	}
	tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);

	/* Configure timings: */
	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;

	DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
	    mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);

	/* Set AC Bias Period and Number of Transitions per Interrupt: */
	reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
	/* Use 255 AC Bias Pin Frequency by default */
	reg |= LCDC_AC_BIAS_FREQUENCY(255);

	/*
	 * subtract one from hfp, hbp, hsw because the hardware uses
	 * a value of 0 as 1
	 */
	if (priv->rev == 2) {
		/* clear bits we're going to set */
		reg &= ~0x78000033;
		/* rev 2 carries the upper bits of the timing fields here */
		reg |= ((hfp-1) & 0x300) >> 8;
		reg |= ((hbp-1) & 0x300) >> 4;
		reg |= ((hsw-1) & 0x3c0) << 21;
	}
	tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);

	/* Lower bits of width/porch/sync-width timing fields. */
	reg = (((mode->hdisplay >> 4) - 1) << 4) |
		(((hbp-1) & 0xff) << 24) |
		(((hfp-1) & 0xff) << 16) |
		(((hsw-1) & 0x3f) << 10);
	if (priv->rev == 2)
		reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
	tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);

	reg = ((mode->vdisplay - 1) & 0x3ff) |
		((vbp & 0xff) << 24) |
		((vfp & 0xff) << 16) |
		(((vsw-1) & 0x3f) << 10);
	tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);

	/*
	 * be sure to set Bit 10 for the V2 LCDC controller,
	 * otherwise limited to 1024 pixels width, stopping
	 * 1920x1080 being supported.
	 */
	if (priv->rev == 2) {
		if ((mode->vdisplay - 1) & 0x400) {
			tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG,
				LCDC_LPP_B10);
		} else {
			tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG,
				LCDC_LPP_B10);
		}
	}

	/* Configure display type: */
	reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
		~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
		  LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
		  0x000ff000 /* Palette Loading Delay bits */);
	reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
	if (priv->rev == 2) {
		switch (fb->format->format) {
		case DRM_FORMAT_BGR565:
		case DRM_FORMAT_RGB565:
			break;
		case DRM_FORMAT_XBGR8888:
		case DRM_FORMAT_XRGB8888:
			reg |= LCDC_V2_TFT_24BPP_UNPACK;
			fallthrough;
		case DRM_FORMAT_BGR888:
		case DRM_FORMAT_RGB888:
			reg |= LCDC_V2_TFT_24BPP_MODE;
			break;
		default:
			drm_err(dev, "invalid pixel format\n");
			return;
		}
	}
	/* Use 128 FIFO DMA Request Delay by default */
	reg |= 128 << 12;
	tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);

	/*
	 * NOTE(review): mode->flags holds DRM_MODE_FLAG_* bits, yet the two
	 * tests below compare it for *equality* against DRM_BUS_FLAG_*
	 * constants. This looks like it should be a bitwise test of the
	 * connector's display_info.bus_flags instead — confirm against the
	 * attached bridge/panel code before relying on these polarities.
	 */
	if (mode->flags == DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);

	tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
	if (mode->flags == DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);

	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);

	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);

	tilcdc_crtc_set_clk(crtc);

	tilcdc_crtc_load_palette(crtc);

	set_scanout(crtc, fb);

	drm_mode_copy(&crtc->hwmode, &crtc->state->adjusted_mode);

	/* Cache the frame period for the vblank-proximity check in update_fb */
	tilcdc_crtc->hvtotal_us =
		tilcdc_mode_hvtotal(&crtc->hwmode);
}
412
/*
 * tilcdc_crtc_enable() - Power up, configure and start the CRTC.
 *
 * No-op when already enabled or after shutdown. Takes a pm_runtime
 * reference that tilcdc_crtc_off() drops.
 */
static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long flags;

	mutex_lock(&tilcdc_crtc->enable_lock);
	if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}

	pm_runtime_get_sync(dev->dev);

	reset(crtc);

	tilcdc_crtc_set_mode(crtc);

	tilcdc_crtc_enable_irqs(dev);

	/* Single buffer, normal data-only load mode for scanout. */
	tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* There is no real chance for a race here as the time stamp
	 * is taken before the raster DMA is started. The spin-lock is
	 * taken to have a memory barrier after taking the time-stamp
	 * and to avoid a context switch between taking the stamp and
	 * enabling the raster.
	 */
	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
	tilcdc_crtc->last_vblank = ktime_get();
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

	drm_crtc_vblank_on(crtc);

	tilcdc_crtc->enabled = true;
	mutex_unlock(&tilcdc_crtc->enable_lock);
}
454
/* drm_crtc_helper_funcs.atomic_enable: delegate to tilcdc_crtc_enable(). */
static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	tilcdc_crtc_enable(crtc);
}
460
/*
 * tilcdc_crtc_off() - Stop the raster and power the CRTC down.
 * @shutdown: when true, additionally mark the CRTC as shut down so it
 *	cannot be re-enabled (used by tilcdc_crtc_shutdown()).
 *
 * Waits up to 500 ms for the FRAME_DONE irq so the LCDC finishes the
 * current frame before clocks are dropped, then delivers any pending
 * vblank event and drops the pm_runtime reference taken at enable time.
 */
static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	int ret;

	mutex_lock(&tilcdc_crtc->enable_lock);
	if (shutdown)
		tilcdc_crtc->shutdown = true;
	if (!tilcdc_crtc->enabled) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}
	tilcdc_crtc->frame_done = false;
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	/*
	 * Wait for framedone irq which will still come before putting
	 * things to sleep..
	 */
	ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
				 tilcdc_crtc->frame_done,
				 msecs_to_jiffies(500));
	if (ret == 0)
		drm_err(dev, "%s: timeout waiting for framedone\n",
			__func__);

	drm_crtc_vblank_off(crtc);

	/* Deliver the pending event now that vblanks are off. */
	spin_lock_irq(&crtc->dev->event_lock);

	if (crtc->state->event) {
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}

	spin_unlock_irq(&crtc->dev->event_lock);

	tilcdc_crtc_disable_irqs(dev);

	pm_runtime_put_sync(dev->dev);

	tilcdc_crtc->enabled = false;
	mutex_unlock(&tilcdc_crtc->enable_lock);
}
506
/* Disable the CRTC without marking it as permanently shut down. */
static void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
	tilcdc_crtc_off(crtc, false);
}
511
/* drm_crtc_helper_funcs.atomic_disable: delegate to tilcdc_crtc_disable(). */
static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	tilcdc_crtc_disable(crtc);
}
517
518static void tilcdc_crtc_atomic_flush(struct drm_crtc *crtc,
519 struct drm_atomic_state *state)
520{
521 if (!crtc->state->event)
522 return;
523
524 spin_lock_irq(&crtc->dev->event_lock);
525 drm_crtc_send_vblank_event(crtc, crtc->state->event);
526 crtc->state->event = NULL;
527 spin_unlock_irq(&crtc->dev->event_lock);
528}
529
/* Permanently disable the CRTC; it will refuse any further enable. */
void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
{
	tilcdc_crtc_off(crtc, true);
}
534
535static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
536{
537 return crtc->state && crtc->state->enable && crtc->state->active;
538}
539
/*
 * tilcdc_crtc_recover_work() - Recover the CRTC after a sync-lost flood.
 *
 * Scheduled from the irq handler (rev 2 sync-lost path); performs a full
 * disable/enable cycle under the CRTC modeset lock if the CRTC is still
 * enabled and active.
 */
static void tilcdc_crtc_recover_work(struct work_struct *work)
{
	struct tilcdc_crtc *tilcdc_crtc =
		container_of(work, struct tilcdc_crtc, recover_work);
	struct drm_crtc *crtc = &tilcdc_crtc->base;

	drm_info(crtc->dev, "%s: Reset CRTC", __func__);

	drm_modeset_lock(&crtc->mutex, NULL);

	if (!tilcdc_crtc_is_on(crtc))
		goto out;

	tilcdc_crtc_disable(crtc);
	tilcdc_crtc_enable(crtc);
out:
	drm_modeset_unlock(&crtc->mutex);
}
558
/*
 * tilcdc_crtc_destroy() - drmm action run at device teardown.
 *
 * Shuts the CRTC down, flushes the driver workqueue (pending recover
 * work) and releases the reference on the CRTC's OF port node.
 */
static void tilcdc_crtc_destroy(struct drm_device *dev, void *data)
{
	struct tilcdc_drm_private *priv = (struct tilcdc_drm_private *)data;

	tilcdc_crtc_shutdown(priv->crtc);

	flush_workqueue(priv->wq);

	of_node_put(priv->crtc->port);
}
569
/*
 * tilcdc_crtc_update_fb() - Queue a page flip to @fb.
 * @event: vblank event to deliver once the flip has happened (stored in
 *	tilcdc_crtc->event and sent from the irq handler).
 *
 * Returns -EBUSY if a flip is already pending. If the next vblank is
 * expected within TILCDC_VBLANK_SAFETY_THRESHOLD_US, programming the new
 * scanout address is deferred to the end-of-frame irq (via next_fb) to
 * avoid racing the hardware's address fetch; otherwise it is programmed
 * immediately.
 */
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
		struct drm_framebuffer *fb,
		struct drm_pending_vblank_event *event)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;

	if (tilcdc_crtc->event) {
		drm_err(dev, "already pending page flip!\n");
		return -EBUSY;
	}

	tilcdc_crtc->event = event;

	mutex_lock(&tilcdc_crtc->enable_lock);

	if (tilcdc_crtc->enabled) {
		unsigned long flags;
		ktime_t next_vblank;
		s64 tdiff;

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		/* Estimate when the next vblank will occur. */
		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
					   tilcdc_crtc->hvtotal_us);
		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
			tilcdc_crtc->next_fb = fb;
		else
			set_scanout(crtc, fb);

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
	}

	mutex_unlock(&tilcdc_crtc->enable_lock);

	return 0;
}
609
610static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
611 const struct drm_display_mode *mode,
612 struct drm_display_mode *adjusted_mode)
613{
614 /*
615 * tilcdc does not generate VESA-compliant sync but aligns
616 * VS on the second edge of HS instead of first edge.
617 * We use adjusted_mode, to fixup sync by aligning both rising
618 * edges and add HSKEW offset to fix the sync.
619 */
620 adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
621 adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
622
623 if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
624 adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
625 adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
626 } else {
627 adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
628 adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
629 }
630
631 return true;
632}
633
634static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
635 struct drm_atomic_state *state)
636{
637 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
638 crtc);
639 /* If we are not active we don't care */
640 if (!crtc_state->active)
641 return 0;
642
643 return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
644}
645
/*
 * tilcdc_crtc_enable_vblank() - Unmask the end-of-frame (vblank) irq.
 *
 * Clears any stale end-of-frame status first; the enable bit lives in
 * different registers on rev 1 and rev 2.
 */
static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

	tilcdc_clear_irqstatus(dev, LCDC_END_OF_FRAME0);

	if (priv->rev == 1)
		tilcdc_set(dev, LCDC_DMA_CTRL_REG,
			   LCDC_V1_END_OF_FRAME_INT_ENA);
	else
		tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG,
			   LCDC_V2_END_OF_FRAME0_INT_ENA);

	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

	return 0;
}
668
/*
 * tilcdc_crtc_disable_vblank() - Mask the end-of-frame (vblank) irq.
 *
 * NOTE(review): the rev 2 path does a read-modify-write on the SET
 * register here, while tilcdc_crtc_disable_irqs() masks the same irq by
 * writing LCDC_INT_ENABLE_CLR_REG — confirm both are valid ways to
 * disable this interrupt on the hardware.
 */
static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

	if (priv->rev == 1)
		tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
			     LCDC_V1_END_OF_FRAME_INT_ENA);
	else
		tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
			     LCDC_V2_END_OF_FRAME0_INT_ENA);

	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
}
687
/*
 * tilcdc_crtc_reset() - drm_crtc_funcs.reset hook.
 *
 * Resets the atomic software state and, if the raster was left running
 * for some reason (perhaps by firmware), drains it: enable the
 * FRAME_DONE irq, stop the raster and wait (up to 500 ms) for the final
 * frame to complete.
 */
static void tilcdc_crtc_reset(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	int ret;

	drm_atomic_helper_crtc_reset(crtc);

	/* Turn the raster off if it for some reason is on. */
	pm_runtime_get_sync(dev->dev);
	if (tilcdc_read(dev, LCDC_RASTER_CTRL_REG) & LCDC_RASTER_ENABLE) {
		/* Enable DMA Frame Done Interrupt */
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_FRAME_DONE);
		tilcdc_clear_irqstatus(dev, 0xffffffff);

		tilcdc_crtc->frame_done = false;
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

		ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
					 tilcdc_crtc->frame_done,
					 msecs_to_jiffies(500));
		if (ret == 0)
			drm_err(dev, "%s: timeout waiting for framedone\n",
				__func__);
	}
	pm_runtime_put_sync(dev->dev);
}
715
/* CRTC vtable: atomic helpers plus tilcdc-specific reset and vblank ops. */
static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
	.set_config	= drm_atomic_helper_set_config,
	.page_flip	= drm_atomic_helper_page_flip,
	.reset		= tilcdc_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank	= tilcdc_crtc_enable_vblank,
	.disable_vblank	= tilcdc_crtc_disable_vblank,
};
725
726static enum drm_mode_status
727tilcdc_crtc_mode_valid(struct drm_crtc *crtc,
728 const struct drm_display_mode *mode)
729{
730 struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(crtc->dev);
731 unsigned int bandwidth;
732 uint32_t hbp, hfp, hsw, vbp, vfp, vsw;
733
734 /*
735 * check to see if the width is within the range that
736 * the LCD Controller physically supports
737 */
738 if (mode->hdisplay > priv->max_width)
739 return MODE_VIRTUAL_X;
740
741 /* width must be multiple of 16 */
742 if (mode->hdisplay & 0xf)
743 return MODE_VIRTUAL_X;
744
745 if (mode->vdisplay > 2048)
746 return MODE_VIRTUAL_Y;
747
748 DBG("Processing mode %dx%d@%d with pixel clock %d",
749 mode->hdisplay, mode->vdisplay,
750 drm_mode_vrefresh(mode), mode->clock);
751
752 hbp = mode->htotal - mode->hsync_end;
753 hfp = mode->hsync_start - mode->hdisplay;
754 hsw = mode->hsync_end - mode->hsync_start;
755 vbp = mode->vtotal - mode->vsync_end;
756 vfp = mode->vsync_start - mode->vdisplay;
757 vsw = mode->vsync_end - mode->vsync_start;
758
759 if ((hbp-1) & ~0x3ff) {
760 DBG("Pruning mode: Horizontal Back Porch out of range");
761 return MODE_HBLANK_WIDE;
762 }
763
764 if ((hfp-1) & ~0x3ff) {
765 DBG("Pruning mode: Horizontal Front Porch out of range");
766 return MODE_HBLANK_WIDE;
767 }
768
769 if ((hsw-1) & ~0x3ff) {
770 DBG("Pruning mode: Horizontal Sync Width out of range");
771 return MODE_HSYNC_WIDE;
772 }
773
774 if (vbp & ~0xff) {
775 DBG("Pruning mode: Vertical Back Porch out of range");
776 return MODE_VBLANK_WIDE;
777 }
778
779 if (vfp & ~0xff) {
780 DBG("Pruning mode: Vertical Front Porch out of range");
781 return MODE_VBLANK_WIDE;
782 }
783
784 if ((vsw-1) & ~0x3f) {
785 DBG("Pruning mode: Vertical Sync Width out of range");
786 return MODE_VSYNC_WIDE;
787 }
788
789 /*
790 * some devices have a maximum allowed pixel clock
791 * configured from the DT
792 */
793 if (mode->clock > priv->max_pixelclock) {
794 DBG("Pruning mode: pixel clock too high");
795 return MODE_CLOCK_HIGH;
796 }
797
798 /*
799 * some devices further limit the max horizontal resolution
800 * configured from the DT
801 */
802 if (mode->hdisplay > priv->max_width)
803 return MODE_BAD_WIDTH;
804
805 /* filter out modes that would require too much memory bandwidth: */
806 bandwidth = mode->hdisplay * mode->vdisplay *
807 drm_mode_vrefresh(mode);
808 if (bandwidth > priv->max_bandwidth) {
809 DBG("Pruning mode: exceeds defined bandwidth limit");
810 return MODE_BAD;
811 }
812
813 return MODE_OK;
814}
815
/* Atomic CRTC helper hooks wired to the tilcdc implementations above. */
static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
	.mode_valid	= tilcdc_crtc_mode_valid,
	.mode_fixup	= tilcdc_crtc_mode_fixup,
	.atomic_check	= tilcdc_crtc_atomic_check,
	.atomic_enable	= tilcdc_crtc_atomic_enable,
	.atomic_disable	= tilcdc_crtc_atomic_disable,
	.atomic_flush	= tilcdc_crtc_atomic_flush,
};
824
/*
 * tilcdc_crtc_update_clk() - React to a functional-clock rate change.
 *
 * If the cached lcd_fck_rate no longer matches the clock's current rate
 * and the CRTC is on, performs a disable / set-clk / enable cycle under
 * the CRTC modeset lock to re-derive the pixel clock divider.
 */
void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	drm_modeset_lock(&crtc->mutex, NULL);
	if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
		if (tilcdc_crtc_is_on(crtc)) {
			/* Keep the device powered across the cycle. */
			pm_runtime_get_sync(dev->dev);
			tilcdc_crtc_disable(crtc);

			tilcdc_crtc_set_clk(crtc);

			tilcdc_crtc_enable(crtc);
			pm_runtime_put_sync(dev->dev);
		}
	}
	drm_modeset_unlock(&crtc->mutex);
}
845
/* Consecutive SYNC_LOST irqs tolerated before scheduling a full recovery */
#define SYNC_LOST_COUNT_LIMIT 50

/*
 * tilcdc_crtc_irq() - Main LCDC interrupt handler.
 *
 * Handles end-of-frame (vblank timestamp, deferred scanout update,
 * pending flip event delivery), FIFO underflow, palette-load completion,
 * sync-lost (with flood recovery on rev 2) and frame-done events.
 */
irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);
	uint32_t stat, reg;

	stat = tilcdc_read_irqstatus(dev);
	tilcdc_clear_irqstatus(dev, stat);

	if (stat & LCDC_END_OF_FRAME0) {
		bool skip_event = false;
		ktime_t now;

		now = ktime_get();

		spin_lock(&tilcdc_crtc->irq_lock);

		tilcdc_crtc->last_vblank = now;

		/* Apply a scanout update that was deferred near vblank. */
		if (tilcdc_crtc->next_fb) {
			set_scanout(crtc, tilcdc_crtc->next_fb);
			tilcdc_crtc->next_fb = NULL;
			skip_event = true;
		}

		spin_unlock(&tilcdc_crtc->irq_lock);

		drm_crtc_handle_vblank(crtc);

		/*
		 * If the flip was only just programmed above, keep the event
		 * pending until the frame it completes on; otherwise send it.
		 */
		if (!skip_event) {
			struct drm_pending_vblank_event *event;

			spin_lock(&dev->event_lock);

			event = tilcdc_crtc->event;
			tilcdc_crtc->event = NULL;
			if (event)
				drm_crtc_send_vblank_event(crtc, event);

			spin_unlock(&dev->event_lock);
		}

		if (tilcdc_crtc->frame_intact)
			tilcdc_crtc->sync_lost_count = 0;
		else
			tilcdc_crtc->frame_intact = true;
	}

	if (stat & LCDC_FIFO_UNDERFLOW)
		drm_err_ratelimited(dev, "%s(0x%08x): FIFO underflow",
				    __func__, stat);

	if (stat & LCDC_PL_LOAD_DONE) {
		/* Wake tilcdc_crtc_load_palette() and mask the irq again. */
		complete(&tilcdc_crtc->palette_loaded);
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_PL_INT_ENA);
		else
			tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
				     LCDC_V2_PL_INT_ENA);
	}

	if (stat & LCDC_SYNC_LOST) {
		drm_err_ratelimited(dev, "%s(0x%08x): Sync lost",
				    __func__, stat);
		tilcdc_crtc->frame_intact = false;
		if (priv->rev == 1) {
			/* Rev 1: bounce the raster to resynchronize. */
			reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
			if (reg & LCDC_RASTER_ENABLE) {
				tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
					     LCDC_RASTER_ENABLE);
				tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
					   LCDC_RASTER_ENABLE);
			}
		} else {
			/*
			 * Rev 2: after too many consecutive sync losses,
			 * mask the irq and schedule a full CRTC reset.
			 */
			if (tilcdc_crtc->sync_lost_count++ >
			    SYNC_LOST_COUNT_LIMIT) {
				drm_err(dev,
					"%s(0x%08x): Sync lost flood detected, recovering",
					__func__, stat);
				queue_work(system_wq,
					   &tilcdc_crtc->recover_work);
				tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
					     LCDC_SYNC_LOST);
				tilcdc_crtc->sync_lost_count = 0;
			}
		}
	}

	if (stat & LCDC_FRAME_DONE) {
		/* Wake the waiters in tilcdc_crtc_off()/tilcdc_crtc_reset(). */
		tilcdc_crtc->frame_done = true;
		wake_up(&tilcdc_crtc->frame_done_wq);
		/* rev 1 lcdc appears to hang if irq is not disabled here */
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_FRAME_DONE_INT_ENA);
	}

	/* For revision 2 only */
	if (priv->rev == 2) {
		/* Indicate to LCDC that the interrupt service routine has
		 * completed, see 13.3.6.1.6 in AM335x TRM.
		 */
		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
	}

	return IRQ_HANDLED;
}
957
/*
 * tilcdc_crtc_create() - Allocate and register the single tilcdc CRTC.
 *
 * Sets up the primary plane, the drmm-managed CRTC, a drmm teardown
 * action, the mandatory 32-byte palette buffer (first entry 0x4000, rest
 * zeroed via __GFP_ZERO) and the driver's locks/waitqueue/work items,
 * then attaches the helper functions.
 *
 * Returns 0 on success or a negative error code.
 */
int tilcdc_crtc_create(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = ddev_to_tilcdc_priv(dev);
	struct tilcdc_crtc *tilcdc_crtc;
	struct tilcdc_plane *primary;
	struct drm_crtc *crtc;
	int ret;

	primary = tilcdc_plane_init(dev);
	if (IS_ERR(primary)) {
		drm_err(dev, "Failed to initialize plane: %pe\n", primary);
		return PTR_ERR(primary);
	}

	tilcdc_crtc = drmm_crtc_alloc_with_planes(dev, struct tilcdc_crtc, base,
						  &primary->base,
						  NULL,
						  &tilcdc_crtc_funcs,
						  "tilcdc crtc");
	if (IS_ERR(tilcdc_crtc)) {
		drm_err(dev, "Failed to init CRTC: %pe\n", tilcdc_crtc);
		return PTR_ERR(tilcdc_crtc);
	}

	tilcdc_crtc->primary = primary;
	priv->crtc = &tilcdc_crtc->base;
	/* Ensure the CRTC is shut down cleanly at device teardown. */
	ret = drmm_add_action_or_reset(dev, tilcdc_crtc_destroy, priv);
	if (ret)
		return ret;

	init_completion(&tilcdc_crtc->palette_loaded);
	/* Managed allocation; freed automatically with the device. */
	tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
					TILCDC_PALETTE_SIZE,
					&tilcdc_crtc->palette_dma_handle,
					GFP_KERNEL | __GFP_ZERO);
	if (!tilcdc_crtc->palette_base)
		return -ENOMEM;
	*tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY;

	crtc = &tilcdc_crtc->base;

	mutex_init(&tilcdc_crtc->enable_lock);

	init_waitqueue_head(&tilcdc_crtc->frame_done_wq);

	spin_lock_init(&tilcdc_crtc->irq_lock);
	INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);

	drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);

	return 0;
}