/*
 * Copyright (C) 2014 Free Electrons
 * Copyright (C) 2014 Atmel
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

#include "atmel_hlcdc_dc.h"
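/*
 * Flip work callback: called from the layer workqueue once a framebuffer
 * flip has been retired, to drop the framebuffer reference and free the
 * flip descriptor.
 */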
static void
atmel_hlcdc_layer_fb_flip_release(struct drm_flip_work *work, void *val)
{
	struct atmel_hlcdc_layer_fb_flip *flip = val;

	if (flip->fb)
		drm_framebuffer_unreference(flip->fb);
	kfree(flip);
}
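/*
 * Destroy a flip that was never handed over to the flip work: release the
 * framebuffer reference and free both the work task and the flip itself.
 */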
static void
atmel_hlcdc_layer_fb_flip_destroy(struct atmel_hlcdc_layer_fb_flip *flip)
{
	if (flip->fb)
		drm_framebuffer_unreference(flip->fb);
	kfree(flip->task);
	kfree(flip);
}
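/*
 * Release the DMA descriptors attached to a flip and queue the flip on the
 * garbage collector flip work, so that the framebuffer reference is dropped
 * from a sleepable context.
 */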
static void
atmel_hlcdc_layer_fb_flip_release_queue(struct atmel_hlcdc_layer *layer,
					struct atmel_hlcdc_layer_fb_flip *flip)
{
	int i;

	if (!flip)
		return;

	for (i = 0; i < layer->max_planes; i++) {
		if (!flip->dscrs[i])
			break;

		flip->dscrs[i]->status = 0;
		flip->dscrs[i] = NULL;
	}

	drm_flip_work_queue_task(&layer->gc, flip->task);
	drm_flip_work_commit(&layer->gc, layer->wq);
}
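/*
 * Reset an update slot: clear the updated-config bitmap and the config
 * values, and release the framebuffer flip attached to the slot, if any.
 */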
static void atmel_hlcdc_layer_update_reset(struct atmel_hlcdc_layer *layer,
					   int id)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_update_slot *slot;

	if (id < 0 || id > 1)
		return;

	slot = &upd->slots[id];
	bitmap_clear(slot->updated_configs, 0, layer->desc->nconfigs);
	memset(slot->configs, 0,
	       sizeof(*slot->configs) * layer->desc->nconfigs);

	if (slot->fb_flip) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, slot->fb_flip);
		slot->fb_flip = NULL;
	}
}
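/*
 * Apply the pending update slot: write the updated config registers, program
 * the DMA descriptors for the new framebuffer (enabling the channel if it
 * was disabled, or chaining the descriptors with an add-to-queue otherwise),
 * then trigger the requested actions through the CHER register.
 * Must be called with layer->lock held.
 */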
static void atmel_hlcdc_layer_update_apply(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_update_slot *slot;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_dma_channel_dscr *dscr;
	unsigned int cfg;
	u32 action = 0;
	int i = 0;

	if (upd->pending < 0 || upd->pending > 1)
		return;

	slot = &upd->slots[upd->pending];

	for_each_set_bit(cfg, slot->updated_configs, layer->desc->nconfigs) {
		regmap_write(regmap,
			     desc->regs_offset +
			     ATMEL_HLCDC_LAYER_CFG(layer, cfg),
			     slot->configs[cfg]);
		action |= ATMEL_HLCDC_LAYER_UPDATE;
	}

	fb_flip = slot->fb_flip;

	if (!fb_flip->fb)
		goto apply;

	if (dma->status == ATMEL_HLCDC_LAYER_DISABLED) {
		for (i = 0; i < fb_flip->ngems; i++) {
			dscr = fb_flip->dscrs[i];
			dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
				     ATMEL_HLCDC_LAYER_DMA_IRQ |
				     ATMEL_HLCDC_LAYER_ADD_IRQ |
				     ATMEL_HLCDC_LAYER_DONE_IRQ;

			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
				     dscr->addr);
			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
				     dscr->ctrl);
			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
				     dscr->next);
		}

		action |= ATMEL_HLCDC_LAYER_DMA_CHAN;
		dma->status = ATMEL_HLCDC_LAYER_ENABLED;
	} else {
		for (i = 0; i < fb_flip->ngems; i++) {
			dscr = fb_flip->dscrs[i];
			dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
				     ATMEL_HLCDC_LAYER_DMA_IRQ |
				     ATMEL_HLCDC_LAYER_DSCR_IRQ |
				     ATMEL_HLCDC_LAYER_DONE_IRQ;

			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
				     dscr->next);
		}

		action |= ATMEL_HLCDC_LAYER_A2Q;
	}

	/* Release unneeded descriptors */
	for (i = fb_flip->ngems; i < layer->max_planes; i++) {
		fb_flip->dscrs[i]->status = 0;
		fb_flip->dscrs[i] = NULL;
	}

	dma->queue = fb_flip;
	slot->fb_flip = NULL;

apply:
	if (action)
		regmap_write(regmap,
			     desc->regs_offset + ATMEL_HLCDC_LAYER_CHER,
			     action);

	atmel_hlcdc_layer_update_reset(layer, upd->pending);

	upd->pending = -1;
}
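/*
 * Layer interrupt handler: inspect the per-plane status bits, promote the
 * queued flip to current on LOADED, retire the current flip on DONE, reset
 * the channel on OVERRUN, and apply the pending update once the descriptor
 * queue is empty.
 */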
void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_fb_flip *flip;
	unsigned long flags;
	unsigned int isr, imr;
	unsigned int status;
	unsigned int plane_status;
	u32 flip_status;
	int i;

	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IMR, &imr);
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
	status = imr & isr;
	if (!status)
		return;

	spin_lock_irqsave(&layer->lock, flags);

	flip = dma->queue ? dma->queue : dma->cur;

	if (!flip) {
		spin_unlock_irqrestore(&layer->lock, flags);
		return;
	}

	/*
	 * Set LOADED and DONE flags: they'll be cleared if at least one
	 * memory plane is not LOADED or DONE.
	 */
	flip_status = ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED |
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
	for (i = 0; i < flip->ngems; i++) {
		plane_status = (status >> (8 * i));

		if (plane_status &
		    (ATMEL_HLCDC_LAYER_ADD_IRQ |
		     ATMEL_HLCDC_LAYER_DSCR_IRQ) &
		    ~flip->dscrs[i]->ctrl) {
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;
			flip->dscrs[i]->ctrl |=
					ATMEL_HLCDC_LAYER_ADD_IRQ |
					ATMEL_HLCDC_LAYER_DSCR_IRQ;
		}

		if (plane_status &
		    ATMEL_HLCDC_LAYER_DONE_IRQ &
		    ~flip->dscrs[i]->ctrl) {
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
			flip->dscrs[i]->ctrl |=
					ATMEL_HLCDC_LAYER_DONE_IRQ;
		}

		if (plane_status & ATMEL_HLCDC_LAYER_OVR_IRQ)
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;

		/*
		 * Clear LOADED and DONE flags if the memory plane is either
		 * not LOADED or not DONE.
		 */
		if (!(flip->dscrs[i]->status &
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED))
			flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;

		if (!(flip->dscrs[i]->status &
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE))
			flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;

		/*
		 * An overrun on one memory plane impacts the whole
		 * framebuffer transfer, hence we set the OVERRUN flag as
		 * soon as there's one memory plane reporting such an
		 * overrun.
		 */
		flip_status |= flip->dscrs[i]->status &
			       ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;
	}

	/* Get changed bits */
	flip_status ^= flip->status;
	flip->status |= flip_status;

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = dma->queue;
		dma->queue = NULL;
	}

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = NULL;
	}

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN) {
		regmap_write(regmap,
			     desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
			     ATMEL_HLCDC_LAYER_RST);
		if (dma->queue)
			atmel_hlcdc_layer_fb_flip_release_queue(layer,
								dma->queue);

		if (dma->cur)
			atmel_hlcdc_layer_fb_flip_release_queue(layer,
								dma->cur);

		dma->cur = NULL;
		dma->queue = NULL;
	}

	if (!dma->queue) {
		atmel_hlcdc_layer_update_apply(layer);

		if (!dma->cur)
			dma->status = ATMEL_HLCDC_LAYER_DISABLED;
	}

	spin_unlock_irqrestore(&layer->lock, flags);
}
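/*
 * Disable the layer: reset the DMA channel, clear pending interrupts, and
 * discard the current, queued and pending framebuffer transfers so the IRQ
 * handler cannot restart the channel behind our back.
 */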
void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	unsigned long flags;
	unsigned int isr;

	spin_lock_irqsave(&layer->lock, flags);

	/* Disable the layer */
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q |
		     ATMEL_HLCDC_LAYER_UPDATE);

	/* Clear all pending interrupts */
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);

	/* Discard current and queued framebuffer transfers. */
	if (dma->cur) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = NULL;
	}

	if (dma->queue) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->queue);
		dma->queue = NULL;
	}

	/*
	 * Then discard the pending update request (if any) to prevent
	 * the DMA irq handler from restarting the DMA channel after it
	 * has been disabled.
	 */
	if (upd->pending >= 0) {
		atmel_hlcdc_layer_update_reset(layer, upd->pending);
		upd->pending = -1;
	}

	dma->status = ATMEL_HLCDC_LAYER_DISABLED;

	spin_unlock_irqrestore(&layer->lock, flags);
}
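/*
 * Start a new update transaction: allocate a flip and its work task, pick
 * the free update slot, reserve a DMA descriptor per plane, and initialize
 * the slot from the pending request (if any) or from the current hardware
 * configuration.
 */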
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_layer_update_slot *slot;
	unsigned long flags;
	int i, j = 0;

	fb_flip = kzalloc(sizeof(*fb_flip), GFP_KERNEL);
	if (!fb_flip)
		return -ENOMEM;

	fb_flip->task = drm_flip_work_allocate_task(fb_flip, GFP_KERNEL);
	if (!fb_flip->task) {
		kfree(fb_flip);
		return -ENOMEM;
	}

	spin_lock_irqsave(&layer->lock, flags);

	upd->next = upd->pending ? 0 : 1;

	slot = &upd->slots[upd->next];

	for (i = 0; i < layer->max_planes * 4; i++) {
		if (!dma->dscrs[i].status) {
			fb_flip->dscrs[j++] = &dma->dscrs[i];
			dma->dscrs[i].status =
				ATMEL_HLCDC_DMA_CHANNEL_DSCR_RESERVED;
			if (j == layer->max_planes)
				break;
		}
	}

	if (j < layer->max_planes) {
		/* Not enough free descriptors: release the reserved ones. */
		for (i = 0; i < j; i++)
			fb_flip->dscrs[i]->status = 0;

		spin_unlock_irqrestore(&layer->lock, flags);
		atmel_hlcdc_layer_fb_flip_destroy(fb_flip);
		return -EBUSY;
	}

	slot->fb_flip = fb_flip;

	if (upd->pending >= 0) {
		memcpy(slot->configs,
		       upd->slots[upd->pending].configs,
		       layer->desc->nconfigs * sizeof(u32));
		memcpy(slot->updated_configs,
		       upd->slots[upd->pending].updated_configs,
		       DIV_ROUND_UP(layer->desc->nconfigs,
				    BITS_PER_BYTE * sizeof(unsigned long)) *
		       sizeof(unsigned long));
		if (upd->slots[upd->pending].fb_flip->fb) {
			slot->fb_flip->fb =
				upd->slots[upd->pending].fb_flip->fb;
			slot->fb_flip->ngems =
				upd->slots[upd->pending].fb_flip->ngems;
			drm_framebuffer_reference(slot->fb_flip->fb);
		}
	} else {
		regmap_bulk_read(regmap,
				 layer->desc->regs_offset +
				 ATMEL_HLCDC_LAYER_CFG(layer, 0),
				 upd->slots[upd->next].configs,
				 layer->desc->nconfigs);
	}

	spin_unlock_irqrestore(&layer->lock, flags);

	return 0;
}
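/* Abort the transaction prepared by atmel_hlcdc_layer_update_start(). */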
void atmel_hlcdc_layer_update_rollback(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;

	atmel_hlcdc_layer_update_reset(layer, upd->next);
	upd->next = -1;
}
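/*
 * Attach a framebuffer to the update slot: store the per-plane DMA
 * addresses in the reserved descriptors and exchange the framebuffer
 * references. Passing a NULL fb detaches the current framebuffer.
 */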
void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
				     struct drm_framebuffer *fb,
				     unsigned int *offsets)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_layer_update_slot *slot;
	struct atmel_hlcdc_dma_channel_dscr *dscr;
	struct drm_framebuffer *old_fb;
	int nplanes = 0;
	int i;

	if (upd->next < 0 || upd->next > 1)
		return;

	if (fb)
		nplanes = drm_format_num_planes(fb->pixel_format);

	if (nplanes > layer->max_planes)
		return;

	slot = &upd->slots[upd->next];

	fb_flip = slot->fb_flip;
	old_fb = slot->fb_flip->fb;

	for (i = 0; i < nplanes; i++) {
		struct drm_gem_cma_object *gem;

		dscr = slot->fb_flip->dscrs[i];
		gem = drm_fb_cma_get_gem_obj(fb, i);
		dscr->addr = gem->paddr + offsets[i];
	}

	fb_flip->ngems = nplanes;
	fb_flip->fb = fb;

	if (fb)
		drm_framebuffer_reference(fb);

	if (old_fb)
		drm_framebuffer_unreference(old_fb);
}
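/*
 * Update the masked bits of a config register in the update slot and mark
 * the config as dirty so it gets written back on commit.
 */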
void atmel_hlcdc_layer_update_cfg(struct atmel_hlcdc_layer *layer, int cfg,
				  u32 mask, u32 val)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_update_slot *slot;

	if (upd->next < 0 || upd->next > 1)
		return;

	if (cfg >= layer->desc->nconfigs)
		return;

	slot = &upd->slots[upd->next];
	slot->configs[cfg] &= ~mask;
	slot->configs[cfg] |= (val & mask);
	set_bit(cfg, slot->updated_configs);
}
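/*
 * Commit the update transaction: make the prepared slot the pending one
 * (dropping any previously pending request) and apply it immediately if no
 * framebuffer transfer is queued on the DMA channel.
 */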
void atmel_hlcdc_layer_update_commit(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_update_slot *slot;
	unsigned long flags;

	if (upd->next < 0 || upd->next > 1)
		return;

	slot = &upd->slots[upd->next];

	spin_lock_irqsave(&layer->lock, flags);

	/*
	 * Release the pending update request and replace it by the new one.
	 */
	if (upd->pending >= 0)
		atmel_hlcdc_layer_update_reset(layer, upd->pending);

	upd->pending = upd->next;
	upd->next = -1;

	if (!dma->queue)
		atmel_hlcdc_layer_update_apply(layer);

	spin_unlock_irqrestore(&layer->lock, flags);
}
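/*
 * Allocate the coherent DMA descriptor pool (4 descriptors per plane) and
 * make each descriptor's ->next field point to its own bus address, so a
 * transfer keeps looping on the same descriptor until a new one is queued.
 */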
static int atmel_hlcdc_layer_dma_init(struct drm_device *dev,
				      struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	dma_addr_t dma_addr;
	int i;

	dma->dscrs = dma_alloc_coherent(dev->dev,
					layer->max_planes * 4 *
					sizeof(*dma->dscrs),
					&dma_addr, GFP_KERNEL);
	if (!dma->dscrs)
		return -ENOMEM;

	for (i = 0; i < layer->max_planes * 4; i++) {
		struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

		/* Each descriptor points to itself by default. */
		dscr->next = dma_addr + (i * sizeof(*dscr));
	}

	return 0;
}
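/*
 * Mark all descriptors as free and release the coherent descriptor pool.
 * The bus address of the pool is recovered from the first descriptor's
 * self-pointing ->next field.
 */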
static void atmel_hlcdc_layer_dma_cleanup(struct drm_device *dev,
					  struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	int i;

	for (i = 0; i < layer->max_planes * 4; i++) {
		struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

		dscr->status = 0;
	}

	dma_free_coherent(dev->dev, layer->max_planes * 4 *
			  sizeof(*dma->dscrs), dma->dscrs,
			  dma->dscrs[0].next);
}
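/*
 * Allocate the two update slots from a single buffer: for each slot, an
 * updated-config bitmap followed by the config value array.
 */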
static int atmel_hlcdc_layer_update_init(struct drm_device *dev,
					 struct atmel_hlcdc_layer *layer,
					 const struct atmel_hlcdc_layer_desc *desc)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	int updated_size;
	void *buffer;
	int i;

	updated_size = DIV_ROUND_UP(desc->nconfigs,
				    BITS_PER_BYTE *
				    sizeof(unsigned long));

	buffer = devm_kzalloc(dev->dev,
			      ((desc->nconfigs * sizeof(u32)) +
			       (updated_size * sizeof(unsigned long))) * 2,
			      GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i = 0; i < 2; i++) {
		upd->slots[i].updated_configs = buffer;
		buffer += updated_size * sizeof(unsigned long);
		upd->slots[i].configs = buffer;
		buffer += desc->nconfigs * sizeof(u32);
	}

	upd->pending = -1;
	upd->next = -1;

	return 0;
}
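/*
 * Initialize a layer: reset the DMA channel, compute the maximum number of
 * memory planes from the supported formats, set up the flip work, DMA
 * descriptors and update slots, then enable the per-plane interrupts.
 */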
int atmel_hlcdc_layer_init(struct drm_device *dev,
			   struct atmel_hlcdc_layer *layer,
			   const struct atmel_hlcdc_layer_desc *desc)
{
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	struct regmap *regmap = dc->hlcdc->regmap;
	unsigned int tmp;
	int ret;
	int i;

	layer->hlcdc = dc->hlcdc;
	layer->wq = dc->wq;
	layer->desc = desc;

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST);
	for (i = 0; i < desc->formats->nformats; i++) {
		int nplanes = drm_format_num_planes(desc->formats->formats[i]);

		if (nplanes > layer->max_planes)
			layer->max_planes = nplanes;
	}

	spin_lock_init(&layer->lock);
	drm_flip_work_init(&layer->gc, desc->name,
			   atmel_hlcdc_layer_fb_flip_release);
	ret = atmel_hlcdc_layer_dma_init(dev, layer);
	if (ret)
		return ret;

	ret = atmel_hlcdc_layer_update_init(dev, layer, desc);
	if (ret)
		return ret;

	/* Flush Status Register */
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
		     0xffffffff);
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR,
		    &tmp);

	tmp = 0;
	for (i = 0; i < layer->max_planes; i++)
		tmp |= (ATMEL_HLCDC_LAYER_DMA_IRQ |
			ATMEL_HLCDC_LAYER_DSCR_IRQ |
			ATMEL_HLCDC_LAYER_ADD_IRQ |
			ATMEL_HLCDC_LAYER_DONE_IRQ |
			ATMEL_HLCDC_LAYER_OVR_IRQ) << (8 * i);

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IER, tmp);

	return 0;
}
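/*
 * Cleanup a layer: disable its interrupts, reset the DMA channel, and
 * release the descriptor pool and the flip work.
 */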
void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
			       struct atmel_hlcdc_layer *layer)
{
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct regmap *regmap = layer->hlcdc->regmap;

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
		     0xffffffff);
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST);

	atmel_hlcdc_layer_dma_cleanup(dev, layer);
	drm_flip_work_cleanup(&layer->gc);
}