 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)
#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
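/*
 * On controllers that support XOR_MODE_IN_DESC, the operation (XOR vs.
 * MEMCPY) is selected per descriptor through desc_command instead of
 * through the channel configuration register.
 */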
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
	struct mv_xor_desc *hw_desc = desc->hw_desc;

		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
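/*
 * Per-channel register helpers: XOR_CURR_DESC reports the descriptor the
 * engine is currently working on, XOR_NEXT_DESC tells it where to fetch
 * the next chain from.
 */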
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
	return readl_relaxed(XOR_CURR_DESC(chan));

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
	u32 val = 0xFFFF0000 >> (chan->idx * 16);

	writel_relaxed(val, XOR_INTR_CAUSE(chan));

static void mv_chan_set_mode(struct mv_xor_chan *chan,
	u32 config = readl_relaxed(XOR_CONFIG(chan));
#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
static void mv_chan_activate(struct mv_xor_chan *chan)
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
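/*
 * Bits [5:4] of the activation register encode the channel state; the
 * driver treats state 1 as "busy", i.e. a descriptor chain is executing.
 */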
static char mv_chan_is_busy(struct mv_xor_chan *chan)
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
 * mv_chan_start_new_chain - program the engine to operate on a new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_xor_issue_pending(&mv_chan->dmachan);
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);
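/*
 * Descriptors that the client has already acknowledged (async_tx_test_ack)
 * can be recycled immediately; move them from completed_slots back to the
 * free_slots pool.
 */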
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
		if (async_tx_test_ack(&iter->async_tx))
			list_move_tail(&iter->node, &mv_chan->free_slots);

mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	if (!async_tx_test_ack(&desc->async_tx))
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
		list_move_tail(&desc->node, &mv_chan->free_slots);
/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {

			if (iter->async_tx.phys == current_desc) {

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			 * current descriptor cleaned and removed, run
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
			mv_chan_start_new_chain(mv_chan, iter);
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				 * descriptors are still waiting after
				 * current, trigger them
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
				mv_chan_start_new_chain(mv_chan, iter);
				 * some descriptors are still waiting
				tasklet_schedule(&mv_chan->irq_tasklet);

	mv_chan->dmachan.completed_cookie = cookie;
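/*
 * Descriptor cleanup is deferred to a tasklet; it runs in BH context and
 * takes the channel lock around mv_chan_slot_cleanup().
 */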
static void mv_xor_tasklet(unsigned long data)
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
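/*
 * Take a software descriptor from free_slots. If none are available,
 * schedule the cleanup tasklet in the hope of reclaiming completed slots
 * and let the caller retry later.
 */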
static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

/************************ DMA engine API functions ****************************/
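/*
 * Rough sketch of how a client typically drives these entry points through
 * the generic dmaengine API (not code from this driver): grab a channel
 * with dma_request_channel(), build a descriptor via the prep hooks
 * installed in mv_xor_channel_add() (device_prep_dma_xor() or
 * device_prep_dma_memcpy(), usually through the async_tx layer), submit it
 * with dmaengine_submit(), kick the engine with dma_async_issue_pending()
 * and poll for completion with dma_async_is_tx_complete().
 */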
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);

	old_chain_tail = list_entry(mv_chan->chain.prev,
				    struct mv_xor_desc_slot,
	list_move_tail(&sw_desc->node, &mv_chan->chain);

	dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
		&old_chain_tail->async_tx.phys);

	/* fix up the hardware chain */
	mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

	/* if the channel is not busy */
	if (!mv_chan_is_busy(mv_chan)) {
		u32 current_desc = mv_chan_get_current_desc(mv_chan);
		 * and the current desc is the end of the chain before
		 * the append, then we need to start the channel
		if (current_desc == old_chain_tail->async_tx.phys)

	mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);
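/*
 * Hardware descriptors live in a single coherent pool of MV_XOR_POOL_SIZE
 * bytes; each software slot allocated below is pointed at one
 * MV_XOR_SLOT_SIZE chunk of that pool (both its CPU and its DMA address).
 */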
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
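/*
 * Prepare an XOR descriptor: the destination address and byte count go into
 * the descriptor header, each source fills one phy_src_addr entry, and one
 * bit per active source is set in desc_command.
 */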
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_chan_alloc_slot(mv_chan);
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
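/*
 * Free a channel's resources: drain the chain, completed and allocated
 * lists back into free_slots, then release the software descriptors that
 * mv_xor_alloc_chan_resources() created.
 */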
static void mv_xor_free_chan_resources(struct dma_chan *chan)
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
		list_move_tail(&iter->node, &mv_chan->free_slots);

	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
		list_move_tail(&iter->node, &mv_chan->free_slots);

	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
		list_move_tail(&iter->node, &mv_chan->free_slots);

	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		mv_chan->slots_allocated--;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	dev_err(mv_chan_to_devp(mv_chan),
		"freeing %d in use descriptors!\n", in_use_descs);

 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction's state holder (or NULL)
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     struct dma_tx_state *txstate)
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
static void mv_chan_dump_regs(struct mv_xor_chan *chan)
	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);
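/*
 * issue_pending only activates the hardware once the number of pending
 * submissions reaches MV_XOR_THRESHOLD.
 */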
static void mv_xor_issue_pending(struct dma_chan *chan)
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
 * Perform a transaction to verify the HW works.
static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);

	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");

	mv_xor_issue_pending(dma_chan);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");

	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
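/*
 * XOR self-test: every source page is filled with a distinct one-hot byte
 * (1 << src_idx), so each byte of the XOR result must equal cmp_byte and
 * each 32-bit word must equal cmp_word.
 */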
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			__free_page(xor_srcs[src_idx]);

	dest = alloc_page(GFP_KERNEL);
		__free_page(xor_srcs[src_idx]);

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,

	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);

	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");

	mv_xor_issue_pending(dma_chan);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);

	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);

	__free_page(xor_srcs[src_idx]);
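/*
 * Channel teardown mirrors mv_xor_channel_add(): unregister from the
 * dmaengine core, free the coherent descriptor pool, unmap the dummy
 * DMA_INTERRUPT buffers and release the interrupt.
 */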
static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
		list_del(&chan->device_node);

	free_irq(mv_chan->irq, mv_chan);
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc)
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	mv_chan->op_in_desc = op_in_desc;

	dma_dev = &mv_chan->dmadev;

	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);
	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);

	free_irq(mv_chan->irq, mv_chan);

	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
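/*
 * Program the engine's MBus address decoding windows so the XOR channels
 * can reach the DRAM chip selects described by mbus_dram_target_info.
 */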
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
	void __iomem *base = xordev->xor_high_base;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
			writel(0, base + WINDOW_REMAP_HIGH(i));

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
 * Since this XOR driver is basically used only for RAID5, we don't
 * need to care about synchronizing ->suspend with DMA activity,
 * because the DMA engine will naturally be quiet due to the block
 * devices being suspended.
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		mv_chan->saved_config_reg =
			readl_relaxed(XOR_CONFIG(mv_chan));
		mv_chan->saved_int_mask_reg =
			readl_relaxed(XOR_INTR_MASK(mv_chan));

static int mv_xor_resume(struct platform_device *dev)
	struct mv_xor_device *xordev = platform_get_drvdata(dev);
	const struct mbus_dram_target_info *dram;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		writel_relaxed(mv_chan->saved_config_reg,
			       XOR_CONFIG(mv_chan));
		writel_relaxed(mv_chan->saved_int_mask_reg,
			       XOR_INTR_MASK(mv_chan));

	dram = mv_mbus_dram_info();
		mv_xor_conf_mbus_windows(xordev, dram);
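/*
 * Two hardware flavours are supported: orion-xor selects the operation
 * through the channel configuration register (XOR_MODE_IN_REG), while
 * armada-380-xor carries the operation in each descriptor
 * (XOR_MODE_IN_DESC).
 */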
static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },

static unsigned int mv_xor_engine_count;
static int mv_xor_probe(struct platform_device *pdev)
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)

	platform_set_drvdata(pdev, xordev);

	 * (Re-)program MBUS remapping windows if we are asked to.
	dram = mv_mbus_dram_info();
		mv_xor_conf_mbus_windows(xordev, dram);
	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);
	 * We don't want to have more than one channel per CPU in
	 * order for async_tx to perform well. So we limit the number
	 * of engines and channels so that we take into account this
	 * constraint. Note that we also want to use channels from
	 * separate engines when possible.
	max_engines = num_present_cpus();
	max_channels = min_t(unsigned int,
			     MV_XOR_MAX_CHANNELS,
			     DIV_ROUND_UP(num_present_cpus(), 2));

	if (mv_xor_engine_count >= max_engines)

	if (pdev->dev.of_node) {
		struct device_node *np;
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;

			op_in_desc = (int)of_id->data;

			if (i >= max_channels)

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
				goto err_channel_add;

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq, op_in_desc);
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;

			xordev->channels[i] = chan;
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;

			cd = &pdata->channels[i];
				goto err_channel_add;

			irq = platform_get_irq(pdev, i);
				goto err_channel_add;

			chan = mv_xor_channel_add(xordev, pdev, i,
				ret = PTR_ERR(chan);
				goto err_channel_add;

			xordev->channels[i] = chan;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
static struct platform_driver mv_xor_driver = {
	.probe = mv_xor_probe,
	.suspend = mv_xor_suspend,
	.resume = mv_xor_resume,
		.name = MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),

static int __init mv_xor_init(void)
	return platform_driver_register(&mv_xor_driver);

device_initcall(mv_xor_init);

MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");