1 // SPDX-License-Identifier: GPL-2.0
/*
 * hal.c - DIM2 HAL implementation
 * (MediaLB, Device Interface Macro IP, OS62420)
 *
 * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
 */
14 #include <linux/stddef.h>
15 #include <linux/kernel.h>
/*
 * Size factor for isochronous DBR buffer.
 */
#define ISOC_DBR_FACTOR 3u

/*
 * Number of 32-bit units for DBR map.
 *
 * 1: block size is 512, max allocation is 16K
 * 2: block size is 256, max allocation is 8K
 * 4: block size is 128, max allocation is 4K
 * 8: block size is 64, max allocation is 2K
 *
 * Min allocated space is block size.
 * Max possible allocated space is 32 blocks.
 */
#define DBR_MAP_SIZE 2

/* -------------------------------------------------------------------------- */
/* not configurable area */

#define DBR_SIZE (16 * 1024) /* specified by IP */
#define DBR_BLOCK_SIZE (DBR_SIZE / 32 / DBR_MAP_SIZE)

/* round x up to the next multiple of d */
#define ROUND_UP_TO(x, d) (DIV_ROUND_UP(x, (d)) * (d))
50 /* -------------------------------------------------------------------------- */
51 /* generic helper functions and macros */
53 static inline u32 bit_mask(u8 position)
55 return (u32)1 << position;
58 static inline bool dim_on_error(u8 error_id, const char *error_message)
60 dimcb_on_error(error_id, error_message);
64 /* -------------------------------------------------------------------------- */
65 /* types and local variables */
72 u16 sz_queue[CDT0_RPC_MASK + 1];
75 struct lld_global_vars_t {
76 bool dim_is_initialized;
77 bool mcm_is_initialized;
78 struct dim2_regs __iomem *dim2; /* DIM2 core base address */
79 struct async_tx_dbr atx_dbr;
81 u32 dbr_map[DBR_MAP_SIZE];
84 static struct lld_global_vars_t g = { false };
86 /* -------------------------------------------------------------------------- */
88 static int dbr_get_mask_size(u16 size)
92 for (i = 0; i < 6; i++)
93 if (size <= (DBR_BLOCK_SIZE << i))
99 * alloc_dbr() - Allocates DBR memory.
100 * @size: Allocating memory size.
101 * Returns: Offset in DBR memory by success or DBR_SIZE if out of memory.
103 static int alloc_dbr(u16 size)
106 int i, block_idx = 0;
109 return DBR_SIZE; /* out of memory */
111 mask_size = dbr_get_mask_size(size);
113 return DBR_SIZE; /* out of memory */
115 for (i = 0; i < DBR_MAP_SIZE; i++) {
116 u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
117 u32 mask = ~((~(u32)0) << blocks);
120 if ((g.dbr_map[i] & mask) == 0) {
121 g.dbr_map[i] |= mask;
122 return block_idx * DBR_BLOCK_SIZE;
124 block_idx += mask_size;
125 /* do shift left with 2 steps in case mask_size == 32 */
126 mask <<= mask_size - 1;
127 } while ((mask <<= 1) != 0);
130 return DBR_SIZE; /* out of memory */
133 static void free_dbr(int offs, int size)
135 int block_idx = offs / DBR_BLOCK_SIZE;
136 u32 const blocks = DIV_ROUND_UP(size, DBR_BLOCK_SIZE);
137 u32 mask = ~((~(u32)0) << blocks);
139 mask <<= block_idx % 32;
140 g.dbr_map[block_idx / 32] &= ~mask;
143 /* -------------------------------------------------------------------------- */
145 static void dim2_transfer_madr(u32 val)
147 writel(val, &g.dim2->MADR);
149 /* wait for transfer completion */
150 while ((readl(&g.dim2->MCTL) & 1) != 1)
153 writel(0, &g.dim2->MCTL); /* clear transfer complete */
156 static void dim2_clear_dbr(u16 addr, u16 size)
158 enum { MADR_TB_BIT = 30, MADR_WNR_BIT = 31 };
160 u16 const end_addr = addr + size;
161 u32 const cmd = bit_mask(MADR_WNR_BIT) | bit_mask(MADR_TB_BIT);
163 writel(0, &g.dim2->MCTL); /* clear transfer complete */
164 writel(0, &g.dim2->MDAT0);
166 for (; addr < end_addr; addr++)
167 dim2_transfer_madr(cmd | addr);
170 static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
172 dim2_transfer_madr(ctr_addr);
174 return readl((&g.dim2->MDAT0) + mdat_idx);
177 static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
179 enum { MADR_WNR_BIT = 31 };
181 writel(0, &g.dim2->MCTL); /* clear transfer complete */
184 writel(value[0], &g.dim2->MDAT0);
186 writel(value[1], &g.dim2->MDAT1);
188 writel(value[2], &g.dim2->MDAT2);
190 writel(value[3], &g.dim2->MDAT3);
192 writel(mask[0], &g.dim2->MDWE0);
193 writel(mask[1], &g.dim2->MDWE1);
194 writel(mask[2], &g.dim2->MDWE2);
195 writel(mask[3], &g.dim2->MDWE3);
197 dim2_transfer_madr(bit_mask(MADR_WNR_BIT) | ctr_addr);
200 static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
202 u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
204 dim2_write_ctr_mask(ctr_addr, mask, value);
207 static inline void dim2_clear_ctr(u32 ctr_addr)
209 u32 const value[4] = { 0, 0, 0, 0 };
211 dim2_write_ctr(ctr_addr, value);
214 static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
217 bool isoc_fce = ch_type == CAT_CT_VAL_ISOC;
218 bool sync_mfe = ch_type == CAT_CT_VAL_SYNC;
220 (read_not_write << CAT_RNW_BIT) |
221 (ch_type << CAT_CT_SHIFT) |
222 (ch_addr << CAT_CL_SHIFT) |
223 (isoc_fce << CAT_FCE_BIT) |
224 (sync_mfe << CAT_MFE_BIT) |
225 (false << CAT_MT_BIT) |
226 (true << CAT_CE_BIT);
227 u8 const ctr_addr = cat_base + ch_addr / 8;
228 u8 const idx = (ch_addr % 8) / 2;
229 u8 const shift = (ch_addr % 2) * 16;
230 u32 mask[4] = { 0, 0, 0, 0 };
231 u32 value[4] = { 0, 0, 0, 0 };
233 mask[idx] = (u32)0xFFFF << shift;
234 value[idx] = cat << shift;
235 dim2_write_ctr_mask(ctr_addr, mask, value);
238 static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
240 u8 const ctr_addr = cat_base + ch_addr / 8;
241 u8 const idx = (ch_addr % 8) / 2;
242 u8 const shift = (ch_addr % 2) * 16;
243 u32 mask[4] = { 0, 0, 0, 0 };
244 u32 value[4] = { 0, 0, 0, 0 };
246 mask[idx] = (u32)0xFFFF << shift;
247 dim2_write_ctr_mask(ctr_addr, mask, value);
250 static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
253 u32 cdt[4] = { 0, 0, 0, 0 };
256 cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);
259 ((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
260 (dbr_address << CDT3_BA_SHIFT);
261 dim2_write_ctr(CDT + ch_addr, cdt);
264 static u16 dim2_rpc(u8 ch_addr)
266 u32 cdt0 = dim2_read_ctr(CDT + ch_addr, 0);
268 return (cdt0 >> CDT0_RPC_SHIFT) & CDT0_RPC_MASK;
271 static void dim2_clear_cdt(u8 ch_addr)
273 u32 cdt[4] = { 0, 0, 0, 0 };
275 dim2_write_ctr(CDT + ch_addr, cdt);
278 static void dim2_configure_adt(u8 ch_addr)
280 u32 adt[4] = { 0, 0, 0, 0 };
283 (true << ADT0_CE_BIT) |
284 (true << ADT0_LE_BIT) |
287 dim2_write_ctr(ADT + ch_addr, adt);
290 static void dim2_clear_adt(u8 ch_addr)
292 u32 adt[4] = { 0, 0, 0, 0 };
294 dim2_write_ctr(ADT + ch_addr, adt);
297 static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
300 u8 const shift = idx * 16;
302 u32 mask[4] = { 0, 0, 0, 0 };
303 u32 adt[4] = { 0, 0, 0, 0 };
306 bit_mask(ADT1_PS_BIT + shift) |
307 bit_mask(ADT1_RDY_BIT + shift) |
308 (ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
310 (true << (ADT1_PS_BIT + shift)) |
311 (true << (ADT1_RDY_BIT + shift)) |
312 ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
314 mask[idx + 2] = 0xFFFFFFFF;
315 adt[idx + 2] = buf_addr;
317 dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
320 static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
323 u8 const shift = idx * 16;
325 u32 mask[4] = { 0, 0, 0, 0 };
326 u32 adt[4] = { 0, 0, 0, 0 };
329 bit_mask(ADT1_RDY_BIT + shift) |
330 (ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
332 (true << (ADT1_RDY_BIT + shift)) |
333 ((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
335 mask[idx + 2] = 0xFFFFFFFF;
336 adt[idx + 2] = buf_addr;
338 dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
341 static void dim2_clear_ctram(void)
345 for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
346 dim2_clear_ctr(ctr_addr);
349 static void dim2_configure_channel(u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address,
350 u16 hw_buffer_size, u16 packet_length)
352 dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
353 dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0);
355 dim2_configure_adt(ch_addr);
356 dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1);
358 /* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
359 writel(readl(&g.dim2->ACMR0) | bit_mask(ch_addr), &g.dim2->ACMR0);
362 static void dim2_clear_channel(u8 ch_addr)
364 /* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
365 writel(readl(&g.dim2->ACMR0) & ~bit_mask(ch_addr), &g.dim2->ACMR0);
367 dim2_clear_cat(AHB_CAT, ch_addr);
368 dim2_clear_adt(ch_addr);
370 dim2_clear_cat(MLB_CAT, ch_addr);
371 dim2_clear_cdt(ch_addr);
373 /* clear channel status bit */
374 writel(bit_mask(ch_addr), &g.dim2->ACSR0);
377 /* -------------------------------------------------------------------------- */
378 /* trace async tx dbr fill state */
380 static inline u16 norm_pc(u16 pc)
382 return pc & CDT0_RPC_MASK;
385 static void dbrcnt_init(u8 ch_addr, u16 dbr_size)
387 g.atx_dbr.rest_size = dbr_size;
388 g.atx_dbr.rpc = dim2_rpc(ch_addr);
389 g.atx_dbr.wpc = g.atx_dbr.rpc;
392 static void dbrcnt_enq(int buf_sz)
394 g.atx_dbr.rest_size -= buf_sz;
395 g.atx_dbr.sz_queue[norm_pc(g.atx_dbr.wpc)] = buf_sz;
399 u16 dim_dbr_space(struct dim_channel *ch)
402 struct async_tx_dbr *dbr = &g.atx_dbr;
404 if (ch->addr != dbr->ch_addr)
407 cur_rpc = dim2_rpc(ch->addr);
409 while (norm_pc(dbr->rpc) != cur_rpc) {
410 dbr->rest_size += dbr->sz_queue[norm_pc(dbr->rpc)];
414 if ((u16)(dbr->wpc - dbr->rpc) >= CDT0_RPC_MASK)
417 return dbr->rest_size;
420 /* -------------------------------------------------------------------------- */
421 /* channel state helpers */
423 static void state_init(struct int_ch_state *state)
425 state->request_counter = 0;
426 state->service_counter = 0;
433 /* -------------------------------------------------------------------------- */
434 /* macro helper functions */
436 static inline bool check_channel_address(u32 ch_address)
438 return ch_address > 0 && (ch_address % 2) == 0 &&
439 (ch_address / 2) <= (u32)CAT_CL_MASK;
442 static inline bool check_packet_length(u32 packet_length)
444 u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;
446 if (packet_length <= 0)
447 return false; /* too small */
449 if (packet_length > max_size)
450 return false; /* too big */
452 if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
453 return false; /* too big */
458 static inline bool check_bytes_per_frame(u32 bytes_per_frame)
460 u16 const bd_factor = g.fcnt + 2;
461 u16 const max_size = ((u16)CDT3_BD_MASK + 1u) >> bd_factor;
463 if (bytes_per_frame <= 0)
464 return false; /* too small */
466 if (bytes_per_frame > max_size)
467 return false; /* too big */
472 u16 dim_norm_ctrl_async_buffer_size(u16 buf_size)
474 u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;
476 if (buf_size > max_size)
482 static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
485 u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
487 if (buf_size > max_size)
490 n = buf_size / packet_length;
493 return 0; /* too small buffer for given packet_length */
495 return packet_length * n;
498 static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
501 u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
502 u32 const unit = bytes_per_frame << g.fcnt;
504 if (buf_size > max_size)
510 return 0; /* too small buffer for given bytes_per_frame */
515 static void dim2_cleanup(void)
517 /* disable MediaLB */
518 writel(false << MLBC0_MLBEN_BIT, &g.dim2->MLBC0);
522 /* disable mlb_int interrupt */
523 writel(0, &g.dim2->MIEN);
525 /* clear status for all dma channels */
526 writel(0xFFFFFFFF, &g.dim2->ACSR0);
527 writel(0xFFFFFFFF, &g.dim2->ACSR1);
529 /* mask interrupts for all channels */
530 writel(0, &g.dim2->ACMR0);
531 writel(0, &g.dim2->ACMR1);
534 static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
538 /* configure and enable MediaLB */
539 writel(enable_6pin << MLBC0_MLBPEN_BIT |
540 mlb_clock << MLBC0_MLBCLK_SHIFT |
541 g.fcnt << MLBC0_FCNT_SHIFT |
542 true << MLBC0_MLBEN_BIT,
545 /* activate all HBI channels */
546 writel(0xFFFFFFFF, &g.dim2->HCMR0);
547 writel(0xFFFFFFFF, &g.dim2->HCMR1);
550 writel(bit_mask(HCTL_EN_BIT), &g.dim2->HCTL);
553 writel(ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
554 true << ACTL_SCE_BIT, &g.dim2->ACTL);
557 static bool dim2_is_mlb_locked(void)
559 u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
560 u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
561 bit_mask(MLBC1_LOCKERR_BIT);
562 u32 const c1 = readl(&g.dim2->MLBC1);
563 u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;
565 writel(c1 & nda_mask, &g.dim2->MLBC1);
566 return (readl(&g.dim2->MLBC1) & mask1) == 0 &&
567 (readl(&g.dim2->MLBC0) & mask0) != 0;
570 /* -------------------------------------------------------------------------- */
571 /* channel help routines */
573 static inline bool service_channel(u8 ch_addr, u8 idx)
575 u8 const shift = idx * 16;
576 u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
577 u32 mask[4] = { 0, 0, 0, 0 };
578 u32 adt_w[4] = { 0, 0, 0, 0 };
580 if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
584 bit_mask(ADT1_DNE_BIT + shift) |
585 bit_mask(ADT1_ERR_BIT + shift) |
586 bit_mask(ADT1_RDY_BIT + shift);
587 dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
589 /* clear channel status bit */
590 writel(bit_mask(ch_addr), &g.dim2->ACSR0);
595 /* -------------------------------------------------------------------------- */
596 /* channel init routines */
598 static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
600 state_init(&ch->state);
604 ch->packet_length = packet_length;
605 ch->bytes_per_frame = 0;
606 ch->done_sw_buffers_number = 0;
609 static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
611 state_init(&ch->state);
615 ch->packet_length = 0;
616 ch->bytes_per_frame = bytes_per_frame;
617 ch->done_sw_buffers_number = 0;
620 static void channel_init(struct dim_channel *ch, u8 ch_addr)
622 state_init(&ch->state);
626 ch->packet_length = 0;
627 ch->bytes_per_frame = 0;
628 ch->done_sw_buffers_number = 0;
631 /* returns true if channel interrupt state is cleared */
632 static bool channel_service_interrupt(struct dim_channel *ch)
634 struct int_ch_state *const state = &ch->state;
636 if (!service_channel(ch->addr, state->idx2))
640 state->request_counter++;
644 static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
646 struct int_ch_state *const state = &ch->state;
649 return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");
651 if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
652 buf_size != dim_norm_ctrl_async_buffer_size(buf_size))
653 return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
654 "Bad control/async buffer size");
656 if (ch->packet_length &&
657 buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
658 return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
659 "Bad isochronous buffer size");
661 if (ch->bytes_per_frame &&
662 buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
663 return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
664 "Bad synchronous buffer size");
666 if (state->level >= 2u)
667 return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");
671 if (ch->addr == g.atx_dbr.ch_addr)
672 dbrcnt_enq(buf_size);
674 if (ch->packet_length || ch->bytes_per_frame)
675 dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
677 dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr,
684 static u8 channel_service(struct dim_channel *ch)
686 struct int_ch_state *const state = &ch->state;
688 if (state->service_counter != state->request_counter) {
689 state->service_counter++;
690 if (state->level == 0)
691 return DIM_ERR_UNDERFLOW;
694 ch->done_sw_buffers_number++;
700 static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
702 if (buffers_number > ch->done_sw_buffers_number)
703 return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");
705 ch->done_sw_buffers_number -= buffers_number;
709 /* -------------------------------------------------------------------------- */
712 u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock,
715 g.dim_is_initialized = false;
717 if (!dim_base_address)
718 return DIM_INIT_ERR_DIM_ADDR;
720 /* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
721 /* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
723 return DIM_INIT_ERR_MLB_CLOCK;
725 if (fcnt > MLBC0_FCNT_MAX_VAL)
726 return DIM_INIT_ERR_MLB_CLOCK;
728 g.dim2 = dim_base_address;
733 dim2_initialize(mlb_clock >= 3, mlb_clock);
735 g.dim_is_initialized = true;
740 void dim_shutdown(void)
742 g.dim_is_initialized = false;
746 bool dim_get_lock_state(void)
748 return dim2_is_mlb_locked();
751 static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
752 u16 ch_address, u16 hw_buffer_size)
754 if (!g.dim_is_initialized || !ch)
755 return DIM_ERR_DRIVER_NOT_INITIALIZED;
757 if (!check_channel_address(ch_address))
758 return DIM_INIT_ERR_CHANNEL_ADDRESS;
761 ch->dbr_size = ROUND_UP_TO(hw_buffer_size, DBR_BLOCK_SIZE);
762 ch->dbr_addr = alloc_dbr(ch->dbr_size);
763 if (ch->dbr_addr >= DBR_SIZE)
764 return DIM_INIT_ERR_OUT_OF_MEMORY;
766 channel_init(ch, ch_address / 2);
768 dim2_configure_channel(ch->addr, type, is_tx,
769 ch->dbr_addr, ch->dbr_size, 0);
774 void dim_service_mlb_int_irq(void)
776 writel(0, &g.dim2->MS0);
777 writel(0, &g.dim2->MS1);
781 * Retrieves maximal possible correct buffer size for isochronous data type
782 * conform to given packet length and not bigger than given buffer size.
784 * Returns non-zero correct buffer size or zero by error.
786 u16 dim_norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
788 if (!check_packet_length(packet_length))
791 return norm_isoc_buffer_size(buf_size, packet_length);
795 * Retrieves maximal possible correct buffer size for synchronous data type
796 * conform to given bytes per frame and not bigger than given buffer size.
798 * Returns non-zero correct buffer size or zero by error.
800 u16 dim_norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
802 if (!check_bytes_per_frame(bytes_per_frame))
805 return norm_sync_buffer_size(buf_size, bytes_per_frame);
808 u8 dim_init_control(struct dim_channel *ch, u8 is_tx, u16 ch_address,
811 return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
815 u8 dim_init_async(struct dim_channel *ch, u8 is_tx, u16 ch_address,
818 u8 ret = init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
821 if (is_tx && !g.atx_dbr.ch_addr) {
822 g.atx_dbr.ch_addr = ch->addr;
823 dbrcnt_init(ch->addr, ch->dbr_size);
824 writel(bit_mask(20), &g.dim2->MIEN);
830 u8 dim_init_isoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
833 if (!g.dim_is_initialized || !ch)
834 return DIM_ERR_DRIVER_NOT_INITIALIZED;
836 if (!check_channel_address(ch_address))
837 return DIM_INIT_ERR_CHANNEL_ADDRESS;
839 if (!check_packet_length(packet_length))
840 return DIM_ERR_BAD_CONFIG;
843 ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
844 ch->dbr_addr = alloc_dbr(ch->dbr_size);
845 if (ch->dbr_addr >= DBR_SIZE)
846 return DIM_INIT_ERR_OUT_OF_MEMORY;
848 isoc_init(ch, ch_address / 2, packet_length);
850 dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
851 ch->dbr_size, packet_length);
856 u8 dim_init_sync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
859 u16 bd_factor = g.fcnt + 2;
861 if (!g.dim_is_initialized || !ch)
862 return DIM_ERR_DRIVER_NOT_INITIALIZED;
864 if (!check_channel_address(ch_address))
865 return DIM_INIT_ERR_CHANNEL_ADDRESS;
867 if (!check_bytes_per_frame(bytes_per_frame))
868 return DIM_ERR_BAD_CONFIG;
871 ch->dbr_size = bytes_per_frame << bd_factor;
872 ch->dbr_addr = alloc_dbr(ch->dbr_size);
873 if (ch->dbr_addr >= DBR_SIZE)
874 return DIM_INIT_ERR_OUT_OF_MEMORY;
876 sync_init(ch, ch_address / 2, bytes_per_frame);
878 dim2_clear_dbr(ch->dbr_addr, ch->dbr_size);
879 dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
880 ch->dbr_addr, ch->dbr_size, 0);
885 u8 dim_destroy_channel(struct dim_channel *ch)
887 if (!g.dim_is_initialized || !ch)
888 return DIM_ERR_DRIVER_NOT_INITIALIZED;
890 if (ch->addr == g.atx_dbr.ch_addr) {
891 writel(0, &g.dim2->MIEN);
892 g.atx_dbr.ch_addr = 0;
895 dim2_clear_channel(ch->addr);
896 if (ch->dbr_addr < DBR_SIZE)
897 free_dbr(ch->dbr_addr, ch->dbr_size);
898 ch->dbr_addr = DBR_SIZE;
903 void dim_service_ahb_int_irq(struct dim_channel *const *channels)
907 if (!g.dim_is_initialized) {
908 dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
909 "DIM is not initialized");
914 dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
919 * Use while-loop and a flag to make sure the age is changed back at
920 * least once, otherwise the interrupt may never come if CPU generates
921 * interrupt on changing age.
922 * This cycle runs not more than number of channels, because
923 * channel_service_interrupt() routine doesn't start the channel again.
926 struct dim_channel *const *ch = channels;
928 state_changed = false;
931 state_changed |= channel_service_interrupt(*ch);
934 } while (state_changed);
937 u8 dim_service_channel(struct dim_channel *ch)
939 if (!g.dim_is_initialized || !ch)
940 return DIM_ERR_DRIVER_NOT_INITIALIZED;
942 return channel_service(ch);
945 struct dim_ch_state *dim_get_channel_state(struct dim_channel *ch,
946 struct dim_ch_state *state_ptr)
948 if (!ch || !state_ptr)
951 state_ptr->ready = ch->state.level < 2;
952 state_ptr->done_buffers = ch->done_sw_buffers_number;
957 bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
961 return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
964 return channel_start(ch, buffer_addr, buffer_size);
967 bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number)
970 return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
973 return channel_detach_buffers(ch, buffers_number);