/*
 * QEMU ETRAX DMA Controller.
 *
 * Copyright (c) 2008 Edgar E. Iglesias, Axis Communications AB.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdio.h>
#include <sys/time.h>
#include "hw.h"
#include "qemu-common.h"
#include "sysemu.h"

#include "etraxfs_dma.h"

#define D(x)
#define RW_DATA           (0x0 / 4)
#define RW_SAVED_DATA     (0x58 / 4)
#define RW_SAVED_DATA_BUF (0x5c / 4)
#define RW_GROUP          (0x60 / 4)
#define RW_GROUP_DOWN     (0x7c / 4)
#define RW_CMD            (0x80 / 4)
#define RW_CFG            (0x84 / 4)
#define RW_STAT           (0x88 / 4)
#define RW_INTR_MASK      (0x8c / 4)
#define RW_ACK_INTR       (0x90 / 4)
#define R_INTR            (0x94 / 4)
#define R_MASKED_INTR     (0x98 / 4)
#define RW_STREAM_CMD     (0x9c / 4)

#define DMA_REG_MAX       (0x100 / 4)
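
/*
 * The offsets above are byte offsets into a channel's register bank,
 * pre-divided by four so they can be used directly as indices into the
 * regs[] word array (dma_readl()/dma_writel() shift the address right
 * by two before dispatching).  Each channel occupies a 0x2000-byte
 * window; fs_channel() recovers the channel number from the upper
 * address bits.
 */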

/* descriptors */

// ------------------------------------------------------------ dma_descr_group
typedef struct dma_descr_group {
    uint32_t next;
    unsigned eol        : 1;
    unsigned tol        : 1;
    unsigned bol        : 1;
    unsigned            : 1;
    unsigned intr       : 1;
    unsigned            : 2;
    unsigned en         : 1;
    unsigned            : 7;
    unsigned dis        : 1;
    unsigned md         : 16;
    struct dma_descr_group *up;
    union {
        struct dma_descr_context *context;
        struct dma_descr_group *group;
    } down;
} dma_descr_group;

// ---------------------------------------------------------- dma_descr_context
typedef struct dma_descr_context {
    uint32_t next;
    unsigned eol        : 1;
    unsigned            : 3;
    unsigned intr       : 1;
    unsigned            : 1;
    unsigned store_mode : 1;
    unsigned en         : 1;
    unsigned            : 7;
    unsigned dis        : 1;
    unsigned md0        : 16;
    unsigned md1;
    unsigned md2;
    unsigned md3;
    unsigned md4;
    uint32_t saved_data;
    uint32_t saved_data_buf;
} dma_descr_context;

// ------------------------------------------------------------- dma_descr_data
typedef struct dma_descr_data {
    uint32_t next;
    uint32_t buf;
    unsigned eol        : 1;
    unsigned            : 2;
    unsigned out_eop    : 1;
    unsigned intr       : 1;
    unsigned wait       : 1;
    unsigned            : 2;
    unsigned            : 3;
    unsigned in_eop     : 1;
    unsigned            : 4;
    unsigned md         : 16;
    uint32_t after;
} dma_descr_data;
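
/*
 * Data descriptors are chained through their 'next' physical pointer.
 * 'buf' points at the start of the payload and 'after' just past its
 * end, so the transfer length is after - buf.  'eol' marks the last
 * descriptor of a list; 'intr', 'in_eop' and 'out_eop' control
 * interrupt and end-of-packet signalling.  These structures mirror the
 * guest's in-memory layout, hence the FIXMEs about endianness where
 * they are read and written below.
 */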

/* Constants */
enum {
    regk_dma_ack_pkt                = 0x00000100,
    regk_dma_anytime                = 0x00000001,
    regk_dma_array                  = 0x00000008,
    regk_dma_burst                  = 0x00000020,
    regk_dma_client                 = 0x00000002,
    regk_dma_copy_next              = 0x00000010,
    regk_dma_copy_up                = 0x00000020,
    regk_dma_data_at_eol            = 0x00000001,
    regk_dma_dis_c                  = 0x00000010,
    regk_dma_dis_g                  = 0x00000020,
    regk_dma_idle                   = 0x00000001,
    regk_dma_intern                 = 0x00000004,
    regk_dma_load_c                 = 0x00000200,
    regk_dma_load_c_n               = 0x00000280,
    regk_dma_load_c_next            = 0x00000240,
    regk_dma_load_d                 = 0x00000140,
    regk_dma_load_g                 = 0x00000300,
    regk_dma_load_g_down            = 0x000003c0,
    regk_dma_load_g_next            = 0x00000340,
    regk_dma_load_g_up              = 0x00000380,
    regk_dma_next_en                = 0x00000010,
    regk_dma_next_pkt               = 0x00000010,
    regk_dma_no                     = 0x00000000,
    regk_dma_only_at_wait           = 0x00000000,
    regk_dma_restore                = 0x00000020,
    regk_dma_rst                    = 0x00000001,
    regk_dma_running                = 0x00000004,
    regk_dma_rw_cfg_default         = 0x00000000,
    regk_dma_rw_cmd_default         = 0x00000000,
    regk_dma_rw_intr_mask_default   = 0x00000000,
    regk_dma_rw_stat_default        = 0x00000101,
    regk_dma_rw_stream_cmd_default  = 0x00000000,
    regk_dma_save_down              = 0x00000020,
    regk_dma_save_up                = 0x00000020,
    regk_dma_set_reg                = 0x00000050,
    regk_dma_set_w_size1            = 0x00000190,
    regk_dma_set_w_size2            = 0x000001a0,
    regk_dma_set_w_size4            = 0x000001c0,
    regk_dma_stopped                = 0x00000002,
    regk_dma_store_c                = 0x00000002,
    regk_dma_store_descr            = 0x00000000,
    regk_dma_store_g                = 0x00000004,
    regk_dma_store_md               = 0x00000001,
    regk_dma_sw                     = 0x00000008,
    regk_dma_update_down            = 0x00000020,
    regk_dma_yes                    = 0x00000001
};

enum dma_ch_state
{
    RST = 1,
    STOPPED = 2,
    RUNNING = 4
};

struct fs_dma_channel
{
    qemu_irq irq;
    struct etraxfs_dma_client *client;

    /* Internal status.  */
    int stream_cmd_src;
    enum dma_ch_state state;

    unsigned int input : 1;
    unsigned int eol : 1;

    struct dma_descr_group current_g;
    struct dma_descr_context current_c;
    struct dma_descr_data current_d;

    /* Control registers.  */
    uint32_t regs[DMA_REG_MAX];
};

struct fs_dma_ctrl
{
    int map;
    int nr_channels;
    struct fs_dma_channel *channels;

    QEMUBH *bh;
};
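
/*
 * Per-channel state lives in fs_dma_channel: the guest-visible
 * registers in regs[], shadow copies of the current group, context and
 * data descriptors, and the attached client, if any.  The controller
 * itself only keeps the channel array and a bottom half (bh) that keeps
 * DMA_run() rescheduled while any running channel has work to do.
 */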

static void DMA_run(void *opaque);
static int channel_out_run(struct fs_dma_ctrl *ctrl, int c);

static inline uint32_t channel_reg(struct fs_dma_ctrl *ctrl, int c, int reg)
{
    return ctrl->channels[c].regs[reg];
}

static inline int channel_stopped(struct fs_dma_ctrl *ctrl, int c)
{
    return channel_reg(ctrl, c, RW_CFG) & 2;
}

static inline int channel_en(struct fs_dma_ctrl *ctrl, int c)
{
    return (channel_reg(ctrl, c, RW_CFG) & 1)
           && ctrl->channels[c].client;
}

static inline int fs_channel(target_phys_addr_t addr)
{
    /* Every channel has a 0x2000 ctrl register map.  */
    return addr >> 13;
}

#ifdef USE_THIS_DEAD_CODE
static void channel_load_g(struct fs_dma_ctrl *ctrl, int c)
{
    target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP);

    /* Load and decode. FIXME: handle endianness.  */
    cpu_physical_memory_read(addr,
                             (void *) &ctrl->channels[c].current_g,
                             sizeof ctrl->channels[c].current_g);
}

static void dump_c(int ch, struct dma_descr_context *c)
{
    printf("%s ch=%d\n", __func__, ch);
    printf("next=%x\n", c->next);
    printf("saved_data=%x\n", c->saved_data);
    printf("saved_data_buf=%x\n", c->saved_data_buf);
    printf("eol=%x\n", (uint32_t) c->eol);
}

static void dump_d(int ch, struct dma_descr_data *d)
{
    printf("%s ch=%d\n", __func__, ch);
    printf("next=%x\n", d->next);
    printf("buf=%x\n", d->buf);
    printf("after=%x\n", d->after);
    printf("intr=%x\n", (uint32_t) d->intr);
    printf("out_eop=%x\n", (uint32_t) d->out_eop);
    printf("in_eop=%x\n", (uint32_t) d->in_eop);
    printf("eol=%x\n", (uint32_t) d->eol);
}
#endif

static void channel_load_c(struct fs_dma_ctrl *ctrl, int c)
{
    target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

    /* Load and decode. FIXME: handle endianness.  */
    cpu_physical_memory_read(addr,
                             (void *) &ctrl->channels[c].current_c,
                             sizeof ctrl->channels[c].current_c);

    D(dump_c(c, &ctrl->channels[c].current_c));
    /* I guess this should update the current pos.  */
    ctrl->channels[c].regs[RW_SAVED_DATA] =
        (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data;
    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
        (uint32_t)(unsigned long)ctrl->channels[c].current_c.saved_data_buf;
}

static void channel_load_d(struct fs_dma_ctrl *ctrl, int c)
{
    target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA);

    /* Load and decode. FIXME: handle endianness.  */
    D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
    cpu_physical_memory_read(addr,
                             (void *) &ctrl->channels[c].current_d,
                             sizeof ctrl->channels[c].current_d);

    D(dump_d(c, &ctrl->channels[c].current_d));
    ctrl->channels[c].regs[RW_DATA] = addr;
}

static void channel_store_c(struct fs_dma_ctrl *ctrl, int c)
{
    target_phys_addr_t addr = channel_reg(ctrl, c, RW_GROUP_DOWN);

    /* Encode and store. FIXME: handle endianness.  */
    D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
    D(dump_d(c, &ctrl->channels[c].current_d));
    cpu_physical_memory_write(addr,
                              (void *) &ctrl->channels[c].current_c,
                              sizeof ctrl->channels[c].current_c);
}

static void channel_store_d(struct fs_dma_ctrl *ctrl, int c)
{
    target_phys_addr_t addr = channel_reg(ctrl, c, RW_SAVED_DATA);

    /* Encode and store. FIXME: handle endianness.  */
    D(printf("%s ch=%d addr=" TARGET_FMT_plx "\n", __func__, c, addr));
    cpu_physical_memory_write(addr,
                              (void *) &ctrl->channels[c].current_d,
                              sizeof ctrl->channels[c].current_d);
}
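
/*
 * The load/store helpers above copy descriptors directly between guest
 * physical memory and the shadow copies in fs_dma_channel.
 * RW_SAVED_DATA holds the physical address of the current data
 * descriptor and RW_SAVED_DATA_BUF the current position within its
 * buffer; channel_load_c() additionally refreshes both registers from
 * the freshly loaded context descriptor.
 */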

static inline void channel_stop(struct fs_dma_ctrl *ctrl, int c)
{
    /* FIXME:  */
}

static inline void channel_start(struct fs_dma_ctrl *ctrl, int c)
{
    if (ctrl->channels[c].client)
    {
        ctrl->channels[c].eol = 0;
        ctrl->channels[c].state = RUNNING;
        if (!ctrl->channels[c].input)
            channel_out_run(ctrl, c);
    } else
        printf("WARNING: starting DMA ch %d with no client\n", c);

    qemu_bh_schedule_idle(ctrl->bh);
}

static void channel_continue(struct fs_dma_ctrl *ctrl, int c)
{
    if (!channel_en(ctrl, c)
        || channel_stopped(ctrl, c)
        || ctrl->channels[c].state != RUNNING
        /* Only reload the current data descriptor if it has eol set.  */
        || !ctrl->channels[c].current_d.eol) {
        D(printf("continue failed ch=%d state=%d stopped=%d en=%d eol=%d\n",
                 c, ctrl->channels[c].state,
                 channel_stopped(ctrl, c),
                 channel_en(ctrl, c),
                 ctrl->channels[c].eol));
        D(dump_d(c, &ctrl->channels[c].current_d));
        return;
    }

    /* Reload the current descriptor.  */
    channel_load_d(ctrl, c);

    /* If the current descriptor cleared the eol flag and we had already
       reached eol state, do the continue.  */
    if (!ctrl->channels[c].current_d.eol && ctrl->channels[c].eol) {
        D(printf("continue %d ok %x\n", c,
                 ctrl->channels[c].current_d.next));
        ctrl->channels[c].regs[RW_SAVED_DATA] =
            (uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
        channel_load_d(ctrl, c);
        ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
            (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;

        channel_start(ctrl, c);
    }
    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
        (uint32_t)(unsigned long)ctrl->channels[c].current_d.buf;
}

static void channel_stream_cmd(struct fs_dma_ctrl *ctrl, int c, uint32_t v)
{
    unsigned int cmd = v & ((1 << 10) - 1);

    D(printf("%s ch=%d cmd=%x\n",
             __func__, c, cmd));
    if (cmd & regk_dma_load_d) {
        channel_load_d(ctrl, c);
        if (cmd & regk_dma_burst)
            channel_start(ctrl, c);
    }

    if (cmd & regk_dma_load_c) {
        channel_load_c(ctrl, c);
    }
}
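
/*
 * Stream commands use the low ten bits of the value written to
 * RW_STREAM_CMD.  This model only reacts to the load-descriptor
 * commands: regk_dma_load_d (optionally combined with regk_dma_burst
 * to start the channel) and regk_dma_load_c; other command bits are
 * accepted but ignored.
 */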

static void channel_update_irq(struct fs_dma_ctrl *ctrl, int c)
{
    D(printf("%s %d\n", __func__, c));
    ctrl->channels[c].regs[R_INTR] &=
        ~(ctrl->channels[c].regs[RW_ACK_INTR]);

    ctrl->channels[c].regs[R_MASKED_INTR] =
        ctrl->channels[c].regs[R_INTR]
        & ctrl->channels[c].regs[RW_INTR_MASK];

    D(printf("%s: chan=%d masked_intr=%x\n", __func__,
             c,
             ctrl->channels[c].regs[R_MASKED_INTR]));

    qemu_set_irq(ctrl->channels[c].irq,
                 !!ctrl->channels[c].regs[R_MASKED_INTR]);
}
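
/*
 * Interrupt model: R_INTR collects raw status bits, writes to
 * RW_ACK_INTR clear them, and R_MASKED_INTR is recomputed as
 * R_INTR & RW_INTR_MASK.  The channel's IRQ line simply follows
 * whether any masked bit is set.
 */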

static int channel_out_run(struct fs_dma_ctrl *ctrl, int c)
{
    uint32_t len;
    uint32_t saved_data_buf;
    unsigned char buf[2 * 1024];

    if (ctrl->channels[c].eol)
        return 0;

    do {
        D(printf("ch=%d buf=%x after=%x\n",
                 c,
                 (uint32_t)ctrl->channels[c].current_d.buf,
                 (uint32_t)ctrl->channels[c].current_d.after));

        channel_load_d(ctrl, c);
        saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
        len = (uint32_t)(unsigned long)
            ctrl->channels[c].current_d.after;
        len -= saved_data_buf;

        if (len > sizeof buf)
            len = sizeof buf;
        cpu_physical_memory_read(saved_data_buf, buf, len);

        D(printf("channel %d pushes %x %u bytes\n", c,
                 saved_data_buf, len));

        if (ctrl->channels[c].client->client.push)
            ctrl->channels[c].client->client.push(
                ctrl->channels[c].client->client.opaque,
                buf, len);
        else
            printf("WARNING: DMA ch%d dataloss,"
                   " no attached client.\n", c);

        saved_data_buf += len;

        if (saved_data_buf == (uint32_t)(unsigned long)
            ctrl->channels[c].current_d.after) {
            /* Done. Step to next.  */
            if (ctrl->channels[c].current_d.out_eop) {
                /* TODO: signal eop to the client.  */
                D(printf("signal eop\n"));
            }
            if (ctrl->channels[c].current_d.intr) {
                /* TODO: signal eop to the client.  */
                /* data intr.  */
                D(printf("signal intr %d eol=%d\n",
                         len, ctrl->channels[c].current_d.eol));
                ctrl->channels[c].regs[R_INTR] |= (1 << 2);
                channel_update_irq(ctrl, c);
            }
            channel_store_d(ctrl, c);
            if (ctrl->channels[c].current_d.eol) {
                D(printf("channel %d EOL\n", c));
                ctrl->channels[c].eol = 1;

                /* Mark the context as disabled.  */
                ctrl->channels[c].current_c.dis = 1;
                channel_store_c(ctrl, c);

                channel_stop(ctrl, c);
            } else {
                ctrl->channels[c].regs[RW_SAVED_DATA] =
                    (uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
                /* Load new descriptor.  */
                channel_load_d(ctrl, c);
                saved_data_buf = (uint32_t)(unsigned long)
                    ctrl->channels[c].current_d.buf;
            }

            ctrl->channels[c].regs[RW_SAVED_DATA_BUF] =
                saved_data_buf;
            D(dump_d(c, &ctrl->channels[c].current_d));
        }
        ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
    } while (!ctrl->channels[c].eol);
    return 1;
}
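
/*
 * Output path: channel_out_run() walks the data descriptor list,
 * copying up to 2 KB at a time from guest memory (between
 * RW_SAVED_DATA_BUF and the descriptor's 'after' pointer) and pushing
 * it to the attached client's push() callback.  Completed descriptors
 * are written back, an interrupt is raised when 'intr' is set, and the
 * loop stops once a descriptor with 'eol' has been consumed.
 */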

static int channel_in_process(struct fs_dma_ctrl *ctrl, int c,
                              unsigned char *buf, int buflen, int eop)
{
    uint32_t len;
    uint32_t saved_data_buf;

    if (ctrl->channels[c].eol == 1)
        return 0;

    channel_load_d(ctrl, c);
    saved_data_buf = channel_reg(ctrl, c, RW_SAVED_DATA_BUF);
    len = (uint32_t)(unsigned long)ctrl->channels[c].current_d.after;
    len -= saved_data_buf;

    if (len > buflen)
        len = buflen;

    cpu_physical_memory_write(saved_data_buf, buf, len);
    saved_data_buf += len;

    if (saved_data_buf ==
        (uint32_t)(unsigned long)ctrl->channels[c].current_d.after
        || eop) {
        uint32_t r_intr = ctrl->channels[c].regs[R_INTR];

        D(printf("in dscr end len=%d\n",
                 ctrl->channels[c].current_d.after
                 - ctrl->channels[c].current_d.buf));
        ctrl->channels[c].current_d.after = saved_data_buf;

        /* Done. Step to next.  */
        if (ctrl->channels[c].current_d.intr) {
            /* TODO: signal eop to the client.  */
            /* data intr.  */
            ctrl->channels[c].regs[R_INTR] |= 3;
        }
        if (eop) {
            ctrl->channels[c].current_d.in_eop = 1;
            ctrl->channels[c].regs[R_INTR] |= 8;
        }
        if (r_intr != ctrl->channels[c].regs[R_INTR])
            channel_update_irq(ctrl, c);

        channel_store_d(ctrl, c);
        D(dump_d(c, &ctrl->channels[c].current_d));

        if (ctrl->channels[c].current_d.eol) {
            D(printf("channel %d EOL\n", c));
            ctrl->channels[c].eol = 1;

            /* Mark the context as disabled.  */
            ctrl->channels[c].current_c.dis = 1;
            channel_store_c(ctrl, c);

            channel_stop(ctrl, c);
        } else {
            ctrl->channels[c].regs[RW_SAVED_DATA] =
                (uint32_t)(unsigned long)ctrl->channels[c].current_d.next;
            /* Load new descriptor.  */
            channel_load_d(ctrl, c);
            saved_data_buf = (uint32_t)(unsigned long)
                ctrl->channels[c].current_d.buf;
        }
    }

    ctrl->channels[c].regs[RW_SAVED_DATA_BUF] = saved_data_buf;
    return len;
}
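
/*
 * Input path: clients deliver received data through etraxfs_dmac_input(),
 * which ends up here.  The data is written into the current descriptor's
 * buffer in guest memory; when the buffer fills up or the client signals
 * end-of-packet (eop), the descriptor is closed ('after' is trimmed,
 * in_eop set), written back, and the channel steps to the next descriptor
 * unless 'eol' was reached.  The return value is the number of bytes
 * consumed.
 */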

static inline int channel_in_run(struct fs_dma_ctrl *ctrl, int c)
{
    if (ctrl->channels[c].client->client.pull) {
        ctrl->channels[c].client->client.pull(
            ctrl->channels[c].client->client.opaque);
        return 1;
    } else
        return 0;
}

static uint32_t dma_rinvalid(void *opaque, target_phys_addr_t addr)
{
    hw_error("Unsupported short raccess. reg=" TARGET_FMT_plx "\n", addr);
    return 0;
}

static uint32_t
dma_readl(void *opaque, target_phys_addr_t addr)
{
    struct fs_dma_ctrl *ctrl = opaque;
    int c;
    uint32_t r = 0;

    /* Make addr relative to this channel and bounded to nr regs.  */
    c = fs_channel(addr);
    addr &= 0xff;
    addr >>= 2;
    switch (addr)
    {
        case RW_STAT:
            r = ctrl->channels[c].state & 7;
            r |= ctrl->channels[c].eol << 5;
            r |= ctrl->channels[c].stream_cmd_src << 8;
            break;

        default:
            r = ctrl->channels[c].regs[addr];
            D(printf("%s c=%d addr=" TARGET_FMT_plx "\n",
                     __func__, c, addr));
            break;
    }
    return r;
}

static void
dma_winvalid(void *opaque, target_phys_addr_t addr, uint32_t value)
{
    hw_error("Unsupported short waccess. reg=" TARGET_FMT_plx "\n", addr);
}

static void
dma_update_state(struct fs_dma_ctrl *ctrl, int c)
{
    if (ctrl->channels[c].regs[RW_CFG] & 2)
        ctrl->channels[c].state = STOPPED;
    if (!(ctrl->channels[c].regs[RW_CFG] & 1))
        ctrl->channels[c].state = RST;
}

static void
dma_writel(void *opaque, target_phys_addr_t addr, uint32_t value)
{
    struct fs_dma_ctrl *ctrl = opaque;
    int c;

    /* Make addr relative to this channel and bounded to nr regs.  */
    c = fs_channel(addr);
    addr &= 0xff;
    addr >>= 2;
    switch (addr)
    {
        case RW_DATA:
            ctrl->channels[c].regs[addr] = value;
            break;

        case RW_CFG:
            ctrl->channels[c].regs[addr] = value;
            dma_update_state(ctrl, c);
            break;
        case RW_CMD:
            /* continue.  */
            if (value & ~1)
                printf("Invalid store to ch=%d RW_CMD %x\n",
                       c, value);
            ctrl->channels[c].regs[addr] = value;
            channel_continue(ctrl, c);
            break;

        case RW_SAVED_DATA:
        case RW_SAVED_DATA_BUF:
        case RW_GROUP:
        case RW_GROUP_DOWN:
            ctrl->channels[c].regs[addr] = value;
            break;

        case RW_ACK_INTR:
        case RW_INTR_MASK:
            ctrl->channels[c].regs[addr] = value;
            channel_update_irq(ctrl, c);
            if (addr == RW_ACK_INTR)
                ctrl->channels[c].regs[RW_ACK_INTR] = 0;
            break;

        case RW_STREAM_CMD:
            if (value & ~1023)
                printf("Invalid store to ch=%d "
                       "RW_STREAMCMD %x\n",
                       c, value);
            ctrl->channels[c].regs[addr] = value;
            D(printf("stream_cmd ch=%d\n", c));
            channel_stream_cmd(ctrl, c, value);
            break;

        default:
            D(printf("%s c=%d " TARGET_FMT_plx "\n",
                     __func__, c, addr));
            break;
    }
}
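
/*
 * Only 32-bit register accesses are implemented; the byte and halfword
 * slots in the dispatch tables below point at dma_rinvalid() and
 * dma_winvalid(), which raise a hw_error().
 */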

static CPUReadMemoryFunc * const dma_read[] = {
    &dma_rinvalid,
    &dma_rinvalid,
    &dma_readl,
};

static CPUWriteMemoryFunc * const dma_write[] = {
    &dma_winvalid,
    &dma_winvalid,
    &dma_writel,
};

static int etraxfs_dmac_run(void *opaque)
{
    struct fs_dma_ctrl *ctrl = opaque;
    int i;
    int p = 0;

    for (i = 0;
         i < ctrl->nr_channels;
         i++)
    {
        if (ctrl->channels[i].state == RUNNING)
        {
            if (ctrl->channels[i].input) {
                p += channel_in_run(ctrl, i);
            } else {
                p += channel_out_run(ctrl, i);
            }
        }
    }
    return p;
}

int etraxfs_dmac_input(struct etraxfs_dma_client *client,
                       void *buf, int len, int eop)
{
    return channel_in_process(client->ctrl, client->channel,
                              buf, len, eop);
}

/* Connect an IRQ line with a channel.  */
void etraxfs_dmac_connect(void *opaque, int c, qemu_irq *line, int input)
{
    struct fs_dma_ctrl *ctrl = opaque;
    ctrl->channels[c].irq = *line;
    ctrl->channels[c].input = input;
}

void etraxfs_dmac_connect_client(void *opaque, int c,
                                 struct etraxfs_dma_client *cl)
{
    struct fs_dma_ctrl *ctrl = opaque;
    cl->ctrl = ctrl;
    cl->channel = c;
    ctrl->channels[c].client = cl;
}

static void DMA_run(void *opaque)
{
    struct fs_dma_ctrl *etraxfs_dmac = opaque;
    int p = 1;

    if (runstate_is_running())
        p = etraxfs_dmac_run(etraxfs_dmac);

    if (p)
        qemu_bh_schedule_idle(etraxfs_dmac->bh);
}

void *etraxfs_dmac_init(target_phys_addr_t base, int nr_channels)
{
    struct fs_dma_ctrl *ctrl = NULL;

    ctrl = g_malloc0(sizeof *ctrl);

    ctrl->bh = qemu_bh_new(DMA_run, ctrl);

    ctrl->nr_channels = nr_channels;
    ctrl->channels = g_malloc0(sizeof ctrl->channels[0] * nr_channels);

    ctrl->map = cpu_register_io_memory(dma_read, dma_write, ctrl,
                                       DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(base, nr_channels * 0x2000, ctrl->map);
    return ctrl;
}
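
/*
 * For illustration only: a rough sketch of how a board model might wire
 * this controller up.  The base address, channel count, irq array and
 * client below are hypothetical and not taken from this file.
 *
 *   void *dmac = etraxfs_dmac_init(0xb0000000, 10);
 *   for (i = 0; i < 10; i++)
 *       etraxfs_dmac_connect(dmac, i, irqs + i, i & 1);
 *
 *   // A device model fills in client.push/.pull/.opaque and attaches
 *   // itself to a channel:
 *   etraxfs_dmac_connect_client(dmac, 0, &my_client);
 *
 * For output channels the controller pushes data to the client's
 * client.push() callback; for input channels the client hands received
 * data back with etraxfs_dmac_input().
 */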