/* via_dma.c -- DMA support for the VIA Unichrome/Pro
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
 * All Rights Reserved.
 *
 * Copyright 2004 The Unichrome project.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Tungsten Graphics,
 *    Erdi Chen,
 *    Thomas Hellstrom.
 */

#include "drmP.h"
#include "drm.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_3d_reg.h"

#define CMDBUF_ALIGNMENT_SIZE   (0x100)
#define CMDBUF_ALIGNMENT_MASK   (0x0ff)

/* defines for VIA 3D registers */
#define VIA_REG_STATUS          0x400
#define VIA_REG_TRANSET         0x43C
#define VIA_REG_TRANSPACE       0x440

/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY       0x00000080      /* Command Regulator is busy */
#define VIA_2D_ENG_BUSY         0x00000001      /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY         0x00000002      /* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY       0x00020000      /* Virtual Queue is busy */

#define SetReg2DAGP(nReg, nData) {                                      \
        *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;          \
        *((uint32_t *)(vb) + 1) = (nData);                              \
        vb = ((uint32_t *)vb) + 2;                                      \
        dev_priv->dma_low += 8;                                         \
}

#define via_flush_write_combine() DRM_MEMORYBARRIER()

#define VIA_OUT_RING_QW(w1, w2)                 \
        *vb++ = (w1);                           \
        *vb++ = (w2);                           \
        dev_priv->dma_low += 8;
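
/*
 * Reader's summary of the two emit macros above (inferred from their
 * bodies, not from hardware documentation): SetReg2DAGP emits a
 * HALCYON_HEADER1 register write, i.e. one dword selecting the register
 * (byte offset >> 2) followed by one dword of data, while VIA_OUT_RING_QW
 * emits a raw quadword. Both advance the ring write pointer
 * (dev_priv->dma_low) by 8 bytes, so the buffer is always filled in
 * quadword units.
 */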

static void via_cmdbuf_start(drm_via_private_t * dev_priv);
static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
static int via_wait_idle(drm_via_private_t * dev_priv);
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);

/*
 * Free space in command buffer.
 */

static uint32_t
via_cmdbuf_space(drm_via_private_t *dev_priv)
{
        uint32_t agp_base = dev_priv->dma_offset +
                (uint32_t) dev_priv->agpAddr;
        uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

        return ((hw_addr <= dev_priv->dma_low) ?
                (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
                (hw_addr - dev_priv->dma_low));
}
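
/*
 * Worked example with illustrative numbers: if dma_high == 0x100000,
 * dma_low == 0x3000 and the hardware read pointer hw_addr == 0x1000, the
 * regulator trails the write pointer and the free space wraps around:
 * 0x100000 + 0x1000 - 0x3000 == 0xfe000 bytes. If instead hw_addr ==
 * 0x8000 (the regulator is ahead of us after a wrap), the free space is
 * simply 0x8000 - 0x3000 == 0x5000 bytes.
 */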

/*
 * How much does the command regulator lag behind?
 */

static uint32_t
via_cmdbuf_lag(drm_via_private_t *dev_priv)
{
        uint32_t agp_base = dev_priv->dma_offset +
                (uint32_t) dev_priv->agpAddr;
        uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

        return ((hw_addr <= dev_priv->dma_low) ?
                (dev_priv->dma_low - hw_addr) :
                (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
}

/*
 * Check that the given size fits in the buffer, otherwise wait.
 */

static inline int
via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
{
        uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        uint32_t cur_addr, hw_addr, next_addr;
        volatile uint32_t *hw_addr_ptr;
        uint32_t count;
        hw_addr_ptr = dev_priv->hw_addr_ptr;
        cur_addr = dev_priv->dma_low;
        next_addr = cur_addr + size + 512 * 1024;
        count = 1000000;
        do {
                hw_addr = *hw_addr_ptr - agp_base;
                if (count-- == 0) {
                        DRM_ERROR("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
                                  hw_addr, cur_addr, next_addr);
                        return -1;
                }
        } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
        return 0;
}
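
/*
 * Reader's note: the loop above spins while the hardware read pointer
 * lies inside the window (cur_addr, next_addr], i.e. while writing
 * "size" bytes plus a 512 KiB safety margin would overtake the
 * regulator. The fixed iteration budget turns a hung engine into an
 * error return rather than an unbounded busy-wait.
 */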


/*
 * Checks whether the buffer head has reached the end. Rewinds the ring
 * buffer when necessary.
 *
 * Returns a virtual pointer into the ring buffer.
 */

static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
                                      unsigned int size)
{
        if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
            dev_priv->dma_high) {
                via_cmdbuf_rewind(dev_priv);
        }
        if (via_cmdbuf_wait(dev_priv, size) != 0) {
                return NULL;
        }

        return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
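
/*
 * Reader's note (an inference from the rewind path, not a documented
 * requirement): the 4 * CMDBUF_ALIGNMENT_SIZE headroom reserved above
 * leaves room for the aligned jump and pause commands that
 * via_cmdbuf_rewind must still append on top of the caller's data.
 */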

int via_dma_cleanup(drm_device_t * dev)
{
        if (dev->dev_private) {
                drm_via_private_t *dev_priv =
                    (drm_via_private_t *) dev->dev_private;

                if (dev_priv->ring.virtual_start) {
                        via_cmdbuf_reset(dev_priv);

                        drm_core_ioremapfree(&dev_priv->ring.map, dev);
                        dev_priv->ring.virtual_start = NULL;
                }
        }

        return 0;
}

static int via_initialize(drm_device_t * dev,
                          drm_via_private_t * dev_priv,
                          drm_via_dma_init_t * init)
{
        if (!dev_priv || !dev_priv->mmio) {
                DRM_ERROR("via_dma_init called before via_map_init\n");
                return DRM_ERR(EFAULT);
        }

        if (dev_priv->ring.virtual_start != NULL) {
                DRM_ERROR("%s called again without calling cleanup\n",
                          __FUNCTION__);
                return DRM_ERR(EFAULT);
        }

        if (!dev->agp || !dev->agp->base) {
                DRM_ERROR("%s called with no agp memory available\n",
                          __FUNCTION__);
                return DRM_ERR(EFAULT);
        }

        dev_priv->ring.map.offset = dev->agp->base + init->offset;
        dev_priv->ring.map.size = init->size;
        dev_priv->ring.map.type = 0;
        dev_priv->ring.map.flags = 0;
        dev_priv->ring.map.mtrr = 0;

        drm_core_ioremap(&dev_priv->ring.map, dev);

        if (dev_priv->ring.map.handle == NULL) {
                via_dma_cleanup(dev);
                DRM_ERROR("cannot ioremap virtual address for"
                          " ring buffer\n");
                return DRM_ERR(ENOMEM);
        }

        dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

        dev_priv->dma_ptr = dev_priv->ring.virtual_start;
        dev_priv->dma_low = 0;
        dev_priv->dma_high = init->size;
        dev_priv->dma_wrap = init->size;
        dev_priv->dma_offset = init->offset;
        dev_priv->last_pause_ptr = NULL;
        dev_priv->hw_addr_ptr = dev_priv->mmio->handle + init->reg_pause_addr;

        via_cmdbuf_start(dev_priv);

        return 0;
}

int via_dma_init(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        drm_via_dma_init_t init;
        int retcode = 0;

        DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t *) data,
                                 sizeof(init));

        switch (init.func) {
        case VIA_INIT_DMA:
                if (!capable(CAP_SYS_ADMIN))
                        retcode = DRM_ERR(EPERM);
                else
                        retcode = via_initialize(dev, dev_priv, &init);
                break;
        case VIA_CLEANUP_DMA:
                if (!capable(CAP_SYS_ADMIN))
                        retcode = DRM_ERR(EPERM);
                else
                        retcode = via_dma_cleanup(dev);
                break;
        case VIA_DMA_INITIALIZED:
                retcode = (dev_priv->ring.virtual_start != NULL) ?
                        0 : DRM_ERR(EFAULT);
                break;
        default:
                retcode = DRM_ERR(EINVAL);
                break;
        }

        return retcode;
}
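
/*
 * Hypothetical userspace sketch (illustrative only, not part of this
 * driver): a client would fill in drm_via_dma_init_t and issue the
 * corresponding ioctl, along the lines of
 *
 *      drm_via_dma_init_t init;
 *      init.func = VIA_INIT_DMA;
 *      init.offset = ring_offset;      /- offset into AGP memory -/
 *      init.size = ring_size;          /- ring size in bytes -/
 *      init.reg_pause_addr = pause_reg;
 *      ioctl(fd, DRM_IOCTL_VIA_DMA_INIT, &init);
 *
 * The exact ioctl macro and field names are defined in via_drm.h;
 * ring_offset, ring_size and pause_reg are placeholders.
 */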


static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
{
        drm_via_private_t *dev_priv;
        uint32_t *vb;
        int ret;

        dev_priv = (drm_via_private_t *) dev->dev_private;

        if (dev_priv->ring.virtual_start == NULL) {
                DRM_ERROR("%s called without initializing AGP ring buffer.\n",
                          __FUNCTION__);
                return DRM_ERR(EFAULT);
        }

        if (cmd->size > VIA_PCI_BUF_SIZE) {
                return DRM_ERR(ENOMEM);
        }

        if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
                return DRM_ERR(EFAULT);

        /*
         * Running this function on AGP memory is dead slow. Therefore
         * we run it on a temporary cacheable system memory buffer and
         * copy it to AGP memory when ready.
         */

        if ((ret = via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
                                             cmd->size, dev, 1))) {
                return ret;
        }

        vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
        if (vb == NULL) {
                return DRM_ERR(EAGAIN);
        }

        memcpy(vb, dev_priv->pci_buf, cmd->size);

        dev_priv->dma_low += cmd->size;

        /*
         * Small submissions somehow stall the CPU (AGP cache effects?);
         * pad them to a larger size.
         */

        if (cmd->size < 0x100)
                via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
        via_cmdbuf_pause(dev_priv);

        return 0;
}

int via_driver_dma_quiescent(drm_device_t * dev)
{
        drm_via_private_t *dev_priv = dev->dev_private;

        if (!via_wait_idle(dev_priv)) {
                return DRM_ERR(EBUSY);
        }
        return 0;
}

int via_flush_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;

        LOCK_TEST_WITH_RETURN(dev, filp);

        return via_driver_dma_quiescent(dev);
}

int via_cmdbuffer(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_via_cmdbuffer_t cmdbuf;
        int ret;

        LOCK_TEST_WITH_RETURN(dev, filp);

        DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
                                 sizeof(cmdbuf));

        DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size);

        ret = via_dispatch_cmdbuffer(dev, &cmdbuf);
        if (ret) {
                return ret;
        }

        return 0;
}

extern int
via_parse_command_stream(drm_device_t *dev, const uint32_t *buf,
                         unsigned int size);

static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
                                      drm_via_cmdbuffer_t * cmd)
{
        drm_via_private_t *dev_priv = dev->dev_private;
        int ret;

        if (cmd->size > VIA_PCI_BUF_SIZE) {
                return DRM_ERR(ENOMEM);
        }
        if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
                return DRM_ERR(EFAULT);

        if ((ret = via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
                                             cmd->size, dev, 0))) {
                return ret;
        }

        ret = via_parse_command_stream(dev, (const uint32_t *) dev_priv->pci_buf,
                                       cmd->size);
        return ret;
}

int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_via_cmdbuffer_t cmdbuf;
        int ret;

        LOCK_TEST_WITH_RETURN(dev, filp);

        DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t *) data,
                                 sizeof(cmdbuf));

        DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf,
                  cmdbuf.size);

        ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf);
        if (ret) {
                return ret;
        }

        return 0;
}

static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
                                         uint32_t * vb, int qw_count)
{
        for (; qw_count > 0; --qw_count) {
                VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
        }
        return vb;
}


/*
 * This function is used internally by the ring buffer management code.
 *
 * Returns a virtual pointer into the ring buffer.
 */
static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
{
        return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}

/*
 * Hooks a segment of data into the tail of the ring-buffer by
 * modifying the pause address stored in the buffer itself. If
 * the regulator has already paused, restart it.
 */
static int via_hook_segment(drm_via_private_t *dev_priv,
                            uint32_t pause_addr_hi, uint32_t pause_addr_lo,
                            int no_pci_fire)
{
        int paused, count;
        volatile uint32_t *paused_at = dev_priv->last_pause_ptr;

        via_flush_write_combine();
        while (!*(via_get_dma(dev_priv) - 1));
        *dev_priv->last_pause_ptr = pause_addr_lo;
        via_flush_write_combine();

        /*
         * The below statement is inserted to really force the flush.
         * Not sure it is needed.
         */

        while (!*dev_priv->last_pause_ptr);
        dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
        while (!*dev_priv->last_pause_ptr);

        paused = 0;
        count = 20;

        while (!(paused = (VIA_READ(0x41c) & 0x80000000)) && count--);
        if ((count <= 8) && (count >= 0)) {
                uint32_t rgtr, ptr;
                rgtr = *(dev_priv->hw_addr_ptr);
                ptr = ((char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
                        dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr +
                        4 - CMDBUF_ALIGNMENT_SIZE;
                if (rgtr <= ptr) {
                        DRM_ERROR("Command regulator\npaused at count %d, "
                                  "address %x, while current pause address "
                                  "is %x.\nPlease mail this message to "
                                  "<[email protected]>\n",
                                  count, rgtr, ptr);
                }
        }

        if (paused && !no_pci_fire) {
                uint32_t rgtr, ptr;
                uint32_t ptr_low;

                count = 1000000;
                while ((VIA_READ(VIA_REG_STATUS) & VIA_CMD_RGTR_BUSY) &&
                       count--);

                rgtr = *(dev_priv->hw_addr_ptr);
                ptr = ((char *)paused_at - dev_priv->dma_ptr) +
                        dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

                ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ?
                        ptr - 3 * CMDBUF_ALIGNMENT_SIZE : 0;
                if (rgtr <= ptr && rgtr >= ptr_low) {
                        VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
                        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
                        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
                }
        }
        return paused;
}
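
/*
 * Reader's note on the handshake above, inferred from the code rather
 * than from documentation: each segment ends with an aligned pause
 * command, and last_pause_ptr points at that command's low address
 * dword. New work is spliced in by overwriting the previous segment's
 * pause_addr_lo slot with the address of the new pause point, and the
 * busy-waits on *last_pause_ptr are there to make sure the
 * write-combined stores have actually reached memory before the
 * regulator can fetch them.
 */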


static int via_wait_idle(drm_via_private_t * dev_priv)
{
        int count = 10000000;
        while (count-- && (VIA_READ(VIA_REG_STATUS) &
                           (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
                            VIA_3D_ENG_BUSY)));
        return count;
}

static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
                               uint32_t addr, uint32_t *cmd_addr_hi,
                               uint32_t *cmd_addr_lo, int skip_wait)
{
        uint32_t agp_base;
        uint32_t cmd_addr, addr_lo, addr_hi;
        uint32_t *vb;
        uint32_t qw_pad_count;

        if (!skip_wait)
                via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);

        vb = via_get_dma(dev_priv);
        VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
                        (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
                ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

        cmd_addr = (addr) ? addr :
                agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
        addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
                   (cmd_addr & HC_HAGPBpL_MASK));
        addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));

        vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
        VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
        return vb;
}
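
/*
 * Worked example of the padding arithmetic above (illustrative numbers):
 * with CMDBUF_ALIGNMENT_SIZE == 0x100 there are 32 quadwords per
 * alignment block. If dma_low & 0xff == 0x10 after the HC_HEADER2
 * quadword has been emitted, qw_pad_count == 32 - 2 == 30, so 29 dummy
 * quadwords are written and the final pause/jump quadword ends exactly
 * on the next 256-byte boundary.
 */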

static void via_cmdbuf_start(drm_via_private_t * dev_priv)
{
        uint32_t pause_addr_lo, pause_addr_hi;
        uint32_t start_addr, start_addr_lo;
        uint32_t end_addr, end_addr_lo;
        uint32_t command;
        uint32_t agp_base;

        dev_priv->dma_low = 0;

        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        start_addr = agp_base;
        end_addr = agp_base + dev_priv->dma_high;

        start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
        end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
        command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
                   ((end_addr & 0xff000000) >> 16));

        dev_priv->last_pause_ptr =
                via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
                              &pause_addr_hi, &pause_addr_lo, 1) - 1;

        via_flush_write_combine();
        while (!*dev_priv->last_pause_ptr);

        VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
        VIA_WRITE(VIA_REG_TRANSPACE, command);
        VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
        VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);

        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
        VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);

        VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
}
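
/*
 * Reader's note: as far as can be inferred from the register names, the
 * sequence above programs the regulator through the HC_ParaType_PreCR
 * parameter space: buffer start, buffer end and the initial pause
 * address are loaded first, and the final write of
 * command | HC_HAGPCMNT_MASK is what starts the engine fetching. Consult
 * the Unichrome hardware documentation for authoritative details.
 */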

static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
{
        uint32_t *vb;

        via_cmdbuf_wait(dev_priv, qwords + 2);
        vb = via_get_dma(dev_priv);
        VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
        via_align_buffer(dev_priv, vb, qwords);
}

static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
{
        uint32_t *vb = via_get_dma(dev_priv);
        SetReg2DAGP(0x0C, (0 | (0 << 16)));
        SetReg2DAGP(0x10, 0 | (0 << 16));
        SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}

static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
{
        uint32_t agp_base;
        uint32_t pause_addr_lo, pause_addr_hi;
        uint32_t jump_addr_lo, jump_addr_hi;
        volatile uint32_t *last_pause_ptr;
        uint32_t dma_low_save1, dma_low_save2;

        agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
        via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
                      &jump_addr_lo, 0);

        dev_priv->dma_wrap = dev_priv->dma_low;

        /*
         * Wrap command buffer to the beginning.
         */

        dev_priv->dma_low = 0;
        if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
                DRM_ERROR("via_cmdbuf_jump failed\n");
        }

        via_dummy_bitblt(dev_priv);
        via_dummy_bitblt(dev_priv);

        last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
                                       &pause_addr_hi, &pause_addr_lo, 0) - 1;
        via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                      &pause_addr_lo, 0);

        *last_pause_ptr = pause_addr_lo;
        dma_low_save1 = dev_priv->dma_low;

        /*
         * Now, set a trap that will pause the regulator if it tries to
         * rerun the old command buffer. (This may happen if
         * via_hook_segment detects a command regulator pause and reissues
         * the jump command over PCI while the regulator has already taken
         * the jump and actually paused at the current buffer end.) There
         * appears to be no other way to detect this condition, since the
         * hw_addr_pointer does not seem to get updated immediately when a
         * jump occurs.
         */

        last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
                                       &pause_addr_hi, &pause_addr_lo, 0) - 1;
        via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
                      &pause_addr_lo, 0);
        *last_pause_ptr = pause_addr_lo;

        dma_low_save2 = dev_priv->dma_low;
        dev_priv->dma_low = dma_low_save1;
        via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
        dev_priv->dma_low = dma_low_save2;
        via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}


static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{
        via_cmdbuf_jump(dev_priv);
}

static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
{
        uint32_t pause_addr_lo, pause_addr_hi;

        via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
        via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
{
        via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}

static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
{
        via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
        via_wait_idle(dev_priv);
}

/*
 * User interface to the space and lag functions.
 */

int
via_cmdbuf_size(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_via_cmdbuf_size_t d_siz;
        int ret = 0;
        uint32_t tmp_size, count;
        drm_via_private_t *dev_priv;

        DRM_DEBUG("via cmdbuf_size\n");
        LOCK_TEST_WITH_RETURN(dev, filp);

        dev_priv = (drm_via_private_t *) dev->dev_private;

        if (dev_priv->ring.virtual_start == NULL) {
                DRM_ERROR("%s called without initializing AGP ring buffer.\n",
                          __FUNCTION__);
                return DRM_ERR(EFAULT);
        }

        DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t *) data,
                                 sizeof(d_siz));

        count = 1000000;
        tmp_size = d_siz.size;
        switch (d_siz.func) {
        case VIA_CMDBUF_SPACE:
                while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz.size) &&
                       count--) {
                        if (!d_siz.wait) {
                                break;
                        }
                }
                if (!count) {
                        DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
                        ret = DRM_ERR(EAGAIN);
                }
                break;
        case VIA_CMDBUF_LAG:
                while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz.size) &&
                       count--) {
                        if (!d_siz.wait) {
                                break;
                        }
                }
                if (!count) {
                        DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
                        ret = DRM_ERR(EAGAIN);
                }
                break;
        default:
                ret = DRM_ERR(EFAULT);
        }
        d_siz.size = tmp_size;

        DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t *) data, d_siz,
                               sizeof(d_siz));
        return ret;
}