Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* i915_dma.c -- DMA support for the I915 -*- linux-c -*- |
2 | */ | |
0d6aa60b | 3 | /* |
1da177e4 LT |
4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
5 | * All Rights Reserved. | |
bc54fd1a DA |
6 | * |
7 | * Permission is hereby granted, free of charge, to any person obtaining a | |
8 | * copy of this software and associated documentation files (the | |
9 | * "Software"), to deal in the Software without restriction, including | |
10 | * without limitation the rights to use, copy, modify, merge, publish, | |
11 | * distribute, sub license, and/or sell copies of the Software, and to | |
12 | * permit persons to whom the Software is furnished to do so, subject to | |
13 | * the following conditions: | |
14 | * | |
15 | * The above copyright notice and this permission notice (including the | |
16 | * next paragraph) shall be included in all copies or substantial portions | |
17 | * of the Software. | |
18 | * | |
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | |
20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. | |
22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR | |
23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, | |
24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE | |
25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |
26 | * | |
0d6aa60b | 27 | */ |
1da177e4 LT |
28 | |
29 | #include "drmP.h" | |
30 | #include "drm.h" | |
79e53945 | 31 | #include "drm_crtc_helper.h" |
785b93ef | 32 | #include "drm_fb_helper.h" |
79e53945 | 33 | #include "intel_drv.h" |
1da177e4 LT |
34 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | |
1c5d22f7 | 36 | #include "i915_trace.h" |
28d52043 | 37 | #include <linux/vgaarb.h> |
1da177e4 | 38 | |
1da177e4 LT |
39 | /* Really want an OS-independent resettable timer. Would like to have |
40 | * this loop run for (eg) 3 sec, but have the timer reset every time | |
41 | * the head pointer changes, so that EBUSY only happens if the ring | |
42 | * actually stalls for (eg) 3 seconds. | |
43 | */ | |
84b1fd10 | 44 | int i915_wait_ring(struct drm_device * dev, int n, const char *caller) |
1da177e4 LT |
45 | { |
46 | drm_i915_private_t *dev_priv = dev->dev_private; | |
47 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); | |
d3a6d446 KP |
48 | u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; |
49 | u32 last_acthd = I915_READ(acthd_reg); | |
50 | u32 acthd; | |
585fb111 | 51 | u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; |
1da177e4 LT |
52 | int i; |
53 | ||
1c5d22f7 CW |
54 | trace_i915_ring_wait_begin (dev); |
55 | ||
d3a6d446 | 56 | for (i = 0; i < 100000; i++) { |
585fb111 | 57 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; |
d3a6d446 | 58 | acthd = I915_READ(acthd_reg); |
1da177e4 LT |
59 | ring->space = ring->head - (ring->tail + 8); |
60 | if (ring->space < 0) | |
61 | ring->space += ring->Size; | |
1c5d22f7 CW |
62 | if (ring->space >= n) { |
63 | trace_i915_ring_wait_end (dev); | |
1da177e4 | 64 | return 0; |
1c5d22f7 | 65 | } |
1da177e4 | 66 | |
98787c05 CW |
67 | if (dev->primary->master) { |
68 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; | |
69 | if (master_priv->sarea_priv) | |
70 | master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; | |
71 | } | |
72 | ||
1da177e4 LT |
73 | |
74 | if (ring->head != last_head) | |
75 | i = 0; | |
d3a6d446 KP |
76 | if (acthd != last_acthd) |
77 | i = 0; | |
1da177e4 LT |
78 | |
79 | last_head = ring->head; | |
d3a6d446 KP |
80 | last_acthd = acthd; |
81 | msleep_interruptible(10); | |
82 | ||
1da177e4 LT |
83 | } |
84 | ||
1c5d22f7 | 85 | trace_i915_ring_wait_end (dev); |
20caafa6 | 86 | return -EBUSY; |
1da177e4 LT |
87 | } |
88 | ||
0ef82af7 CW |
89 | /* As a ringbuffer is only allowed to wrap between instructions, fill |
90 | * the tail with NOOPs. | |
91 | */ | |
92 | int i915_wrap_ring(struct drm_device *dev) | |
93 | { | |
94 | drm_i915_private_t *dev_priv = dev->dev_private; | |
95 | volatile unsigned int *virt; | |
96 | int rem; | |
97 | ||
98 | rem = dev_priv->ring.Size - dev_priv->ring.tail; | |
99 | if (dev_priv->ring.space < rem) { | |
100 | int ret = i915_wait_ring(dev, rem, __func__); | |
101 | if (ret) | |
102 | return ret; | |
103 | } | |
104 | dev_priv->ring.space -= rem; | |
105 | ||
106 | virt = (unsigned int *) | |
107 | (dev_priv->ring.virtual_start + dev_priv->ring.tail); | |
108 | rem /= 4; | |
109 | while (rem--) | |
110 | *virt++ = MI_NOOP; | |
111 | ||
112 | dev_priv->ring.tail = 0; | |
113 | ||
114 | return 0; | |
115 | } | |
116 | ||
398c9cb2 KP |
117 | /** |
118 | * Sets up the hardware status page for devices that need a physical address | |
119 | * in the register. | |
120 | */ | |
3043c60c | 121 | static int i915_init_phys_hws(struct drm_device *dev) |
398c9cb2 KP |
122 | { |
123 | drm_i915_private_t *dev_priv = dev->dev_private; | |
124 | /* Program Hardware Status Page */ | |
125 | dev_priv->status_page_dmah = | |
126 | drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); | |
127 | ||
128 | if (!dev_priv->status_page_dmah) { | |
129 | DRM_ERROR("Can not allocate hardware status page\n"); | |
130 | return -ENOMEM; | |
131 | } | |
132 | dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; | |
133 | dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; | |
134 | ||
135 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | |
136 | ||
137 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); | |
8a4c47f3 | 138 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); |
398c9cb2 KP |
139 | return 0; |
140 | } | |
141 | ||
142 | /** | |
143 | * Frees the hardware status page, whether it's a physical address or a virtual | |
144 | * address set up by the X Server. | |
145 | */ | |
3043c60c | 146 | static void i915_free_hws(struct drm_device *dev) |
398c9cb2 KP |
147 | { |
148 | drm_i915_private_t *dev_priv = dev->dev_private; | |
149 | if (dev_priv->status_page_dmah) { | |
150 | drm_pci_free(dev, dev_priv->status_page_dmah); | |
151 | dev_priv->status_page_dmah = NULL; | |
152 | } | |
153 | ||
154 | if (dev_priv->status_gfx_addr) { | |
155 | dev_priv->status_gfx_addr = 0; | |
156 | drm_core_ioremapfree(&dev_priv->hws_map, dev); | |
157 | } | |
158 | ||
159 | /* Need to rewrite hardware status page */ | |
160 | I915_WRITE(HWS_PGA, 0x1ffff000); | |
161 | } | |
162 | ||
84b1fd10 | 163 | void i915_kernel_lost_context(struct drm_device * dev) |
1da177e4 LT |
164 | { |
165 | drm_i915_private_t *dev_priv = dev->dev_private; | |
7c1c2871 | 166 | struct drm_i915_master_private *master_priv; |
1da177e4 LT |
167 | drm_i915_ring_buffer_t *ring = &(dev_priv->ring); |
168 | ||
79e53945 JB |
169 | /* |
170 | * We should never lose context on the ring with modesetting | |
171 | * as we don't expose it to userspace | |
172 | */ | |
173 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | |
174 | return; | |
175 | ||
585fb111 JB |
176 | ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; |
177 | ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; | |
1da177e4 LT |
178 | ring->space = ring->head - (ring->tail + 8); |
179 | if (ring->space < 0) | |
180 | ring->space += ring->Size; | |
181 | ||
7c1c2871 DA |
182 | if (!dev->primary->master) |
183 | return; | |
184 | ||
185 | master_priv = dev->primary->master->driver_priv; | |
186 | if (ring->head == ring->tail && master_priv->sarea_priv) | |
187 | master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; | |
1da177e4 LT |
188 | } |
189 | ||
84b1fd10 | 190 | static int i915_dma_cleanup(struct drm_device * dev) |
1da177e4 | 191 | { |
ba8bbcf6 | 192 | drm_i915_private_t *dev_priv = dev->dev_private; |
1da177e4 LT |
193 | /* Make sure interrupts are disabled here because the uninstall ioctl |
194 | * may not have been called from userspace and after dev_private | |
195 | * is freed, it's too late. | |
196 | */ | |
ed4cb414 | 197 | if (dev->irq_enabled) |
b5e89ed5 | 198 | drm_irq_uninstall(dev); |
1da177e4 | 199 | |
ba8bbcf6 JB |
200 | if (dev_priv->ring.virtual_start) { |
201 | drm_core_ioremapfree(&dev_priv->ring.map, dev); | |
3043c60c EA |
202 | dev_priv->ring.virtual_start = NULL; |
203 | dev_priv->ring.map.handle = NULL; | |
ba8bbcf6 JB |
204 | dev_priv->ring.map.size = 0; |
205 | } | |
dc7a9319 | 206 | |
398c9cb2 KP |
207 | /* Clear the HWS virtual address at teardown */ |
208 | if (I915_NEED_GFX_HWS(dev)) | |
209 | i915_free_hws(dev); | |
1da177e4 LT |
210 | |
211 | return 0; | |
212 | } | |
213 | ||
ba8bbcf6 | 214 | static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) |
1da177e4 | 215 | { |
ba8bbcf6 | 216 | drm_i915_private_t *dev_priv = dev->dev_private; |
7c1c2871 | 217 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
1da177e4 | 218 | |
3a03ac1a DA |
219 | master_priv->sarea = drm_getsarea(dev); |
220 | if (master_priv->sarea) { | |
221 | master_priv->sarea_priv = (drm_i915_sarea_t *) | |
222 | ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); | |
223 | } else { | |
8a4c47f3 | 224 | DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n"); |
3a03ac1a DA |
225 | } |
226 | ||
673a394b EA |
227 | if (init->ring_size != 0) { |
228 | if (dev_priv->ring.ring_obj != NULL) { | |
229 | i915_dma_cleanup(dev); | |
230 | DRM_ERROR("Client tried to initialize ringbuffer in " | |
231 | "GEM mode\n"); | |
232 | return -EINVAL; | |
233 | } | |
1da177e4 | 234 | |
673a394b | 235 | dev_priv->ring.Size = init->ring_size; |
1da177e4 | 236 | |
673a394b EA |
237 | dev_priv->ring.map.offset = init->ring_start; |
238 | dev_priv->ring.map.size = init->ring_size; | |
239 | dev_priv->ring.map.type = 0; | |
240 | dev_priv->ring.map.flags = 0; | |
241 | dev_priv->ring.map.mtrr = 0; | |
1da177e4 | 242 | |
6fb88588 | 243 | drm_core_ioremap_wc(&dev_priv->ring.map, dev); |
673a394b EA |
244 | |
245 | if (dev_priv->ring.map.handle == NULL) { | |
246 | i915_dma_cleanup(dev); | |
247 | DRM_ERROR("can not ioremap virtual address for" | |
248 | " ring buffer\n"); | |
249 | return -ENOMEM; | |
250 | } | |
1da177e4 LT |
251 | } |
252 | ||
253 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; | |
254 | ||
a6b54f3f | 255 | dev_priv->cpp = init->cpp; |
1da177e4 LT |
256 | dev_priv->back_offset = init->back_offset; |
257 | dev_priv->front_offset = init->front_offset; | |
258 | dev_priv->current_page = 0; | |
7c1c2871 DA |
259 | if (master_priv->sarea_priv) |
260 | master_priv->sarea_priv->pf_current_page = 0; | |
1da177e4 | 261 | |
1da177e4 LT |
262 | /* Allow hardware batchbuffers unless told otherwise. |
263 | */ | |
264 | dev_priv->allow_batchbuffer = 1; | |
265 | ||
1da177e4 LT |
266 | return 0; |
267 | } | |
268 | ||
84b1fd10 | 269 | static int i915_dma_resume(struct drm_device * dev) |
1da177e4 LT |
270 | { |
271 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
272 | ||
8a4c47f3 | 273 | DRM_DEBUG_DRIVER("%s\n", __func__); |
1da177e4 | 274 | |
1da177e4 LT |
275 | if (dev_priv->ring.map.handle == NULL) { |
276 | DRM_ERROR("can not ioremap virtual address for" | |
277 | " ring buffer\n"); | |
20caafa6 | 278 | return -ENOMEM; |
1da177e4 LT |
279 | } |
280 | ||
281 | /* Program Hardware Status Page */ | |
282 | if (!dev_priv->hw_status_page) { | |
283 | DRM_ERROR("Can not find hardware status page\n"); | |
20caafa6 | 284 | return -EINVAL; |
1da177e4 | 285 | } |
8a4c47f3 | 286 | DRM_DEBUG_DRIVER("hw status page @ %p\n", |
be25ed9c | 287 | dev_priv->hw_status_page); |
1da177e4 | 288 | |
dc7a9319 | 289 | if (dev_priv->status_gfx_addr != 0) |
585fb111 | 290 | I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); |
dc7a9319 | 291 | else |
585fb111 | 292 | I915_WRITE(HWS_PGA, dev_priv->dma_status_page); |
8a4c47f3 | 293 | DRM_DEBUG_DRIVER("Enabled hardware status page\n"); |
1da177e4 LT |
294 | |
295 | return 0; | |
296 | } | |
297 | ||
c153f45f EA |
298 | static int i915_dma_init(struct drm_device *dev, void *data, |
299 | struct drm_file *file_priv) | |
1da177e4 | 300 | { |
c153f45f | 301 | drm_i915_init_t *init = data; |
1da177e4 LT |
302 | int retcode = 0; |
303 | ||
c153f45f | 304 | switch (init->func) { |
1da177e4 | 305 | case I915_INIT_DMA: |
ba8bbcf6 | 306 | retcode = i915_initialize(dev, init); |
1da177e4 LT |
307 | break; |
308 | case I915_CLEANUP_DMA: | |
309 | retcode = i915_dma_cleanup(dev); | |
310 | break; | |
311 | case I915_RESUME_DMA: | |
0d6aa60b | 312 | retcode = i915_dma_resume(dev); |
1da177e4 LT |
313 | break; |
314 | default: | |
20caafa6 | 315 | retcode = -EINVAL; |
1da177e4 LT |
316 | break; |
317 | } | |
318 | ||
319 | return retcode; | |
320 | } | |
321 | ||
322 | /* Implement basically the same security restrictions as hardware does | |
323 | * for MI_BATCH_NON_SECURE. These can be made stricter at any time. | |
324 | * | |
325 | * Most of the calculations below involve calculating the size of a | |
326 | * particular instruction. It's important to get the size right as | |
327 | * that tells us where the next instruction to check is. Any illegal | |
328 | * instruction detected will be given a size of zero, which is a | |
329 | * signal to abort the rest of the buffer. | |
330 | */ | |
331 | static int do_validate_cmd(int cmd) | |
332 | { | |
333 | switch (((cmd >> 29) & 0x7)) { | |
334 | case 0x0: | |
335 | switch ((cmd >> 23) & 0x3f) { | |
336 | case 0x0: | |
337 | return 1; /* MI_NOOP */ | |
338 | case 0x4: | |
339 | return 1; /* MI_FLUSH */ | |
340 | default: | |
341 | return 0; /* disallow everything else */ | |
342 | } | |
343 | break; | |
344 | case 0x1: | |
345 | return 0; /* reserved */ | |
346 | case 0x2: | |
347 | return (cmd & 0xff) + 2; /* 2d commands */ | |
348 | case 0x3: | |
349 | if (((cmd >> 24) & 0x1f) <= 0x18) | |
350 | return 1; | |
351 | ||
352 | switch ((cmd >> 24) & 0x1f) { | |
353 | case 0x1c: | |
354 | return 1; | |
355 | case 0x1d: | |
b5e89ed5 | 356 | switch ((cmd >> 16) & 0xff) { |
1da177e4 LT |
357 | case 0x3: |
358 | return (cmd & 0x1f) + 2; | |
359 | case 0x4: | |
360 | return (cmd & 0xf) + 2; | |
361 | default: | |
362 | return (cmd & 0xffff) + 2; | |
363 | } | |
364 | case 0x1e: | |
365 | if (cmd & (1 << 23)) | |
366 | return (cmd & 0xffff) + 1; | |
367 | else | |
368 | return 1; | |
369 | case 0x1f: | |
370 | if ((cmd & (1 << 23)) == 0) /* inline vertices */ | |
371 | return (cmd & 0x1ffff) + 2; | |
372 | else if (cmd & (1 << 17)) /* indirect random */ | |
373 | if ((cmd & 0xffff) == 0) | |
374 | return 0; /* unknown length, too hard */ | |
375 | else | |
376 | return (((cmd & 0xffff) + 1) / 2) + 1; | |
377 | else | |
378 | return 2; /* indirect sequential */ | |
379 | default: | |
380 | return 0; | |
381 | } | |
382 | default: | |
383 | return 0; | |
384 | } | |
385 | ||
386 | return 0; | |
387 | } | |
388 | ||
389 | static int validate_cmd(int cmd) | |
390 | { | |
391 | int ret = do_validate_cmd(cmd); | |
392 | ||
bc5f4523 | 393 | /* printk("validate_cmd( %x ): %d\n", cmd, ret); */ |
1da177e4 LT |
394 | |
395 | return ret; | |
396 | } | |
397 | ||
201361a5 | 398 | static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) |
1da177e4 LT |
399 | { |
400 | drm_i915_private_t *dev_priv = dev->dev_private; | |
401 | int i; | |
402 | RING_LOCALS; | |
403 | ||
de227f5f | 404 | if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8) |
20caafa6 | 405 | return -EINVAL; |
de227f5f | 406 | |
c29b669c | 407 | BEGIN_LP_RING((dwords+1)&~1); |
de227f5f | 408 | |
1da177e4 LT |
409 | for (i = 0; i < dwords;) { |
410 | int cmd, sz; | |
411 | ||
201361a5 | 412 | cmd = buffer[i]; |
1da177e4 | 413 | |
1da177e4 | 414 | if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) |
20caafa6 | 415 | return -EINVAL; |
1da177e4 | 416 | |
1da177e4 LT |
417 | OUT_RING(cmd); |
418 | ||
419 | while (++i, --sz) { | |
201361a5 | 420 | OUT_RING(buffer[i]); |
1da177e4 | 421 | } |
1da177e4 LT |
422 | } |
423 | ||
de227f5f DA |
424 | if (dwords & 1) |
425 | OUT_RING(0); | |
426 | ||
427 | ADVANCE_LP_RING(); | |
428 | ||
1da177e4 LT |
429 | return 0; |
430 | } | |
431 | ||
673a394b EA |
432 | int |
433 | i915_emit_box(struct drm_device *dev, | |
201361a5 | 434 | struct drm_clip_rect *boxes, |
673a394b | 435 | int i, int DR1, int DR4) |
1da177e4 LT |
436 | { |
437 | drm_i915_private_t *dev_priv = dev->dev_private; | |
201361a5 | 438 | struct drm_clip_rect box = boxes[i]; |
1da177e4 LT |
439 | RING_LOCALS; |
440 | ||
1da177e4 LT |
441 | if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { |
442 | DRM_ERROR("Bad box %d,%d..%d,%d\n", | |
443 | box.x1, box.y1, box.x2, box.y2); | |
20caafa6 | 444 | return -EINVAL; |
1da177e4 LT |
445 | } |
446 | ||
c29b669c AH |
447 | if (IS_I965G(dev)) { |
448 | BEGIN_LP_RING(4); | |
449 | OUT_RING(GFX_OP_DRAWRECT_INFO_I965); | |
450 | OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); | |
78eca43d | 451 | OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); |
c29b669c AH |
452 | OUT_RING(DR4); |
453 | ADVANCE_LP_RING(); | |
454 | } else { | |
455 | BEGIN_LP_RING(6); | |
456 | OUT_RING(GFX_OP_DRAWRECT_INFO); | |
457 | OUT_RING(DR1); | |
458 | OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); | |
459 | OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); | |
460 | OUT_RING(DR4); | |
461 | OUT_RING(0); | |
462 | ADVANCE_LP_RING(); | |
463 | } | |
1da177e4 LT |
464 | |
465 | return 0; | |
466 | } | |
467 | ||
c29b669c AH |
468 | /* XXX: Emitting the counter should really be moved to part of the IRQ |
469 | * emit. For now, do it in both places: | |
470 | */ | |
471 | ||
84b1fd10 | 472 | static void i915_emit_breadcrumb(struct drm_device *dev) |
de227f5f DA |
473 | { |
474 | drm_i915_private_t *dev_priv = dev->dev_private; | |
7c1c2871 | 475 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
de227f5f DA |
476 | RING_LOCALS; |
477 | ||
c99b058f | 478 | dev_priv->counter++; |
af6061af | 479 | if (dev_priv->counter > 0x7FFFFFFFUL) |
c99b058f | 480 | dev_priv->counter = 0; |
7c1c2871 DA |
481 | if (master_priv->sarea_priv) |
482 | master_priv->sarea_priv->last_enqueue = dev_priv->counter; | |
de227f5f DA |
483 | |
484 | BEGIN_LP_RING(4); | |
585fb111 | 485 | OUT_RING(MI_STORE_DWORD_INDEX); |
0baf823a | 486 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
de227f5f DA |
487 | OUT_RING(dev_priv->counter); |
488 | OUT_RING(0); | |
489 | ADVANCE_LP_RING(); | |
490 | } | |
491 | ||
84b1fd10 | 492 | static int i915_dispatch_cmdbuffer(struct drm_device * dev, |
201361a5 EA |
493 | drm_i915_cmdbuffer_t *cmd, |
494 | struct drm_clip_rect *cliprects, | |
495 | void *cmdbuf) | |
1da177e4 LT |
496 | { |
497 | int nbox = cmd->num_cliprects; | |
498 | int i = 0, count, ret; | |
499 | ||
500 | if (cmd->sz & 0x3) { | |
501 | DRM_ERROR("alignment"); | |
20caafa6 | 502 | return -EINVAL; |
1da177e4 LT |
503 | } |
504 | ||
505 | i915_kernel_lost_context(dev); | |
506 | ||
507 | count = nbox ? nbox : 1; | |
508 | ||
509 | for (i = 0; i < count; i++) { | |
510 | if (i < nbox) { | |
201361a5 | 511 | ret = i915_emit_box(dev, cliprects, i, |
1da177e4 LT |
512 | cmd->DR1, cmd->DR4); |
513 | if (ret) | |
514 | return ret; | |
515 | } | |
516 | ||
201361a5 | 517 | ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); |
1da177e4 LT |
518 | if (ret) |
519 | return ret; | |
520 | } | |
521 | ||
de227f5f | 522 | i915_emit_breadcrumb(dev); |
1da177e4 LT |
523 | return 0; |
524 | } | |
525 | ||
84b1fd10 | 526 | static int i915_dispatch_batchbuffer(struct drm_device * dev, |
201361a5 EA |
527 | drm_i915_batchbuffer_t * batch, |
528 | struct drm_clip_rect *cliprects) | |
1da177e4 LT |
529 | { |
530 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1da177e4 LT |
531 | int nbox = batch->num_cliprects; |
532 | int i = 0, count; | |
533 | RING_LOCALS; | |
534 | ||
535 | if ((batch->start | batch->used) & 0x7) { | |
536 | DRM_ERROR("alignment"); | |
20caafa6 | 537 | return -EINVAL; |
1da177e4 LT |
538 | } |
539 | ||
540 | i915_kernel_lost_context(dev); | |
541 | ||
542 | count = nbox ? nbox : 1; | |
543 | ||
544 | for (i = 0; i < count; i++) { | |
545 | if (i < nbox) { | |
201361a5 | 546 | int ret = i915_emit_box(dev, cliprects, i, |
1da177e4 LT |
547 | batch->DR1, batch->DR4); |
548 | if (ret) | |
549 | return ret; | |
550 | } | |
551 | ||
0790d5e1 | 552 | if (!IS_I830(dev) && !IS_845G(dev)) { |
1da177e4 | 553 | BEGIN_LP_RING(2); |
21f16289 DA |
554 | if (IS_I965G(dev)) { |
555 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); | |
556 | OUT_RING(batch->start); | |
557 | } else { | |
558 | OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); | |
559 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); | |
560 | } | |
1da177e4 LT |
561 | ADVANCE_LP_RING(); |
562 | } else { | |
563 | BEGIN_LP_RING(4); | |
564 | OUT_RING(MI_BATCH_BUFFER); | |
565 | OUT_RING(batch->start | MI_BATCH_NON_SECURE); | |
566 | OUT_RING(batch->start + batch->used - 4); | |
567 | OUT_RING(0); | |
568 | ADVANCE_LP_RING(); | |
569 | } | |
570 | } | |
571 | ||
de227f5f | 572 | i915_emit_breadcrumb(dev); |
1da177e4 LT |
573 | |
574 | return 0; | |
575 | } | |
576 | ||
af6061af | 577 | static int i915_dispatch_flip(struct drm_device * dev) |
1da177e4 LT |
578 | { |
579 | drm_i915_private_t *dev_priv = dev->dev_private; | |
7c1c2871 DA |
580 | struct drm_i915_master_private *master_priv = |
581 | dev->primary->master->driver_priv; | |
1da177e4 LT |
582 | RING_LOCALS; |
583 | ||
7c1c2871 | 584 | if (!master_priv->sarea_priv) |
c99b058f KH |
585 | return -EINVAL; |
586 | ||
8a4c47f3 | 587 | DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", |
be25ed9c | 588 | __func__, |
589 | dev_priv->current_page, | |
590 | master_priv->sarea_priv->pf_current_page); | |
1da177e4 | 591 | |
af6061af DA |
592 | i915_kernel_lost_context(dev); |
593 | ||
594 | BEGIN_LP_RING(2); | |
585fb111 | 595 | OUT_RING(MI_FLUSH | MI_READ_FLUSH); |
af6061af DA |
596 | OUT_RING(0); |
597 | ADVANCE_LP_RING(); | |
1da177e4 | 598 | |
af6061af DA |
599 | BEGIN_LP_RING(6); |
600 | OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); | |
601 | OUT_RING(0); | |
602 | if (dev_priv->current_page == 0) { | |
603 | OUT_RING(dev_priv->back_offset); | |
604 | dev_priv->current_page = 1; | |
1da177e4 | 605 | } else { |
af6061af DA |
606 | OUT_RING(dev_priv->front_offset); |
607 | dev_priv->current_page = 0; | |
1da177e4 | 608 | } |
af6061af DA |
609 | OUT_RING(0); |
610 | ADVANCE_LP_RING(); | |
1da177e4 | 611 | |
af6061af DA |
612 | BEGIN_LP_RING(2); |
613 | OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); | |
614 | OUT_RING(0); | |
615 | ADVANCE_LP_RING(); | |
1da177e4 | 616 | |
7c1c2871 | 617 | master_priv->sarea_priv->last_enqueue = dev_priv->counter++; |
1da177e4 LT |
618 | |
619 | BEGIN_LP_RING(4); | |
585fb111 | 620 | OUT_RING(MI_STORE_DWORD_INDEX); |
0baf823a | 621 | OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
af6061af DA |
622 | OUT_RING(dev_priv->counter); |
623 | OUT_RING(0); | |
1da177e4 LT |
624 | ADVANCE_LP_RING(); |
625 | ||
7c1c2871 | 626 | master_priv->sarea_priv->pf_current_page = dev_priv->current_page; |
af6061af | 627 | return 0; |
1da177e4 LT |
628 | } |
629 | ||
84b1fd10 | 630 | static int i915_quiescent(struct drm_device * dev) |
1da177e4 LT |
631 | { |
632 | drm_i915_private_t *dev_priv = dev->dev_private; | |
633 | ||
634 | i915_kernel_lost_context(dev); | |
bf9d8929 | 635 | return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__); |
1da177e4 LT |
636 | } |
637 | ||
c153f45f EA |
638 | static int i915_flush_ioctl(struct drm_device *dev, void *data, |
639 | struct drm_file *file_priv) | |
1da177e4 | 640 | { |
546b0974 EA |
641 | int ret; |
642 | ||
643 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); | |
1da177e4 | 644 | |
546b0974 EA |
645 | mutex_lock(&dev->struct_mutex); |
646 | ret = i915_quiescent(dev); | |
647 | mutex_unlock(&dev->struct_mutex); | |
648 | ||
649 | return ret; | |
1da177e4 LT |
650 | } |
651 | ||
c153f45f EA |
652 | static int i915_batchbuffer(struct drm_device *dev, void *data, |
653 | struct drm_file *file_priv) | |
1da177e4 | 654 | { |
1da177e4 | 655 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
7c1c2871 | 656 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
1da177e4 | 657 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) |
7c1c2871 | 658 | master_priv->sarea_priv; |
c153f45f | 659 | drm_i915_batchbuffer_t *batch = data; |
1da177e4 | 660 | int ret; |
201361a5 | 661 | struct drm_clip_rect *cliprects = NULL; |
1da177e4 LT |
662 | |
663 | if (!dev_priv->allow_batchbuffer) { | |
664 | DRM_ERROR("Batchbuffer ioctl disabled\n"); | |
20caafa6 | 665 | return -EINVAL; |
1da177e4 LT |
666 | } |
667 | ||
8a4c47f3 | 668 | DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n", |
be25ed9c | 669 | batch->start, batch->used, batch->num_cliprects); |
1da177e4 | 670 | |
546b0974 | 671 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); |
1da177e4 | 672 | |
201361a5 EA |
673 | if (batch->num_cliprects < 0) |
674 | return -EINVAL; | |
675 | ||
676 | if (batch->num_cliprects) { | |
9a298b2a EA |
677 | cliprects = kcalloc(batch->num_cliprects, |
678 | sizeof(struct drm_clip_rect), | |
679 | GFP_KERNEL); | |
201361a5 EA |
680 | if (cliprects == NULL) |
681 | return -ENOMEM; | |
682 | ||
683 | ret = copy_from_user(cliprects, batch->cliprects, | |
684 | batch->num_cliprects * | |
685 | sizeof(struct drm_clip_rect)); | |
686 | if (ret != 0) | |
687 | goto fail_free; | |
688 | } | |
1da177e4 | 689 | |
546b0974 | 690 | mutex_lock(&dev->struct_mutex); |
201361a5 | 691 | ret = i915_dispatch_batchbuffer(dev, batch, cliprects); |
546b0974 | 692 | mutex_unlock(&dev->struct_mutex); |
1da177e4 | 693 | |
c99b058f | 694 | if (sarea_priv) |
0baf823a | 695 | sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); |
201361a5 EA |
696 | |
697 | fail_free: | |
9a298b2a | 698 | kfree(cliprects); |
201361a5 | 699 | |
1da177e4 LT |
700 | return ret; |
701 | } | |
702 | ||
c153f45f EA |
703 | static int i915_cmdbuffer(struct drm_device *dev, void *data, |
704 | struct drm_file *file_priv) | |
1da177e4 | 705 | { |
1da177e4 | 706 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
7c1c2871 | 707 | struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; |
1da177e4 | 708 | drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) |
7c1c2871 | 709 | master_priv->sarea_priv; |
c153f45f | 710 | drm_i915_cmdbuffer_t *cmdbuf = data; |
201361a5 EA |
711 | struct drm_clip_rect *cliprects = NULL; |
712 | void *batch_data; | |
1da177e4 LT |
713 | int ret; |
714 | ||
8a4c47f3 | 715 | DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", |
be25ed9c | 716 | cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); |
1da177e4 | 717 | |
546b0974 | 718 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); |
1da177e4 | 719 | |
201361a5 EA |
720 | if (cmdbuf->num_cliprects < 0) |
721 | return -EINVAL; | |
722 | ||
9a298b2a | 723 | batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL); |
201361a5 EA |
724 | if (batch_data == NULL) |
725 | return -ENOMEM; | |
726 | ||
727 | ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); | |
728 | if (ret != 0) | |
729 | goto fail_batch_free; | |
730 | ||
731 | if (cmdbuf->num_cliprects) { | |
9a298b2a EA |
732 | cliprects = kcalloc(cmdbuf->num_cliprects, |
733 | sizeof(struct drm_clip_rect), GFP_KERNEL); | |
201361a5 EA |
734 | if (cliprects == NULL) |
735 | goto fail_batch_free; | |
736 | ||
737 | ret = copy_from_user(cliprects, cmdbuf->cliprects, | |
738 | cmdbuf->num_cliprects * | |
739 | sizeof(struct drm_clip_rect)); | |
740 | if (ret != 0) | |
741 | goto fail_clip_free; | |
1da177e4 LT |
742 | } |
743 | ||
546b0974 | 744 | mutex_lock(&dev->struct_mutex); |
201361a5 | 745 | ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); |
546b0974 | 746 | mutex_unlock(&dev->struct_mutex); |
1da177e4 LT |
747 | if (ret) { |
748 | DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); | |
355d7f37 | 749 | goto fail_clip_free; |
1da177e4 LT |
750 | } |
751 | ||
c99b058f | 752 | if (sarea_priv) |
0baf823a | 753 | sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); |
201361a5 | 754 | |
201361a5 | 755 | fail_clip_free: |
9a298b2a | 756 | kfree(cliprects); |
355d7f37 | 757 | fail_batch_free: |
9a298b2a | 758 | kfree(batch_data); |
201361a5 EA |
759 | |
760 | return ret; | |
1da177e4 LT |
761 | } |
762 | ||
c153f45f EA |
763 | static int i915_flip_bufs(struct drm_device *dev, void *data, |
764 | struct drm_file *file_priv) | |
1da177e4 | 765 | { |
546b0974 EA |
766 | int ret; |
767 | ||
8a4c47f3 | 768 | DRM_DEBUG_DRIVER("%s\n", __func__); |
1da177e4 | 769 | |
546b0974 | 770 | RING_LOCK_TEST_WITH_RETURN(dev, file_priv); |
1da177e4 | 771 | |
546b0974 EA |
772 | mutex_lock(&dev->struct_mutex); |
773 | ret = i915_dispatch_flip(dev); | |
774 | mutex_unlock(&dev->struct_mutex); | |
775 | ||
776 | return ret; | |
1da177e4 LT |
777 | } |
778 | ||
c153f45f EA |
779 | static int i915_getparam(struct drm_device *dev, void *data, |
780 | struct drm_file *file_priv) | |
1da177e4 | 781 | { |
1da177e4 | 782 | drm_i915_private_t *dev_priv = dev->dev_private; |
c153f45f | 783 | drm_i915_getparam_t *param = data; |
1da177e4 LT |
784 | int value; |
785 | ||
786 | if (!dev_priv) { | |
3e684eae | 787 | DRM_ERROR("called with no initialization\n"); |
20caafa6 | 788 | return -EINVAL; |
1da177e4 LT |
789 | } |
790 | ||
c153f45f | 791 | switch (param->param) { |
1da177e4 | 792 | case I915_PARAM_IRQ_ACTIVE: |
0a3e67a4 | 793 | value = dev->pdev->irq ? 1 : 0; |
1da177e4 LT |
794 | break; |
795 | case I915_PARAM_ALLOW_BATCHBUFFER: | |
796 | value = dev_priv->allow_batchbuffer ? 1 : 0; | |
797 | break; | |
0d6aa60b DA |
798 | case I915_PARAM_LAST_DISPATCH: |
799 | value = READ_BREADCRUMB(dev_priv); | |
800 | break; | |
ed4c9c4a KH |
801 | case I915_PARAM_CHIPSET_ID: |
802 | value = dev->pci_device; | |
803 | break; | |
673a394b | 804 | case I915_PARAM_HAS_GEM: |
ac5c4e76 | 805 | value = dev_priv->has_gem; |
673a394b | 806 | break; |
0f973f27 JB |
807 | case I915_PARAM_NUM_FENCES_AVAIL: |
808 | value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; | |
809 | break; | |
02e792fb SV |
810 | case I915_PARAM_HAS_OVERLAY: |
811 | value = dev_priv->overlay ? 1 : 0; | |
812 | break; | |
1da177e4 | 813 | default: |
8a4c47f3 | 814 | DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
be25ed9c | 815 | param->param); |
20caafa6 | 816 | return -EINVAL; |
1da177e4 LT |
817 | } |
818 | ||
c153f45f | 819 | if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { |
1da177e4 | 820 | DRM_ERROR("DRM_COPY_TO_USER failed\n"); |
20caafa6 | 821 | return -EFAULT; |
1da177e4 LT |
822 | } |
823 | ||
824 | return 0; | |
825 | } | |
826 | ||
c153f45f EA |
827 | static int i915_setparam(struct drm_device *dev, void *data, |
828 | struct drm_file *file_priv) | |
1da177e4 | 829 | { |
1da177e4 | 830 | drm_i915_private_t *dev_priv = dev->dev_private; |
c153f45f | 831 | drm_i915_setparam_t *param = data; |
1da177e4 LT |
832 | |
833 | if (!dev_priv) { | |
3e684eae | 834 | DRM_ERROR("called with no initialization\n"); |
20caafa6 | 835 | return -EINVAL; |
1da177e4 LT |
836 | } |
837 | ||
c153f45f | 838 | switch (param->param) { |
1da177e4 | 839 | case I915_SETPARAM_USE_MI_BATCHBUFFER_START: |
1da177e4 LT |
840 | break; |
841 | case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: | |
c153f45f | 842 | dev_priv->tex_lru_log_granularity = param->value; |
1da177e4 LT |
843 | break; |
844 | case I915_SETPARAM_ALLOW_BATCHBUFFER: | |
c153f45f | 845 | dev_priv->allow_batchbuffer = param->value; |
1da177e4 | 846 | break; |
0f973f27 JB |
847 | case I915_SETPARAM_NUM_USED_FENCES: |
848 | if (param->value > dev_priv->num_fence_regs || | |
849 | param->value < 0) | |
850 | return -EINVAL; | |
851 | /* Userspace can use first N regs */ | |
852 | dev_priv->fence_reg_start = param->value; | |
853 | break; | |
1da177e4 | 854 | default: |
8a4c47f3 | 855 | DRM_DEBUG_DRIVER("unknown parameter %d\n", |
be25ed9c | 856 | param->param); |
20caafa6 | 857 | return -EINVAL; |
1da177e4 LT |
858 | } |
859 | ||
860 | return 0; | |
861 | } | |
862 | ||
c153f45f EA |
863 | static int i915_set_status_page(struct drm_device *dev, void *data, |
864 | struct drm_file *file_priv) | |
dc7a9319 | 865 | { |
dc7a9319 | 866 | drm_i915_private_t *dev_priv = dev->dev_private; |
c153f45f | 867 | drm_i915_hws_addr_t *hws = data; |
b39d50e5 ZW |
868 | |
869 | if (!I915_NEED_GFX_HWS(dev)) | |
870 | return -EINVAL; | |
dc7a9319 WZ |
871 | |
872 | if (!dev_priv) { | |
3e684eae | 873 | DRM_ERROR("called with no initialization\n"); |
20caafa6 | 874 | return -EINVAL; |
dc7a9319 | 875 | } |
dc7a9319 | 876 | |
79e53945 JB |
877 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
878 | WARN(1, "tried to set status page when mode setting active\n"); | |
879 | return 0; | |
880 | } | |
881 | ||
8a4c47f3 | 882 | DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); |
c153f45f EA |
883 | |
884 | dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); | |
dc7a9319 | 885 | |
8b409580 | 886 | dev_priv->hws_map.offset = dev->agp->base + hws->addr; |
dc7a9319 WZ |
887 | dev_priv->hws_map.size = 4*1024; |
888 | dev_priv->hws_map.type = 0; | |
889 | dev_priv->hws_map.flags = 0; | |
890 | dev_priv->hws_map.mtrr = 0; | |
891 | ||
dd0910b3 | 892 | drm_core_ioremap_wc(&dev_priv->hws_map, dev); |
dc7a9319 | 893 | if (dev_priv->hws_map.handle == NULL) { |
dc7a9319 WZ |
894 | i915_dma_cleanup(dev); |
895 | dev_priv->status_gfx_addr = 0; | |
896 | DRM_ERROR("can not ioremap virtual address for" | |
897 | " G33 hw status page\n"); | |
20caafa6 | 898 | return -ENOMEM; |
dc7a9319 WZ |
899 | } |
900 | dev_priv->hw_status_page = dev_priv->hws_map.handle; | |
901 | ||
902 | memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | |
585fb111 | 903 | I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); |
8a4c47f3 | 904 | DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", |
be25ed9c | 905 | dev_priv->status_gfx_addr); |
8a4c47f3 | 906 | DRM_DEBUG_DRIVER("load hws at %p\n", |
be25ed9c | 907 | dev_priv->hw_status_page); |
dc7a9319 WZ |
908 | return 0; |
909 | } | |
910 | ||
ec2a4c3f DA |
911 | static int i915_get_bridge_dev(struct drm_device *dev) |
912 | { | |
913 | struct drm_i915_private *dev_priv = dev->dev_private; | |
914 | ||
915 | dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); | |
916 | if (!dev_priv->bridge_dev) { | |
917 | DRM_ERROR("bridge device not found\n"); | |
918 | return -1; | |
919 | } | |
920 | return 0; | |
921 | } | |
922 | ||
79e53945 JB |
923 | /** |
924 | * i915_probe_agp - get AGP bootup configuration | |
925 | * @pdev: PCI device | |
926 | * @aperture_size: returns AGP aperture configured size | |
927 | * @preallocated_size: returns size of BIOS preallocated AGP space | |
928 | * | |
929 | * Since Intel integrated graphics are UMA, the BIOS has to set aside | |
930 | * some RAM for the framebuffer at early boot. This code figures out | |
931 | * how much was set aside so we can use it for our own purposes. | |
932 | */ | |
2a34f5e6 | 933 | static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, |
80824003 JB |
934 | uint32_t *preallocated_size, |
935 | uint32_t *start) | |
79e53945 | 936 | { |
ec2a4c3f | 937 | struct drm_i915_private *dev_priv = dev->dev_private; |
79e53945 JB |
938 | u16 tmp = 0; |
939 | unsigned long overhead; | |
241fa85b | 940 | unsigned long stolen; |
79e53945 | 941 | |
79e53945 | 942 | /* Get the fb aperture size and "stolen" memory amount. */ |
ec2a4c3f | 943 | pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp); |
79e53945 JB |
944 | |
945 | *aperture_size = 1024 * 1024; | |
946 | *preallocated_size = 1024 * 1024; | |
947 | ||
60fd99e3 | 948 | switch (dev->pdev->device) { |
79e53945 JB |
949 | case PCI_DEVICE_ID_INTEL_82830_CGC: |
950 | case PCI_DEVICE_ID_INTEL_82845G_IG: | |
951 | case PCI_DEVICE_ID_INTEL_82855GM_IG: | |
952 | case PCI_DEVICE_ID_INTEL_82865_IG: | |
953 | if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) | |
954 | *aperture_size *= 64; | |
955 | else | |
956 | *aperture_size *= 128; | |
957 | break; | |
958 | default: | |
959 | /* 9xx supports large sizes, just look at the length */ | |
60fd99e3 | 960 | *aperture_size = pci_resource_len(dev->pdev, 2); |
79e53945 JB |
961 | break; |
962 | } | |
963 | ||
964 | /* | |
965 | * Some of the preallocated space is taken by the GTT | |
966 | * and popup. GTT is 1K per MB of aperture size, and popup is 4K. | |
967 | */ | |
2c07245f | 968 | if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev)) |
60fd99e3 EA |
969 | overhead = 4096; |
970 | else | |
971 | overhead = (*aperture_size / 1024) + 4096; | |
972 | ||
241fa85b EA |
973 | switch (tmp & INTEL_GMCH_GMS_MASK) { |
974 | case INTEL_855_GMCH_GMS_DISABLED: | |
975 | DRM_ERROR("video memory is disabled\n"); | |
976 | return -1; | |
79e53945 | 977 | case INTEL_855_GMCH_GMS_STOLEN_1M: |
241fa85b EA |
978 | stolen = 1 * 1024 * 1024; |
979 | break; | |
79e53945 | 980 | case INTEL_855_GMCH_GMS_STOLEN_4M: |
241fa85b | 981 | stolen = 4 * 1024 * 1024; |
79e53945 JB |
982 | break; |
983 | case INTEL_855_GMCH_GMS_STOLEN_8M: | |
241fa85b | 984 | stolen = 8 * 1024 * 1024; |
79e53945 JB |
985 | break; |
986 | case INTEL_855_GMCH_GMS_STOLEN_16M: | |
241fa85b | 987 | stolen = 16 * 1024 * 1024; |
79e53945 JB |
988 | break; |
989 | case INTEL_855_GMCH_GMS_STOLEN_32M: | |
241fa85b | 990 | stolen = 32 * 1024 * 1024; |
79e53945 JB |
991 | break; |
992 | case INTEL_915G_GMCH_GMS_STOLEN_48M: | |
241fa85b | 993 | stolen = 48 * 1024 * 1024; |
79e53945 JB |
994 | break; |
995 | case INTEL_915G_GMCH_GMS_STOLEN_64M: | |
241fa85b EA |
996 | stolen = 64 * 1024 * 1024; |
997 | break; | |
998 | case INTEL_GMCH_GMS_STOLEN_128M: | |
999 | stolen = 128 * 1024 * 1024; | |
1000 | break; | |
1001 | case INTEL_GMCH_GMS_STOLEN_256M: | |
1002 | stolen = 256 * 1024 * 1024; | |
1003 | break; | |
1004 | case INTEL_GMCH_GMS_STOLEN_96M: | |
1005 | stolen = 96 * 1024 * 1024; | |
1006 | break; | |
1007 | case INTEL_GMCH_GMS_STOLEN_160M: | |
1008 | stolen = 160 * 1024 * 1024; | |
1009 | break; | |
1010 | case INTEL_GMCH_GMS_STOLEN_224M: | |
1011 | stolen = 224 * 1024 * 1024; | |
1012 | break; | |
1013 | case INTEL_GMCH_GMS_STOLEN_352M: | |
1014 | stolen = 352 * 1024 * 1024; | |
79e53945 | 1015 | break; |
79e53945 JB |
1016 | default: |
1017 | DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", | |
241fa85b | 1018 | tmp & INTEL_GMCH_GMS_MASK); |
79e53945 JB |
1019 | return -1; |
1020 | } | |
241fa85b | 1021 | *preallocated_size = stolen - overhead; |
80824003 | 1022 | *start = overhead; |
79e53945 JB |
1023 | |
1024 | return 0; | |
1025 | } | |
1026 | ||
80824003 JB |
1027 | #define PTE_ADDRESS_MASK 0xfffff000 |
1028 | #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ | |
1029 | #define PTE_MAPPING_TYPE_UNCACHED (0 << 1) | |
1030 | #define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */ | |
1031 | #define PTE_MAPPING_TYPE_CACHED (3 << 1) | |
1032 | #define PTE_MAPPING_TYPE_MASK (3 << 1) | |
1033 | #define PTE_VALID (1 << 0) | |
1034 | ||
1035 | /** | |
1036 | * i915_gtt_to_phys - take a GTT address and turn it into a physical one | |
1037 | * @dev: drm device | |
1038 | * @gtt_addr: address to translate | |
1039 | * | |
1040 | * Some chip functions require allocations from stolen space but need the | |
1041 | * physical address of the memory in question. We use this routine | |
1042 | * to get a physical address suitable for register programming from a given | |
1043 | * GTT address. | |
1044 | */ | |
1045 | static unsigned long i915_gtt_to_phys(struct drm_device *dev, | |
1046 | unsigned long gtt_addr) | |
1047 | { | |
1048 | unsigned long *gtt; | |
1049 | unsigned long entry, phys; | |
1050 | int gtt_bar = IS_I9XX(dev) ? 0 : 1; | |
1051 | int gtt_offset, gtt_size; | |
1052 | ||
1053 | if (IS_I965G(dev)) { | |
1054 | if (IS_G4X(dev) || IS_IGDNG(dev)) { | |
1055 | gtt_offset = 2*1024*1024; | |
1056 | gtt_size = 2*1024*1024; | |
1057 | } else { | |
1058 | gtt_offset = 512*1024; | |
1059 | gtt_size = 512*1024; | |
1060 | } | |
1061 | } else { | |
1062 | gtt_bar = 3; | |
1063 | gtt_offset = 0; | |
1064 | gtt_size = pci_resource_len(dev->pdev, gtt_bar); | |
1065 | } | |
1066 | ||
1067 | gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset, | |
1068 | gtt_size); | |
1069 | if (!gtt) { | |
1070 | DRM_ERROR("ioremap of GTT failed\n"); | |
1071 | return 0; | |
1072 | } | |
1073 | ||
1074 | entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); | |
1075 | ||
1076 | DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); | |
1077 | ||
1078 | /* Mask out these reserved bits on this hardware. */ | |
1079 | if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) || | |
1080 | IS_I945G(dev) || IS_I945GM(dev)) { | |
1081 | entry &= ~PTE_ADDRESS_MASK_HIGH; | |
1082 | } | |
1083 | ||
1084 | /* If it's not a mapping type we know, then bail. */ | |
1085 | if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && | |
1086 | (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) { | |
1087 | iounmap(gtt); | |
1088 | return 0; | |
1089 | } | |
1090 | ||
1091 | if (!(entry & PTE_VALID)) { | |
1092 | DRM_ERROR("bad GTT entry in stolen space\n"); | |
1093 | iounmap(gtt); | |
1094 | return 0; | |
1095 | } | |
1096 | ||
1097 | iounmap(gtt); | |
1098 | ||
1099 | phys =(entry & PTE_ADDRESS_MASK) | | |
1100 | ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); | |
1101 | ||
1102 | DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); | |
1103 | ||
1104 | return phys; | |
1105 | } | |
1106 | ||
1107 | static void i915_warn_stolen(struct drm_device *dev) | |
1108 | { | |
1109 | DRM_ERROR("not enough stolen space for compressed buffer, disabling\n"); | |
1110 | DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); | |
1111 | } | |
1112 | ||
1113 | static void i915_setup_compression(struct drm_device *dev, int size) | |
1114 | { | |
1115 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1116 | struct drm_mm_node *compressed_fb, *compressed_llb; | |
1117 | unsigned long cfb_base, ll_base; | |
1118 | ||
1119 | /* Leave 1M for line length buffer & misc. */ | |
1120 | compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); | |
1121 | if (!compressed_fb) { | |
1122 | i915_warn_stolen(dev); | |
1123 | return; | |
1124 | } | |
1125 | ||
1126 | compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); | |
1127 | if (!compressed_fb) { | |
1128 | i915_warn_stolen(dev); | |
1129 | return; | |
1130 | } | |
1131 | ||
74dff282 JB |
1132 | cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); |
1133 | if (!cfb_base) { | |
1134 | DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); | |
1135 | drm_mm_put_block(compressed_fb); | |
80824003 JB |
1136 | } |
1137 | ||
74dff282 JB |
1138 | if (!IS_GM45(dev)) { |
1139 | compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, | |
1140 | 4096, 0); | |
1141 | if (!compressed_llb) { | |
1142 | i915_warn_stolen(dev); | |
1143 | return; | |
1144 | } | |
1145 | ||
1146 | compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); | |
1147 | if (!compressed_llb) { | |
1148 | i915_warn_stolen(dev); | |
1149 | return; | |
1150 | } | |
1151 | ||
1152 | ll_base = i915_gtt_to_phys(dev, compressed_llb->start); | |
1153 | if (!ll_base) { | |
1154 | DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); | |
1155 | drm_mm_put_block(compressed_fb); | |
1156 | drm_mm_put_block(compressed_llb); | |
1157 | } | |
80824003 JB |
1158 | } |
1159 | ||
1160 | dev_priv->cfb_size = size; | |
1161 | ||
74dff282 JB |
1162 | if (IS_GM45(dev)) { |
1163 | g4x_disable_fbc(dev); | |
1164 | I915_WRITE(DPFC_CB_BASE, compressed_fb->start); | |
1165 | } else { | |
1166 | i8xx_disable_fbc(dev); | |
1167 | I915_WRITE(FBC_CFB_BASE, cfb_base); | |
1168 | I915_WRITE(FBC_LL_BASE, ll_base); | |
80824003 JB |
1169 | } |
1170 | ||
80824003 JB |
1171 | DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, |
1172 | ll_base, size >> 20); | |
80824003 JB |
1173 | } |
1174 | ||
28d52043 DA |
1175 | /* true = enable decode, false = disable decoder */ |
1176 | static unsigned int i915_vga_set_decode(void *cookie, bool state) | |
1177 | { | |
1178 | struct drm_device *dev = cookie; | |
1179 | ||
1180 | intel_modeset_vga_set_state(dev, state); | |
1181 | if (state) | |
1182 | return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | | |
1183 | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
1184 | else | |
1185 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | |
1186 | } | |
1187 | ||
2a34f5e6 | 1188 | static int i915_load_modeset_init(struct drm_device *dev, |
80824003 | 1189 | unsigned long prealloc_start, |
2a34f5e6 EA |
1190 | unsigned long prealloc_size, |
1191 | unsigned long agp_size) | |
79e53945 JB |
1192 | { |
1193 | struct drm_i915_private *dev_priv = dev->dev_private; | |
79e53945 JB |
1194 | int fb_bar = IS_I9XX(dev) ? 2 : 0; |
1195 | int ret = 0; | |
1196 | ||
1197 | dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & | |
1198 | 0xff000000; | |
1199 | ||
2906f025 | 1200 | if (IS_MOBILE(dev) || IS_I9XX(dev)) |
79e53945 JB |
1201 | dev_priv->cursor_needs_physical = true; |
1202 | else | |
1203 | dev_priv->cursor_needs_physical = false; | |
1204 | ||
2906f025 JB |
1205 | if (IS_I965G(dev) || IS_G33(dev)) |
1206 | dev_priv->cursor_needs_physical = false; | |
1207 | ||
79e53945 JB |
1208 | /* Basic memrange allocator for stolen space (aka vram) */ |
1209 | drm_mm_init(&dev_priv->vram, 0, prealloc_size); | |
80824003 | 1210 | DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024)); |
79e53945 | 1211 | |
11ed50ec BG |
1212 | /* We're off and running w/KMS */ |
1213 | dev_priv->mm.suspended = 0; | |
79e53945 | 1214 | |
13f4c435 EA |
1215 | /* Let GEM Manage from end of prealloc space to end of aperture. |
1216 | * | |
1217 | * However, leave one page at the end still bound to the scratch page. | |
1218 | * There are a number of places where the hardware apparently | |
1219 | * prefetches past the end of the object, and we've seen multiple | |
1220 | * hangs with the GPU head pointer stuck in a batchbuffer bound | |
1221 | * at the last page of the aperture. One page should be enough to | |
1222 | * keep any prefetching inside of the aperture. | |
1223 | */ | |
1224 | i915_gem_do_init(dev, prealloc_size, agp_size - 4096); | |
79e53945 | 1225 | |
11ed50ec | 1226 | mutex_lock(&dev->struct_mutex); |
79e53945 | 1227 | ret = i915_gem_init_ringbuffer(dev); |
11ed50ec | 1228 | mutex_unlock(&dev->struct_mutex); |
79e53945 | 1229 | if (ret) |
b8da7de5 | 1230 | goto out; |
79e53945 | 1231 | |
80824003 | 1232 | /* Try to set up FBC with a reasonable compressed buffer size */ |
9216d44d | 1233 | if (I915_HAS_FBC(dev) && i915_powersave) { |
80824003 JB |
1234 | int cfb_size; |
1235 | ||
1236 | /* Try to get an 8M buffer... */ | |
1237 | if (prealloc_size > (9*1024*1024)) | |
1238 | cfb_size = 8*1024*1024; | |
1239 | else /* fall back to 7/8 of the stolen space */ | |
1240 | cfb_size = prealloc_size * 7 / 8; | |
1241 | i915_setup_compression(dev, cfb_size); | |
1242 | } | |
1243 | ||
79e53945 JB |
1244 | /* Allow hardware batchbuffers unless told otherwise. |
1245 | */ | |
1246 | dev_priv->allow_batchbuffer = 1; | |
1247 | ||
1248 | ret = intel_init_bios(dev); | |
1249 | if (ret) | |
1250 | DRM_INFO("failed to find VBIOS tables\n"); | |
1251 | ||
28d52043 DA |
1252 | /* if we have > 1 VGA cards, then disable the radeon VGA resources */ |
1253 | ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); | |
1254 | if (ret) | |
1255 | goto destroy_ringbuffer; | |
1256 | ||
79e53945 JB |
1257 | ret = drm_irq_install(dev); |
1258 | if (ret) | |
1259 | goto destroy_ringbuffer; | |
1260 | ||
79e53945 JB |
1261 | /* Always safe in the mode setting case. */ |
1262 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | |
1263 | dev->vblank_disable_allowed = 1; | |
1264 | ||
1265 | /* | |
1266 | * Initialize the hardware status page IRQ location. | |
1267 | */ | |
1268 | ||
1269 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); | |
1270 | ||
1271 | intel_modeset_init(dev); | |
1272 | ||
7a1fb5d0 | 1273 | drm_helper_initial_config(dev); |
79e53945 | 1274 | |
79e53945 JB |
1275 | return 0; |
1276 | ||
79e53945 JB |
1277 | destroy_ringbuffer: |
1278 | i915_gem_cleanup_ringbuffer(dev); | |
1279 | out: | |
1280 | return ret; | |
1281 | } | |
1282 | ||
7c1c2871 DA |
1283 | int i915_master_create(struct drm_device *dev, struct drm_master *master) |
1284 | { | |
1285 | struct drm_i915_master_private *master_priv; | |
1286 | ||
9a298b2a | 1287 | master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); |
7c1c2871 DA |
1288 | if (!master_priv) |
1289 | return -ENOMEM; | |
1290 | ||
1291 | master->driver_priv = master_priv; | |
1292 | return 0; | |
1293 | } | |
1294 | ||
1295 | void i915_master_destroy(struct drm_device *dev, struct drm_master *master) | |
1296 | { | |
1297 | struct drm_i915_master_private *master_priv = master->driver_priv; | |
1298 | ||
1299 | if (!master_priv) | |
1300 | return; | |
1301 | ||
9a298b2a | 1302 | kfree(master_priv); |
7c1c2871 DA |
1303 | |
1304 | master->driver_priv = NULL; | |
1305 | } | |
1306 | ||
7662c8bd SL |
1307 | static void i915_get_mem_freq(struct drm_device *dev) |
1308 | { | |
1309 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1310 | u32 tmp; | |
1311 | ||
1312 | if (!IS_IGD(dev)) | |
1313 | return; | |
1314 | ||
1315 | tmp = I915_READ(CLKCFG); | |
1316 | ||
1317 | switch (tmp & CLKCFG_FSB_MASK) { | |
1318 | case CLKCFG_FSB_533: | |
1319 | dev_priv->fsb_freq = 533; /* 133*4 */ | |
1320 | break; | |
1321 | case CLKCFG_FSB_800: | |
1322 | dev_priv->fsb_freq = 800; /* 200*4 */ | |
1323 | break; | |
1324 | case CLKCFG_FSB_667: | |
1325 | dev_priv->fsb_freq = 667; /* 167*4 */ | |
1326 | break; | |
1327 | case CLKCFG_FSB_400: | |
1328 | dev_priv->fsb_freq = 400; /* 100*4 */ | |
1329 | break; | |
1330 | } | |
1331 | ||
1332 | switch (tmp & CLKCFG_MEM_MASK) { | |
1333 | case CLKCFG_MEM_533: | |
1334 | dev_priv->mem_freq = 533; | |
1335 | break; | |
1336 | case CLKCFG_MEM_667: | |
1337 | dev_priv->mem_freq = 667; | |
1338 | break; | |
1339 | case CLKCFG_MEM_800: | |
1340 | dev_priv->mem_freq = 800; | |
1341 | break; | |
1342 | } | |
1343 | } | |
1344 | ||
79e53945 JB |
1345 | /** |
1346 | * i915_driver_load - setup chip and create an initial config | |
1347 | * @dev: DRM device | |
1348 | * @flags: startup flags | |
1349 | * | |
1350 | * The driver load routine has to do several things: | |
1351 | * - drive output discovery via intel_modeset_init() | |
1352 | * - initialize the memory manager | |
1353 | * - allocate initial config memory | |
1354 | * - setup the DRM framebuffer with the allocated memory | |
1355 | */ | |
84b1fd10 | 1356 | int i915_driver_load(struct drm_device *dev, unsigned long flags) |
22eae947 | 1357 | { |
ba8bbcf6 | 1358 | struct drm_i915_private *dev_priv = dev->dev_private; |
d883f7f1 | 1359 | resource_size_t base, size; |
ba8bbcf6 | 1360 | int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1; |
80824003 | 1361 | uint32_t agp_size, prealloc_size, prealloc_start; |
ba8bbcf6 | 1362 | |
22eae947 DA |
1363 | /* i915 has 4 more counters */ |
1364 | dev->counters += 4; | |
1365 | dev->types[6] = _DRM_STAT_IRQ; | |
1366 | dev->types[7] = _DRM_STAT_PRIMARY; | |
1367 | dev->types[8] = _DRM_STAT_SECONDARY; | |
1368 | dev->types[9] = _DRM_STAT_DMA; | |
1369 | ||
9a298b2a | 1370 | dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); |
ba8bbcf6 JB |
1371 | if (dev_priv == NULL) |
1372 | return -ENOMEM; | |
1373 | ||
ba8bbcf6 | 1374 | dev->dev_private = (void *)dev_priv; |
673a394b | 1375 | dev_priv->dev = dev; |
ba8bbcf6 JB |
1376 | |
1377 | /* Add register map (needed for suspend/resume) */ | |
1378 | base = drm_get_resource_start(dev, mmio_bar); | |
1379 | size = drm_get_resource_len(dev, mmio_bar); | |
1380 | ||
ec2a4c3f DA |
1381 | if (i915_get_bridge_dev(dev)) { |
1382 | ret = -EIO; | |
1383 | goto free_priv; | |
1384 | } | |
1385 | ||
3043c60c | 1386 | dev_priv->regs = ioremap(base, size); |
79e53945 JB |
1387 | if (!dev_priv->regs) { |
1388 | DRM_ERROR("failed to map registers\n"); | |
1389 | ret = -EIO; | |
ec2a4c3f | 1390 | goto put_bridge; |
79e53945 | 1391 | } |
ed4cb414 | 1392 | |
ab657db1 EA |
1393 | dev_priv->mm.gtt_mapping = |
1394 | io_mapping_create_wc(dev->agp->base, | |
1395 | dev->agp->agp_info.aper_size * 1024*1024); | |
6644107d VP |
1396 | if (dev_priv->mm.gtt_mapping == NULL) { |
1397 | ret = -EIO; | |
1398 | goto out_rmmap; | |
1399 | } | |
1400 | ||
ab657db1 EA |
1401 | /* Set up a WC MTRR for non-PAT systems. This is more common than |
1402 | * one would think, because the kernel disables PAT on first | |
1403 | * generation Core chips because WC PAT gets overridden by a UC | |
1404 | * MTRR if present. Even if a UC MTRR isn't present. | |
1405 | */ | |
1406 | dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, | |
1407 | dev->agp->agp_info.aper_size * | |
1408 | 1024 * 1024, | |
1409 | MTRR_TYPE_WRCOMB, 1); | |
1410 | if (dev_priv->mm.gtt_mtrr < 0) { | |
040aefa2 | 1411 | DRM_INFO("MTRR allocation failed. Graphics " |
ab657db1 EA |
1412 | "performance may suffer.\n"); |
1413 | } | |
1414 | ||
80824003 | 1415 | ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start); |
2a34f5e6 EA |
1416 | if (ret) |
1417 | goto out_iomapfree; | |
1418 | ||
9c9fe1f8 EA |
1419 | dev_priv->wq = create_workqueue("i915"); |
1420 | if (dev_priv->wq == NULL) { | |
1421 | DRM_ERROR("Failed to create our workqueue.\n"); | |
1422 | ret = -ENOMEM; | |
1423 | goto out_iomapfree; | |
1424 | } | |
1425 | ||
ac5c4e76 DA |
1426 | /* enable GEM by default */ |
1427 | dev_priv->has_gem = 1; | |
ac5c4e76 | 1428 | |
2a34f5e6 EA |
1429 | if (prealloc_size > agp_size * 3 / 4) { |
1430 | DRM_ERROR("Detected broken video BIOS with %d/%dkB of video " | |
1431 | "memory stolen.\n", | |
1432 | prealloc_size / 1024, agp_size / 1024); | |
1433 | DRM_ERROR("Disabling GEM. (try reducing stolen memory or " | |
1434 | "updating the BIOS to fix).\n"); | |
1435 | dev_priv->has_gem = 0; | |
1436 | } | |
1437 | ||
9880b7a5 | 1438 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
42c2798b | 1439 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
036a4a7d | 1440 | if (IS_G4X(dev) || IS_IGDNG(dev)) { |
42c2798b | 1441 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
9880b7a5 | 1442 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; |
42c2798b | 1443 | } |
9880b7a5 | 1444 | |
673a394b EA |
1445 | i915_gem_load(dev); |
1446 | ||
398c9cb2 KP |
1447 | /* Init HWS */ |
1448 | if (!I915_NEED_GFX_HWS(dev)) { | |
1449 | ret = i915_init_phys_hws(dev); | |
1450 | if (ret != 0) | |
9c9fe1f8 | 1451 | goto out_workqueue_free; |
398c9cb2 | 1452 | } |
ed4cb414 | 1453 | |
7662c8bd SL |
1454 | i915_get_mem_freq(dev); |
1455 | ||
ed4cb414 EA |
1456 | /* On the 945G/GM, the chipset reports the MSI capability on the |
1457 | * integrated graphics even though the support isn't actually there | |
1458 | * according to the published specs. It doesn't appear to function | |
1459 | * correctly in testing on 945G. | |
1460 | * This may be a side effect of MSI having been made available for PEG | |
1461 | * and the registers being closely associated. | |
d1ed629f KP |
1462 | * |
1463 | * According to chipset errata, on the 965GM, MSI interrupts may | |
b60678a7 KP |
1464 | * be lost or delayed, but we use them anyways to avoid |
1465 | * stuck interrupts on some machines. | |
ed4cb414 | 1466 | */ |
b60678a7 | 1467 | if (!IS_I945G(dev) && !IS_I945GM(dev)) |
d3e74d02 | 1468 | pci_enable_msi(dev->pdev); |
ed4cb414 EA |
1469 | |
1470 | spin_lock_init(&dev_priv->user_irq_lock); | |
63eeaf38 | 1471 | spin_lock_init(&dev_priv->error_lock); |
79e53945 | 1472 | dev_priv->user_irq_refcount = 0; |
9d34e5db | 1473 | dev_priv->trace_irq_seqno = 0; |
ed4cb414 | 1474 | |
52440211 KP |
1475 | ret = drm_vblank_init(dev, I915_NUM_PIPE); |
1476 | ||
1477 | if (ret) { | |
1478 | (void) i915_driver_unload(dev); | |
1479 | return ret; | |
1480 | } | |
1481 | ||
11ed50ec BG |
1482 | /* Start out suspended */ |
1483 | dev_priv->mm.suspended = 1; | |
1484 | ||
79e53945 | 1485 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
80824003 JB |
1486 | ret = i915_load_modeset_init(dev, prealloc_start, |
1487 | prealloc_size, agp_size); | |
79e53945 JB |
1488 | if (ret < 0) { |
1489 | DRM_ERROR("failed to init modeset\n"); | |
9c9fe1f8 | 1490 | goto out_workqueue_free; |
79e53945 JB |
1491 | } |
1492 | } | |
1493 | ||
74a365b3 | 1494 | /* Must be done after probing outputs */ |
e170b030 ZW |
1495 | /* FIXME: verify on IGDNG */ |
1496 | if (!IS_IGDNG(dev)) | |
1497 | intel_opregion_init(dev, 0); | |
74a365b3 | 1498 | |
f65d9421 BG |
1499 | setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, |
1500 | (unsigned long) dev); | |
79e53945 JB |
1501 | return 0; |
1502 | ||
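/* Error unwind: each label below releases one resource and falls through
 * to the next, undoing the setup steps above in reverse order. */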
9c9fe1f8 EA |
1503 | out_workqueue_free: |
1504 | destroy_workqueue(dev_priv->wq); | |
6644107d VP |
1505 | out_iomapfree: |
1506 | io_mapping_free(dev_priv->mm.gtt_mapping); | |
79e53945 JB |
1507 | out_rmmap: |
1508 | iounmap(dev_priv->regs); | |
ec2a4c3f DA |
1509 | put_bridge: |
1510 | pci_dev_put(dev_priv->bridge_dev); | |
79e53945 | 1511 | free_priv: |
9a298b2a | 1512 | kfree(dev_priv); |
ba8bbcf6 JB |
1513 | return ret; |
1514 | } | |
1515 | ||
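/* Driver teardown. i915_driver_unload() releases what i915_driver_load() set
 * up, in roughly reverse order: workqueue, hangcheck timer, GTT mapping and
 * MTRR, KMS interrupt/VGA-client state, MSI, register mapping, opregion,
 * modeset and GEM state, and finally the bridge device reference and the
 * private structure itself. */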
1516 | int i915_driver_unload(struct drm_device *dev) | |
1517 | { | |
1518 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1519 | ||
9c9fe1f8 | 1520 | destroy_workqueue(dev_priv->wq); |
f65d9421 | 1521 | del_timer_sync(&dev_priv->hangcheck_timer); |
9c9fe1f8 | 1522 | |
ab657db1 EA |
1523 | io_mapping_free(dev_priv->mm.gtt_mapping); |
1524 | if (dev_priv->mm.gtt_mtrr >= 0) { | |
1525 | mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, | |
1526 | dev->agp->agp_info.aper_size * 1024 * 1024); | |
1527 | dev_priv->mm.gtt_mtrr = -1; | |
1528 | } | |
1529 | ||
79e53945 | 1530 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
79e53945 | 1531 | drm_irq_uninstall(dev); |
28d52043 | 1532 | vga_client_register(dev->pdev, NULL, NULL, NULL); |
79e53945 JB |
1533 | } |
1534 | ||
ed4cb414 EA |
1535 | if (dev->pdev->msi_enabled) |
1536 | pci_disable_msi(dev->pdev); | |
1537 | ||
3043c60c EA |
1538 | if (dev_priv->regs != NULL) |
1539 | iounmap(dev_priv->regs); | |
ba8bbcf6 | 1540 | |
e170b030 ZW |
1541 | if (!IS_IGDNG(dev)) |
1542 | intel_opregion_free(dev, 0); | |
8ee1c3db | 1543 | |
79e53945 JB |
1544 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1545 | intel_modeset_cleanup(dev); | |
1546 | ||
71acb5eb DA |
1547 | i915_gem_free_all_phys_object(dev); |
1548 | ||
79e53945 JB |
1549 | mutex_lock(&dev->struct_mutex); |
1550 | i915_gem_cleanup_ringbuffer(dev); | |
1551 | mutex_unlock(&dev->struct_mutex); | |
1552 | drm_mm_takedown(&dev_priv->vram); | |
1553 | i915_gem_lastclose(dev); | |
02e792fb SV |
1554 | |
1555 | intel_cleanup_overlay(dev); | |
79e53945 JB |
1556 | } |
1557 | ||
ec2a4c3f | 1558 | pci_dev_put(dev_priv->bridge_dev); |
9a298b2a | 1559 | kfree(dev->dev_private); |
ba8bbcf6 | 1560 | |
22eae947 DA |
1561 | return 0; |
1562 | } | |
1563 | ||
673a394b EA |
1564 | int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) |
1565 | { | |
1566 | struct drm_i915_file_private *i915_file_priv; | |
1567 | ||
8a4c47f3 | 1568 | DRM_DEBUG_DRIVER("\n"); |
673a394b | 1569 | i915_file_priv = (struct drm_i915_file_private *) |
9a298b2a | 1570 | kmalloc(sizeof(*i915_file_priv), GFP_KERNEL); |
673a394b EA |
1571 | |
1572 | if (!i915_file_priv) | |
1573 | return -ENOMEM; | |
1574 | ||
1575 | file_priv->driver_priv = i915_file_priv; | |
1576 | ||
b962442e | 1577 | INIT_LIST_HEAD(&i915_file_priv->mm.request_list); |
673a394b EA |
1578 | |
1579 | return 0; | |
1580 | } | |
1581 | ||
79e53945 JB |
1582 | /** |
1583 | * i915_driver_lastclose - clean up after all DRM clients have exited | |
1584 | * @dev: DRM device | |
1585 | * | |
1586 | * Take care of cleaning up after all DRM clients have exited. In the | |
1587 | * mode setting case, we want to restore the kernel's initial mode (just | |
1588 | * in case the last client left us in a bad state). | |
1589 | * | |
1590 | * Additionally, in the non-mode setting case, we'll tear down the AGP | |
1591 | * and DMA structures, since the kernel won't be using them, and clean | |
1592 | * up any GEM state. | |
1593 | */ | |
84b1fd10 | 1594 | void i915_driver_lastclose(struct drm_device * dev) |
1da177e4 | 1595 | { |
ba8bbcf6 JB |
1596 | drm_i915_private_t *dev_priv = dev->dev_private; |
1597 | ||
79e53945 | 1598 | if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { |
785b93ef | 1599 | drm_fb_helper_restore(); |
144a75fa | 1600 | return; |
79e53945 | 1601 | } |
144a75fa | 1602 | |
673a394b EA |
1603 | i915_gem_lastclose(dev); |
1604 | ||
ba8bbcf6 | 1605 | if (dev_priv->agp_heap) |
b5e89ed5 | 1606 | i915_mem_takedown(&(dev_priv->agp_heap)); |
ba8bbcf6 | 1607 | |
b5e89ed5 | 1608 | i915_dma_cleanup(dev); |
1da177e4 LT |
1609 | } |
1610 | ||
6c340eac | 1611 | void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) |
1da177e4 | 1612 | { |
ba8bbcf6 | 1613 | drm_i915_private_t *dev_priv = dev->dev_private; |
b962442e | 1614 | i915_gem_release(dev, file_priv); |
79e53945 JB |
1615 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
1616 | i915_mem_release(dev, file_priv, dev_priv->agp_heap); | |
1da177e4 LT |
1617 | } |
1618 | ||
673a394b EA |
1619 | void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) |
1620 | { | |
1621 | struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | |
1622 | ||
9a298b2a | 1623 | kfree(i915_file_priv); |
673a394b EA |
1624 | } |
1625 | ||
c153f45f EA |
1626 | struct drm_ioctl_desc i915_ioctls[] = { |
1627 | DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
1628 | DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), | |
1629 | DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH), | |
1630 | DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), | |
1631 | DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), | |
1632 | DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), | |
1633 | DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH), | |
1634 | DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
1635 | DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH), | |
1636 | DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH), | |
1637 | DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
1638 | DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), | |
1639 | DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), | |
1640 | DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), | |
1641 | DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), | |
1642 | DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), | |
4b408939 | 1643 | DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
2bdf00b2 | 1644 | DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
673a394b EA |
1645 | DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), |
1646 | DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | |
1647 | DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | |
1648 | DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), | |
1649 | DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH), | |
2bdf00b2 DA |
1650 | DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
1651 | DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | |
673a394b EA |
1652 | DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0), |
1653 | DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), | |
1654 | DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), | |
1655 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), | |
de151cf6 | 1656 | DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0), |
673a394b EA |
1657 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), |
1658 | DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), | |
1659 | DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), | |
1660 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), | |
5a125c3c | 1661 | DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0), |
08d7b3d1 | 1662 | DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), |
3ef94daa | 1663 | DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0), |
02e792fb SV |
1664 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), |
1665 | DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), | |
c94f7029 DA |
1666 | }; |
1667 | ||
1668 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | |
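The table above is where the i915 user-space API is wired up: the DRM core matches an incoming ioctl number against i915_ioctls[], checks the listed access flags (DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY), and then calls the bound handler. As a rough illustration only (it assumes libdrm's drmIoctl() helper and the exported i915_drm.h UAPI header are available; the device node path is just an example), a user-space program reaches i915_getparam() through this table like so:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>        /* drmIoctl() wrapper from libdrm */
#include "i915_drm.h"       /* DRM_IOCTL_I915_GETPARAM, I915_PARAM_HAS_GEM */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);  /* example node path */
	int has_gem = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_HAS_GEM,
		.value = &has_gem,
	};

	if (fd < 0)
		return 1;

	/* The DRM core dispatches this to i915_getparam() via i915_ioctls[];
	 * DRM_AUTH means the fd must be the master or an authenticated client. */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("GEM supported: %d\n", has_gem);

	close(fd);
	return 0;
}

Entries registered with a flags value of 0 (the GEM create/pread/pwrite/mmap ioctls above) skip the DRM_AUTH check, so ordinary unauthenticated clients can create and map buffer objects.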
cda17380 DA |
1669 | |
1670 | /** | |
1671 | * Determine if the device really is AGP or not. | |
1672 | * | |
1673 | * All Intel graphics chipsets are treated as AGP, even if they are really | |
1674 | * PCI-e. | |
1675 | * | |
1676 | * \param dev The device to be tested. | |
1677 | * | |
1678 | * \returns | |
1679 | * A value of 1 is always returned to indicate every i9x5 is AGP. | |
1680 | */ | |
84b1fd10 | 1681 | int i915_driver_device_is_agp(struct drm_device * dev) |
cda17380 DA |
1682 | { |
1683 | return 1; | |
1684 | } |