/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * The above would represent a normal GGTT view as normally mapped for GPU or
 * CPU rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view are
 * different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old core
 * GEM API functions, the ones not taking the view parameter, operate on, or
 * with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * The core API is designed to have copy semantics, which means that a passed-in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

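/*
 * Sanitize the requested enable_ppgtt value against the platform's
 * capabilities. The returned value follows the same convention:
 * 0 = PPGTT disabled, 1 = aliasing PPGTT, 2 = full PPGTT and
 * 3 = full 48bit PPGTT, clamped to what the hardware (and any active
 * vGPU) actually supports.
 */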
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	if (!dev_priv->info.has_aliasing_ppgtt)
		return 0;

	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* GVT-g has no support for 32bit ppgtt */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
	}

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}

	if (has_full_48bit_ppgtt)
		return 3;

	if (has_full_ppgtt)
		return 2;

	return 1;
}

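/*
 * Bind a VMA into a ppgtt: if the VMA has not yet been bound into this
 * address space (I915_VMA_LOCAL_BIND not set), the backing page tables for
 * its range are allocated first, then the PTEs are written. Unbind simply
 * points the range back at scratch via clear_range().
 */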
static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int err;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		err = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start, vma->size);
		if (err)
			return err;
	}

	/* Applicable to VLV, and gen8+ */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

static int ppgtt_set_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pages);

	vma->pages = vma->obj->mm.pages;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

static void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

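/*
 * gen8 PTE/PDE encoding: the DMA address is combined with present/RW bits
 * and a PPAT index selecting the cacheability (uncached, write-through for
 * display, or fully cached). PTE_READ_ONLY clears the RW bit for read-only
 * mappings.
 */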
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

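/*
 * Small page stash used for page-table pages: a spinlock-protected pagevec
 * shared between a per-VM free list and the global wc_stash, so that the
 * expensive WC<->WB page attribute changes can be batched.
 */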
static void stash_init(struct pagestash *stash)
{
	pagevec_init(&stash->pvec);
	spin_lock_init(&stash->lock);
}

static struct page *stash_pop_page(struct pagestash *stash)
{
	struct page *page = NULL;

	spin_lock(&stash->lock);
	if (likely(stash->pvec.nr))
		page = stash->pvec.pages[--stash->pvec.nr];
	spin_unlock(&stash->lock);

	return page;
}

static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
{
	int nr;

	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);

	nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
	memcpy(stash->pvec.pages + stash->pvec.nr,
	       pvec->pages + pvec->nr - nr,
	       sizeof(pvec->pages[0]) * nr);
	stash->pvec.nr += nr;

	spin_unlock(&stash->lock);

	pvec->nr -= nr;
}

static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct pagevec stack;
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	page = stash_pop_page(&vm->free_pages);
	if (page)
		return page;

	if (!vm->pt_kmap_wc)
		return alloc_page(gfp);

	/* Look in our global stash of WC pages... */
	page = stash_pop_page(&vm->i915->mm.wc_stash);
	if (page)
		return page;

	/*
	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
	 *
	 * We have to be careful as page allocation may trigger the shrinker
	 * (via direct reclaim) which will fill up the WC stash underneath us.
	 * So we add our WB pages into a temporary pvec on the stack and merge
	 * them into the WC stash after all the allocations are complete.
	 */
	pagevec_init(&stack);
	do {
		struct page *page;

		page = alloc_page(gfp);
		if (unlikely(!page))
			break;

		stack.pages[stack.nr++] = page;
	} while (pagevec_space(&stack));

	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
		page = stack.pages[--stack.nr];

		/* Merge spare WC pages to the global stash */
		stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);

		/* Push any surplus WC pages onto the local VM stash */
		if (stack.nr)
			stash_push_pagevec(&vm->free_pages, &stack);
	}

	/* Return unwanted leftovers */
	if (unlikely(stack.nr)) {
		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
		__pagevec_release(&stack);
	}

	return page;
}

static void vm_free_pages_release(struct i915_address_space *vm,
				  bool immediate)
{
	struct pagevec *pvec = &vm->free_pages.pvec;
	struct pagevec stack;

	lockdep_assert_held(&vm->free_pages.lock);
	GEM_BUG_ON(!pagevec_count(pvec));

	if (vm->pt_kmap_wc) {
		/*
		 * When we use WC, first fill up the global stash and then
		 * only if full immediately free the overflow.
		 */
		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);

		/*
		 * As we have made some room in the VM's free_pages,
		 * we can wait for it to fill again. Unless we are
		 * inside i915_address_space_fini() and must
		 * immediately release the pages!
		 */
		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
			return;

		/*
		 * We have to drop the lock to allow ourselves to sleep,
		 * so take a copy of the pvec and clear the stash for
		 * others to use it as we sleep.
		 */
		stack = *pvec;
		pagevec_reinit(pvec);
		spin_unlock(&vm->free_pages.lock);

		pvec = &stack;
		set_pages_array_wb(pvec->pages, pvec->nr);

		spin_lock(&vm->free_pages.lock);
	}

	__pagevec_release(pvec);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	/*
	 * On !llc, we need to change the pages back to WB. We only do so
	 * in bulk, so we rarely need to change the page attributes here,
	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
	 * To make detection of the possible sleep more likely, use an
	 * unconditional might_sleep() for everybody.
	 */
	might_sleep();
	spin_lock(&vm->free_pages.lock);
	if (!pagevec_add(&vm->free_pages.pvec, page))
		vm_free_pages_release(vm, false);
	spin_unlock(&vm->free_pages.lock);
}

static void i915_address_space_init(struct i915_address_space *vm,
				    struct drm_i915_private *dev_priv)
{
	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	i915_gem_shrinker_taints_mutex(&vm->mutex);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	stash_init(&vm->free_pages);

	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->unbound_list);
}

static void i915_address_space_fini(struct i915_address_space *vm)
{
	spin_lock(&vm->free_pages.lock);
	if (pagevec_count(&vm->free_pages.pvec))
		vm_free_pages_release(vm, true);
	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
	spin_unlock(&vm->free_pages.lock);

	drm_mm_takedown(&vm->mm);

	mutex_destroy(&vm->mutex);
}

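/*
 * Each page-table structure is backed by a single page obtained from
 * vm_alloc_page() and DMA-mapped for the GPU; __setup_page_dma() and
 * cleanup_page_dma() handle that pairing.
 */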
static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page_attrs(vm->dma,
				      p->page, 0, PAGE_SIZE,
				      PCI_DMA_BIDIRECTIONAL,
				      DMA_ATTR_SKIP_CPU_SYNC |
				      DMA_ATTR_NO_WARN);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

static int setup_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
}

static void cleanup_page_dma(struct i915_address_space *vm,
			     struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))

static void fill_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p,
			  const u64 val)
{
	u64 * const vaddr = kmap_atomic(p->page);

	memset64(vaddr, val, PAGE_SIZE / sizeof(val));

	kunmap_atomic(vaddr);
}

static void fill_page_dma_32(struct i915_address_space *vm,
			     struct i915_page_dma *p,
			     const u32 v)
{
	fill_page_dma(vm, p, (u64)v << 32 | v);
}

static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert().
	 *
	 * TODO: we should really consider write-protecting the scratch-page and
	 * sharing between ppgtt
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_48bit(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
		size = I915_GTT_PAGE_SIZE_64K;
		gfp |= __GFP_NOWARN;
	}
	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;

	do {
		int order = get_order(size);
		struct page *page;
		dma_addr_t addr;

		page = alloc_pages(gfp, order);
		if (unlikely(!page))
			goto skip;

		addr = dma_map_page_attrs(vm->dma,
					  page, 0, size,
					  PCI_DMA_BIDIRECTIONAL,
					  DMA_ATTR_SKIP_CPU_SYNC |
					  DMA_ATTR_NO_WARN);
		if (unlikely(dma_mapping_error(vm->dma, addr)))
			goto free_page;

		if (unlikely(!IS_ALIGNED(addr, size)))
			goto unmap_page;

		vm->scratch_page.page = page;
		vm->scratch_page.daddr = addr;
		vm->scratch_page.order = order;
		return 0;

unmap_page:
		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
free_page:
		__free_pages(page, order);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
		gfp &= ~__GFP_NOWARN;
	} while (1);
}

static void cleanup_scratch_page(struct i915_address_space *vm)
{
	struct i915_page_dma *p = &vm->scratch_page;

	dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
		       PCI_DMA_BIDIRECTIONAL);
	__free_pages(p->page, p->order);
}

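/*
 * alloc_pt()/alloc_pd() allocate the CPU-side descriptor plus its backing
 * page; used_ptes/used_pdes count how many entries currently point at
 * something other than scratch, so empty levels can be freed again.
 */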
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pt))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->used_ptes = 0;
	return pt;
}

static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
	cleanup_px(vm, pt);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill_px(vm, pt,
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}

static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
			       struct i915_page_table *pt)
{
	fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
}

static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pd))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	pd->used_pdes = 0;
	return pd;
}

static void free_pd(struct i915_address_space *vm,
		    struct i915_page_directory *pd)
{
	cleanup_px(vm, pd);
	kfree(pd);
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	fill_px(vm, pd,
		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
	memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
}

static int __pdp_init(struct i915_address_space *vm,
		      struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);

	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
					    I915_GFP_ALLOW_FAIL);
	if (unlikely(!pdp->page_directory))
		return -ENOMEM;

	memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static inline bool use_4lvl(const struct i915_address_space *vm)
{
	return i915_vm_is_48bit(vm);
}

static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	GEM_BUG_ON(!use_4lvl(vm));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(vm, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(vm, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct i915_address_space *vm,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);

	if (!use_4lvl(vm))
		return;

	cleanup_px(vm, pdp);
	kfree(pdp);
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	fill_px(vm, pml4,
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
	memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
}

/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				u64 start, u64 length)
{
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
	gen8_pte_t *vaddr;

	GEM_BUG_ON(num_entries > pt->used_ptes);

	pt->used_ptes -= num_entries;
	if (!pt->used_ptes)
		return true;

	vaddr = kmap_atomic_px(pt);
	while (pte < pte_end)
		vaddr[pte++] = scratch_pte;
	kunmap_atomic(vaddr);

	return false;
}

static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       struct i915_page_table *pt,
			       unsigned int pde)
{
	gen8_pde_t *vaddr;

	pd->page_table[pde] = pt;

	vaddr = kmap_atomic_px(pd);
	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				u64 start, u64 length)
{
	struct i915_page_table *pt;
	u32 pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		GEM_BUG_ON(pt == vm->scratch_pt);

		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
			continue;

		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
		GEM_BUG_ON(!pd->used_pdes);
		pd->used_pdes--;

		free_pt(vm, pt);
	}

	return !pd->used_pdes;
}

static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				struct i915_page_directory *pd,
				unsigned int pdpe)
{
	gen8_ppgtt_pdpe_t *vaddr;

	pdp->page_directory[pdpe] = pd;
	if (!use_4lvl(vm))
		return;

	vaddr = kmap_atomic_px(pdp);
	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 u64 start, u64 length)
{
	struct i915_page_directory *pd;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		GEM_BUG_ON(pd == vm->scratch_pd);

		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
			continue;

		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;

		free_pd(vm, pd);
	}

	return !pdp->used_pdpes;
}

static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
				 struct i915_page_directory_pointer *pdp,
				 unsigned int pml4e)
{
	gen8_ppgtt_pml4e_t *vaddr;

	pml4->pdps[pml4e] = pdp;

	vaddr = kmap_atomic_px(pml4);
	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	unsigned int pml4e;

	GEM_BUG_ON(!use_4lvl(vm));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		GEM_BUG_ON(pdp == vm->scratch_pdp);

		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
			continue;

		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);

		free_pdp(vm, pdp);
	}
}

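/*
 * struct sgt_dma tracks our position while writing PTEs from a scatterlist:
 * the current sg entry, the next DMA address to emit and the end of that
 * entry. struct gen8_insert_pte below is the matching cursor into the
 * four-level page-table hierarchy.
 */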
static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);
	return (struct sgt_dma) { sg, addr, addr + sg->length };
}

struct gen8_insert_pte {
	u16 pml4e;
	u16 pdpe;
	u16 pde;
	u16 pte;
};

static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
{
	return (struct gen8_insert_pte) {
		gen8_pml4e_index(start),
		gen8_pdpe_index(start),
		gen8_pde_index(start),
		gen8_pte_index(start),
	};
}

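/*
 * Write PTEs for one pdp, walking the scatterlist until either it is
 * exhausted (return false) or the pdp is full (return true, so the 4lvl
 * caller can advance to the next pml4 entry and continue).
 */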
static __always_inline bool
gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
			      struct i915_page_directory_pointer *pdp,
			      struct sgt_dma *iter,
			      struct gen8_insert_pte *idx,
			      enum i915_cache_level cache_level,
			      u32 flags)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	gen8_pte_t *vaddr;
	bool ret;

	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
	pd = pdp->page_directory[idx->pdpe];
	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
	do {
		vaddr[idx->pte] = pte_encode | iter->dma;

		iter->dma += I915_GTT_PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				ret = false;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (++idx->pte == GEN8_PTES) {
			idx->pte = 0;

			if (++idx->pde == I915_PDES) {
				idx->pde = 0;

				/* Limited by sg length for 3lvl */
				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
					idx->pdpe = 0;
					ret = true;
					break;
				}

				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
				pd = pdp->page_directory[idx->pdpe];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
		}
	} while (1);
	kunmap_atomic(vaddr);

	return ret;
}

static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
				   struct i915_vma *vma,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);
	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);

	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
				      cache_level, flags);

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

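/*
 * Huge-page insertion: depending on the alignment of the DMA addresses and
 * the amount left in the current sg entry, each iteration fills either a
 * whole 2M PDE, a page table in 64K mode, or plain 4K PTEs, and records the
 * page sizes actually used in vma->page_sizes.gtt.
 */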
static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
					   struct i915_page_directory_pointer **pdps,
					   struct sgt_dma *iter,
					   enum i915_cache_level cache_level,
					   u32 flags)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
	u64 start = vma->node.start;
	dma_addr_t rem = iter->sg->length;

	do {
		struct gen8_insert_pte idx = gen8_insert_pte(start);
		struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
		struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
		unsigned int page_size;
		bool maybe_64K = false;
		gen8_pte_t encode = pte_encode;
		gen8_pte_t *vaddr;
		u16 index, max;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
			index = idx.pde;
			max = I915_PDES;
			page_size = I915_GTT_PAGE_SIZE_2M;

			encode |= GEN8_PDE_PS_2M;

			vaddr = kmap_atomic_px(pd);
		} else {
			struct i915_page_table *pt = pd->page_table[idx.pde];

			index = idx.pte;
			max = GEN8_PTES;
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (max - index) * I915_GTT_PAGE_SIZE))
				maybe_64K = true;

			vaddr = kmap_atomic_px(pt);
		}

		do {
			GEM_BUG_ON(iter->sg->length < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = iter->sg->length;
				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K && index < max &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (max - index) * I915_GTT_PAGE_SIZE)))
					maybe_64K = false;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < max);

		kunmap_atomic(vaddr);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled whole page-table with 64K entries, or filled part of
		 * it and have reached the end of the sg table and we have
		 * enough padding.
		 */
		if (maybe_64K &&
		    (index == max ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = kmap_atomic_px(pd);
			vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
			kunmap_atomic(vaddr);
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

				encode = pte_encode | vma->vm->scratch_page.daddr;
				vaddr = kmap_atomic_px(pd->page_table[idx.pde]);

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				kunmap_atomic(vaddr);
			}
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg);
}

static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
				   struct i915_vma *vma,
				   enum i915_cache_level cache_level,
				   u32 flags)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);
	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
					       flags);
	} else {
		struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);

		while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
						     &iter, &idx, cache_level,
						     flags))
			GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

static void gen8_free_page_tables(struct i915_address_space *vm,
				  struct i915_page_directory *pd)
{
	int i;

	for (i = 0; i < I915_PDES; i++) {
		if (pd->page_table[i] != vm->scratch_pt)
			free_pt(vm, pd->page_table[i]);
	}
}

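/*
 * The scratch hierarchy (page, pt, pd and, for 48b, pdp) backs every
 * address not explicitly bound, so accesses to unused space all land in
 * the single scratch page.
 */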
static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(vm);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (use_4lvl(vm)) {
		vm->scratch_pdp = alloc_pdp(vm);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (use_4lvl(vm))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(vm, vm->scratch_pd);
free_pt:
	free_pt(vm, vm->scratch_pt);
free_scratch_page:
	cleanup_scratch_page(vm);

	return ret;
}

static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	struct i915_address_space *vm = &ppgtt->vm;
	struct drm_i915_private *dev_priv = vm->i915;
	enum vgt_g2v_type msg;
	int i;

	if (use_4lvl(vm)) {
		const u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
	if (use_4lvl(vm))
		free_pdp(vm, vm->scratch_pdp);
	free_pd(vm, vm->scratch_pd);
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
}

static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	int i;

	for (i = 0; i < pdpes; i++) {
		if (pdp->page_directory[i] == vm->scratch_pd)
			continue;

		gen8_free_page_tables(vm, pdp->page_directory[i]);
		free_pd(vm, pdp->page_directory[i]);
	}

	free_pdp(vm, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
		if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
			continue;

		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(&ppgtt->vm, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (use_4lvl(vm))
		gen8_ppgtt_cleanup_4lvl(ppgtt);
	else
		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);

	gen8_free_scratch(vm);
}

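/*
 * The allocation paths below populate page tables on demand for the given
 * range, bumping the used_* counts as they go; on failure they unwind via
 * the matching clear functions so no partially-initialized levels are left
 * behind.
 */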
static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       u64 start, u64 length)
{
	struct i915_page_table *pt;
	u64 from = start;
	unsigned int pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		int count = gen8_pte_count(start, length);

		if (pt == vm->scratch_pt) {
			pd->used_pdes++;

			pt = alloc_pt(vm);
			if (IS_ERR(pt)) {
				pd->used_pdes--;
				goto unwind;
			}

			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
				gen8_initialize_pt(vm, pt);

			gen8_ppgtt_set_pde(vm, pd, pt, pde);
			GEM_BUG_ON(pd->used_pdes > I915_PDES);
		}

		pt->used_ptes += count;
	}
	return 0;

unwind:
	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				u64 start, u64 length)
{
	struct i915_page_directory *pd;
	u64 from = start;
	unsigned int pdpe;
	int ret;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (pd == vm->scratch_pd) {
			pdp->used_pdpes++;

			pd = alloc_pd(vm);
			if (IS_ERR(pd)) {
				pdp->used_pdpes--;
				goto unwind;
			}

			gen8_initialize_pd(vm, pd);
			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));

			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
		}

		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
		if (unlikely(ret))
			goto unwind_pd;
	}

	return 0;

unwind_pd:
	if (!pd->used_pdes) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;
		free_pd(vm, pd);
	}
unwind:
	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	return gen8_ppgtt_alloc_pdp(vm,
				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	u64 from = start;
	u32 pml4e;
	int ret;

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
			pdp = alloc_pdp(vm);
			if (IS_ERR(pdp))
				goto unwind;

			gen8_initialize_pdp(vm, pdp);
			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
		}

		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
		if (unlikely(ret))
			goto unwind_pdp;
	}

	return 0;

unwind_pdp:
	if (!pdp->used_pdpes) {
		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
		free_pdp(vm, pdp);
	}
unwind:
	gen8_ppgtt_clear_4lvl(vm, from, start - from);
	return -ENOMEM;
}

8448661d CW |
1518 | static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt, |
1519 | struct i915_page_directory_pointer *pdp, | |
75c7b0b8 | 1520 | u64 start, u64 length, |
ea91e401 MT |
1521 | gen8_pte_t scratch_pte, |
1522 | struct seq_file *m) | |
1523 | { | |
82ad6443 | 1524 | struct i915_address_space *vm = &ppgtt->vm; |
ea91e401 | 1525 | struct i915_page_directory *pd; |
75c7b0b8 | 1526 | u32 pdpe; |
ea91e401 | 1527 | |
e8ebd8e2 | 1528 | gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { |
ea91e401 | 1529 | struct i915_page_table *pt; |
75c7b0b8 CW |
1530 | u64 pd_len = length; |
1531 | u64 pd_start = start; | |
1532 | u32 pde; | |
ea91e401 | 1533 | |
82ad6443 | 1534 | if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd) |
ea91e401 MT |
1535 | continue; |
1536 | ||
1537 | seq_printf(m, "\tPDPE #%d\n", pdpe); | |
e8ebd8e2 | 1538 | gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) { |
75c7b0b8 | 1539 | u32 pte; |
ea91e401 MT |
1540 | gen8_pte_t *pt_vaddr; |
1541 | ||
82ad6443 | 1542 | if (pd->page_table[pde] == ppgtt->vm.scratch_pt) |
ea91e401 MT |
1543 | continue; |
1544 | ||
9231da70 | 1545 | pt_vaddr = kmap_atomic_px(pt); |
ea91e401 | 1546 | for (pte = 0; pte < GEN8_PTES; pte += 4) { |
75c7b0b8 CW |
1547 | u64 va = (pdpe << GEN8_PDPE_SHIFT | |
1548 | pde << GEN8_PDE_SHIFT | | |
1549 | pte << GEN8_PTE_SHIFT); | |
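/* Reassemble the offset within this PDP from the pdpe/pde/pte walk indices. */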
ea91e401 MT |
1550 | int i; |
1551 | bool found = false; | |
1552 | ||
1553 | for (i = 0; i < 4; i++) | |
1554 | if (pt_vaddr[pte + i] != scratch_pte) | |
1555 | found = true; | |
1556 | if (!found) | |
1557 | continue; | |
1558 | ||
1559 | seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte); | |
1560 | for (i = 0; i < 4; i++) { | |
1561 | if (pt_vaddr[pte + i] != scratch_pte) | |
1562 | seq_printf(m, " %llx", pt_vaddr[pte + i]); | |
1563 | else | |
1564 | seq_puts(m, " SCRATCH "); | |
1565 | } | |
1566 | seq_puts(m, "\n"); | |
1567 | } | |
ea91e401 MT |
1568 | kunmap_atomic(pt_vaddr); |
1569 | } | |
1570 | } | |
1571 | } | |
1572 | ||
1573 | static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) | |
1574 | { | |
82ad6443 | 1575 | struct i915_address_space *vm = &ppgtt->vm; |
894ccebe | 1576 | const gen8_pte_t scratch_pte = |
25dda4da | 1577 | gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); |
82ad6443 | 1578 | u64 start = 0, length = ppgtt->vm.total; |
ea91e401 | 1579 | |
1e6437b0 | 1580 | if (use_4lvl(vm)) { |
75c7b0b8 | 1581 | u64 pml4e; |
ea91e401 MT |
1582 | struct i915_pml4 *pml4 = &ppgtt->pml4; |
1583 | struct i915_page_directory_pointer *pdp; | |
1584 | ||
e8ebd8e2 | 1585 | gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { |
82ad6443 | 1586 | if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp) |
ea91e401 MT |
1587 | continue; |
1588 | ||
1589 | seq_printf(m, " PML4E #%llu\n", pml4e); | |
8448661d | 1590 | gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m); |
ea91e401 | 1591 | } |
1e6437b0 MK |
1592 | } else { |
1593 | gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m); | |
ea91e401 MT |
1594 | } |
1595 | } | |
1596 | ||
e2b763ca | 1597 | static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt) |
331f38e7 | 1598 | { |
82ad6443 | 1599 | struct i915_address_space *vm = &ppgtt->vm; |
e2b763ca CW |
1600 | struct i915_page_directory_pointer *pdp = &ppgtt->pdp; |
1601 | struct i915_page_directory *pd; | |
82ad6443 | 1602 | u64 start = 0, length = ppgtt->vm.total; |
e2b763ca CW |
1603 | u64 from = start; |
1604 | unsigned int pdpe; | |
331f38e7 | 1605 | |
e2b763ca CW |
1606 | gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { |
1607 | pd = alloc_pd(vm); | |
1608 | if (IS_ERR(pd)) | |
1609 | goto unwind; | |
331f38e7 | 1610 | |
e2b763ca CW |
1611 | gen8_initialize_pd(vm, pd); |
1612 | gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe); | |
1613 | pdp->used_pdpes++; | |
1614 | } | |
331f38e7 | 1615 | |
e2b763ca CW |
1616 | pdp->used_pdpes++; /* never remove */ |
1617 | return 0; | |
331f38e7 | 1618 | |
e2b763ca CW |
1619 | unwind: |
1620 | start -= from; | |
1621 | gen8_for_each_pdpe(pd, pdp, from, start, pdpe) { | |
1622 | gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); | |
1623 | free_pd(vm, pd); | |
1624 | } | |
1625 | pdp->used_pdpes = 0; | |
1626 | return -ENOMEM; | |
331f38e7 ZL |
1627 | } |
1628 | ||
eb0b44ad | 1629 | /* |
f3a964b9 BW |
1630 | * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
1631 | * registers, with a net effect resembling a 2-level page table in normal x86 |
1632 | * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of |
1633 | * legacy 32b address space. |
37aca44a | 1634 | * |
f3a964b9 | 1635 | */ |
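/*
 * For reference: each PTE maps 4096 bytes, a page table holds 512 PTEs and
 * a page directory holds 512 PDEs, so one PDP entry covers
 * 512 * 512 * 4096 = 1GB. The legacy 3-level layout uses 4 PDP entries
 * (4GB, a 32b address space), while the 4-level layout adds a PML4 of
 * 512 PDP pointers for a 48b (256TB) address space.
 */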
17f297b4 | 1636 | static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) |
37aca44a | 1637 | { |
17f297b4 CW |
1638 | struct i915_hw_ppgtt *ppgtt; |
1639 | int err; | |
1640 | ||
1641 | ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); | |
1642 | if (!ppgtt) | |
1643 | return ERR_PTR(-ENOMEM); | |
1644 | ||
63fd659f CW |
1645 | kref_init(&ppgtt->ref); |
1646 | ||
17f297b4 CW |
1647 | ppgtt->vm.i915 = i915; |
1648 | ppgtt->vm.dma = &i915->drm.pdev->dev; | |
7cb6d7ac | 1649 | |
17f297b4 | 1650 | ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ? |
1e6437b0 MK |
1651 | 1ULL << 48 : |
1652 | 1ULL << 32; | |
1653 | ||
c9e66688 CW |
1654 | /* |
1655 | * From bdw, there is support for read-only pages in the PPGTT. | |
1656 | * | |
1657 | * XXX GVT is not honouring the lack of RW in the PTE bits. | |
1658 | */ | |
1659 | ppgtt->vm.has_read_only = !intel_vgpu_active(i915); | |
250f8c81 | 1660 | |
63fd659f CW |
1661 | i915_address_space_init(&ppgtt->vm, i915); |
1662 | ||
8448661d CW |
1663 | /* There are only a few exceptions for gen >= 6: chv and bxt.
1664 | * And we are not sure about the latter, so play safe for now. |
1665 | */ | |
17f297b4 | 1666 | if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915)) |
82ad6443 | 1667 | ppgtt->vm.pt_kmap_wc = true; |
8448661d | 1668 | |
17f297b4 CW |
1669 | err = gen8_init_scratch(&ppgtt->vm); |
1670 | if (err) | |
1671 | goto err_free; | |
66df1014 | 1672 | |
17f297b4 CW |
1673 | if (use_4lvl(&ppgtt->vm)) { |
1674 | err = setup_px(&ppgtt->vm, &ppgtt->pml4); | |
1675 | if (err) | |
1676 | goto err_scratch; | |
6ac18502 | 1677 | |
82ad6443 | 1678 | gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4); |
69ab76fd | 1679 | |
82ad6443 CW |
1680 | ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl; |
1681 | ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl; | |
1682 | ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl; | |
762d9936 | 1683 | } else { |
17f297b4 CW |
1684 | err = __pdp_init(&ppgtt->vm, &ppgtt->pdp); |
1685 | if (err) | |
1686 | goto err_scratch; | |
81ba8aef | 1687 | |
17f297b4 CW |
1688 | if (intel_vgpu_active(i915)) { |
1689 | err = gen8_preallocate_top_level_pdp(ppgtt); | |
1690 | if (err) { | |
e2b763ca | 1691 | __pdp_fini(&ppgtt->pdp); |
17f297b4 | 1692 | goto err_scratch; |
e2b763ca | 1693 | } |
331f38e7 | 1694 | } |
894ccebe | 1695 | |
82ad6443 CW |
1696 | ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl; |
1697 | ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl; | |
1698 | ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl; | |
81ba8aef | 1699 | } |
6ac18502 | 1700 | |
17f297b4 | 1701 | if (intel_vgpu_active(i915)) |
650da34c ZL |
1702 | gen8_ppgtt_notify_vgt(ppgtt, true); |
1703 | ||
82ad6443 | 1704 | ppgtt->vm.cleanup = gen8_ppgtt_cleanup; |
054b9acd MK |
1705 | ppgtt->debug_dump = gen8_dump_ppgtt; |
1706 | ||
549fe88b | 1707 | ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; |
93f2cde2 CW |
1708 | ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; |
1709 | ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; | |
1710 | ppgtt->vm.vma_ops.clear_pages = clear_pages; | |
1711 | ||
17f297b4 | 1712 | return ppgtt; |
6ac18502 | 1713 | |
17f297b4 | 1714 | err_scratch: |
82ad6443 | 1715 | gen8_free_scratch(&ppgtt->vm); |
17f297b4 CW |
1716 | err_free: |
1717 | kfree(ppgtt); | |
1718 | return ERR_PTR(err); | |
d7b2633d MT |
1719 | } |
1720 | ||
35ac40d8 | 1721 | static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) |
87d60b63 | 1722 | { |
35ac40d8 | 1723 | struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); |
986dbac4 | 1724 | const gen6_pte_t scratch_pte = ppgtt->scratch_pte; |
e9e7dc41 CW |
1725 | struct i915_page_table *pt; |
1726 | u32 pte, pde; | |
1727 | ||
1728 | gen6_for_all_pdes(pt, &base->pd, pde) { | |
1729 | gen6_pte_t *vaddr; | |
1730 | ||
1731 | if (pt == base->vm.scratch_pt) | |
1732 | continue; | |
1733 | ||
1734 | if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) { | |
1735 | u32 expected = | |
1736 | GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | | |
1737 | GEN6_PDE_VALID; | |
1738 | u32 pd_entry = readl(ppgtt->pd_addr + pde); | |
1739 | ||
1740 | if (pd_entry != expected) | |
1741 | seq_printf(m, | |
1742 | "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n", | |
1743 | pde, | |
1744 | pd_entry, | |
1745 | expected); | |
1746 | ||
1747 | seq_printf(m, "\tPDE: %x\n", pd_entry); | |
1748 | } | |
1749 | ||
1750 | vaddr = kmap_atomic_px(base->pd.page_table[pde]); | |
1751 | for (pte = 0; pte < GEN6_PTES; pte += 4) { | |
87d60b63 | 1752 | int i; |
e9e7dc41 | 1753 | |
87d60b63 | 1754 | for (i = 0; i < 4; i++) |
e9e7dc41 CW |
1755 | if (vaddr[pte + i] != scratch_pte) |
1756 | break; | |
1757 | if (i == 4) | |
87d60b63 BW |
1758 | continue; |
1759 | ||
c5828105 | 1760 | seq_printf(m, "\t\t(%03d, %04d) %08llx: ", |
e9e7dc41 | 1761 | pde, pte, |
f6e35cda | 1762 | (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE); |
87d60b63 | 1763 | for (i = 0; i < 4; i++) { |
e9e7dc41 CW |
1764 | if (vaddr[pte + i] != scratch_pte) |
1765 | seq_printf(m, " %08x", vaddr[pte + i]); | |
87d60b63 | 1766 | else |
e9e7dc41 | 1767 | seq_puts(m, " SCRATCH"); |
87d60b63 BW |
1768 | } |
1769 | seq_puts(m, "\n"); | |
1770 | } | |
e9e7dc41 | 1771 | kunmap_atomic(vaddr); |
87d60b63 BW |
1772 | } |
1773 | } | |
1774 | ||
678d96fb | 1775 | /* Write pde (index) from the page directory @pd to the page table @pt */ |
35ac40d8 | 1776 | static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt, |
16a011c8 CW |
1777 | const unsigned int pde, |
1778 | const struct i915_page_table *pt) | |
6197349b | 1779 | { |
678d96fb | 1780 | /* Caller needs to make sure the write completes if necessary */ |
68a85703 CW |
1781 | iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID, |
1782 | ppgtt->pd_addr + pde); | |
678d96fb | 1783 | } |
6197349b | 1784 | |
c6be607a | 1785 | static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv) |
eeb9488e | 1786 | { |
e2f80391 | 1787 | struct intel_engine_cs *engine; |
3b3f1650 | 1788 | enum intel_engine_id id; |
3e302542 | 1789 | |
3b3f1650 | 1790 | for_each_engine(engine, dev_priv, id) { |
c6be607a TU |
1791 | u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ? |
1792 | GEN8_GFX_PPGTT_48B : 0; | |
e2f80391 | 1793 | I915_WRITE(RING_MODE_GEN7(engine), |
2dba3239 | 1794 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level)); |
eeb9488e | 1795 | } |
eeb9488e | 1796 | } |
6197349b | 1797 | |
c6be607a | 1798 | static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) |
3e302542 | 1799 | { |
e2f80391 | 1800 | struct intel_engine_cs *engine; |
75c7b0b8 | 1801 | u32 ecochk, ecobits; |
3b3f1650 | 1802 | enum intel_engine_id id; |
6197349b | 1803 | |
b4a74e3a BW |
1804 | ecobits = I915_READ(GAC_ECO_BITS); |
1805 | I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); | |
a65c2fcd | 1806 | |
b4a74e3a | 1807 | ecochk = I915_READ(GAM_ECOCHK); |
772c2a51 | 1808 | if (IS_HASWELL(dev_priv)) { |
b4a74e3a BW |
1809 | ecochk |= ECOCHK_PPGTT_WB_HSW; |
1810 | } else { | |
1811 | ecochk |= ECOCHK_PPGTT_LLC_IVB; | |
1812 | ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; | |
1813 | } | |
1814 | I915_WRITE(GAM_ECOCHK, ecochk); | |
a65c2fcd | 1815 | |
3b3f1650 | 1816 | for_each_engine(engine, dev_priv, id) { |
6197349b | 1817 | /* GFX_MODE is per-ring on gen7+ */ |
e2f80391 | 1818 | I915_WRITE(RING_MODE_GEN7(engine), |
b4a74e3a | 1819 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
6197349b | 1820 | } |
b4a74e3a | 1821 | } |
6197349b | 1822 | |
c6be607a | 1823 | static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) |
b4a74e3a | 1824 | { |
75c7b0b8 | 1825 | u32 ecochk, gab_ctl, ecobits; |
a65c2fcd | 1826 | |
b4a74e3a BW |
1827 | ecobits = I915_READ(GAC_ECO_BITS); |
1828 | I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | | |
1829 | ECOBITS_PPGTT_CACHE64B); | |
6197349b | 1830 | |
b4a74e3a BW |
1831 | gab_ctl = I915_READ(GAB_CTL); |
1832 | I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); | |
1833 | ||
1834 | ecochk = I915_READ(GAM_ECOCHK); | |
1835 | I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); | |
1836 | ||
1837 | I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | |
6197349b BW |
1838 | } |
1839 | ||
1d2a314c | 1840 | /* PPGTT support for Sandybridge/Gen6 and later */ |
853ba5d2 | 1841 | static void gen6_ppgtt_clear_range(struct i915_address_space *vm, |
dd19674b | 1842 | u64 start, u64 length) |
1d2a314c | 1843 | { |
4a192c7e | 1844 | struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); |
21c62a9d | 1845 | unsigned int first_entry = start / I915_GTT_PAGE_SIZE; |
dd19674b CW |
1846 | unsigned int pde = first_entry / GEN6_PTES; |
1847 | unsigned int pte = first_entry % GEN6_PTES; | |
21c62a9d | 1848 | unsigned int num_entries = length / I915_GTT_PAGE_SIZE; |
986dbac4 | 1849 | const gen6_pte_t scratch_pte = ppgtt->scratch_pte; |
1d2a314c | 1850 | |
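/* Walk the range one page table at a time: each table holds GEN6_PTES
 * entries, and tables that end up empty are flagged via scan_for_unused_pt
 * so they can be reclaimed when the page directory is unbound.
 */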
7bddb01f | 1851 | while (num_entries) { |
4a192c7e CW |
1852 | struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++]; |
1853 | const unsigned int end = min(pte + num_entries, GEN6_PTES); | |
1854 | const unsigned int count = end - pte; | |
dd19674b | 1855 | gen6_pte_t *vaddr; |
7bddb01f | 1856 | |
4a192c7e CW |
1857 | GEM_BUG_ON(pt == vm->scratch_pt); |
1858 | ||
1859 | num_entries -= count; | |
1860 | ||
1861 | GEM_BUG_ON(count > pt->used_ptes); | |
1862 | pt->used_ptes -= count; | |
1863 | if (!pt->used_ptes) | |
1864 | ppgtt->scan_for_unused_pt = true; | |
1d2a314c | 1865 | |
549fe88b CW |
1866 | /* |
1867 | * Note that the hw doesn't support removing PDE on the fly | |
dd19674b CW |
1868 | * (they are cached inside the context with no means to |
1869 | * invalidate the cache), so we can only reset the PTE | |
1870 | * entries back to scratch. | |
1871 | */ | |
1d2a314c | 1872 | |
dd19674b CW |
1873 | vaddr = kmap_atomic_px(pt); |
1874 | do { | |
1875 | vaddr[pte++] = scratch_pte; | |
1876 | } while (pte < end); | |
1877 | kunmap_atomic(vaddr); | |
1d2a314c | 1878 | |
dd19674b | 1879 | pte = 0; |
7bddb01f | 1880 | } |
1d2a314c SV |
1881 | } |
1882 | ||
853ba5d2 | 1883 | static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, |
4a234c5f | 1884 | struct i915_vma *vma, |
75c7b0b8 CW |
1885 | enum i915_cache_level cache_level, |
1886 | u32 flags) | |
def886c3 | 1887 | { |
e5716f55 | 1888 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
21c62a9d | 1889 | unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE; |
07749ef3 MT |
1890 | unsigned act_pt = first_entry / GEN6_PTES; |
1891 | unsigned act_pte = first_entry % GEN6_PTES; | |
b31144c0 | 1892 | const u32 pte_encode = vm->pte_encode(0, cache_level, flags); |
5684514b | 1893 | struct sgt_dma iter = sgt_dma(vma); |
b31144c0 CW |
1894 | gen6_pte_t *vaddr; |
1895 | ||
4a192c7e CW |
1896 | GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt); |
1897 | ||
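/* Write a PTE for each page in the scatterlist, advancing to the next
 * page table (and a fresh kmap) every GEN6_PTES entries.
 */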
9231da70 | 1898 | vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]); |
b31144c0 CW |
1899 | do { |
1900 | vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); | |
6e995e23 | 1901 | |
f6e35cda | 1902 | iter.dma += I915_GTT_PAGE_SIZE; |
b31144c0 CW |
1903 | if (iter.dma == iter.max) { |
1904 | iter.sg = __sg_next(iter.sg); | |
1905 | if (!iter.sg) | |
1906 | break; | |
6e995e23 | 1907 | |
b31144c0 CW |
1908 | iter.dma = sg_dma_address(iter.sg); |
1909 | iter.max = iter.dma + iter.sg->length; | |
1910 | } | |
24f3a8cf | 1911 | |
07749ef3 | 1912 | if (++act_pte == GEN6_PTES) { |
9231da70 CW |
1913 | kunmap_atomic(vaddr); |
1914 | vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]); | |
6e995e23 | 1915 | act_pte = 0; |
def886c3 | 1916 | } |
b31144c0 | 1917 | } while (1); |
9231da70 | 1918 | kunmap_atomic(vaddr); |
d9ec12f8 MA |
1919 | |
1920 | vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; | |
def886c3 SV |
1921 | } |
1922 | ||
678d96fb | 1923 | static int gen6_alloc_va_range(struct i915_address_space *vm, |
dd19674b | 1924 | u64 start, u64 length) |
678d96fb | 1925 | { |
35ac40d8 | 1926 | struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); |
ec565b3c | 1927 | struct i915_page_table *pt; |
dd19674b CW |
1928 | u64 from = start; |
1929 | unsigned int pde; | |
1930 | bool flush = false; | |
4933d519 | 1931 | |
35ac40d8 | 1932 | gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) { |
4a192c7e CW |
1933 | const unsigned int count = gen6_pte_count(start, length); |
1934 | ||
dd19674b CW |
1935 | if (pt == vm->scratch_pt) { |
1936 | pt = alloc_pt(vm); | |
1937 | if (IS_ERR(pt)) | |
1938 | goto unwind_out; | |
4933d519 | 1939 | |
986dbac4 | 1940 | gen6_initialize_pt(ppgtt, pt); |
35ac40d8 | 1941 | ppgtt->base.pd.page_table[pde] = pt; |
e9e7dc41 CW |
1942 | |
1943 | if (i915_vma_is_bound(ppgtt->vma, | |
1944 | I915_VMA_GLOBAL_BIND)) { | |
1945 | gen6_write_pde(ppgtt, pde, pt); | |
1946 | flush = true; | |
1947 | } | |
4a192c7e CW |
1948 | |
1949 | GEM_BUG_ON(pt->used_ptes); | |
4933d519 | 1950 | } |
4a192c7e CW |
1951 | |
1952 | pt->used_ptes += count; | |
4933d519 MT |
1953 | } |
1954 | ||
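/* PDEs were written above only if the page directory is already bound
 * into the GGTT; in that case mark the TLBs dirty and invalidate the
 * GGTT so the hardware sees the newly allocated page tables.
 */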
dd19674b | 1955 | if (flush) { |
35ac40d8 CW |
1956 | mark_tlbs_dirty(&ppgtt->base); |
1957 | gen6_ggtt_invalidate(ppgtt->base.vm.i915); | |
678d96fb BW |
1958 | } |
1959 | ||
1960 | return 0; | |
4933d519 MT |
1961 | |
1962 | unwind_out: | |
b4e2727d | 1963 | gen6_ppgtt_clear_range(vm, from, start - from); |
dd19674b | 1964 | return -ENOMEM; |
678d96fb BW |
1965 | } |
1966 | ||
e9e7dc41 | 1967 | static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt) |
8776f02b | 1968 | { |
e9e7dc41 CW |
1969 | struct i915_address_space * const vm = &ppgtt->base.vm; |
1970 | struct i915_page_table *unused; | |
1971 | u32 pde; | |
8bcdd0f7 | 1972 | int ret; |
8776f02b | 1973 | |
1abb70f5 | 1974 | ret = setup_scratch_page(vm, __GFP_HIGHMEM); |
8bcdd0f7 CW |
1975 | if (ret) |
1976 | return ret; | |
8776f02b | 1977 | |
986dbac4 CW |
1978 | ppgtt->scratch_pte = |
1979 | vm->pte_encode(vm->scratch_page.daddr, | |
1980 | I915_CACHE_NONE, PTE_READ_ONLY); | |
1981 | ||
8448661d | 1982 | vm->scratch_pt = alloc_pt(vm); |
8776f02b | 1983 | if (IS_ERR(vm->scratch_pt)) { |
8448661d | 1984 | cleanup_scratch_page(vm); |
8776f02b MK |
1985 | return PTR_ERR(vm->scratch_pt); |
1986 | } | |
1987 | ||
986dbac4 | 1988 | gen6_initialize_pt(ppgtt, vm->scratch_pt); |
e9e7dc41 CW |
1989 | gen6_for_all_pdes(unused, &ppgtt->base.pd, pde) |
1990 | ppgtt->base.pd.page_table[pde] = vm->scratch_pt; | |
8776f02b MK |
1991 | |
1992 | return 0; | |
1993 | } | |
1994 | ||
a9ded785 | 1995 | static void gen6_ppgtt_free_scratch(struct i915_address_space *vm) |
8776f02b | 1996 | { |
8448661d CW |
1997 | free_pt(vm, vm->scratch_pt); |
1998 | cleanup_scratch_page(vm); | |
8776f02b MK |
1999 | } |
2000 | ||
a9ded785 | 2001 | static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt) |
a00d825d | 2002 | { |
09942c65 | 2003 | struct i915_page_table *pt; |
75c7b0b8 | 2004 | u32 pde; |
4933d519 | 2005 | |
35ac40d8 | 2006 | gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) |
a9ded785 CW |
2007 | if (pt != ppgtt->base.vm.scratch_pt) |
2008 | free_pt(&ppgtt->base.vm, pt); | |
2009 | } | |
2010 | ||
2011 | static void gen6_ppgtt_cleanup(struct i915_address_space *vm) | |
2012 | { | |
2013 | struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); | |
06fda602 | 2014 | |
e9e7dc41 | 2015 | i915_vma_destroy(ppgtt->vma); |
a9ded785 CW |
2016 | |
2017 | gen6_ppgtt_free_pd(ppgtt); | |
2018 | gen6_ppgtt_free_scratch(vm); | |
3440d265 SV |
2019 | } |
2020 | ||
e9e7dc41 | 2021 | static int pd_vma_set_pages(struct i915_vma *vma) |
3440d265 | 2022 | { |
e9e7dc41 CW |
2023 | vma->pages = ERR_PTR(-ENODEV); |
2024 | return 0; | |
2025 | } | |
1d2a314c | 2026 | |
e9e7dc41 CW |
2027 | static void pd_vma_clear_pages(struct i915_vma *vma) |
2028 | { | |
2029 | GEM_BUG_ON(!vma->pages); | |
4933d519 | 2030 | |
e9e7dc41 CW |
2031 | vma->pages = NULL; |
2032 | } | |
2033 | ||
2034 | static int pd_vma_bind(struct i915_vma *vma, | |
2035 | enum i915_cache_level cache_level, | |
2036 | u32 unused) | |
2037 | { | |
2038 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); | |
2039 | struct gen6_hw_ppgtt *ppgtt = vma->private; | |
f6e35cda | 2040 | u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; |
e9e7dc41 CW |
2041 | struct i915_page_table *pt; |
2042 | unsigned int pde; | |
678d96fb | 2043 | |
e9e7dc41 CW |
2044 | ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); |
2045 | ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; | |
1d2a314c | 2046 | |
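/* The gen6 page directory lives inside the GGTT, so its PDE slots are
 * written through the same gsm mapping as GGTT PTEs; now that the VMA
 * is bound, (re)write every PDE at its new offset.
 */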
e9e7dc41 CW |
2047 | gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) |
2048 | gen6_write_pde(ppgtt, pde, pt); | |
52c126ee | 2049 | |
e9e7dc41 CW |
2050 | mark_tlbs_dirty(&ppgtt->base); |
2051 | gen6_ggtt_invalidate(ppgtt->base.vm.i915); | |
52c126ee | 2052 | |
c8c26622 | 2053 | return 0; |
4933d519 | 2054 | } |
06dc68d6 | 2055 | |
e9e7dc41 | 2056 | static void pd_vma_unbind(struct i915_vma *vma) |
4933d519 | 2057 | { |
4a192c7e CW |
2058 | struct gen6_hw_ppgtt *ppgtt = vma->private; |
2059 | struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt; | |
2060 | struct i915_page_table *pt; | |
2061 | unsigned int pde; | |
2062 | ||
2063 | if (!ppgtt->scan_for_unused_pt) | |
2064 | return; | |
2065 | ||
2066 | /* Free all no longer used page tables */ | |
2067 | gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) { | |
2068 | if (pt->used_ptes || pt == scratch_pt) | |
2069 | continue; | |
2070 | ||
2071 | free_pt(&ppgtt->base.vm, pt); | |
2072 | ppgtt->base.pd.page_table[pde] = scratch_pt; | |
2073 | } | |
2074 | ||
2075 | ppgtt->scan_for_unused_pt = false; | |
e9e7dc41 CW |
2076 | } |
2077 | ||
2078 | static const struct i915_vma_ops pd_vma_ops = { | |
2079 | .set_pages = pd_vma_set_pages, | |
2080 | .clear_pages = pd_vma_clear_pages, | |
2081 | .bind_vma = pd_vma_bind, | |
2082 | .unbind_vma = pd_vma_unbind, | |
2083 | }; | |
2084 | ||
2085 | static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) | |
2086 | { | |
2087 | struct drm_i915_private *i915 = ppgtt->base.vm.i915; | |
2088 | struct i915_ggtt *ggtt = &i915->ggtt; | |
2089 | struct i915_vma *vma; | |
e9e7dc41 CW |
2090 | |
2091 | GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); | |
2092 | GEM_BUG_ON(size > ggtt->vm.total); | |
2093 | ||
2094 | vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL); | |
2095 | if (!vma) | |
2096 | return ERR_PTR(-ENOMEM); | |
2097 | ||
e9e7dc41 CW |
2098 | init_request_active(&vma->last_fence, NULL); |
2099 | ||
2100 | vma->vm = &ggtt->vm; | |
2101 | vma->ops = &pd_vma_ops; | |
2102 | vma->private = ppgtt; | |
2103 | ||
5c3f8c22 CW |
2104 | vma->active = RB_ROOT; |
2105 | ||
e9e7dc41 CW |
2106 | vma->size = size; |
2107 | vma->fence_size = size; | |
2108 | vma->flags = I915_VMA_GGTT; | |
2109 | vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ | |
2110 | ||
2111 | INIT_LIST_HEAD(&vma->obj_link); | |
2112 | list_add(&vma->vm_link, &vma->vm->unbound_list); | |
2113 | ||
2114 | return vma; | |
2115 | } | |
1d2a314c | 2116 | |
a2bbf714 | 2117 | int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) |
e9e7dc41 CW |
2118 | { |
2119 | struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); | |
2120 | ||
a2bbf714 CW |
2121 | /* |
2122 | * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt | |
2123 | * which will be pinned into every active context. | |
2124 | * (When vma->pin_count becomes atomic, I expect we will naturally | |
2125 | * need a larger, unpacked, type and kill this redundancy.) | |
2126 | */ | |
2127 | if (ppgtt->pin_count++) | |
2128 | return 0; | |
2129 | ||
e9e7dc41 CW |
2130 | /* |
2131 | * PPGTT PDEs reside in the GGTT and consist of 512 entries. The |
2132 | * allocator works in address space sizes, so it's multiplied by page | |
2133 | * size. We allocate at the top of the GTT to avoid fragmentation. | |
2134 | */ | |
2135 | return i915_vma_pin(ppgtt->vma, | |
2136 | 0, GEN6_PD_ALIGN, | |
2137 | PIN_GLOBAL | PIN_HIGH); | |
b146520f BW |
2138 | } |
2139 | ||
a2bbf714 CW |
2140 | void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) |
2141 | { | |
2142 | struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); | |
2143 | ||
2144 | GEM_BUG_ON(!ppgtt->pin_count); | |
2145 | if (--ppgtt->pin_count) | |
2146 | return; | |
2147 | ||
2148 | i915_vma_unpin(ppgtt->vma); | |
2149 | } | |
2150 | ||
17f297b4 | 2151 | static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) |
b146520f | 2152 | { |
17f297b4 | 2153 | struct i915_ggtt * const ggtt = &i915->ggtt; |
35ac40d8 | 2154 | struct gen6_hw_ppgtt *ppgtt; |
17f297b4 CW |
2155 | int err; |
2156 | ||
2157 | ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); | |
2158 | if (!ppgtt) | |
2159 | return ERR_PTR(-ENOMEM); | |
2160 | ||
63fd659f CW |
2161 | kref_init(&ppgtt->base.ref); |
2162 | ||
35ac40d8 CW |
2163 | ppgtt->base.vm.i915 = i915; |
2164 | ppgtt->base.vm.dma = &i915->drm.pdev->dev; | |
b146520f | 2165 | |
f6e35cda | 2166 | ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE; |
1d2a314c | 2167 | |
63fd659f CW |
2168 | i915_address_space_init(&ppgtt->base.vm, i915); |
2169 | ||
549fe88b | 2170 | ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; |
35ac40d8 CW |
2171 | ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; |
2172 | ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; | |
2173 | ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; | |
2174 | ppgtt->base.debug_dump = gen6_dump_ppgtt; | |
054b9acd | 2175 | |
549fe88b | 2176 | ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma; |
35ac40d8 CW |
2177 | ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma; |
2178 | ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages; | |
2179 | ppgtt->base.vm.vma_ops.clear_pages = clear_pages; | |
93f2cde2 | 2180 | |
a9ded785 CW |
2181 | ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; |
2182 | ||
e9e7dc41 | 2183 | err = gen6_ppgtt_init_scratch(ppgtt); |
a9ded785 CW |
2184 | if (err) |
2185 | goto err_free; | |
2186 | ||
e9e7dc41 CW |
2187 | ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); |
2188 | if (IS_ERR(ppgtt->vma)) { | |
2189 | err = PTR_ERR(ppgtt->vma); | |
a9ded785 | 2190 | goto err_scratch; |
e9e7dc41 | 2191 | } |
a9ded785 | 2192 | |
35ac40d8 | 2193 | return &ppgtt->base; |
3440d265 | 2194 | |
a9ded785 CW |
2195 | err_scratch: |
2196 | gen6_ppgtt_free_scratch(&ppgtt->base.vm); | |
17f297b4 CW |
2197 | err_free: |
2198 | kfree(ppgtt); | |
2199 | return ERR_PTR(err); | |
fa76da34 | 2200 | } |
c114f76a | 2201 | |
c6be607a | 2202 | static void gtt_write_workarounds(struct drm_i915_private *dev_priv) |
d5165ebd | 2203 | { |
d5165ebd TG |
2204 | /* This function is for GTT-related workarounds. It is
2205 | * called on driver load and after a GPU reset, so workarounds can be placed |
2206 | * here even though a GPU reset would otherwise wipe them. |
2207 | */ | |
cc38cae7 | 2208 | /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */ |
8652744b | 2209 | if (IS_BROADWELL(dev_priv)) |
d5165ebd | 2210 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); |
920a14b2 | 2211 | else if (IS_CHERRYVIEW(dev_priv)) |
d5165ebd | 2212 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); |
9fb5026f | 2213 | else if (IS_GEN9_LP(dev_priv)) |
d5165ebd | 2214 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); |
cc38cae7 OM |
2215 | else if (INTEL_GEN(dev_priv) >= 9) |
2216 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); | |
9a6330cf MA |
2217 | |
2218 | /* | |
2219 | * To support 64K PTEs we need to first enable the use of the | |
2220 | * Intermediate-Page-Size(IPS) bit of the PDE field via some magical | |
2221 | * mmio, otherwise the page-walker will simply ignore the IPS bit. This | |
2222 | * shouldn't be needed after GEN10. | |
2223 | * | |
2224 | * 64K pages were first introduced from BDW+, although technically they | |
2225 | * only *work* from gen9+. For pre-BDW we instead have the option for | |
2226 | * 32K pages, but we don't currently have any support for it in our | |
2227 | * driver. | |
2228 | */ | |
2229 | if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) && | |
2230 | INTEL_GEN(dev_priv) <= 10) | |
2231 | I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA, | |
2232 | I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) | | |
2233 | GAMW_ECO_ENABLE_64K_IPS_FIELD); | |
d5165ebd TG |
2234 | } |
2235 | ||
c6be607a | 2236 | int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) |
82460d97 | 2237 | { |
c6be607a | 2238 | gtt_write_workarounds(dev_priv); |
d5165ebd | 2239 | |
671b5013 TD |
2240 | /* In the case of execlists, PPGTT is enabled by the context descriptor |
2241 | * and the PDPs are contained within the context itself. We don't | |
2242 | * need to do anything here. */ | |
fb5c551a | 2243 | if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) |
671b5013 TD |
2244 | return 0; |
2245 | ||
c6be607a | 2246 | if (!USES_PPGTT(dev_priv)) |
82460d97 SV |
2247 | return 0; |
2248 | ||
5db94019 | 2249 | if (IS_GEN6(dev_priv)) |
c6be607a | 2250 | gen6_ppgtt_enable(dev_priv); |
5db94019 | 2251 | else if (IS_GEN7(dev_priv)) |
c6be607a TU |
2252 | gen7_ppgtt_enable(dev_priv); |
2253 | else if (INTEL_GEN(dev_priv) >= 8) | |
2254 | gen8_ppgtt_enable(dev_priv); | |
82460d97 | 2255 | else |
c6be607a | 2256 | MISSING_CASE(INTEL_GEN(dev_priv)); |
82460d97 | 2257 | |
4ad2fd88 JH |
2258 | return 0; |
2259 | } | |
1d2a314c | 2260 | |
17f297b4 CW |
2261 | static struct i915_hw_ppgtt * |
2262 | __hw_ppgtt_create(struct drm_i915_private *i915) | |
2263 | { | |
2264 | if (INTEL_GEN(i915) < 8) | |
2265 | return gen6_ppgtt_create(i915); | |
2266 | else | |
2267 | return gen8_ppgtt_create(i915); | |
2268 | } | |
2269 | ||
4d884705 | 2270 | struct i915_hw_ppgtt * |
17f297b4 | 2271 | i915_ppgtt_create(struct drm_i915_private *i915, |
63fd659f | 2272 | struct drm_i915_file_private *fpriv) |
4d884705 SV |
2273 | { |
2274 | struct i915_hw_ppgtt *ppgtt; | |
4d884705 | 2275 | |
17f297b4 CW |
2276 | ppgtt = __hw_ppgtt_create(i915); |
2277 | if (IS_ERR(ppgtt)) | |
2278 | return ppgtt; | |
4d884705 | 2279 | |
82ad6443 | 2280 | ppgtt->vm.file = fpriv; |
1188bc66 | 2281 | |
82ad6443 | 2282 | trace_i915_ppgtt_create(&ppgtt->vm); |
198c974d | 2283 | |
4d884705 SV |
2284 | return ppgtt; |
2285 | } | |
2286 | ||
0c7eeda1 | 2287 | void i915_ppgtt_close(struct i915_address_space *vm) |
3365e226 CW |
2288 | { |
2289 | GEM_BUG_ON(vm->closed); | |
2290 | vm->closed = true; | |
2291 | } | |
2292 | ||
2293 | static void ppgtt_destroy_vma(struct i915_address_space *vm) | |
0c7eeda1 CW |
2294 | { |
2295 | struct list_head *phases[] = { | |
2296 | &vm->active_list, | |
2297 | &vm->inactive_list, | |
2298 | &vm->unbound_list, | |
2299 | NULL, | |
2300 | }, **phase; | |
2301 | ||
0c7eeda1 | 2302 | vm->closed = true; |
0c7eeda1 CW |
2303 | for (phase = phases; *phase; phase++) { |
2304 | struct i915_vma *vma, *vn; | |
2305 | ||
2306 | list_for_each_entry_safe(vma, vn, *phase, vm_link) | |
3365e226 | 2307 | i915_vma_destroy(vma); |
0c7eeda1 CW |
2308 | } |
2309 | } | |
2310 | ||
ed9724dd | 2311 | void i915_ppgtt_release(struct kref *kref) |
ee960be7 SV |
2312 | { |
2313 | struct i915_hw_ppgtt *ppgtt = | |
2314 | container_of(kref, struct i915_hw_ppgtt, ref); | |
2315 | ||
82ad6443 | 2316 | trace_i915_ppgtt_release(&ppgtt->vm); |
198c974d | 2317 | |
82ad6443 | 2318 | ppgtt_destroy_vma(&ppgtt->vm); |
3365e226 | 2319 | |
82ad6443 CW |
2320 | GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list)); |
2321 | GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list)); | |
2322 | GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list)); | |
ee960be7 | 2323 | |
82ad6443 CW |
2324 | ppgtt->vm.cleanup(&ppgtt->vm); |
2325 | i915_address_space_fini(&ppgtt->vm); | |
ee960be7 SV |
2326 | kfree(ppgtt); |
2327 | } | |
1d2a314c | 2328 | |
a81cc00c BW |
2329 | /* Certain Gen5 chipsets require idling the GPU before |
2330 | * unmapping anything from the GTT when VT-d is enabled. | |
2331 | */ | |
97d6d7ab | 2332 | static bool needs_idle_maps(struct drm_i915_private *dev_priv) |
a81cc00c | 2333 | { |
a81cc00c BW |
2334 | /* Query intel_iommu to see if we need the workaround. Presumably that |
2335 | * was loaded first. | |
2336 | */ | |
80debff8 | 2337 | return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active(); |
a81cc00c BW |
2338 | } |
2339 | ||
09605548 | 2340 | static void gen6_check_faults(struct drm_i915_private *dev_priv) |
828c7908 | 2341 | { |
e2f80391 | 2342 | struct intel_engine_cs *engine; |
3b3f1650 | 2343 | enum intel_engine_id id; |
b03ec3d6 | 2344 | u32 fault; |
828c7908 | 2345 | |
3b3f1650 | 2346 | for_each_engine(engine, dev_priv, id) { |
b03ec3d6 MT |
2347 | fault = I915_READ(RING_FAULT_REG(engine)); |
2348 | if (fault & RING_FAULT_VALID) { | |
828c7908 | 2349 | DRM_DEBUG_DRIVER("Unexpected fault\n" |
59a5d290 | 2350 | "\tAddr: 0x%08lx\n" |
828c7908 BW |
2351 | "\tAddress space: %s\n" |
2352 | "\tSource ID: %d\n" | |
2353 | "\tType: %d\n", | |
b03ec3d6 MT |
2354 | fault & PAGE_MASK, |
2355 | fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", | |
2356 | RING_FAULT_SRCID(fault), | |
2357 | RING_FAULT_FAULT_TYPE(fault)); | |
828c7908 BW |
2358 | } |
2359 | } | |
b03ec3d6 MT |
2360 | } |
2361 | ||
09605548 | 2362 | static void gen8_check_faults(struct drm_i915_private *dev_priv) |
b03ec3d6 MT |
2363 | { |
2364 | u32 fault = I915_READ(GEN8_RING_FAULT_REG); | |
2365 | ||
2366 | if (fault & RING_FAULT_VALID) { | |
5a3f58df OM |
2367 | u32 fault_data0, fault_data1; |
2368 | u64 fault_addr; | |
2369 | ||
2370 | fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0); | |
2371 | fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); | |
2372 | fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | | |
2373 | ((u64)fault_data0 << 12); | |
2374 | ||
b03ec3d6 | 2375 | DRM_DEBUG_DRIVER("Unexpected fault\n" |
5a3f58df OM |
2376 | "\tAddr: 0x%08x_%08x\n" |
2377 | "\tAddress space: %s\n" | |
b03ec3d6 MT |
2378 | "\tEngine ID: %d\n" |
2379 | "\tSource ID: %d\n" | |
2380 | "\tType: %d\n", | |
5a3f58df OM |
2381 | upper_32_bits(fault_addr), |
2382 | lower_32_bits(fault_addr), | |
2383 | fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", | |
b03ec3d6 MT |
2384 | GEN8_RING_FAULT_ENGINE_ID(fault), |
2385 | RING_FAULT_SRCID(fault), | |
2386 | RING_FAULT_FAULT_TYPE(fault)); | |
b03ec3d6 | 2387 | } |
b03ec3d6 MT |
2388 | } |
2389 | ||
2390 | void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) | |
2391 | { | |
2392 | /* From GEN8 onwards we only have one 'All Engine Fault Register' */ | |
2393 | if (INTEL_GEN(dev_priv) >= 8) | |
09605548 | 2394 | gen8_check_faults(dev_priv); |
b03ec3d6 | 2395 | else if (INTEL_GEN(dev_priv) >= 6) |
09605548 | 2396 | gen6_check_faults(dev_priv); |
b03ec3d6 MT |
2397 | else |
2398 | return; | |
09605548 LL |
2399 | |
2400 | i915_clear_error_registers(dev_priv); | |
828c7908 BW |
2401 | } |
2402 | ||
275a991c | 2403 | void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) |
828c7908 | 2404 | { |
72e96d64 | 2405 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
828c7908 BW |
2406 | |
2407 | /* Don't bother messing with faults pre GEN6 as we have little | |
2408 | * documentation supporting that it's a good idea. | |
2409 | */ | |
275a991c | 2410 | if (INTEL_GEN(dev_priv) < 6) |
828c7908 BW |
2411 | return; |
2412 | ||
dc97997a | 2413 | i915_check_and_clear_faults(dev_priv); |
828c7908 | 2414 | |
82ad6443 | 2415 | ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); |
91e56499 | 2416 | |
7c3f86b6 | 2417 | i915_ggtt_invalidate(dev_priv); |
828c7908 BW |
2418 | } |
2419 | ||
03ac84f1 CW |
2420 | int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, |
2421 | struct sg_table *pages) | |
7c2e6fdf | 2422 | { |
1a292fa5 | 2423 | do { |
82e07602 CW |
2424 | if (dma_map_sg_attrs(&obj->base.dev->pdev->dev, |
2425 | pages->sgl, pages->nents, | |
2426 | PCI_DMA_BIDIRECTIONAL, | |
2427 | DMA_ATTR_NO_WARN)) | |
1a292fa5 CW |
2428 | return 0; |
2429 | ||
2430 | /* If the DMA remap fails, one cause can be that we have | |
2431 | * too many objects pinned in a small remapping table, | |
2432 | * such as swiotlb. Incrementally purge all other objects and | |
2433 | * try again - if there are no more pages to remove from | |
2434 | * the DMA remapper, i915_gem_shrink will return 0. | |
2435 | */ | |
2436 | GEM_BUG_ON(obj->mm.pages == pages); | |
2437 | } while (i915_gem_shrink(to_i915(obj->base.dev), | |
912d572d | 2438 | obj->base.size >> PAGE_SHIFT, NULL, |
1a292fa5 CW |
2439 | I915_SHRINK_BOUND | |
2440 | I915_SHRINK_UNBOUND | | |
2441 | I915_SHRINK_ACTIVE)); | |
9da3da66 | 2442 | |
03ac84f1 | 2443 | return -ENOSPC; |
7c2e6fdf SV |
2444 | } |
2445 | ||
2c642b07 | 2446 | static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) |
94ec8f61 | 2447 | { |
94ec8f61 | 2448 | writeq(pte, addr); |
94ec8f61 BW |
2449 | } |
2450 | ||
d6473f56 CW |
2451 | static void gen8_ggtt_insert_page(struct i915_address_space *vm, |
2452 | dma_addr_t addr, | |
75c7b0b8 | 2453 | u64 offset, |
d6473f56 CW |
2454 | enum i915_cache_level level, |
2455 | u32 unused) | |
2456 | { | |
7c3f86b6 | 2457 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
d6473f56 | 2458 | gen8_pte_t __iomem *pte = |
21c62a9d | 2459 | (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; |
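/* gsm is the ioremapped GGTT page table; each PTE maps one 4K page, so
 * the slot index is simply the GGTT offset divided by the page size.
 */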
d6473f56 | 2460 | |
25dda4da | 2461 | gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); |
d6473f56 | 2462 | |
7c3f86b6 | 2463 | ggtt->invalidate(vm->i915); |
d6473f56 CW |
2464 | } |
2465 | ||
94ec8f61 | 2466 | static void gen8_ggtt_insert_entries(struct i915_address_space *vm, |
4a234c5f | 2467 | struct i915_vma *vma, |
75c7b0b8 | 2468 | enum i915_cache_level level, |
250f8c81 | 2469 | u32 flags) |
94ec8f61 | 2470 | { |
ce7fda2e | 2471 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
85d1225e DG |
2472 | struct sgt_iter sgt_iter; |
2473 | gen8_pte_t __iomem *gtt_entries; | |
25dda4da | 2474 | const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0); |
85d1225e | 2475 | dma_addr_t addr; |
be69459a | 2476 | |
3e977ac6 CW |
2477 | /* |
2478 | * Note that we ignore PTE_READ_ONLY here. The caller must be careful | |
2479 | * not to allow the user to override access to a read only page. | |
2480 | */ | |
250f8c81 | 2481 | |
894ccebe | 2482 | gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; |
21c62a9d | 2483 | gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; |
4a234c5f | 2484 | for_each_sgt_dma(addr, sgt_iter, vma->pages) |
894ccebe | 2485 | gen8_set_pte(gtt_entries++, pte_encode | addr); |
85d1225e | 2486 | |
ca6acc25 MK |
2487 | /* |
2488 | * We want to flush the TLBs only after we're certain all the PTE | |
2489 | * updates have finished. | |
94ec8f61 | 2490 | */ |
7c3f86b6 | 2491 | ggtt->invalidate(vm->i915); |
94ec8f61 BW |
2492 | } |
2493 | ||
d6473f56 CW |
2494 | static void gen6_ggtt_insert_page(struct i915_address_space *vm, |
2495 | dma_addr_t addr, | |
75c7b0b8 | 2496 | u64 offset, |
d6473f56 CW |
2497 | enum i915_cache_level level, |
2498 | u32 flags) | |
2499 | { | |
7c3f86b6 | 2500 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
d6473f56 | 2501 | gen6_pte_t __iomem *pte = |
21c62a9d | 2502 | (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; |
d6473f56 | 2503 | |
4fb84d99 | 2504 | iowrite32(vm->pte_encode(addr, level, flags), pte); |
d6473f56 | 2505 | |
7c3f86b6 | 2506 | ggtt->invalidate(vm->i915); |
d6473f56 CW |
2507 | } |
2508 | ||
e76e9aeb BW |
2509 | /* |
2510 | * Binds an object into the global gtt with the specified cache level. The object | |
2511 | * will be accessible to the GPU via commands whose operands reference offsets | |
2512 | * within the global GTT as well as accessible by the GPU through the GMADR | |
2513 | * mapped BAR (dev_priv->mm.gtt->gtt). | |
2514 | */ | |
853ba5d2 | 2515 | static void gen6_ggtt_insert_entries(struct i915_address_space *vm, |
4a234c5f | 2516 | struct i915_vma *vma, |
75c7b0b8 CW |
2517 | enum i915_cache_level level, |
2518 | u32 flags) | |
e76e9aeb | 2519 | { |
ce7fda2e | 2520 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
b31144c0 | 2521 | gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; |
21c62a9d | 2522 | unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; |
b31144c0 | 2523 | struct sgt_iter iter; |
85d1225e | 2524 | dma_addr_t addr; |
4a234c5f | 2525 | for_each_sgt_dma(addr, iter, vma->pages) |
b31144c0 | 2526 | iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); |
0f9b91c7 | 2527 | |
ca6acc25 MK |
2528 | /* |
2529 | * We want to flush the TLBs only after we're certain all the PTE | |
2530 | * updates have finished. | |
0f9b91c7 | 2531 | */ |
7c3f86b6 | 2532 | ggtt->invalidate(vm->i915); |
e76e9aeb BW |
2533 | } |
2534 | ||
f7770bfd | 2535 | static void nop_clear_range(struct i915_address_space *vm, |
75c7b0b8 | 2536 | u64 start, u64 length) |
f7770bfd CW |
2537 | { |
2538 | } | |
2539 | ||
94ec8f61 | 2540 | static void gen8_ggtt_clear_range(struct i915_address_space *vm, |
75c7b0b8 | 2541 | u64 start, u64 length) |
94ec8f61 | 2542 | { |
ce7fda2e | 2543 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
21c62a9d VS |
2544 | unsigned first_entry = start / I915_GTT_PAGE_SIZE; |
2545 | unsigned num_entries = length / I915_GTT_PAGE_SIZE; | |
894ccebe | 2546 | const gen8_pte_t scratch_pte = |
25dda4da | 2547 | gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); |
894ccebe | 2548 | gen8_pte_t __iomem *gtt_base = |
72e96d64 JL |
2549 | (gen8_pte_t __iomem *)ggtt->gsm + first_entry; |
2550 | const int max_entries = ggtt_total_entries(ggtt) - first_entry; | |
94ec8f61 BW |
2551 | int i; |
2552 | ||
2553 | if (WARN(num_entries > max_entries, | |
2554 | "First entry = %d; Num entries = %d (max=%d)\n", | |
2555 | first_entry, num_entries, max_entries)) | |
2556 | num_entries = max_entries; | |
2557 | ||
94ec8f61 BW |
2558 | for (i = 0; i < num_entries; i++) |
2559 | gen8_set_pte(&gtt_base[i], scratch_pte); |
94ec8f61 BW |
2560 | } |
2561 | ||
0ef34ad6 JB |
2562 | static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) |
2563 | { | |
2564 | struct drm_i915_private *dev_priv = vm->i915; | |
2565 | ||
2566 | /* | |
2567 | * Make sure the internal GAM fifo has been cleared of all GTT | |
2568 | * writes before exiting stop_machine(). This guarantees that | |
2569 | * any aperture accesses waiting to start in another process | |
2570 | * cannot back up behind the GTT writes causing a hang. | |
2571 | * The register can be any arbitrary GAM register. | |
2572 | */ | |
2573 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | |
2574 | } | |
2575 | ||
2576 | struct insert_page { | |
2577 | struct i915_address_space *vm; | |
2578 | dma_addr_t addr; | |
2579 | u64 offset; | |
2580 | enum i915_cache_level level; | |
2581 | }; | |
2582 | ||
2583 | static int bxt_vtd_ggtt_insert_page__cb(void *_arg) | |
2584 | { | |
2585 | struct insert_page *arg = _arg; | |
2586 | ||
2587 | gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); | |
2588 | bxt_vtd_ggtt_wa(arg->vm); | |
2589 | ||
2590 | return 0; | |
2591 | } | |
2592 | ||
2593 | static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, | |
2594 | dma_addr_t addr, | |
2595 | u64 offset, | |
2596 | enum i915_cache_level level, | |
2597 | u32 unused) | |
2598 | { | |
2599 | struct insert_page arg = { vm, addr, offset, level }; | |
2600 | ||
2601 | stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); | |
2602 | } | |
2603 | ||
2604 | struct insert_entries { | |
2605 | struct i915_address_space *vm; | |
4a234c5f | 2606 | struct i915_vma *vma; |
0ef34ad6 | 2607 | enum i915_cache_level level; |
250f8c81 | 2608 | u32 flags; |
0ef34ad6 JB |
2609 | }; |
2610 | ||
2611 | static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) | |
2612 | { | |
2613 | struct insert_entries *arg = _arg; | |
2614 | ||
250f8c81 | 2615 | gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); |
0ef34ad6 JB |
2616 | bxt_vtd_ggtt_wa(arg->vm); |
2617 | ||
2618 | return 0; | |
2619 | } | |
2620 | ||
2621 | static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, | |
4a234c5f | 2622 | struct i915_vma *vma, |
0ef34ad6 | 2623 | enum i915_cache_level level, |
250f8c81 | 2624 | u32 flags) |
0ef34ad6 | 2625 | { |
250f8c81 | 2626 | struct insert_entries arg = { vm, vma, level, flags }; |
0ef34ad6 JB |
2627 | |
2628 | stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); | |
2629 | } | |
2630 | ||
2631 | struct clear_range { | |
2632 | struct i915_address_space *vm; | |
2633 | u64 start; | |
2634 | u64 length; | |
2635 | }; | |
2636 | ||
2637 | static int bxt_vtd_ggtt_clear_range__cb(void *_arg) | |
2638 | { | |
2639 | struct clear_range *arg = _arg; | |
2640 | ||
2641 | gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); | |
2642 | bxt_vtd_ggtt_wa(arg->vm); | |
2643 | ||
2644 | return 0; | |
2645 | } | |
2646 | ||
2647 | static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm, | |
2648 | u64 start, | |
2649 | u64 length) | |
2650 | { | |
2651 | struct clear_range arg = { vm, start, length }; | |
2652 | ||
2653 | stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL); | |
2654 | } | |
2655 | ||
853ba5d2 | 2656 | static void gen6_ggtt_clear_range(struct i915_address_space *vm, |
75c7b0b8 | 2657 | u64 start, u64 length) |
7faf1ab2 | 2658 | { |
ce7fda2e | 2659 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
21c62a9d VS |
2660 | unsigned first_entry = start / I915_GTT_PAGE_SIZE; |
2661 | unsigned num_entries = length / I915_GTT_PAGE_SIZE; | |
07749ef3 | 2662 | gen6_pte_t scratch_pte, __iomem *gtt_base = |
72e96d64 JL |
2663 | (gen6_pte_t __iomem *)ggtt->gsm + first_entry; |
2664 | const int max_entries = ggtt_total_entries(ggtt) - first_entry; | |
7faf1ab2 SV |
2665 | int i; |
2666 | ||
2667 | if (WARN(num_entries > max_entries, | |
2668 | "First entry = %d; Num entries = %d (max=%d)\n", | |
2669 | first_entry, num_entries, max_entries)) | |
2670 | num_entries = max_entries; | |
2671 | ||
8bcdd0f7 | 2672 | scratch_pte = vm->pte_encode(vm->scratch_page.daddr, |
4fb84d99 | 2673 | I915_CACHE_LLC, 0); |
828c7908 | 2674 | |
7faf1ab2 SV |
2675 | for (i = 0; i < num_entries; i++) |
2676 | iowrite32(scratch_pte, &gtt_base[i]); |
7faf1ab2 SV |
2677 | } |
2678 | ||
d6473f56 CW |
2679 | static void i915_ggtt_insert_page(struct i915_address_space *vm, |
2680 | dma_addr_t addr, | |
75c7b0b8 | 2681 | u64 offset, |
d6473f56 CW |
2682 | enum i915_cache_level cache_level, |
2683 | u32 unused) | |
2684 | { | |
d6473f56 CW |
2685 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? |
2686 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; | |
d6473f56 CW |
2687 | |
2688 | intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); | |
d6473f56 CW |
2689 | } |
2690 | ||
d369d2d9 | 2691 | static void i915_ggtt_insert_entries(struct i915_address_space *vm, |
4a234c5f | 2692 | struct i915_vma *vma, |
75c7b0b8 CW |
2693 | enum i915_cache_level cache_level, |
2694 | u32 unused) | |
7faf1ab2 SV |
2695 | { |
2696 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? | |
2697 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; | |
2698 | ||
4a234c5f MA |
2699 | intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT, |
2700 | flags); | |
7faf1ab2 SV |
2701 | } |
2702 | ||
853ba5d2 | 2703 | static void i915_ggtt_clear_range(struct i915_address_space *vm, |
75c7b0b8 | 2704 | u64 start, u64 length) |
7faf1ab2 | 2705 | { |
2eedfc7d | 2706 | intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); |
7faf1ab2 SV |
2707 | } |
2708 | ||
70b9f6f8 SV |
2709 | static int ggtt_bind_vma(struct i915_vma *vma, |
2710 | enum i915_cache_level cache_level, | |
2711 | u32 flags) | |
0a878716 | 2712 | { |
49d73912 | 2713 | struct drm_i915_private *i915 = vma->vm->i915; |
0a878716 | 2714 | struct drm_i915_gem_object *obj = vma->obj; |
ba7a5741 | 2715 | u32 pte_flags; |
0a878716 | 2716 | |
250f8c81 | 2717 | /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ |
ba7a5741 | 2718 | pte_flags = 0; |
3e977ac6 | 2719 | if (i915_gem_object_is_readonly(obj)) |
0a878716 SV |
2720 | pte_flags |= PTE_READ_ONLY; |
2721 | ||
9c870d03 | 2722 | intel_runtime_pm_get(i915); |
4a234c5f | 2723 | vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); |
9c870d03 | 2724 | intel_runtime_pm_put(i915); |
0a878716 | 2725 | |
d9ec12f8 MA |
2726 | vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; |
2727 | ||
0a878716 SV |
2728 | /* |
2729 | * Without aliasing PPGTT there's no difference between | |
2730 | * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally | |
2731 | * upgrade to both bound if we bind either to avoid double-binding. | |
2732 | */ | |
3272db53 | 2733 | vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; |
0a878716 SV |
2734 | |
2735 | return 0; | |
2736 | } | |
2737 | ||
cbc4e9e6 CW |
2738 | static void ggtt_unbind_vma(struct i915_vma *vma) |
2739 | { | |
2740 | struct drm_i915_private *i915 = vma->vm->i915; | |
2741 | ||
2742 | intel_runtime_pm_get(i915); | |
2743 | vma->vm->clear_range(vma->vm, vma->node.start, vma->size); | |
2744 | intel_runtime_pm_put(i915); | |
2745 | } | |
2746 | ||
0a878716 SV |
2747 | static int aliasing_gtt_bind_vma(struct i915_vma *vma, |
2748 | enum i915_cache_level cache_level, | |
2749 | u32 flags) | |
d5bd1449 | 2750 | { |
49d73912 | 2751 | struct drm_i915_private *i915 = vma->vm->i915; |
321d178e | 2752 | u32 pte_flags; |
ff685975 | 2753 | int ret; |
70b9f6f8 | 2754 | |
24f3a8cf | 2755 | /* Currently applicable only to VLV */ |
321d178e | 2756 | pte_flags = 0; |
3e977ac6 | 2757 | if (i915_gem_object_is_readonly(vma->obj)) |
f329f5f6 | 2758 | pte_flags |= PTE_READ_ONLY; |
24f3a8cf | 2759 | |
ff685975 CW |
2760 | if (flags & I915_VMA_LOCAL_BIND) { |
2761 | struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; | |
2762 | ||
549fe88b | 2763 | if (!(vma->flags & I915_VMA_LOCAL_BIND)) { |
82ad6443 CW |
2764 | ret = appgtt->vm.allocate_va_range(&appgtt->vm, |
2765 | vma->node.start, | |
2766 | vma->size); | |
ff685975 | 2767 | if (ret) |
fa3f46af | 2768 | return ret; |
ff685975 CW |
2769 | } |
2770 | ||
82ad6443 CW |
2771 | appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level, |
2772 | pte_flags); | |
ff685975 CW |
2773 | } |
2774 | ||
3272db53 | 2775 | if (flags & I915_VMA_GLOBAL_BIND) { |
9c870d03 | 2776 | intel_runtime_pm_get(i915); |
4a234c5f | 2777 | vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); |
9c870d03 | 2778 | intel_runtime_pm_put(i915); |
6f65e29a | 2779 | } |
d5bd1449 | 2780 | |
70b9f6f8 | 2781 | return 0; |
d5bd1449 CW |
2782 | } |
2783 | ||
cbc4e9e6 | 2784 | static void aliasing_gtt_unbind_vma(struct i915_vma *vma) |
74163907 | 2785 | { |
49d73912 | 2786 | struct drm_i915_private *i915 = vma->vm->i915; |
6f65e29a | 2787 | |
9c870d03 CW |
2788 | if (vma->flags & I915_VMA_GLOBAL_BIND) { |
2789 | intel_runtime_pm_get(i915); | |
cbc4e9e6 | 2790 | vma->vm->clear_range(vma->vm, vma->node.start, vma->size); |
9c870d03 CW |
2791 | intel_runtime_pm_put(i915); |
2792 | } | |
06615ee5 | 2793 | |
cbc4e9e6 | 2794 | if (vma->flags & I915_VMA_LOCAL_BIND) { |
82ad6443 | 2795 | struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm; |
cbc4e9e6 CW |
2796 | |
2797 | vm->clear_range(vm, vma->node.start, vma->size); | |
2798 | } | |
74163907 SV |
2799 | } |
2800 | ||
03ac84f1 CW |
2801 | void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, |
2802 | struct sg_table *pages) | |
7c2e6fdf | 2803 | { |
52a05c30 DW |
2804 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
2805 | struct device *kdev = &dev_priv->drm.pdev->dev; | |
307dc25b | 2806 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
5c042287 | 2807 | |
307dc25b | 2808 | if (unlikely(ggtt->do_idle_maps)) { |
ec625fb9 | 2809 | if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) { |
307dc25b CW |
2810 | DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); |
2811 | /* Wait a bit, in hopes it avoids the hang */ | |
2812 | udelay(10); | |
2813 | } | |
2814 | } | |
5c042287 | 2815 | |
03ac84f1 | 2816 | dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL); |
7c2e6fdf | 2817 | } |
644ec02b | 2818 | |
fa3f46af MA |
2819 | static int ggtt_set_pages(struct i915_vma *vma) |
2820 | { | |
2821 | int ret; | |
2822 | ||
2823 | GEM_BUG_ON(vma->pages); | |
2824 | ||
2825 | ret = i915_get_ggtt_vma_pages(vma); | |
2826 | if (ret) | |
2827 | return ret; | |
2828 | ||
7464284b MA |
2829 | vma->page_sizes = vma->obj->mm.page_sizes; |
2830 | ||
fa3f46af MA |
2831 | return 0; |
2832 | } | |
2833 | ||
45b186f1 | 2834 | static void i915_gtt_color_adjust(const struct drm_mm_node *node, |
42d6ab48 | 2835 | unsigned long color, |
440fd528 TR |
2836 | u64 *start, |
2837 | u64 *end) | |
42d6ab48 | 2838 | { |
a6508ded | 2839 | if (node->allocated && node->color != color) |
f51455d4 | 2840 | *start += I915_GTT_PAGE_SIZE; |
42d6ab48 | 2841 | |
a6508ded CW |
2842 | /* Also leave a space between the unallocated reserved node after the |
2843 | * GTT and any objects within the GTT, i.e. we use the color adjustment | |
2844 | * to insert a guard page to prevent prefetches crossing over the | |
2845 | * GTT boundary. | |
2846 | */ | |
b44f97fd | 2847 | node = list_next_entry(node, node_list); |
a6508ded | 2848 | if (node->color != color) |
f51455d4 | 2849 | *end -= I915_GTT_PAGE_SIZE; |
42d6ab48 | 2850 | } |
fbe5d36e | 2851 | |
6cde9a02 CW |
2852 | int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915) |
2853 | { | |
2854 | struct i915_ggtt *ggtt = &i915->ggtt; | |
2855 | struct i915_hw_ppgtt *ppgtt; | |
2856 | int err; | |
2857 | ||
63fd659f | 2858 | ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM)); |
1188bc66 CW |
2859 | if (IS_ERR(ppgtt)) |
2860 | return PTR_ERR(ppgtt); | |
6cde9a02 | 2861 | |
a0fbacb5 | 2862 | if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { |
e565ceb0 CW |
2863 | err = -ENODEV; |
2864 | goto err_ppgtt; | |
2865 | } | |
2866 | ||
549fe88b CW |
2867 | /* |
2868 | * Note we only pre-allocate as far as the end of the global | |
2869 | * GTT. On 48b / 4-level page-tables, the difference is very, | |
2870 | * very significant! We have to preallocate as GVT/vgpu does | |
2871 | * not like the page directory disappearing. | |
2872 | */ | |
2873 | err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total); | |
2874 | if (err) | |
2875 | goto err_ppgtt; | |
6cde9a02 | 2876 | |
6cde9a02 | 2877 | i915->mm.aliasing_ppgtt = ppgtt; |
cbc4e9e6 | 2878 | |
93f2cde2 CW |
2879 | GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); |
2880 | ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; | |
6cde9a02 | 2881 | |
93f2cde2 CW |
2882 | GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); |
2883 | ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; | |
cbc4e9e6 | 2884 | |
6cde9a02 CW |
2885 | return 0; |
2886 | ||
6cde9a02 | 2887 | err_ppgtt: |
1188bc66 | 2888 | i915_ppgtt_put(ppgtt); |
6cde9a02 CW |
2889 | return err; |
2890 | } | |
2891 | ||
2892 | void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915) | |
2893 | { | |
2894 | struct i915_ggtt *ggtt = &i915->ggtt; | |
2895 | struct i915_hw_ppgtt *ppgtt; | |
2896 | ||
2897 | ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt); | |
2898 | if (!ppgtt) | |
2899 | return; | |
2900 | ||
1188bc66 | 2901 | i915_ppgtt_put(ppgtt); |
6cde9a02 | 2902 | |
93f2cde2 CW |
2903 | ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; |
2904 | ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; | |
6cde9a02 CW |
2905 | } |
2906 | ||
f6b9d5ca | 2907 | int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) |
644ec02b | 2908 | { |
e78891ca BW |
2909 | /* Let GEM Manage all of the aperture. |
2910 | * | |
2911 | * However, leave one page at the end still bound to the scratch page. | |
2912 | * There are a number of places where the hardware apparently prefetches | |
2913 | * past the end of the object, and we've seen multiple hangs with the | |
2914 | * GPU head pointer stuck in a batchbuffer bound at the last page of the | |
2915 | * aperture. One page should be enough to keep any prefetching inside | |
2916 | * of the aperture. | |
2917 | */ | |
72e96d64 | 2918 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
ed2f3452 | 2919 | unsigned long hole_start, hole_end; |
f6b9d5ca | 2920 | struct drm_mm_node *entry; |
fa76da34 | 2921 | int ret; |
644ec02b | 2922 | |
dd18cedf JB |
2923 | /* |
2924 | * GuC requires all resources that we're sharing with it to be placed in | |
2925 | * non-WOPCM memory. If GuC is not present or not in use we still need a | |
2926 | * small bias as ring wraparound at offset 0 sometimes hangs. No idea | |
2927 | * why. | |
2928 | */ | |
2929 | ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, | |
2930 | intel_guc_reserved_gtt_size(&dev_priv->guc)); | |
2931 | ||
b02d22a3 ZW |
2932 | ret = intel_vgt_balloon(dev_priv); |
2933 | if (ret) | |
2934 | return ret; | |
5dda8fa3 | 2935 | |
95374d75 | 2936 | /* Reserve a mappable slot for our lockless error capture */ |
82ad6443 | 2937 | ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture, |
4e64e553 CW |
2938 | PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, |
2939 | 0, ggtt->mappable_end, | |
2940 | DRM_MM_INSERT_LOW); | |
95374d75 CW |
2941 | if (ret) |
2942 | return ret; | |
2943 | ||
ed2f3452 | 2944 | /* Clear any non-preallocated blocks */ |
82ad6443 | 2945 | drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { |
ed2f3452 CW |
2946 | DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", |
2947 | hole_start, hole_end); | |
82ad6443 CW |
2948 | ggtt->vm.clear_range(&ggtt->vm, hole_start, |
2949 | hole_end - hole_start); | |
ed2f3452 CW |
2950 | } |
2951 | ||
2952 | /* And finally clear the reserved guard page */ | |
82ad6443 | 2953 | ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); |
6c5566a8 | 2954 | |
97d6d7ab | 2955 | if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { |
6cde9a02 | 2956 | ret = i915_gem_init_aliasing_ppgtt(dev_priv); |
95374d75 | 2957 | if (ret) |
6cde9a02 | 2958 | goto err; |
fa76da34 SV |
2959 | } |
2960 | ||
6c5566a8 | 2961 | return 0; |
95374d75 | 2962 | |
95374d75 CW |
2963 | err: |
2964 | drm_mm_remove_node(&ggtt->error_capture); | |
2965 | return ret; | |
e76e9aeb BW |
2966 | } |
2967 | ||
d85489d3 JL |
2968 | /** |
2969 | * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization | |
97d6d7ab | 2970 | * @dev_priv: i915 device |
d85489d3 | 2971 | */ |
97d6d7ab | 2972 | void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) |
90d0a0e8 | 2973 | { |
72e96d64 | 2974 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
94d4a2a9 | 2975 | struct i915_vma *vma, *vn; |
66df1014 | 2976 | struct pagevec *pvec; |
94d4a2a9 | 2977 | |
82ad6443 | 2978 | ggtt->vm.closed = true; |
94d4a2a9 CW |
2979 | |
2980 | mutex_lock(&dev_priv->drm.struct_mutex); | |
eed28903 CW |
2981 | i915_gem_fini_aliasing_ppgtt(dev_priv); |
2982 | ||
82ad6443 CW |
2983 | GEM_BUG_ON(!list_empty(&ggtt->vm.active_list)); |
2984 | list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) | |
94d4a2a9 | 2985 | WARN_ON(i915_vma_unbind(vma)); |
1188bc66 | 2986 | |
95374d75 CW |
2987 | if (drm_mm_node_allocated(&ggtt->error_capture)) |
2988 | drm_mm_remove_node(&ggtt->error_capture); | |
2989 | ||
82ad6443 | 2990 | if (drm_mm_initialized(&ggtt->vm.mm)) { |
b02d22a3 | 2991 | intel_vgt_deballoon(dev_priv); |
82ad6443 | 2992 | i915_address_space_fini(&ggtt->vm); |
90d0a0e8 SV |
2993 | } |
2994 | ||
82ad6443 | 2995 | ggtt->vm.cleanup(&ggtt->vm); |
66df1014 | 2996 | |
63fd659f | 2997 | pvec = &dev_priv->mm.wc_stash.pvec; |
66df1014 CW |
2998 | if (pvec->nr) { |
2999 | set_pages_array_wb(pvec->pages, pvec->nr); | |
3000 | __pagevec_release(pvec); | |
3001 | } | |
3002 | ||
1188bc66 | 3003 | mutex_unlock(&dev_priv->drm.struct_mutex); |
f6b9d5ca CW |
3004 | |
3005 | arch_phys_wc_del(ggtt->mtrr); | |
73ebd503 | 3006 | io_mapping_fini(&ggtt->iomap); |
eed28903 | 3007 | |
8c01903c | 3008 | i915_gem_cleanup_stolen(dev_priv); |
90d0a0e8 | 3009 | } |
70e32544 | 3010 | |
2c642b07 | 3011 | static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) |
e76e9aeb BW |
3012 | { |
3013 | snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; | |
3014 | snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; | |
3015 | return snb_gmch_ctl << 20; | |
3016 | } | |
3017 | ||
2c642b07 | 3018 | static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) |
9459d252 BW |
3019 | { |
3020 | bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; | |
3021 | bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; | |
3022 | if (bdw_gmch_ctl) | |
3023 | bdw_gmch_ctl = 1 << bdw_gmch_ctl; | |
562d55d9 BW |
3024 | |
3025 | #ifdef CONFIG_X86_32 | |
f6e35cda | 3026 | /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */ |
562d55d9 BW |
3027 | if (bdw_gmch_ctl > 4) |
3028 | bdw_gmch_ctl = 4; | |
3029 | #endif | |
3030 | ||
9459d252 BW |
3031 | return bdw_gmch_ctl << 20; |
3032 | } | |
3033 | ||
2c642b07 | 3034 | static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) |
d7f25f23 DL |
3035 | { |
3036 | gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; | |
3037 | gmch_ctrl &= SNB_GMCH_GGMS_MASK; | |
3038 | ||
3039 | if (gmch_ctrl) | |
3040 | return 1 << (20 + gmch_ctrl); | |
3041 | ||
3042 | return 0; | |
3043 | } | |
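
/*
 * All three decoders above return the number of bytes of PTE space in the
 * GSM, not the size of the GGTT itself; the probe code converts one into
 * the other. Worked example with illustrative register values: a BDW GGMS
 * field of 3 decodes to 1 << 3 = 8 MiB of PTEs, and with 8-byte PTEs and
 * 4 KiB pages that maps
 *
 *	8 MiB / sizeof(gen8_pte_t) * I915_GTT_PAGE_SIZE = 4 GiB
 *
 * of GGTT address space (see the ggtt->vm.total calculation in
 * gen8_gmch_probe()).
 */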
3044 | ||
34c998b4 | 3045 | static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) |
63340133 | 3046 | { |
82ad6443 | 3047 | struct drm_i915_private *dev_priv = ggtt->vm.i915; |
49d73912 | 3048 | struct pci_dev *pdev = dev_priv->drm.pdev; |
34c998b4 | 3049 | phys_addr_t phys_addr; |
8bcdd0f7 | 3050 | int ret; |
63340133 BW |
3051 | |
3052 | /* For Modern GENs the PTEs and register space are split in the BAR */ | |
34c998b4 | 3053 | phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; |
63340133 | 3054 | |
2a073f89 | 3055 | /* |
385db982 RV |
3056 | * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range |
3057 | * will be dropped. For WC mappings in general we have 64 byte burst | |
3058 | * writes when the WC buffer is flushed, so we can't use it, but have to | |
2a073f89 ID |
3059 | * resort to an uncached mapping. The WC issue is easily caught by the |
3060 | * readback check when writing GTT PTE entries. | |
3061 | */ | |
385db982 | 3062 | if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) |
34c998b4 | 3063 | ggtt->gsm = ioremap_nocache(phys_addr, size); |
2a073f89 | 3064 | else |
34c998b4 | 3065 | ggtt->gsm = ioremap_wc(phys_addr, size); |
72e96d64 | 3066 | if (!ggtt->gsm) { |
34c998b4 | 3067 | DRM_ERROR("Failed to map the ggtt page table\n"); |
63340133 BW |
3068 | return -ENOMEM; |
3069 | } | |
3070 | ||
82ad6443 | 3071 | ret = setup_scratch_page(&ggtt->vm, GFP_DMA32); |
8bcdd0f7 | 3072 | if (ret) { |
63340133 BW |
3073 | DRM_ERROR("Scratch setup failed\n"); |
3074 | /* iounmap will also get called at remove, but meh */ | |
72e96d64 | 3075 | iounmap(ggtt->gsm); |
8bcdd0f7 | 3076 | return ret; |
63340133 BW |
3077 | } |
3078 | ||
4ad2af1e | 3079 | return 0; |
63340133 BW |
3080 | } |
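
/*
 * The "split BAR" above means the register/GTT BAR (BAR 0) is divided
 * evenly between the register aperture and the GSM that holds the GGTT
 * PTEs. Illustrative layout for a 16 MiB BAR:
 *
 *	[BAR0 + 0M,  BAR0 + 8M)   MMIO registers
 *	[BAR0 + 8M,  BAR0 + 16M)  GGTT PTEs (mapped above as ggtt->gsm)
 *
 * which is exactly the phys_addr = start + len / 2 computed at the top of
 * ggtt_probe_common().
 */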
3081 | ||
4395890a ZW |
3082 | static struct intel_ppat_entry * |
3083 | __alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value) | |
4e34935f | 3084 | { |
4395890a ZW |
3085 | struct intel_ppat_entry *entry = &ppat->entries[index]; |
3086 | ||
3087 | GEM_BUG_ON(index >= ppat->max_entries); | |
3088 | GEM_BUG_ON(test_bit(index, ppat->used)); | |
3089 | ||
3090 | entry->ppat = ppat; | |
3091 | entry->value = value; | |
3092 | kref_init(&entry->ref); | |
3093 | set_bit(index, ppat->used); | |
3094 | set_bit(index, ppat->dirty); | |
3095 | ||
3096 | return entry; | |
3097 | } | |
3098 | ||
3099 | static void __free_ppat_entry(struct intel_ppat_entry *entry) | |
3100 | { | |
3101 | struct intel_ppat *ppat = entry->ppat; | |
3102 | unsigned int index = entry - ppat->entries; | |
3103 | ||
3104 | GEM_BUG_ON(index >= ppat->max_entries); | |
3105 | GEM_BUG_ON(!test_bit(index, ppat->used)); | |
3106 | ||
3107 | entry->value = ppat->clear_value; | |
3108 | clear_bit(index, ppat->used); | |
3109 | set_bit(index, ppat->dirty); | |
3110 | } | |
3111 | ||
3112 | /** | |
3113 | * intel_ppat_get - get a usable PPAT entry | |
3114 | * @i915: i915 device instance | |
3115 | * @value: the PPAT value required by the caller | |
3116 | * | |
3117 | * The function searches for an existing PPAT entry that matches the | 
3118 | * required value. On a perfect match, the existing entry is reused. On | 
3119 | * a partial match, it checks whether a free PPAT index is available; if | 
3120 | * so, a new PPAT index is allocated for the required value and the HW | 
3121 | * is updated. If no index is free, the best partially matching entry | 
3122 | * is used. | 
3123 | */ | |
3124 | const struct intel_ppat_entry * | |
3125 | intel_ppat_get(struct drm_i915_private *i915, u8 value) | |
3126 | { | |
3127 | struct intel_ppat *ppat = &i915->ppat; | |
4667c2d5 | 3128 | struct intel_ppat_entry *entry = NULL; |
4395890a ZW |
3129 | unsigned int scanned, best_score; |
3130 | int i; | |
3131 | ||
3132 | GEM_BUG_ON(!ppat->max_entries); | |
3133 | ||
3134 | scanned = best_score = 0; | |
3135 | for_each_set_bit(i, ppat->used, ppat->max_entries) { | |
3136 | unsigned int score; | |
3137 | ||
3138 | score = ppat->match(ppat->entries[i].value, value); | |
3139 | if (score > best_score) { | |
3140 | entry = &ppat->entries[i]; | |
3141 | if (score == INTEL_PPAT_PERFECT_MATCH) { | |
3142 | kref_get(&entry->ref); | |
3143 | return entry; | |
3144 | } | |
3145 | best_score = score; | |
3146 | } | |
3147 | scanned++; | |
3148 | } | |
3149 | ||
3150 | if (scanned == ppat->max_entries) { | |
4667c2d5 | 3151 | if (!entry) |
4395890a ZW |
3152 | return ERR_PTR(-ENOSPC); |
3153 | ||
3154 | kref_get(&entry->ref); | |
3155 | return entry; | |
3156 | } | |
3157 | ||
3158 | i = find_first_zero_bit(ppat->used, ppat->max_entries); | |
3159 | entry = __alloc_ppat_entry(ppat, i, value); | |
3160 | ppat->update_hw(i915); | |
3161 | return entry; | |
3162 | } | |
3163 | ||
3164 | static void release_ppat(struct kref *kref) | |
3165 | { | |
3166 | struct intel_ppat_entry *entry = | |
3167 | container_of(kref, struct intel_ppat_entry, ref); | |
3168 | struct drm_i915_private *i915 = entry->ppat->i915; | |
3169 | ||
3170 | __free_ppat_entry(entry); | |
3171 | entry->ppat->update_hw(i915); | |
3172 | } | |
3173 | ||
3174 | /** | |
3175 | * intel_ppat_put - put back a PPAT entry obtained from intel_ppat_get() | 
3176 | * @entry: an intel PPAT entry | |
3177 | * | |
3178 | * Put back a PPAT entry obtained from intel_ppat_get(). If the PPAT index of | 
3179 | * the entry was dynamically allocated, its reference count is decreased. Once | 
3180 | * the reference count drops to zero, the PPAT index becomes free again. | 
3181 | */ | |
3182 | void intel_ppat_put(const struct intel_ppat_entry *entry) | |
3183 | { | |
3184 | struct intel_ppat *ppat = entry->ppat; | |
3185 | unsigned int index = entry - ppat->entries; | |
3186 | ||
3187 | GEM_BUG_ON(!ppat->max_entries); | |
3188 | ||
3189 | kref_put(&ppat->entries[index].ref, release_ppat); | |
3190 | } | |
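
/*
 * A sketch of the intended usage pattern for the two functions above
 * (illustrative only; error handling and surrounding context elided):
 *
 *	const struct intel_ppat_entry *entry;
 *
 *	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);
 *	// program PTEs/surface state with the PAT index,
 *	// i.e. entry - i915->ppat.entries
 *	intel_ppat_put(entry);
 *
 * The hardware PAT index is simply the entry's position within the
 * ppat->entries[] array, as used by __free_ppat_entry() above.
 */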
3191 | ||
3192 | static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv) | |
3193 | { | |
3194 | struct intel_ppat *ppat = &dev_priv->ppat; | |
3195 | int i; | |
3196 | ||
3197 | for_each_set_bit(i, ppat->dirty, ppat->max_entries) { | |
3198 | I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value); | |
3199 | clear_bit(i, ppat->dirty); | |
3200 | } | |
3201 | } | |
3202 | ||
3203 | static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv) | |
3204 | { | |
3205 | struct intel_ppat *ppat = &dev_priv->ppat; | |
3206 | u64 pat = 0; | |
3207 | int i; | |
3208 | ||
3209 | for (i = 0; i < ppat->max_entries; i++) | |
3210 | pat |= GEN8_PPAT(i, ppat->entries[i].value); | |
3211 | ||
3212 | bitmap_clear(ppat->dirty, 0, ppat->max_entries); | |
3213 | ||
3214 | I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); | |
3215 | I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); | |
3216 | } | |
3217 | ||
3218 | static unsigned int bdw_private_pat_match(u8 src, u8 dst) | |
3219 | { | |
3220 | unsigned int score = 0; | |
3221 | enum { | |
3222 | AGE_MATCH = BIT(0), | |
3223 | TC_MATCH = BIT(1), | |
3224 | CA_MATCH = BIT(2), | |
3225 | }; | |
3226 | ||
3227 | /* Cache attribute has to be matched. */ | |
1298d51c | 3228 | if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst)) |
4395890a ZW |
3229 | return 0; |
3230 | ||
3231 | score |= CA_MATCH; | |
3232 | ||
3233 | if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst)) | |
3234 | score |= TC_MATCH; | |
3235 | ||
3236 | if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst)) | |
3237 | score |= AGE_MATCH; | |
3238 | ||
3239 | if (score == (AGE_MATCH | TC_MATCH | CA_MATCH)) | |
3240 | return INTEL_PPAT_PERFECT_MATCH; | |
3241 | ||
3242 | return score; | |
3243 | } | |
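
/*
 * Matching above is hierarchical: a differing cache attribute (CA)
 * disqualifies the entry outright, while target-cache and age only add to
 * the score. Illustrative example: comparing GEN8_PPAT_WB | GEN8_PPAT_LLC
 * against GEN8_PPAT_WB | GEN8_PPAT_LLC | GEN8_PPAT_AGE(3) scores
 * CA_MATCH | TC_MATCH, which intel_ppat_get() prefers over an entry that
 * matches on the cache attribute alone.
 */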
3244 | ||
3245 | static unsigned int chv_private_pat_match(u8 src, u8 dst) | |
3246 | { | |
3247 | return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ? | |
3248 | INTEL_PPAT_PERFECT_MATCH : 0; | |
3249 | } | |
3250 | ||
3251 | static void cnl_setup_private_ppat(struct intel_ppat *ppat) | |
3252 | { | |
3253 | ppat->max_entries = 8; | |
3254 | ppat->update_hw = cnl_private_pat_update_hw; | |
3255 | ppat->match = bdw_private_pat_match; | |
3256 | ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); | |
3257 | ||
4395890a ZW |
3258 | __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); |
3259 | __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); | |
3260 | __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); | |
3261 | __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); | |
3262 | __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); | |
3263 | __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); | |
3264 | __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); | |
3265 | __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); | |
4e34935f RV |
3266 | } |
3267 | ||
fbe5d36e BW |
3268 | /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability |
3269 | * bits. When using advanced contexts each context stores its own PAT, but | |
3270 | * writing this data shouldn't be harmful even in those cases. */ | |
4395890a | 3271 | static void bdw_setup_private_ppat(struct intel_ppat *ppat) |
fbe5d36e | 3272 | { |
4395890a ZW |
3273 | ppat->max_entries = 8; |
3274 | ppat->update_hw = bdw_private_pat_update_hw; | |
3275 | ppat->match = bdw_private_pat_match; | |
3276 | ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); | |
fbe5d36e | 3277 | |
4395890a | 3278 | if (!USES_PPGTT(ppat->i915)) { |
d6a8b72e RV |
3279 | /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, |
3280 | * so RTL will always use the value corresponding to | |
3281 | * pat_sel = 000". | |
3282 | * So let's disable cache for GGTT to avoid screen corruptions. | |
3283 | * MOCS still can be used though. | |
3284 | * - System agent ggtt writes (i.e. cpu gtt mmaps) already work | |
3285 | * before this patch, i.e. the same uncached + snooping access | |
3286 | * like on gen6/7 seems to be in effect. | |
3287 | * - So this just fixes blitter/render access. Again it looks | |
3288 | * like it's not just uncached access, but uncached + snooping. | |
3289 | * So we can still hold onto all our assumptions wrt cpu | |
3290 | * clflushing on LLC machines. | |
3291 | */ | |
4395890a ZW |
3292 | __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC); |
3293 | return; | |
3294 | } | |
d6a8b72e | 3295 | |
4395890a ZW |
3296 | __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */ |
3297 | __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */ | |
3298 | __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */ | |
3299 | __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */ | |
3300 | __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); | |
3301 | __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); | |
3302 | __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); | |
3303 | __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); | |
fbe5d36e BW |
3304 | } |
3305 | ||
4395890a | 3306 | static void chv_setup_private_ppat(struct intel_ppat *ppat) |
ee0ce478 | 3307 | { |
4395890a ZW |
3308 | ppat->max_entries = 8; |
3309 | ppat->update_hw = bdw_private_pat_update_hw; | |
3310 | ppat->match = chv_private_pat_match; | |
3311 | ppat->clear_value = CHV_PPAT_SNOOP; | |
ee0ce478 VS |
3312 | |
3313 | /* | |
3314 | * Map WB on BDW to snooped on CHV. | |
3315 | * | |
3316 | * Only the snoop bit has meaning for CHV, the rest is | |
3317 | * ignored. | |
3318 | * | |
cf3d262e VS |
3319 | * The hardware will never snoop for certain types of accesses: |
3320 | * - CPU GTT (GMADR->GGTT->no snoop->memory) | |
3321 | * - PPGTT page tables | |
3322 | * - some other special cycles | |
3323 | * | |
3324 | * As with BDW, we also need to consider the following for GT accesses: | |
3325 | * "For GGTT, there is NO pat_sel[2:0] from the entry, | |
3326 | * so RTL will always use the value corresponding to | |
3327 | * pat_sel = 000". | |
3328 | * Which means we must set the snoop bit in PAT entry 0 | |
3329 | * in order to keep the global status page working. | |
ee0ce478 | 3330 | */ |
ee0ce478 | 3331 | |
4395890a ZW |
3332 | __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP); |
3333 | __alloc_ppat_entry(ppat, 1, 0); | |
3334 | __alloc_ppat_entry(ppat, 2, 0); | |
3335 | __alloc_ppat_entry(ppat, 3, 0); | |
3336 | __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP); | |
3337 | __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP); | |
3338 | __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP); | |
3339 | __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP); | |
ee0ce478 VS |
3340 | } |
3341 | ||
34c998b4 CW |
3342 | static void gen6_gmch_remove(struct i915_address_space *vm) |
3343 | { | |
3344 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); | |
3345 | ||
3346 | iounmap(ggtt->gsm); | |
8448661d | 3347 | cleanup_scratch_page(vm); |
34c998b4 CW |
3348 | } |
3349 | ||
36e16c49 ZW |
3350 | static void setup_private_pat(struct drm_i915_private *dev_priv) |
3351 | { | |
4395890a ZW |
3352 | struct intel_ppat *ppat = &dev_priv->ppat; |
3353 | int i; | |
3354 | ||
3355 | ppat->i915 = dev_priv; | |
3356 | ||
36e16c49 | 3357 | if (INTEL_GEN(dev_priv) >= 10) |
4395890a | 3358 | cnl_setup_private_ppat(ppat); |
36e16c49 | 3359 | else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) |
4395890a | 3360 | chv_setup_private_ppat(ppat); |
36e16c49 | 3361 | else |
4395890a ZW |
3362 | bdw_setup_private_ppat(ppat); |
3363 | ||
3364 | GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES); | |
3365 | ||
3366 | for_each_clear_bit(i, ppat->used, ppat->max_entries) { | |
3367 | ppat->entries[i].value = ppat->clear_value; | |
3368 | ppat->entries[i].ppat = ppat; | |
3369 | set_bit(i, ppat->dirty); | |
3370 | } | |
3371 | ||
3372 | ppat->update_hw(dev_priv); | |
36e16c49 ZW |
3373 | } |
3374 | ||
d507d735 | 3375 | static int gen8_gmch_probe(struct i915_ggtt *ggtt) |
63340133 | 3376 | { |
82ad6443 | 3377 | struct drm_i915_private *dev_priv = ggtt->vm.i915; |
97d6d7ab | 3378 | struct pci_dev *pdev = dev_priv->drm.pdev; |
34c998b4 | 3379 | unsigned int size; |
63340133 | 3380 | u16 snb_gmch_ctl; |
4519290a | 3381 | int err; |
63340133 BW |
3382 | |
3383 | /* TODO: We're not aware of mappable constraints on gen8 yet */ | |
73ebd503 MA |
3384 | ggtt->gmadr = |
3385 | (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), | |
3386 | pci_resource_len(pdev, 2)); | |
3387 | ggtt->mappable_end = resource_size(&ggtt->gmadr); | |
63340133 | 3388 | |
4519290a ID |
3389 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); |
3390 | if (!err) | |
3391 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); | |
3392 | if (err) | |
3393 | DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); | |
63340133 | 3394 | |
97d6d7ab | 3395 | pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
c258f91d | 3396 | if (IS_CHERRYVIEW(dev_priv)) |
34c998b4 | 3397 | size = chv_get_total_gtt_size(snb_gmch_ctl); |
c258f91d | 3398 | else |
34c998b4 | 3399 | size = gen8_get_total_gtt_size(snb_gmch_ctl); |
63340133 | 3400 | |
21c62a9d | 3401 | ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; |
82ad6443 | 3402 | ggtt->vm.cleanup = gen6_gmch_remove; |
82ad6443 CW |
3403 | ggtt->vm.insert_page = gen8_ggtt_insert_page; |
3404 | ggtt->vm.clear_range = nop_clear_range; | |
48f112fe | 3405 | if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) |
82ad6443 | 3406 | ggtt->vm.clear_range = gen8_ggtt_clear_range; |
f7770bfd | 3407 | |
82ad6443 | 3408 | ggtt->vm.insert_entries = gen8_ggtt_insert_entries; |
f7770bfd | 3409 | |
0ef34ad6 JB |
3410 | /* Serialize GTT updates with aperture access on BXT if VT-d is on. */ |
3411 | if (intel_ggtt_update_needs_vtd_wa(dev_priv)) { | |
82ad6443 CW |
3412 | ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; |
3413 | ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; | |
3414 | if (ggtt->vm.clear_range != nop_clear_range) | |
3415 | ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; | |
8830f26b CW |
3416 | |
3417 | /* Prevent recursively calling stop_machine() and deadlocks. */ | |
3418 | dev_info(dev_priv->drm.dev, | |
3419 | "Disabling error capture for VT-d workaround\n"); | |
3420 | i915_disable_error_state(dev_priv, -ENODEV); | |
0ef34ad6 JB |
3421 | } |
3422 | ||
7c3f86b6 CW |
3423 | ggtt->invalidate = gen6_ggtt_invalidate; |
3424 | ||
93f2cde2 CW |
3425 | ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; |
3426 | ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; | |
3427 | ggtt->vm.vma_ops.set_pages = ggtt_set_pages; | |
3428 | ggtt->vm.vma_ops.clear_pages = clear_pages; | |
3429 | ||
36e16c49 ZW |
3430 | setup_private_pat(dev_priv); |
3431 | ||
34c998b4 | 3432 | return ggtt_probe_common(ggtt, size); |
63340133 BW |
3433 | } |
3434 | ||
d507d735 | 3435 | static int gen6_gmch_probe(struct i915_ggtt *ggtt) |
e76e9aeb | 3436 | { |
82ad6443 | 3437 | struct drm_i915_private *dev_priv = ggtt->vm.i915; |
97d6d7ab | 3438 | struct pci_dev *pdev = dev_priv->drm.pdev; |
34c998b4 | 3439 | unsigned int size; |
e76e9aeb | 3440 | u16 snb_gmch_ctl; |
4519290a | 3441 | int err; |
e76e9aeb | 3442 | |
73ebd503 MA |
3443 | ggtt->gmadr = |
3444 | (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), | |
3445 | pci_resource_len(pdev, 2)); | |
3446 | ggtt->mappable_end = resource_size(&ggtt->gmadr); | |
41907ddc | 3447 | |
baa09f5f BW |
3448 | /* 64/512MB is the current min/max we actually know of, but this is just |
3449 | * a coarse sanity check. | |
e76e9aeb | 3450 | */ |
34c998b4 | 3451 | if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { |
b7128ef1 | 3452 | DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end); |
baa09f5f | 3453 | return -ENXIO; |
e76e9aeb BW |
3454 | } |
3455 | ||
4519290a ID |
3456 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); |
3457 | if (!err) | |
3458 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); | |
3459 | if (err) | |
3460 | DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err); | |
97d6d7ab | 3461 | pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
e76e9aeb | 3462 | |
34c998b4 | 3463 | size = gen6_get_total_gtt_size(snb_gmch_ctl); |
21c62a9d | 3464 | ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; |
e76e9aeb | 3465 | |
82ad6443 CW |
3466 | ggtt->vm.clear_range = gen6_ggtt_clear_range; |
3467 | ggtt->vm.insert_page = gen6_ggtt_insert_page; | |
3468 | ggtt->vm.insert_entries = gen6_ggtt_insert_entries; | |
82ad6443 | 3469 | ggtt->vm.cleanup = gen6_gmch_remove; |
34c998b4 | 3470 | |
7c3f86b6 CW |
3471 | ggtt->invalidate = gen6_ggtt_invalidate; |
3472 | ||
34c998b4 | 3473 | if (HAS_EDRAM(dev_priv)) |
82ad6443 | 3474 | ggtt->vm.pte_encode = iris_pte_encode; |
34c998b4 | 3475 | else if (IS_HASWELL(dev_priv)) |
82ad6443 | 3476 | ggtt->vm.pte_encode = hsw_pte_encode; |
34c998b4 | 3477 | else if (IS_VALLEYVIEW(dev_priv)) |
82ad6443 | 3478 | ggtt->vm.pte_encode = byt_pte_encode; |
34c998b4 | 3479 | else if (INTEL_GEN(dev_priv) >= 7) |
82ad6443 | 3480 | ggtt->vm.pte_encode = ivb_pte_encode; |
34c998b4 | 3481 | else |
82ad6443 | 3482 | ggtt->vm.pte_encode = snb_pte_encode; |
7faf1ab2 | 3483 | |
93f2cde2 CW |
3484 | ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; |
3485 | ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; | |
3486 | ggtt->vm.vma_ops.set_pages = ggtt_set_pages; | |
3487 | ggtt->vm.vma_ops.clear_pages = clear_pages; | |
3488 | ||
34c998b4 | 3489 | return ggtt_probe_common(ggtt, size); |
e76e9aeb BW |
3490 | } |
3491 | ||
34c998b4 | 3492 | static void i915_gmch_remove(struct i915_address_space *vm) |
e76e9aeb | 3493 | { |
34c998b4 | 3494 | intel_gmch_remove(); |
644ec02b | 3495 | } |
baa09f5f | 3496 | |
d507d735 | 3497 | static int i915_gmch_probe(struct i915_ggtt *ggtt) |
baa09f5f | 3498 | { |
82ad6443 | 3499 | struct drm_i915_private *dev_priv = ggtt->vm.i915; |
73ebd503 | 3500 | phys_addr_t gmadr_base; |
baa09f5f BW |
3501 | int ret; |
3502 | ||
91c8a326 | 3503 | ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); |
baa09f5f BW |
3504 | if (!ret) { |
3505 | DRM_ERROR("failed to set up gmch\n"); | |
3506 | return -EIO; | |
3507 | } | |
3508 | ||
82ad6443 | 3509 | intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); |
baa09f5f | 3510 | |
73ebd503 MA |
3511 | ggtt->gmadr = |
3512 | (struct resource) DEFINE_RES_MEM(gmadr_base, | |
3513 | ggtt->mappable_end); | |
3514 | ||
97d6d7ab | 3515 | ggtt->do_idle_maps = needs_idle_maps(dev_priv); |
82ad6443 CW |
3516 | ggtt->vm.insert_page = i915_ggtt_insert_page; |
3517 | ggtt->vm.insert_entries = i915_ggtt_insert_entries; | |
3518 | ggtt->vm.clear_range = i915_ggtt_clear_range; | |
82ad6443 | 3519 | ggtt->vm.cleanup = i915_gmch_remove; |
baa09f5f | 3520 | |
7c3f86b6 CW |
3521 | ggtt->invalidate = gmch_ggtt_invalidate; |
3522 | ||
93f2cde2 CW |
3523 | ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; |
3524 | ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; | |
3525 | ggtt->vm.vma_ops.set_pages = ggtt_set_pages; | |
3526 | ggtt->vm.vma_ops.clear_pages = clear_pages; | |
3527 | ||
d507d735 | 3528 | if (unlikely(ggtt->do_idle_maps)) |
c0a7f818 CW |
3529 | DRM_INFO("applying Ironlake quirks for intel_iommu\n"); |
3530 | ||
baa09f5f BW |
3531 | return 0; |
3532 | } | |
3533 | ||
d85489d3 | 3534 | /** |
0088e522 | 3535 | * i915_ggtt_probe_hw - Probe GGTT hardware location |
97d6d7ab | 3536 | * @dev_priv: i915 device |
d85489d3 | 3537 | */ |
97d6d7ab | 3538 | int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) |
baa09f5f | 3539 | { |
62106b4f | 3540 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
baa09f5f BW |
3541 | int ret; |
3542 | ||
82ad6443 CW |
3543 | ggtt->vm.i915 = dev_priv; |
3544 | ggtt->vm.dma = &dev_priv->drm.pdev->dev; | |
c114f76a | 3545 | |
34c998b4 CW |
3546 | if (INTEL_GEN(dev_priv) <= 5) |
3547 | ret = i915_gmch_probe(ggtt); | |
3548 | else if (INTEL_GEN(dev_priv) < 8) | |
3549 | ret = gen6_gmch_probe(ggtt); | |
3550 | else | |
3551 | ret = gen8_gmch_probe(ggtt); | |
a54c0c27 | 3552 | if (ret) |
baa09f5f | 3553 | return ret; |
baa09f5f | 3554 | |
db9309a5 CW |
3555 | /* Trim the GGTT to fit the GuC mappable upper range (when enabled). |
3556 | * This is easier than doing range restriction on the fly, as we | |
3557 | * currently don't have any bits spare to pass in this upper | |
3558 | * restriction! | |
3559 | */ | |
93ffbe8e | 3560 | if (USES_GUC(dev_priv)) { |
82ad6443 CW |
3561 | ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP); |
3562 | ggtt->mappable_end = | |
3563 | min_t(u64, ggtt->mappable_end, ggtt->vm.total); | |
db9309a5 CW |
3564 | } |
3565 | ||
82ad6443 | 3566 | if ((ggtt->vm.total - 1) >> 32) { |
c890e2d5 | 3567 | DRM_ERROR("We never expected a Global GTT with more than 32bits" |
f6b9d5ca | 3568 | " of address space! Found %lldM!\n", |
82ad6443 CW |
3569 | ggtt->vm.total >> 20); |
3570 | ggtt->vm.total = 1ULL << 32; | |
3571 | ggtt->mappable_end = | |
3572 | min_t(u64, ggtt->mappable_end, ggtt->vm.total); | |
c890e2d5 CW |
3573 | } |
3574 | ||
82ad6443 | 3575 | if (ggtt->mappable_end > ggtt->vm.total) { |
f6b9d5ca | 3576 | DRM_ERROR("mappable aperture extends past end of GGTT," |
b7128ef1 | 3577 | " aperture=%pa, total=%llx\n", |
82ad6443 CW |
3578 | &ggtt->mappable_end, ggtt->vm.total); |
3579 | ggtt->mappable_end = ggtt->vm.total; | |
f6b9d5ca CW |
3580 | } |
3581 | ||
baa09f5f | 3582 | /* GMADR is the PCI mmio aperture into the global GTT. */ |
82ad6443 | 3583 | DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20); |
73ebd503 | 3584 | DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); |
1875fe7b | 3585 | DRM_DEBUG_DRIVER("DSM size = %lluM\n", |
77894226 | 3586 | (u64)resource_size(&intel_graphics_stolen_res) >> 20); |
80debff8 | 3587 | if (intel_vtd_active()) |
5db6c735 | 3588 | DRM_INFO("VT-d active for gfx access\n"); |
baa09f5f BW |
3589 | |
3590 | return 0; | |
0088e522 CW |
3591 | } |
3592 | ||
3593 | /** | |
3594 | * i915_ggtt_init_hw - Initialize GGTT hardware | |
97d6d7ab | 3595 | * @dev_priv: i915 device |
0088e522 | 3596 | */ |
97d6d7ab | 3597 | int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) |
0088e522 | 3598 | { |
0088e522 CW |
3599 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
3600 | int ret; | |
3601 | ||
63fd659f CW |
3602 | stash_init(&dev_priv->mm.wc_stash); |
3603 | ||
a6508ded CW |
3604 | /* Note that we use page colouring to enforce a guard page at the |
3605 | * end of the address space. This is required as the CS may prefetch | |
3606 | * beyond the end of the batch buffer, across the page boundary, | |
3607 | * and beyond the end of the GTT if we do not provide a guard. | |
f6b9d5ca | 3608 | */ |
80b204bc | 3609 | mutex_lock(&dev_priv->drm.struct_mutex); |
63fd659f | 3610 | i915_address_space_init(&ggtt->vm, dev_priv); |
250f8c81 | 3611 | |
48e90504 TU |
3612 | ggtt->vm.is_ggtt = true; |
3613 | ||
250f8c81 JB |
3614 | /* Only VLV supports read-only GGTT mappings */ |
3615 | ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); | |
3616 | ||
a6508ded | 3617 | if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv)) |
82ad6443 | 3618 | ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; |
80b204bc | 3619 | mutex_unlock(&dev_priv->drm.struct_mutex); |
f6b9d5ca | 3620 | |
73ebd503 MA |
3621 | if (!io_mapping_init_wc(&dev_priv->ggtt.iomap, |
3622 | dev_priv->ggtt.gmadr.start, | |
f7bbe788 | 3623 | dev_priv->ggtt.mappable_end)) { |
f6b9d5ca CW |
3624 | ret = -EIO; |
3625 | goto out_gtt_cleanup; | |
3626 | } | |
3627 | ||
73ebd503 | 3628 | ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end); |
f6b9d5ca | 3629 | |
0088e522 CW |
3630 | /* |
3631 | * Initialise stolen early so that we may reserve preallocated | |
3632 | * objects for the BIOS to KMS transition. | |
3633 | */ | |
7ace3d30 | 3634 | ret = i915_gem_init_stolen(dev_priv); |
0088e522 CW |
3635 | if (ret) |
3636 | goto out_gtt_cleanup; | |
3637 | ||
3638 | return 0; | |
a4eba47b ID |
3639 | |
3640 | out_gtt_cleanup: | |
82ad6443 | 3641 | ggtt->vm.cleanup(&ggtt->vm); |
a4eba47b | 3642 | return ret; |
baa09f5f | 3643 | } |
6f65e29a | 3644 | |
97d6d7ab | 3645 | int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) |
ac840ae5 | 3646 | { |
97d6d7ab | 3647 | if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt()) |
ac840ae5 VS |
3648 | return -EIO; |
3649 | ||
3650 | return 0; | |
3651 | } | |
3652 | ||
7c3f86b6 CW |
3653 | void i915_ggtt_enable_guc(struct drm_i915_private *i915) |
3654 | { | |
04f7b24e CW |
3655 | GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate); |
3656 | ||
7c3f86b6 | 3657 | i915->ggtt.invalidate = guc_ggtt_invalidate; |
aeb950bd MW |
3658 | |
3659 | i915_ggtt_invalidate(i915); | |
7c3f86b6 CW |
3660 | } |
3661 | ||
3662 | void i915_ggtt_disable_guc(struct drm_i915_private *i915) | |
3663 | { | |
35e90081 CW |
3664 | /* XXX Temporary pardon for error unload */ |
3665 | if (i915->ggtt.invalidate == gen6_ggtt_invalidate) | |
3666 | return; | |
3667 | ||
04f7b24e CW |
3668 | /* We should only be called after i915_ggtt_enable_guc() */ |
3669 | GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate); | |
3670 | ||
3671 | i915->ggtt.invalidate = gen6_ggtt_invalidate; | |
aeb950bd MW |
3672 | |
3673 | i915_ggtt_invalidate(i915); | |
7c3f86b6 CW |
3674 | } |
3675 | ||
275a991c | 3676 | void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) |
fa42331b | 3677 | { |
72e96d64 | 3678 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
74479985 | 3679 | struct i915_vma *vma, *vn; |
fa42331b | 3680 | |
dc97997a | 3681 | i915_check_and_clear_faults(dev_priv); |
fa42331b SV |
3682 | |
3683 | /* First fill our portion of the GTT with scratch pages */ | |
82ad6443 | 3684 | ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); |
fa42331b | 3685 | |
82ad6443 | 3686 | ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */ |
fbb30a5c CW |
3687 | |
3688 | /* clflush objects bound into the GGTT and rebind them. */ | |
82ad6443 CW |
3689 | GEM_BUG_ON(!list_empty(&ggtt->vm.active_list)); |
3690 | list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) { | |
74479985 | 3691 | struct drm_i915_gem_object *obj = vma->obj; |
fbb30a5c | 3692 | |
74479985 CW |
3693 | if (!(vma->flags & I915_VMA_GLOBAL_BIND)) |
3694 | continue; | |
fbb30a5c | 3695 | |
74479985 CW |
3696 | if (!i915_vma_unbind(vma)) |
3697 | continue; | |
2c3d9984 | 3698 | |
520ea7c5 CW |
3699 | WARN_ON(i915_vma_bind(vma, |
3700 | obj ? obj->cache_level : 0, | |
3701 | PIN_UPDATE)); | |
3702 | if (obj) | |
3703 | WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); | |
2c3d9984 | 3704 | } |
fa42331b | 3705 | |
82ad6443 | 3706 | ggtt->vm.closed = false; |
e9e7dc41 | 3707 | i915_ggtt_invalidate(dev_priv); |
fbb30a5c | 3708 | |
275a991c | 3709 | if (INTEL_GEN(dev_priv) >= 8) { |
4395890a | 3710 | struct intel_ppat *ppat = &dev_priv->ppat; |
fa42331b | 3711 | |
4395890a ZW |
3712 | bitmap_set(ppat->dirty, 0, ppat->max_entries); |
3713 | dev_priv->ppat.update_hw(dev_priv); | |
fa42331b SV |
3714 | return; |
3715 | } | |
fa42331b SV |
3716 | } |
3717 | ||
804beb4b | 3718 | static struct scatterlist * |
2d7f3bdb | 3719 | rotate_pages(const dma_addr_t *in, unsigned int offset, |
804beb4b | 3720 | unsigned int width, unsigned int height, |
87130255 | 3721 | unsigned int stride, |
804beb4b | 3722 | struct sg_table *st, struct scatterlist *sg) |
50470bb0 TU |
3723 | { |
3724 | unsigned int column, row; | |
3725 | unsigned int src_idx; | |
50470bb0 | 3726 | |
50470bb0 | 3727 | for (column = 0; column < width; column++) { |
87130255 | 3728 | src_idx = stride * (height - 1) + column; |
50470bb0 TU |
3729 | for (row = 0; row < height; row++) { |
3730 | st->nents++; | |
3731 | /* We don't need the pages, but need to initialize | |
3732 | * the entries so the sg list can be happily traversed. | |
3733 | * All we need are the DMA addresses. | 
3734 | */ | |
f6e35cda | 3735 | sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); |
804beb4b | 3736 | sg_dma_address(sg) = in[offset + src_idx]; |
f6e35cda | 3737 | sg_dma_len(sg) = I915_GTT_PAGE_SIZE; |
50470bb0 | 3738 | sg = sg_next(sg); |
87130255 | 3739 | src_idx -= stride; |
50470bb0 TU |
3740 | } |
3741 | } | |
804beb4b TU |
3742 | |
3743 | return sg; | |
50470bb0 TU |
3744 | } |
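
/*
 * rotate_pages() emits the source pages column by column, bottom row first.
 * Worked example: for a single 2x2 plane (width = height = stride = 2) whose
 * pages are laid out linearly as
 *
 *	0 1
 *	2 3
 *
 * the resulting sg list carries the DMA addresses of pages 2, 0, 3, 1, i.e.
 * each column read bottom-up, which is the ordering the rotated GGTT view
 * presents to the display engine.
 */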
3745 | ||
ba7a5741 CW |
3746 | static noinline struct sg_table * |
3747 | intel_rotate_pages(struct intel_rotation_info *rot_info, | |
3748 | struct drm_i915_gem_object *obj) | |
50470bb0 | 3749 | { |
f6e35cda | 3750 | const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE; |
6687c906 | 3751 | unsigned int size = intel_rotation_info_size(rot_info); |
85d1225e DG |
3752 | struct sgt_iter sgt_iter; |
3753 | dma_addr_t dma_addr; | |
50470bb0 TU |
3754 | unsigned long i; |
3755 | dma_addr_t *page_addr_list; | |
3756 | struct sg_table *st; | |
89e3e142 | 3757 | struct scatterlist *sg; |
1d00dad5 | 3758 | int ret = -ENOMEM; |
50470bb0 | 3759 | |
50470bb0 | 3760 | /* Allocate a temporary list of source pages for random access. */ |
2098105e | 3761 | page_addr_list = kvmalloc_array(n_pages, |
f2a85e19 | 3762 | sizeof(dma_addr_t), |
0ee931c4 | 3763 | GFP_KERNEL); |
50470bb0 TU |
3764 | if (!page_addr_list) |
3765 | return ERR_PTR(ret); | |
3766 | ||
3767 | /* Allocate target SG list. */ | |
3768 | st = kmalloc(sizeof(*st), GFP_KERNEL); | |
3769 | if (!st) | |
3770 | goto err_st_alloc; | |
3771 | ||
6687c906 | 3772 | ret = sg_alloc_table(st, size, GFP_KERNEL); |
50470bb0 TU |
3773 | if (ret) |
3774 | goto err_sg_alloc; | |
3775 | ||
3776 | /* Populate source page list from the object. */ | |
3777 | i = 0; | |
a4f5ea64 | 3778 | for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages) |
85d1225e | 3779 | page_addr_list[i++] = dma_addr; |
50470bb0 | 3780 | |
85d1225e | 3781 | GEM_BUG_ON(i != n_pages); |
11f20322 VS |
3782 | st->nents = 0; |
3783 | sg = st->sgl; | |
3784 | ||
6687c906 VS |
3785 | for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { |
3786 | sg = rotate_pages(page_addr_list, rot_info->plane[i].offset, | |
3787 | rot_info->plane[i].width, rot_info->plane[i].height, | |
3788 | rot_info->plane[i].stride, st, sg); | |
89e3e142 TU |
3789 | } |
3790 | ||
2098105e | 3791 | kvfree(page_addr_list); |
50470bb0 TU |
3792 | |
3793 | return st; | |
3794 | ||
3795 | err_sg_alloc: | |
3796 | kfree(st); | |
3797 | err_st_alloc: | |
2098105e | 3798 | kvfree(page_addr_list); |
50470bb0 | 3799 | |
62d0fe45 CW |
3800 | DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", |
3801 | obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); | |
6687c906 | 3802 | |
50470bb0 TU |
3803 | return ERR_PTR(ret); |
3804 | } | |
ec7adb6e | 3805 | |
ba7a5741 | 3806 | static noinline struct sg_table * |
8bd7ef16 JL |
3807 | intel_partial_pages(const struct i915_ggtt_view *view, |
3808 | struct drm_i915_gem_object *obj) | |
3809 | { | |
3810 | struct sg_table *st; | |
d2a84a76 | 3811 | struct scatterlist *sg, *iter; |
8bab1193 | 3812 | unsigned int count = view->partial.size; |
d2a84a76 | 3813 | unsigned int offset; |
8bd7ef16 JL |
3814 | int ret = -ENOMEM; |
3815 | ||
3816 | st = kmalloc(sizeof(*st), GFP_KERNEL); | |
3817 | if (!st) | |
3818 | goto err_st_alloc; | |
3819 | ||
d2a84a76 | 3820 | ret = sg_alloc_table(st, count, GFP_KERNEL); |
8bd7ef16 JL |
3821 | if (ret) |
3822 | goto err_sg_alloc; | |
3823 | ||
8bab1193 | 3824 | iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset); |
d2a84a76 CW |
3825 | GEM_BUG_ON(!iter); |
3826 | ||
8bd7ef16 JL |
3827 | sg = st->sgl; |
3828 | st->nents = 0; | |
d2a84a76 CW |
3829 | do { |
3830 | unsigned int len; | |
8bd7ef16 | 3831 | |
d2a84a76 CW |
3832 | len = min(iter->length - (offset << PAGE_SHIFT), |
3833 | count << PAGE_SHIFT); | |
3834 | sg_set_page(sg, NULL, len, 0); | |
3835 | sg_dma_address(sg) = | |
3836 | sg_dma_address(iter) + (offset << PAGE_SHIFT); | |
3837 | sg_dma_len(sg) = len; | |
8bd7ef16 | 3838 | |
8bd7ef16 | 3839 | st->nents++; |
d2a84a76 CW |
3840 | count -= len >> PAGE_SHIFT; |
3841 | if (count == 0) { | |
3842 | sg_mark_end(sg); | |
3843 | return st; | |
3844 | } | |
8bd7ef16 | 3845 | |
d2a84a76 CW |
3846 | sg = __sg_next(sg); |
3847 | iter = __sg_next(iter); | |
3848 | offset = 0; | |
3849 | } while (1); | |
8bd7ef16 JL |
3850 | |
3851 | err_sg_alloc: | |
3852 | kfree(st); | |
3853 | err_st_alloc: | |
3854 | return ERR_PTR(ret); | |
3855 | } | |
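
/*
 * A partial view is described purely by a page offset and page count into
 * the object's backing store, roughly (field names as used above via
 * view->partial; the initializer is only a sketch):
 *
 *	struct i915_ggtt_view view = {
 *		.type = I915_GGTT_VIEW_PARTIAL,
 *		.partial = { .offset = 16, .size = 4 },	// units of pages
 *	};
 *
 * intel_partial_pages() then builds an sg_table covering just those four
 * pages, merging runs that are already contiguous in the source sg list
 * into single entries.
 */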
3856 | ||
70b9f6f8 | 3857 | static int |
50470bb0 | 3858 | i915_get_ggtt_vma_pages(struct i915_vma *vma) |
fe14d5f4 | 3859 | { |
ba7a5741 | 3860 | int ret; |
50470bb0 | 3861 | |
2c3a3f44 CW |
3862 | /* The vma->pages are only valid within the lifespan of the borrowed |
3863 | * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so | |
3864 | * must be the vma->pages. A simple rule is that vma->pages must only | |
3865 | * be accessed when the obj->mm.pages are pinned. | |
3866 | */ | |
3867 | GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); | |
3868 | ||
ba7a5741 | 3869 | switch (vma->ggtt_view.type) { |
62d4028f CW |
3870 | default: |
3871 | GEM_BUG_ON(vma->ggtt_view.type); | |
3872 | /* fall through */ | |
ba7a5741 CW |
3873 | case I915_GGTT_VIEW_NORMAL: |
3874 | vma->pages = vma->obj->mm.pages; | |
fe14d5f4 TU |
3875 | return 0; |
3876 | ||
ba7a5741 | 3877 | case I915_GGTT_VIEW_ROTATED: |
247177dd | 3878 | vma->pages = |
ba7a5741 CW |
3879 | intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj); |
3880 | break; | |
3881 | ||
3882 | case I915_GGTT_VIEW_PARTIAL: | |
247177dd | 3883 | vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); |
ba7a5741 | 3884 | break; |
ba7a5741 | 3885 | } |
fe14d5f4 | 3886 | |
ba7a5741 CW |
3887 | ret = 0; |
3888 | if (unlikely(IS_ERR(vma->pages))) { | |
247177dd CW |
3889 | ret = PTR_ERR(vma->pages); |
3890 | vma->pages = NULL; | |
50470bb0 TU |
3891 | DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", |
3892 | vma->ggtt_view.type, ret); | |
fe14d5f4 | 3893 | } |
50470bb0 | 3894 | return ret; |
fe14d5f4 TU |
3895 | } |
3896 | ||
625d988a CW |
3897 | /** |
3898 | * i915_gem_gtt_reserve - reserve a node in an address_space (GTT) | |
a4dbf7cf CW |
3899 | * @vm: the &struct i915_address_space |
3900 | * @node: the &struct drm_mm_node (typically i915_vma.mode) | |
3901 | * @size: how much space to allocate inside the GTT, | |
3902 | * must be #I915_GTT_PAGE_SIZE aligned | |
3903 | * @offset: where to insert inside the GTT, | |
3904 | * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node | |
3905 | * (@offset + @size) must fit within the address space | |
3906 | * @color: color to apply to node, if this node is not from a VMA, | |
3907 | * color must be #I915_COLOR_UNEVICTABLE | |
3908 | * @flags: control search and eviction behaviour | |
625d988a CW |
3909 | * |
3910 | * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside | |
3911 | * the address space (using @size and @color). If the @node does not fit, it | |
3912 | * tries to evict any overlapping nodes from the GTT, including any | |
3913 | * neighbouring nodes if the colors do not match (to ensure guard pages between | |
3914 | * differing domains). See i915_gem_evict_for_node() for the gory details | |
3915 | * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on | 
3916 | * evicting active overlapping objects, and any overlapping node that is pinned | |
3917 | * or marked as unevictable will also result in failure. | |
3918 | * | |
3919 | * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if | |
3920 | * asked to wait for eviction and interrupted. | |
3921 | */ | |
3922 | int i915_gem_gtt_reserve(struct i915_address_space *vm, | |
3923 | struct drm_mm_node *node, | |
3924 | u64 size, u64 offset, unsigned long color, | |
3925 | unsigned int flags) | |
3926 | { | |
3927 | int err; | |
3928 | ||
3929 | GEM_BUG_ON(!size); | |
3930 | GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); | |
3931 | GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT)); | |
3932 | GEM_BUG_ON(range_overflows(offset, size, vm->total)); | |
82ad6443 | 3933 | GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); |
9734ad13 | 3934 | GEM_BUG_ON(drm_mm_node_allocated(node)); |
625d988a CW |
3935 | |
3936 | node->size = size; | |
3937 | node->start = offset; | |
3938 | node->color = color; | |
3939 | ||
3940 | err = drm_mm_reserve_node(&vm->mm, node); | |
3941 | if (err != -ENOSPC) | |
3942 | return err; | |
3943 | ||
616d9cee CW |
3944 | if (flags & PIN_NOEVICT) |
3945 | return -ENOSPC; | |
3946 | ||
625d988a CW |
3947 | err = i915_gem_evict_for_node(vm, node, flags); |
3948 | if (err == 0) | |
3949 | err = drm_mm_reserve_node(&vm->mm, node); | |
3950 | ||
3951 | return err; | |
3952 | } | |
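
/*
 * Sketch of a typical caller that needs a node at a fixed GGTT address
 * (illustrative only; "offset" is whatever address the caller requires and
 * locking/cleanup are elided):
 *
 *	err = i915_gem_gtt_reserve(&ggtt->vm, &node, SZ_64K, offset,
 *				   I915_COLOR_UNEVICTABLE, PIN_NONBLOCK);
 *	if (err)
 *		return err;
 *
 * On success the node owns [offset, offset + SZ_64K) until it is released
 * with drm_mm_remove_node().
 */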
3953 | ||
606fec95 CW |
3954 | static u64 random_offset(u64 start, u64 end, u64 len, u64 align) |
3955 | { | |
3956 | u64 range, addr; | |
3957 | ||
3958 | GEM_BUG_ON(range_overflows(start, len, end)); | |
3959 | GEM_BUG_ON(round_up(start, align) > round_down(end - len, align)); | |
3960 | ||
3961 | range = round_down(end - len, align) - round_up(start, align); | |
3962 | if (range) { | |
3963 | if (sizeof(unsigned long) == sizeof(u64)) { | |
3964 | addr = get_random_long(); | |
3965 | } else { | |
3966 | addr = get_random_int(); | |
3967 | if (range > U32_MAX) { | |
3968 | addr <<= 32; | |
3969 | addr |= get_random_int(); | |
3970 | } | |
3971 | } | |
3972 | div64_u64_rem(addr, range, &addr); | |
3973 | start += addr; | |
3974 | } | |
3975 | ||
3976 | return round_up(start, align); | |
3977 | } | |
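
/*
 * random_offset() picks an aligned start address at random such that
 * [addr, addr + len) still fits inside [start, end). Worked example with
 * illustrative numbers: start = 0, end = 1 MiB, len = 64 KiB and
 * align = 4 KiB give range = 960 KiB, so the result is one of the
 * page-aligned offsets 0, 4K, 8K, ... 960K.
 */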
3978 | ||
e007b19d CW |
3979 | /** |
3980 | * i915_gem_gtt_insert - insert a node into an address_space (GTT) | |
a4dbf7cf CW |
3981 | * @vm: the &struct i915_address_space |
3982 | * @node: the &struct drm_mm_node (typically i915_vma.node) | |
3983 | * @size: how much space to allocate inside the GTT, | |
3984 | * must be #I915_GTT_PAGE_SIZE aligned | |
3985 | * @alignment: required alignment of starting offset, may be 0 but | |
3986 | * if specified, this must be a power-of-two and at least | |
3987 | * #I915_GTT_MIN_ALIGNMENT | |
3988 | * @color: color to apply to node | |
3989 | * @start: start of any range restriction inside GTT (0 for all), | |
e007b19d | 3990 | * must be #I915_GTT_PAGE_SIZE aligned |
a4dbf7cf CW |
3991 | * @end: end of any range restriction inside GTT (U64_MAX for all), |
3992 | * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX | |
3993 | * @flags: control search and eviction behaviour | |
e007b19d CW |
3994 | * |
3995 | * i915_gem_gtt_insert() first searches for an available hole into which | |
3996 | * it can insert the node. The hole address is aligned to @alignment and | 
3997 | * its @size must then fit entirely within the [@start, @end] bounds. The | |
3998 | * nodes on either side of the hole must match @color, or else a guard page | |
3999 | * will be inserted between the two nodes (or the node evicted). If no | |
606fec95 CW |
4000 | * suitable hole is found, first a victim is randomly selected and tested |
4001 | * for eviction, and failing that the LRU list of objects within the GTT | 
e007b19d CW |
4002 | * is scanned to find the first set of replacement nodes to create the hole. |
4003 | * Those old overlapping nodes are evicted from the GTT (and so must be | |
4004 | * rebound before any future use). Any node that is currently pinned cannot | |
4005 | * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently | 
4006 | * active and #PIN_NONBLOCK is specified, that node is also skipped when | |
4007 | * searching for an eviction candidate. See i915_gem_evict_something() for | |
4008 | * the gory details on the eviction algorithm. | |
4009 | * | |
4010 | * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if | |
4011 | * asked to wait for eviction and interrupted. | |
4012 | */ | |
4013 | int i915_gem_gtt_insert(struct i915_address_space *vm, | |
4014 | struct drm_mm_node *node, | |
4015 | u64 size, u64 alignment, unsigned long color, | |
4016 | u64 start, u64 end, unsigned int flags) | |
4017 | { | |
4e64e553 | 4018 | enum drm_mm_insert_mode mode; |
606fec95 | 4019 | u64 offset; |
e007b19d CW |
4020 | int err; |
4021 | ||
4022 | lockdep_assert_held(&vm->i915->drm.struct_mutex); | |
4023 | GEM_BUG_ON(!size); | |
4024 | GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); | |
4025 | GEM_BUG_ON(alignment && !is_power_of_2(alignment)); | |
4026 | GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT)); | |
4027 | GEM_BUG_ON(start >= end); | |
4028 | GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); | |
4029 | GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); | |
82ad6443 | 4030 | GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); |
9734ad13 | 4031 | GEM_BUG_ON(drm_mm_node_allocated(node)); |
e007b19d CW |
4032 | |
4033 | if (unlikely(range_overflows(start, size, end))) | |
4034 | return -ENOSPC; | |
4035 | ||
4036 | if (unlikely(round_up(start, alignment) > round_down(end - size, alignment))) | |
4037 | return -ENOSPC; | |
4038 | ||
4e64e553 CW |
4039 | mode = DRM_MM_INSERT_BEST; |
4040 | if (flags & PIN_HIGH) | |
eb479f86 | 4041 | mode = DRM_MM_INSERT_HIGHEST; |
4e64e553 CW |
4042 | if (flags & PIN_MAPPABLE) |
4043 | mode = DRM_MM_INSERT_LOW; | |
e007b19d CW |
4044 | |
4045 | /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks, | |
4046 | * so we know that we always have a minimum alignment of 4096. | |
4047 | * The drm_mm range manager is optimised to return results | |
4048 | * with zero alignment, so where possible use the optimal | |
4049 | * path. | |
4050 | */ | |
4051 | BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE); | |
4052 | if (alignment <= I915_GTT_MIN_ALIGNMENT) | |
4053 | alignment = 0; | |
4054 | ||
4e64e553 CW |
4055 | err = drm_mm_insert_node_in_range(&vm->mm, node, |
4056 | size, alignment, color, | |
4057 | start, end, mode); | |
e007b19d CW |
4058 | if (err != -ENOSPC) |
4059 | return err; | |
4060 | ||
eb479f86 CW |
4061 | if (mode & DRM_MM_INSERT_ONCE) { |
4062 | err = drm_mm_insert_node_in_range(&vm->mm, node, | |
4063 | size, alignment, color, | |
4064 | start, end, | |
4065 | DRM_MM_INSERT_BEST); | |
4066 | if (err != -ENOSPC) | |
4067 | return err; | |
4068 | } | |
4069 | ||
616d9cee CW |
4070 | if (flags & PIN_NOEVICT) |
4071 | return -ENOSPC; | |
4072 | ||
606fec95 CW |
4073 | /* No free space, pick a slot at random. |
4074 | * | |
4075 | * There is a pathological case here using a GTT shared between | |
4076 | * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt): | |
4077 | * | |
4078 | * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->| | |
4079 | * (64k objects) (448k objects) | |
4080 | * | |
4081 | * Now imagine that the eviction LRU is ordered top-down (just because | |
4082 | * pathology meets real life), and that we need to evict an object to | |
4083 | * make room inside the aperture. The eviction scan then has to walk | |
4084 | * the 448k list before it finds one within range. And now imagine that | |
4085 | * it has to search for a new hole between every byte inside the memcpy, | |
4086 | * for several simultaneous clients. | |
4087 | * | |
4088 | * On a full-ppgtt system, if we have run out of available space, there | |
4089 | * will be lots and lots of objects in the eviction list! Again, | |
4090 | * searching that LRU list may be slow if we are also applying any | |
4091 | * range restrictions (e.g. restriction to low 4GiB) and so, for | |
4092 | * simplicity and similarity between different GTTs, try the single | 
4093 | * random replacement first. | |
4094 | */ | |
4095 | offset = random_offset(start, end, | |
4096 | size, alignment ?: I915_GTT_MIN_ALIGNMENT); | |
4097 | err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags); | |
4098 | if (err != -ENOSPC) | |
4099 | return err; | |
4100 | ||
4101 | /* Randomly selected placement is pinned, do a search */ | |
e007b19d CW |
4102 | err = i915_gem_evict_something(vm, size, alignment, color, |
4103 | start, end, flags); | |
4104 | if (err) | |
4105 | return err; | |
4106 | ||
4e64e553 CW |
4107 | return drm_mm_insert_node_in_range(&vm->mm, node, |
4108 | size, alignment, color, | |
4109 | start, end, DRM_MM_INSERT_EVICT); | |
e007b19d | 4110 | } |
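
/*
 * i915_gem_gtt_insert() is the searching counterpart of
 * i915_gem_gtt_reserve(): instead of a fixed offset the caller supplies a
 * size, alignment and allowed range. A sketch (illustrative only, error
 * handling elided):
 *
 *	err = i915_gem_gtt_insert(&ggtt->vm, &node,
 *				  size, 0, I915_COLOR_UNEVICTABLE,
 *				  0, ggtt->mappable_end,
 *				  PIN_MAPPABLE);
 *
 * which places the node anywhere inside the CPU-visible aperture, evicting
 * existing nodes if no suitable hole is free.
 */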
3b5bb0a3 CW |
4111 | |
4112 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) | |
4113 | #include "selftests/mock_gtt.c" | |
1c42819a | 4114 | #include "selftests/i915_gem_gtt.c" |
3b5bb0a3 | 4115 | #endif |