/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <[email protected]>
 *
 */

#include <linux/string.h>
#include <linux/bitops.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/** @file i915_gem_tiling.c
 *
 * Support for managing tiling state of buffer objects.
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled.  However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y.  So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics.  This
 * is called "Channel XOR Randomization" in the MCH documentation.  The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what the address
 * swizzling it needs to do is, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
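
/* Illustrative sketch only (hypothetical helper, not compiled into the
 * driver): the cumulative CPU-side swizzle described above amounts to XORing
 * a few higher address bits down into bit 6.  For the bit-9/10/11 case:
 */
#if 0
static unsigned long swizzle_addr_9_10_11(unsigned long addr)
{
        /* Flip bit 6 whenever an odd number of bits 9, 10 and 11 are set. */
        unsigned long bit6 = ((addr >> 3) ^ (addr >> 4) ^ (addr >> 5)) &
                             (1 << 6);

        return addr ^ bit6;
}
#endif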

/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
        uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

        if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
                /* On Ironlake and Sandybridge, the GPU always uses the
                 * same swizzling setup, whatever the DRAM configuration.
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                swizzle_y = I915_BIT_6_SWIZZLE_9;
        } else if (IS_GEN2(dev)) {
                /* As far as we know, the 865 doesn't have these bit 6
                 * swizzling issues.
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
        } else if (IS_MOBILE(dev)) {
                uint32_t dcc;

                /* On mobile 9xx chipsets, channel interleave by the CPU is
                 * determined by DCC.  For single-channel, neither the CPU
                 * nor the GPU do swizzling.  For dual channel interleaved,
                 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
                 * 9 for Y tiled.  The CPU's interleave is independent, and
                 * can be based on either bit 11 (haven't seen this yet) or
                 * bit 17 (common).
                 */
                dcc = I915_READ(DCC);
                switch (dcc & DCC_ADDRESSING_MODE_MASK) {
                case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
                case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
                        swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                        swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                        break;
                case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
                        if (dcc & DCC_CHANNEL_XOR_DISABLE) {
                                /* This is the base swizzling by the GPU for
                                 * tiled buffers.
                                 */
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                                swizzle_y = I915_BIT_6_SWIZZLE_9;
                        } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
                                /* Bit 11 swizzling by the CPU in addition. */
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
                                swizzle_y = I915_BIT_6_SWIZZLE_9_11;
                        } else {
                                /* Bit 17 swizzling by the CPU in addition. */
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
                                swizzle_y = I915_BIT_6_SWIZZLE_9_17;
                        }
                        break;
                }
                if (dcc == 0xffffffff) {
                        DRM_ERROR("Couldn't read from MCHBAR.  "
                                  "Disabling tiling.\n");
                        swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
                        swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
                }
        } else {
                /* The 965, G33, and newer, have a very flexible memory
                 * configuration.  It will enable dual-channel mode
                 * (interleaving) on as much memory as it can, and the GPU
                 * will additionally sometimes enable different bit 6
                 * swizzling for tiled objects from the CPU.
                 *
                 * Here's what I found on the G965:
                 *    slot fill         memory size  swizzling
                 * 0A   0B   1A   1B   1-ch   2-ch
                 * 512  0    0    0    512    0     O
                 * 512  0    512  0    16     1008  X
                 * 512  0    0    512  16     1008  X
                 * 0    512  0    512  16     1008  X
                 * 1024 1024 1024 0    2048   1024  O
                 *
                 * We could probably detect this based on either the DRB
                 * matching, which was the case for the swizzling required in
                 * the table above, or from the 1-ch value being less than
                 * the minimum size of a rank.
                 */
                if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
                        swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                        swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                } else {
                        swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                        swizzle_y = I915_BIT_6_SWIZZLE_9;
                }
        }

        dev_priv->mm.bit_6_swizzle_x = swizzle_x;
        dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}

/* Check pitch constraints for all chips & tiling formats */
bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
        int tile_width;

        /* Linear is always fine */
        if (tiling_mode == I915_TILING_NONE)
                return true;

        if (IS_GEN2(dev) ||
            (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
                tile_width = 128;
        else
                tile_width = 512;

        /* check maximum stride & object size */
        if (INTEL_INFO(dev)->gen >= 4) {
                /* i965 stores the end address of the gtt mapping in the fence
                 * reg, so don't bother to check the size */
                if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
                        return false;
        } else {
                if (stride > 8192)
                        return false;

                if (IS_GEN3(dev)) {
                        if (size > I830_FENCE_MAX_SIZE_VAL << 20)
                                return false;
                } else {
                        if (size > I830_FENCE_MAX_SIZE_VAL << 19)
                                return false;
                }
        }

        /* 965+ just needs multiples of tile width */
        if (INTEL_INFO(dev)->gen >= 4) {
                if (stride & (tile_width - 1))
                        return false;
                return true;
        }

        /* Pre-965 needs power of two tile widths */
        if (stride < tile_width)
                return false;

        if (stride & (stride - 1))
                return false;

        return true;
}
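
/* Worked example, illustrative only (not compiled into the driver): on
 * gen3, an X-tiled surface with a 4096-byte stride uses 512-byte tiles;
 * 4096 <= 8192, is >= the tile width, and is a power of two, so the check
 * passes, while a 4097-byte stride would fail the power-of-two test.
 */
#if 0
        BUG_ON(!i915_tiling_ok(dev, 4096, 1 << 20, I915_TILING_X));
        BUG_ON(i915_tiling_ok(dev, 4097, 1 << 20, I915_TILING_X));
#endif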
bool
i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

        if (obj_priv->gtt_space == NULL)
                return true;

        if (tiling_mode == I915_TILING_NONE)
                return true;

        if (INTEL_INFO(dev)->gen >= 4)
                return true;

        if (obj_priv->gtt_offset & (obj->size - 1))
                return false;

        if (IS_GEN3(dev)) {
                if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
                        return false;
        } else {
                if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
                        return false;
        }

        return true;
}
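
/* Worked example, illustrative only: pre-965 fences require the object to
 * be bound at a natural alignment of its size, so a 1 MiB tiled object at
 * gtt_offset 0x180000 fails the size-alignment check above
 * (0x180000 & (0x100000 - 1) != 0), while one at 0x200000 passes.
 */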

/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_set_tiling *args = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;
        obj_priv = to_intel_bo(obj);

        if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
                drm_gem_object_unreference_unlocked(obj);
                return -EINVAL;
        }

        if (obj_priv->pin_count) {
                drm_gem_object_unreference_unlocked(obj);
                return -EBUSY;
        }

        if (args->tiling_mode == I915_TILING_NONE) {
                args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
                args->stride = 0;
        } else {
                if (args->tiling_mode == I915_TILING_X)
                        args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
                else
                        args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

                /* Hide bit 17 swizzling from the user.  This prevents old Mesa
                 * from aborting the application on sw fallbacks to bit 17,
                 * and we use the pread/pwrite bit17 paths to swizzle for it.
                 * If there was a user that was relying on the swizzle
                 * information for drm_intel_bo_map()ed reads/writes this would
                 * break it, but we don't have any of those.
                 */
                if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
                        args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
                if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
                        args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

                /* If we can't handle the swizzling, make it untiled. */
                if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
                        args->tiling_mode = I915_TILING_NONE;
                        args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
                        args->stride = 0;
                }
        }

        mutex_lock(&dev->struct_mutex);
        if (args->tiling_mode != obj_priv->tiling_mode ||
            args->stride != obj_priv->stride) {
                /* We need to rebind the object if its current allocation
                 * no longer meets the alignment restrictions for its new
                 * tiling mode.  Otherwise we can just leave it alone, but
                 * need to ensure that any fence register is cleared.
                 */
                if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
                        ret = i915_gem_object_unbind(obj);
                else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
                        ret = i915_gem_object_put_fence_reg(obj, true);
                else
                        i915_gem_release_mmap(obj);

                if (ret != 0) {
                        args->tiling_mode = obj_priv->tiling_mode;
                        args->stride = obj_priv->stride;
                        goto err;
                }

                obj_priv->tiling_mode = args->tiling_mode;
                obj_priv->stride = args->stride;
        }
err:
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
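
/* Illustrative userspace sketch (assumes libdrm's drmIoctl() and the i915
 * uapi header; error handling elided): how a client would request X tiling
 * for a GEM handle and learn the swizzle mode it must apply on CPU access.
 */
#if 0
        struct drm_i915_gem_set_tiling set = {
                .handle = handle,
                .tiling_mode = I915_TILING_X,
                .stride = stride,
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set) == 0) {
                /* set.swizzle_mode now holds the required bit 6 swizzle;
                 * set.tiling_mode may have been downgraded to NONE. */
        }
#endif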

/**
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 */
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_get_tiling *args = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;
        obj_priv = to_intel_bo(obj);

        mutex_lock(&dev->struct_mutex);

        args->tiling_mode = obj_priv->tiling_mode;
        switch (obj_priv->tiling_mode) {
        case I915_TILING_X:
                args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
                break;
        case I915_TILING_Y:
                args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
                break;
        case I915_TILING_NONE:
                args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
                break;
        default:
                DRM_ERROR("unknown tiling mode\n");
        }

        /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
                args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
                args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

/**
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void
i915_gem_swizzle_page(struct page *page)
{
        char temp[64];
        char *vaddr;
        int i;

        vaddr = kmap(page);

        for (i = 0; i < PAGE_SIZE; i += 128) {
                memcpy(temp, &vaddr[i], 64);
                memcpy(&vaddr[i], &vaddr[i + 64], 64);
                memcpy(&vaddr[i + 64], temp, 64);
        }

        kunmap(page);
}
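
/* Equivalent formulation, illustrative only (hypothetical helper, not part
 * of the driver): a flip of physical bit 17 toggles the channel choice the
 * same way a flip of bit 6 would, so the in-place swap above is the same as
 * copying each 64-byte chunk to the offset with bit 6 inverted.
 */
#if 0
static void i915_gem_swizzle_page_by_xor(char *dst, const char *src)
{
        int o;

        for (o = 0; o < PAGE_SIZE; o += 64)
                memcpy(dst + (o ^ 64), src + o, 64);
}
#endif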

void
i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count = obj->size >> PAGE_SHIFT;
        int i;

        if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
                return;

        if (obj_priv->bit_17 == NULL)
                return;

        for (i = 0; i < page_count; i++) {
                char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
                if ((new_bit_17 & 0x1) !=
                    (test_bit(i, obj_priv->bit_17) != 0)) {
                        i915_gem_swizzle_page(obj_priv->pages[i]);
                        set_page_dirty(obj_priv->pages[i]);
                }
        }
}

void
i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count = obj->size >> PAGE_SHIFT;
        int i;

        if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
                return;

        if (obj_priv->bit_17 == NULL) {
                obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
                                           sizeof(long), GFP_KERNEL);
                if (obj_priv->bit_17 == NULL) {
                        DRM_ERROR("Failed to allocate memory for bit 17 "
                                  "record\n");
                        return;
                }
        }

        for (i = 0; i < page_count; i++) {
                if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
                        __set_bit(i, obj_priv->bit_17);
                else
                        __clear_bit(i, obj_priv->bit_17);
        }
}