/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <crypto/skcipher.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
#include <linux/namei.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		 "Number of crypto contexts to preallocate");

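/*
 * Pre-allocated resources: a mempool of bounce pages for the write path and a
 * spinlock-protected free list of encryption contexts, both filled in
 * ext4_init_crypto() and torn down in ext4_exit_crypto().
 */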
static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;

/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
	ctx->w.bounce_page = NULL;
	ctx->w.control_page = NULL;
	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}

/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode:     The inode for which we are doing the crypto
 * @gfp_flags: Allocation flags used if a new context must be allocated
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
					    gfp_t gfp_flags)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}

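/*
 * Workqueue used for deferred crypto work on the read path (its consumers
 * live outside this file), and a mutex that serializes ext4_init_crypto()
 * so the caches, the bounce page pool and the workqueue are set up only once.
 */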
struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);

/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
	struct ext4_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
	if (ext4_bounce_page_pool)
		mempool_destroy(ext4_bounce_page_pool);
	ext4_bounce_page_pool = NULL;
	if (ext4_read_workqueue)
		destroy_workqueue(ext4_read_workqueue);
	ext4_read_workqueue = NULL;
	if (ext4_crypto_ctx_cachep)
		kmem_cache_destroy(ext4_crypto_ctx_cachep);
	ext4_crypto_ctx_cachep = NULL;
	if (ext4_crypt_info_cachep)
		kmem_cache_destroy(ext4_crypt_info_cachep);
	ext4_crypt_info_cachep = NULL;
}

/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}

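/**
 * ext4_restore_control_page() - Releases a bounce page set up by ext4_encrypt()
 * @data_page: The ciphertext (bounce) page returned by ext4_encrypt().
 *
 * Detaches the encryption context stored in page_private(), unlocks the
 * bounce page and releases the context, which also returns the bounce page
 * to the mempool.  Per ext4_encrypt()'s contract, this is the caller's
 * cleanup step on the write path.
 */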
void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}

/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
	struct ext4_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	EXT4_DECRYPT = 0,
	EXT4_ENCRYPT,
} ext4_direction_t;

static int ext4_page_crypto(struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page,
			    gfp_t gfp_flags)

{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: crypto_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

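	/*
	 * The XTS tweak is the @index argument (the page index on the normal
	 * read/write paths), zero-padded to EXT4_XTS_TWEAK_SIZE bytes, so
	 * every page of a file is encrypted with a distinct tweak under the
	 * inode's key.
	 */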
	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
				   xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

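/*
 * alloc_bounce_page() - Grab a bounce page from the mempool for the write path
 *
 * On success the page is recorded in ctx->w.bounce_page and EXT4_WRITE_PATH_FL
 * is set so that ext4_release_crypto_ctx() returns the page to the pool.
 */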
static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
				      gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= EXT4_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * ext4_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 * @gfp_flags:      Allocation flags for the encryption context and bounce page
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path.  The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page,
			  gfp_t gfp_flags)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *) ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;
	ctx->w.control_page = plaintext_page;
	err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page, gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
	errout:
		ext4_release_crypto_ctx(ctx);
		return ciphertext_page;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}

/**
 * ext4_decrypt() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the inode's encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
				page->index, page, page, GFP_NOFS);
}

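/*
 * ext4_encrypted_zeroout() - Write encrypted zeroes over a range of blocks
 * @inode: The encrypted inode.
 * @lblk:  First logical block of the range.
 * @pblk:  First physical block of the range.
 * @len:   Number of blocks to zero out.
 *
 * For each block, encrypts ZERO_PAGE(0) using the block's logical number as
 * the tweak and writes the result synchronously to the corresponding physical
 * block, so that a later decrypting read of the range sees zeroes.
 */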
int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
			   ext4_fsblk_t pblk, ext4_lblk_t len)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page,
				       GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++; pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}

bool ext4_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == ext4_encryption_key_size(mode))
		return size;
	return 0;
}

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct ext4_crypt_info *ci;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!ext4_encrypted_inode(d_inode(dir))) {
		dput(dir);
		return 0;
	}
	ci = EXT4_I(d_inode(dir))->i_crypt_info;
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	cached_with_key = dentry->d_fsdata != NULL;
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name.  We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key)) {
#if 0	/* Revalidation debug */
		char buf[80];
		char *cp = simple_dname(dentry, buf, sizeof(buf));

		if (IS_ERR(cp))
			cp = (char *) "???";
		pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
		       cached_with_key, d_is_negative(dentry),
		       dir_has_key);
#endif
		return 0;
	}
	return 1;
}

const struct dentry_operations ext4_encrypted_d_ops = {
	.d_revalidate = ext4_d_revalidate,
};