// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "errcode.h"
#include "super.h"
#include "super-io.h"

#include <linux/crc32c.h>
#include <linux/crypto.h>
#include <linux/xxhash.h>
#include <linux/key.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/hash.h>
#include <crypto/poly1305.h>
#include <crypto/skcipher.h>
#include <keys/user-type.h>

/*
 * struct bch2_checksum_state abstracts checksum state accumulated across
 * multiple pages: pages can be folded into the running checksum without the
 * algorithm losing its state.
 *
 * For native checksum algorithms (like CRC), carrying a seed value is enough;
 * for hash-like algorithms, the full hash state needs to be stored.
 */

struct bch2_checksum_state {
        union {
                u64 seed;
                struct xxh64_state h64state;
        };
        unsigned int type;
};

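/*
 * Seed the checksum state for state->type: none and the plain CRC types start
 * from a zero seed, the *_nonzero variants from all ones (inverted again in
 * bch2_checksum_final()), and xxhash keeps full xxh64 hash state.
 */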
static void bch2_checksum_init(struct bch2_checksum_state *state)
{
        switch (state->type) {
        case BCH_CSUM_none:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_crc64:
                state->seed = 0;
                break;
        case BCH_CSUM_crc32c_nonzero:
                state->seed = U32_MAX;
                break;
        case BCH_CSUM_crc64_nonzero:
                state->seed = U64_MAX;
                break;
        case BCH_CSUM_xxhash:
                xxh64_reset(&state->h64state, 0);
                break;
        default:
                BUG();
        }
}

static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
{
        switch (state->type) {
        case BCH_CSUM_none:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_crc64:
                return state->seed;
        case BCH_CSUM_crc32c_nonzero:
                return state->seed ^ U32_MAX;
        case BCH_CSUM_crc64_nonzero:
                return state->seed ^ U64_MAX;
        case BCH_CSUM_xxhash:
                return xxh64_digest(&state->h64state);
        default:
                BUG();
        }
}

static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
{
        switch (state->type) {
        case BCH_CSUM_none:
                return;
        case BCH_CSUM_crc32c_nonzero:
        case BCH_CSUM_crc32c:
                state->seed = crc32c(state->seed, data, len);
                break;
        case BCH_CSUM_crc64_nonzero:
        case BCH_CSUM_crc64:
                state->seed = crc64_be(state->seed, data, len);
                break;
        case BCH_CSUM_xxhash:
                xxh64_update(&state->h64state, data, len);
                break;
        default:
                BUG();
        }
}

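/*
 * Encrypt or decrypt a scatterlist in place with the given ChaCha20 tfm;
 * since ChaCha20 is a stream cipher, the same operation serves both
 * directions.
 */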
static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
                                struct nonce nonce,
                                struct scatterlist *sg, size_t len)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
        int ret;

        skcipher_request_set_sync_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, len, nonce.d);

        ret = crypto_skcipher_encrypt(req);
        if (ret)
                pr_err("got error %i from crypto_skcipher_encrypt()", ret);

        return ret;
}

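/*
 * Encrypt a memory buffer in place: non-vmalloc memory fits in a single
 * scatterlist entry, vmalloc memory is walked page by page since it need not
 * be physically contiguous.
 */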
static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
                              struct nonce nonce,
                              void *buf, size_t len)
{
        if (!is_vmalloc_addr(buf)) {
                struct scatterlist sg;

                sg_init_table(&sg, 1);
                sg_set_page(&sg,
                            is_vmalloc_addr(buf)
                            ? vmalloc_to_page(buf)
                            : virt_to_page(buf),
                            len, offset_in_page(buf));
                return do_encrypt_sg(tfm, nonce, &sg, len);
        } else {
                unsigned pages = buf_pages(buf, len);
                struct scatterlist *sg;
                size_t orig_len = len;
                int ret, i;

                sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
                if (!sg)
                        return -BCH_ERR_ENOMEM_do_encrypt;

                sg_init_table(sg, pages);

                for (i = 0; i < pages; i++) {
                        unsigned offset = offset_in_page(buf);
                        unsigned pg_len = min_t(size_t, len, PAGE_SIZE - offset);

                        sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
                        buf += pg_len;
                        len -= pg_len;
                }

                ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
                kfree(sg);
                return ret;
        }
}

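/*
 * One-shot ChaCha20 encryption of @buf with @key: allocates a temporary
 * chacha20 tfm, keys it, encrypts and frees it. Used in this file to wrap and
 * unwrap the superblock key with the user's key.
 */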
int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
                            void *buf, size_t len)
{
        struct crypto_sync_skcipher *chacha20 =
                crypto_alloc_sync_skcipher("chacha20", 0, 0);
        int ret;

        ret = PTR_ERR_OR_ZERO(chacha20);
        if (ret) {
                pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
                return ret;
        }

        ret = crypto_skcipher_setkey(&chacha20->base,
                                     (void *) key, sizeof(*key));
        if (ret) {
                pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
                goto err;
        }

        ret = do_encrypt(chacha20, nonce, buf, len);
err:
        crypto_free_sync_skcipher(chacha20);
        return ret;
}

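/*
 * Derive the one-time Poly1305 key, as in the ChaCha20-Poly1305 construction:
 * encrypt an all-zeroes block with the nonce (with BCH_NONCE_POLY mixed in)
 * and feed the resulting keystream block to the shash.
 */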
static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
                        struct nonce nonce)
{
        u8 key[POLY1305_KEY_SIZE];
        int ret;

        nonce.d[3] ^= BCH_NONCE_POLY;

        memset(key, 0, sizeof(key));
        ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
        if (ret)
                return ret;

        desc->tfm = c->poly1305;
        crypto_shash_init(desc);
        crypto_shash_update(desc, key, sizeof(key));
        return 0;
}

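/*
 * Checksum a linear buffer. CRC/xxhash types go through struct
 * bch2_checksum_state; the chacha20_poly1305 types compute a Poly1305 MAC
 * keyed via gen_poly_key(), truncated to 80 or 128 bits per bch_crc_bytes[].
 */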
struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
                              struct nonce nonce, const void *data, size_t len)
{
        switch (type) {
        case BCH_CSUM_none:
        case BCH_CSUM_crc32c_nonzero:
        case BCH_CSUM_crc64_nonzero:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_xxhash:
        case BCH_CSUM_crc64: {
                struct bch2_checksum_state state;

                state.type = type;

                bch2_checksum_init(&state);
                bch2_checksum_update(&state, data, len);

                return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
        }

        case BCH_CSUM_chacha20_poly1305_80:
        case BCH_CSUM_chacha20_poly1305_128: {
                SHASH_DESC_ON_STACK(desc, c->poly1305);
                u8 digest[POLY1305_DIGEST_SIZE];
                struct bch_csum ret = { 0 };

                gen_poly_key(c, desc, nonce);

                crypto_shash_update(desc, data, len);
                crypto_shash_final(desc, digest);

                memcpy(&ret, digest, bch_crc_bytes[type]);
                return ret;
        }
        default:
                return (struct bch_csum) {};
        }
}

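/*
 * Encrypt a linear buffer in place, but only if the checksum type implies
 * encryption; plain checksum types are a no-op.
 */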
int bch2_encrypt(struct bch_fs *c, unsigned type,
                  struct nonce nonce, void *data, size_t len)
{
        if (!bch2_csum_type_is_encryption(type))
                return 0;

        return do_encrypt(c->chacha20, nonce, data, len);
}

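/*
 * Checksum the bio data covered by @iter, advancing the iterator. With
 * CONFIG_HIGHMEM each segment is temporarily kmapped; otherwise bvecs are
 * accessed directly through page_address().
 */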
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
                                           struct nonce nonce, struct bio *bio,
                                           struct bvec_iter *iter)
{
        struct bio_vec bv;

        switch (type) {
        case BCH_CSUM_none:
                return (struct bch_csum) { 0 };
        case BCH_CSUM_crc32c_nonzero:
        case BCH_CSUM_crc64_nonzero:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_xxhash:
        case BCH_CSUM_crc64: {
                struct bch2_checksum_state state;

                state.type = type;
                bch2_checksum_init(&state);

#ifdef CONFIG_HIGHMEM
                __bio_for_each_segment(bv, bio, *iter, *iter) {
                        void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

                        bch2_checksum_update(&state, p, bv.bv_len);
                        kunmap_local(p);
                }
#else
                __bio_for_each_bvec(bv, bio, *iter, *iter)
                        bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
                                bv.bv_len);
#endif
                return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
        }

        case BCH_CSUM_chacha20_poly1305_80:
        case BCH_CSUM_chacha20_poly1305_128: {
                SHASH_DESC_ON_STACK(desc, c->poly1305);
                u8 digest[POLY1305_DIGEST_SIZE];
                struct bch_csum ret = { 0 };

                gen_poly_key(c, desc, nonce);

#ifdef CONFIG_HIGHMEM
                __bio_for_each_segment(bv, bio, *iter, *iter) {
                        void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

                        crypto_shash_update(desc, p, bv.bv_len);
                        kunmap_local(p);
                }
#else
                __bio_for_each_bvec(bv, bio, *iter, *iter)
                        crypto_shash_update(desc,
                                page_address(bv.bv_page) + bv.bv_offset,
                                bv.bv_len);
#endif
                crypto_shash_final(desc, digest);

                memcpy(&ret, digest, bch_crc_bytes[type]);
                return ret;
        }
        default:
                return (struct bch_csum) {};
        }
}

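/* Checksum the entire data payload of @bio. */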
struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
                                  struct nonce nonce, struct bio *bio)
{
        struct bvec_iter iter = bio->bi_iter;

        return __bch2_checksum_bio(c, type, nonce, bio, &iter);
}

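/*
 * Encrypt a bio in place, batching up to 16 bvecs into an on-stack
 * scatterlist per do_encrypt_sg() call and advancing the nonce by the bytes
 * already processed between batches.
 */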
int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
                     struct nonce nonce, struct bio *bio)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        struct scatterlist sgl[16], *sg = sgl;
        size_t bytes = 0;
        int ret = 0;

        if (!bch2_csum_type_is_encryption(type))
                return 0;

        sg_init_table(sgl, ARRAY_SIZE(sgl));

        bio_for_each_segment(bv, bio, iter) {
                if (sg == sgl + ARRAY_SIZE(sgl)) {
                        sg_mark_end(sg - 1);

                        ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
                        if (ret)
                                return ret;

                        nonce = nonce_add(nonce, bytes);
                        bytes = 0;

                        sg_init_table(sgl, ARRAY_SIZE(sgl));
                        sg = sgl;
                }

                sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
                bytes += bv.bv_len;
        }

        if (sg != sgl) {
                sg_mark_end(sg - 1);
                return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
        }

        return ret;
}

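/*
 * Combine the checksums of two adjacent regions: @a is extended over @b_len
 * zero bytes and then xored with @b, which for the mergeable (CRC-style)
 * checksum types yields the checksum of the concatenated data.
 */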
struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
                                    struct bch_csum b, size_t b_len)
{
        struct bch2_checksum_state state;

        state.type = type;
        bch2_checksum_init(&state);
        state.seed = le64_to_cpu(a.lo);

        BUG_ON(!bch2_checksum_mergeable(type));

        while (b_len) {
                unsigned page_len = min_t(unsigned, b_len, PAGE_SIZE);

                bch2_checksum_update(&state,
                                page_address(ZERO_PAGE(0)), page_len);
                b_len -= page_len;
        }
        a.lo = cpu_to_le64(bch2_checksum_final(&state));
        a.lo ^= b.lo;
        a.hi ^= b.hi;
        return a;
}

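/*
 * Recompute checksums when an extent is split and/or its checksum type
 * changes: checksum the len_a/len_b (and remaining) portions, cross-check the
 * result against crc_old.csum - by merging the pieces when the type allows,
 * otherwise by rechecksumming the whole bio with the old type - then fill in
 * *crc_a and *crc_b.
 */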
int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
                        struct bversion version,
                        struct bch_extent_crc_unpacked crc_old,
                        struct bch_extent_crc_unpacked *crc_a,
                        struct bch_extent_crc_unpacked *crc_b,
                        unsigned len_a, unsigned len_b,
                        unsigned new_csum_type)
{
        struct bvec_iter iter = bio->bi_iter;
        struct nonce nonce = extent_nonce(version, crc_old);
        struct bch_csum merged = { 0 };
        struct crc_split {
                struct bch_extent_crc_unpacked  *crc;
                unsigned                        len;
                unsigned                        csum_type;
                struct bch_csum                 csum;
        } splits[3] = {
                { crc_a, len_a, new_csum_type, { 0 }},
                { crc_b, len_b, new_csum_type, { 0 } },
                { NULL,  bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },
        }, *i;
        bool mergeable = crc_old.csum_type == new_csum_type &&
                bch2_checksum_mergeable(new_csum_type);
        unsigned crc_nonce = crc_old.nonce;

        BUG_ON(len_a + len_b > bio_sectors(bio));
        BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
        BUG_ON(crc_is_compressed(crc_old));
        BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
               bch2_csum_type_is_encryption(new_csum_type));

        for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
                iter.bi_size = i->len << 9;
                if (mergeable || i->crc)
                        i->csum = __bch2_checksum_bio(c, i->csum_type,
                                                      nonce, bio, &iter);
                else
                        bio_advance_iter(bio, &iter, i->len << 9);
                nonce = nonce_add(nonce, i->len << 9);
        }

        if (mergeable)
                for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
                        merged = bch2_checksum_merge(new_csum_type, merged,
                                                     i->csum, i->len << 9);
        else
                merged = bch2_checksum_bio(c, crc_old.csum_type,
                                extent_nonce(version, crc_old), bio);

        if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
                struct printbuf buf = PRINTBUF;
                prt_printf(&buf, "checksum error in %s() (memory corruption or bug?)\n"
                           "expected %0llx:%0llx got %0llx:%0llx (old type ",
                           __func__,
                           crc_old.csum.hi,
                           crc_old.csum.lo,
                           merged.hi,
                           merged.lo);
                bch2_prt_csum_type(&buf, crc_old.csum_type);
                prt_str(&buf, " new type ");
                bch2_prt_csum_type(&buf, new_csum_type);
                prt_str(&buf, ")");
                bch_err(c, "%s", buf.buf);
                printbuf_exit(&buf);
                return -EIO;
        }

        for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
                if (i->crc)
                        *i->crc = (struct bch_extent_crc_unpacked) {
                                .csum_type              = i->csum_type,
                                .compression_type       = crc_old.compression_type,
                                .compressed_size        = i->len,
                                .uncompressed_size      = i->len,
                                .offset                 = 0,
                                .live_size              = i->len,
                                .nonce                  = crc_nonce,
                                .csum                   = i->csum,
                        };

                if (bch2_csum_type_is_encryption(new_csum_type))
                        crc_nonce += i->len;
        }

        return 0;
}

/* BCH_SB_FIELD_crypt: */

static int bch2_sb_crypt_validate(struct bch_sb *sb, struct bch_sb_field *f,
                                  enum bch_validate_flags flags, struct printbuf *err)
{
        struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

        if (vstruct_bytes(&crypt->field) < sizeof(*crypt)) {
                prt_printf(err, "wrong size (got %zu should be %zu)",
                       vstruct_bytes(&crypt->field), sizeof(*crypt));
                return -BCH_ERR_invalid_sb_crypt;
        }

        if (BCH_CRYPT_KDF_TYPE(crypt)) {
                prt_printf(err, "bad kdf type %llu", BCH_CRYPT_KDF_TYPE(crypt));
                return -BCH_ERR_invalid_sb_crypt;
        }

        return 0;
}

static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
                                  struct bch_sb_field *f)
{
        struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

        prt_printf(out, "KDF:               %llu\n", BCH_CRYPT_KDF_TYPE(crypt));
        prt_printf(out, "scrypt n:          %llu\n", BCH_KDF_SCRYPT_N(crypt));
        prt_printf(out, "scrypt r:          %llu\n", BCH_KDF_SCRYPT_R(crypt));
        prt_printf(out, "scrypt p:          %llu\n", BCH_KDF_SCRYPT_P(crypt));
}

const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
        .validate       = bch2_sb_crypt_validate,
        .to_text        = bch2_sb_crypt_to_text,
};

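/*
 * Look up the raw filesystem key by description: in the kernel via
 * request_key() on the "user" key type, in userspace (bcachefs-tools) via
 * keyutils.
 */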
#ifdef __KERNEL__
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
        struct key *keyring_key;
        const struct user_key_payload *ukp;
        int ret;

        keyring_key = request_key(&key_type_user, key_description, NULL);
        if (IS_ERR(keyring_key))
                return PTR_ERR(keyring_key);

        down_read(&keyring_key->sem);
        ukp = dereference_key_locked(keyring_key);
        if (ukp->datalen == sizeof(*key)) {
                memcpy(key, ukp->data, ukp->datalen);
                ret = 0;
        } else {
                ret = -EINVAL;
        }
        up_read(&keyring_key->sem);
        key_put(keyring_key);

        return ret;
}
#else
#include <keyutils.h>

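/* Userspace variant: try the session, user and user-session keyrings in turn. */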
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
        key_serial_t key_id;

        key_id = request_key("user", key_description, NULL,
                             KEY_SPEC_SESSION_KEYRING);
        if (key_id >= 0)
                goto got_key;

        key_id = request_key("user", key_description, NULL,
                             KEY_SPEC_USER_KEYRING);
        if (key_id >= 0)
                goto got_key;

        key_id = request_key("user", key_description, NULL,
                             KEY_SPEC_USER_SESSION_KEYRING);
        if (key_id >= 0)
                goto got_key;

        return -errno;
got_key:

        if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
                return -1;

        return 0;
}

#include "crypto.h"
#endif

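/*
 * Fetch the key named "bcachefs:<user uuid>" from the keyring; the userspace
 * build falls back to prompting for the passphrase and deriving the key from
 * it.
 */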
int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
        struct printbuf key_description = PRINTBUF;
        int ret;

        prt_printf(&key_description, "bcachefs:");
        pr_uuid(&key_description, sb->user_uuid.b);

        ret = __bch2_request_key(key_description.buf, key);
        printbuf_exit(&key_description);

#ifndef __KERNEL__
        if (ret) {
                char *passphrase = read_passphrase("Enter passphrase: ");
                struct bch_encrypted_key sb_key;

                bch2_passphrase_check(sb, passphrase,
                                      key, &sb_key);
                ret = 0;
        }
#endif

        /* stash with memfd, pass memfd fd to mount */

        return ret;
}

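/* Userspace only: revoke this filesystem's cached key from the user keyring. */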
#ifndef __KERNEL__
int bch2_revoke_key(struct bch_sb *sb)
{
        key_serial_t key_id;
        struct printbuf key_description = PRINTBUF;

        prt_printf(&key_description, "bcachefs:");
        pr_uuid(&key_description, sb->user_uuid.b);

        key_id = request_key("user", key_description.buf, NULL, KEY_SPEC_USER_KEYRING);
        printbuf_exit(&key_description);
        if (key_id < 0)
                return errno;

        keyctl_revoke(key_id);

        return 0;
}
#endif

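/*
 * Extract the filesystem key from the superblock crypt field: if the stored
 * key is itself encrypted, fetch the user's key from the keyring and use it
 * to unwrap the stored key, checking the magic to detect a wrong key.
 */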
int bch2_decrypt_sb_key(struct bch_fs *c,
                        struct bch_sb_field_crypt *crypt,
                        struct bch_key *key)
{
        struct bch_encrypted_key sb_key = crypt->key;
        struct bch_key user_key;
        int ret = 0;

        /* is key encrypted? */
        if (!bch2_key_is_encrypted(&sb_key))
                goto out;

        ret = bch2_request_key(c->disk_sb.sb, &user_key);
        if (ret) {
                bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
                goto err;
        }

        /* decrypt real key: */
        ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
                                      &sb_key, sizeof(sb_key));
        if (ret)
                goto err;

        if (bch2_key_is_encrypted(&sb_key)) {
                bch_err(c, "incorrect encryption key");
                ret = -EINVAL;
                goto err;
        }
out:
        *key = sb_key.key;
err:
        memzero_explicit(&sb_key, sizeof(sb_key));
        memzero_explicit(&user_key, sizeof(user_key));
        return ret;
}

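/* Allocate the chacha20 and poly1305 transforms, if not already allocated. */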
static int bch2_alloc_ciphers(struct bch_fs *c)
{
        if (c->chacha20)
                return 0;

        struct crypto_sync_skcipher *chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
        int ret = PTR_ERR_OR_ZERO(chacha20);
        if (ret) {
                bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
                return ret;
        }

        struct crypto_shash *poly1305 = crypto_alloc_shash("poly1305", 0, 0);
        ret = PTR_ERR_OR_ZERO(poly1305);
        if (ret) {
                bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
                crypto_free_sync_skcipher(chacha20);
                return ret;
        }

        c->chacha20     = chacha20;
        c->poly1305     = poly1305;
        return 0;
}

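/*
 * Store the superblock key back in the clear and clear
 * BCH_SB_ENCRYPTION_TYPE; bails out early if the stored key is itself
 * encrypted.
 */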
int bch2_disable_encryption(struct bch_fs *c)
{
        struct bch_sb_field_crypt *crypt;
        struct bch_key key;
        int ret = -EINVAL;

        mutex_lock(&c->sb_lock);

        crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
        if (!crypt)
                goto out;

        /* is key encrypted? */
        ret = 0;
        if (bch2_key_is_encrypted(&crypt->key))
                goto out;

        ret = bch2_decrypt_sb_key(c, crypt, &key);
        if (ret)
                goto out;

        crypt->key.magic        = cpu_to_le64(BCH_KEY_MAGIC);
        crypt->key.key          = key;

        SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
        bch2_write_super(c);
out:
        mutex_unlock(&c->sb_lock);

        return ret;
}

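/*
 * Generate a fresh random encryption key, optionally (@keyed) wrap it with
 * the user's keyring key, program it into the chacha20 tfm, store it in a new
 * superblock crypt field and set BCH_SB_ENCRYPTION_TYPE.
 */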
int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
        struct bch_encrypted_key key;
        struct bch_key user_key;
        struct bch_sb_field_crypt *crypt;
        int ret = -EINVAL;

        mutex_lock(&c->sb_lock);

        /* Do we already have an encryption key? */
        if (bch2_sb_field_get(c->disk_sb.sb, crypt))
                goto err;

        ret = bch2_alloc_ciphers(c);
        if (ret)
                goto err;

        key.magic = cpu_to_le64(BCH_KEY_MAGIC);
        get_random_bytes(&key.key, sizeof(key.key));

        if (keyed) {
                ret = bch2_request_key(c->disk_sb.sb, &user_key);
                if (ret) {
                        bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
                        goto err;
                }

                ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
                                              &key, sizeof(key));
                if (ret)
                        goto err;
        }

        ret = crypto_skcipher_setkey(&c->chacha20->base,
                        (void *) &key.key, sizeof(key.key));
        if (ret)
                goto err;

        crypt = bch2_sb_field_resize(&c->disk_sb, crypt,
                                     sizeof(*crypt) / sizeof(u64));
        if (!crypt) {
                ret = -BCH_ERR_ENOSPC_sb_crypt;
                goto err;
        }

        crypt->key = key;

        /* write superblock */
        SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
        bch2_write_super(c);
err:
        mutex_unlock(&c->sb_lock);
        memzero_explicit(&user_key, sizeof(user_key));
        memzero_explicit(&key, sizeof(key));
        return ret;
}

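/* Free any crypto transforms allocated for this filesystem. */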
void bch2_fs_encryption_exit(struct bch_fs *c)
{
        if (c->poly1305)
                crypto_free_shash(c->poly1305);
        if (c->chacha20)
                crypto_free_sync_skcipher(c->chacha20);
        if (c->sha256)
                crypto_free_shash(c->sha256);
}

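/*
 * Allocate the sha256 transform; if the superblock carries a crypt field,
 * also allocate the chacha20/poly1305 ciphers, fetch and decrypt the
 * encryption key, and program it into the chacha20 tfm.
 */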
int bch2_fs_encryption_init(struct bch_fs *c)
{
        struct bch_sb_field_crypt *crypt;
        struct bch_key key;
        int ret = 0;

        c->sha256 = crypto_alloc_shash("sha256", 0, 0);
        ret = PTR_ERR_OR_ZERO(c->sha256);
        if (ret) {
                c->sha256 = NULL;
                bch_err(c, "error requesting sha256 module: %s", bch2_err_str(ret));
                goto out;
        }

        crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
        if (!crypt)
                goto out;

        ret = bch2_alloc_ciphers(c);
        if (ret)
                goto out;

        ret = bch2_decrypt_sb_key(c, crypt, &key);
        if (ret)
                goto out;

        ret = crypto_skcipher_setkey(&c->chacha20->base,
                        (void *) &key.key, sizeof(key.key));
        if (ret)
                goto out;
out:
        memzero_explicit(&key, sizeof(key));
        return ret;
}