b24413180f560 (Greg Kroah-Hartman 2017-11-01 15:07:57 +0100 1) // SPDX-License-Identifier: GPL-2.0
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 2) /*
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 3) * This contains encryption functions for per-file encryption.
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 4) *
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 5) * Copyright (C) 2015, Google, Inc.
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 6) * Copyright (C) 2015, Motorola Mobility
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 7) *
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 8) * Written by Michael Halcrow, 2014.
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 9) *
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 10) * Filename encryption additions
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 11) * Uday Savagaonkar, 2014
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 12) * Encryption policy handling additions
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 13) * Ildar Muslukhov, 2014
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 14) * Add fscrypt_pullback_bio_page()
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 15) * Jaegeuk Kim, 2015.
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 16) *
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 17) * This has not yet undergone a rigorous security audit.
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 18) *
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 19) * The usage of AES-XTS should conform to recommendations in NIST
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 20) * Special Publication 800-38E and IEEE P1619/D16.
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 21) */
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 22)
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 23) #include <linux/pagemap.h>
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 24) #include <linux/module.h>
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 25) #include <linux/bio.h>
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 26) #include <linux/namei.h>
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 27) #include "fscrypt_private.h"
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 28)
1565bdad59e97 (Eric Biggers 2019-10-09 16:34:17 -0700 29) void fscrypt_decrypt_bio(struct bio *bio)
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 30) {
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 31) struct bio_vec *bv;
6dc4f100c175d (Ming Lei 2019-02-15 19:13:19 +0800 32) struct bvec_iter_all iter_all;
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 33)
2b070cfe582b8 (Christoph Hellwig 2019-04-25 09:03:00 +0200 34) bio_for_each_segment_all(bv, bio, iter_all) {
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 35) struct page *page = bv->bv_page;
ffceeefb337b3 (Eric Biggers 2019-05-20 09:29:48 -0700 36) int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
ffceeefb337b3 (Eric Biggers 2019-05-20 09:29:48 -0700 37) bv->bv_offset);
ff5d3a97075c6 (Eric Biggers 2019-03-15 14:16:32 -0700 38) if (ret)
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 39) SetPageError(page);
58ae74683ae2c (Richard Weinberger 2016-12-19 12:25:32 +0100 40) }
0cb8dae4a0df2 (Eric Biggers 2018-04-18 11:09:47 -0700 41) }
0cb8dae4a0df2 (Eric Biggers 2018-04-18 11:09:47 -0700 42) EXPORT_SYMBOL(fscrypt_decrypt_bio);
0cb8dae4a0df2 (Eric Biggers 2018-04-18 11:09:47 -0700 43)
/*
 * fscrypt_zeroout_range_inline_crypt() - zero out blocks using inline crypto
 * @inode: the file whose blocks are being zeroed
 * @lblk: first logical block to zero out
 * @pblk: first physical block to zero out
 * @len: number of blocks to zero out
 *
 * Inline-crypto variant of fscrypt_zeroout_range(): the hardware encrypts the
 * data as it is written, so we can repeatedly submit ZERO_PAGE(0) instead of
 * pre-encrypting bounce pages in software.  A single bio is allocated up
 * front and reused (via bio_reset()) across as many submissions as needed.
 *
 * Return: 0 on success; -errno on failure.
 */
static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
					      pgoff_t lblk, sector_t pblk,
					      unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
	struct bio *bio;
	int ret, err = 0;
	int num_pages = 0;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);

	while (len) {
		unsigned int blocks_this_page = min(len, blocks_per_page);
		unsigned int bytes_this_page = blocks_this_page << blockbits;

		/*
		 * num_pages == 0 means the bio is empty (freshly allocated or
		 * just bio_reset()), so (re)initialize it: attach the inline
		 * crypt context for the current logical block, then set the
		 * target device, starting sector, and write op.
		 */
		if (num_pages == 0) {
			fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
			bio_set_dev(bio, inode->i_sb->s_bdev);
			bio->bi_iter.bi_sector =
					pblk << (blockbits - SECTOR_SHIFT);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		}
		/* The hardware encrypts in-line, so plain zeroes suffice. */
		ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
		if (WARN_ON(ret != bytes_this_page)) {
			/* Shouldn't happen: the bio was sized for BIO_MAX_VECS. */
			err = -EIO;
			goto out;
		}
		num_pages++;
		len -= blocks_this_page;
		lblk += blocks_this_page;
		pblk += blocks_this_page;
		/*
		 * Submit now if the bio is full, we've covered the whole
		 * range, or the *next* block's crypt context (DUN) can't be
		 * merged into this bio.  Note lblk has already been advanced
		 * to the next block at this point.
		 */
		if (num_pages == BIO_MAX_VECS || !len ||
		    !fscrypt_mergeable_bio(bio, inode, lblk)) {
			err = submit_bio_wait(bio);
			if (err)
				goto out;
			/* Recycle the bio for the next batch of pages. */
			bio_reset(bio);
			num_pages = 0;
		}
	}
out:
	bio_put(bio);
	return err;
}
5fee36095cda4 (Satya Tangirala 2020-07-02 01:56:05 +0000 90)
/**
 * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file
 * @inode: the file's inode
 * @lblk: the first file logical block to zero out
 * @pblk: the first filesystem physical block to zero out
 * @len: number of blocks to zero out
 *
 * Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write
 * ciphertext blocks which decrypt to the all-zeroes block. The blocks must be
 * both logically and physically contiguous. It's also assumed that the
 * filesystem only uses a single block device, ->s_bdev.
 *
 * Note that since each block uses a different IV, this involves writing a
 * different ciphertext to each block; we can't simply reuse the same one.
 *
 * Return: 0 on success; -errno on failure.
 */
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
			  sector_t pblk, unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
	const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
	struct page *pages[16]; /* write up to 16 pages at a time */
	unsigned int nr_pages;
	unsigned int i;
	unsigned int offset;
	struct bio *bio;
	int ret, err;

	if (len == 0)
		return 0;

	/* With inline crypto the hardware encrypts, so no bounce pages. */
	if (fscrypt_inode_uses_inline_crypto(inode))
		return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk,
							  len);

	BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
	/* Number of pages actually needed for @len blocks, capped at 16. */
	nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
			 (len + blocks_per_page - 1) >> blocks_per_page_bits);

	/*
	 * We need at least one page for ciphertext. Allocate the first one
	 * from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail.
	 *
	 * Any additional page allocations are allowed to fail, as they only
	 * help performance, and waiting on the mempool for them could deadlock.
	 */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS :
						     GFP_NOWAIT | __GFP_NOWARN);
		if (!pages[i])
			break;
	}
	/* Use however many pages we actually got (at least 1 per above). */
	nr_pages = i;
	if (WARN_ON(nr_pages <= 0))
		return -EINVAL;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(GFP_NOFS, nr_pages);

	/* Outer loop: one bio submission per batch of up to nr_pages pages. */
	do {
		bio_set_dev(bio, inode->i_sb->s_bdev);
		/* 9 == SECTOR_SHIFT: convert block number to 512-byte sectors. */
		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		i = 0;
		offset = 0;
		/*
		 * Inner loop: encrypt one all-zeroes block at a time into the
		 * bounce pages.  Each block needs its own ciphertext since the
		 * IV depends on lblk.  A page is added to the bio once it is
		 * full (offset == PAGE_SIZE) or the range is exhausted.
		 */
		do {
			err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
						  ZERO_PAGE(0), pages[i],
						  blocksize, offset, GFP_NOFS);
			if (err)
				goto out;
			lblk++;
			pblk++;
			len--;
			offset += blocksize;
			if (offset == PAGE_SIZE || len == 0) {
				ret = bio_add_page(bio, pages[i++], offset, 0);
				if (WARN_ON(ret != offset)) {
					/* Shouldn't happen: bio sized for nr_pages. */
					err = -EIO;
					goto out;
				}
				offset = 0;
			}
		} while (i != nr_pages && len != 0);

		err = submit_bio_wait(bio);
		if (err)
			goto out;
		/* Recycle the bio (and reuse the bounce pages) for the next batch. */
		bio_reset(bio);
	} while (len != 0);
	err = 0;
out:
	bio_put(bio);
	for (i = 0; i < nr_pages; i++)
		fscrypt_free_bounce_page(pages[i]);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);