// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

/*
 * Structure allocated for each page or THP when block size < page size
 * to track sub-page uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct page *page)
{
	/*
	 * per-block data is stored in the head page.  Callers should
	 * not be dealing with tail pages (and if they are, they can
	 * call thp_head() first).
	 */
	VM_BUG_ON_PGFLAGS(PageTail(page), page);

	if (page_has_private(page))
		return (struct iomap_page *)page_private(page);
	return NULL;
}

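/*
 * Bio set used for the writeback ioend bios; it is initialised at boot
 * via iomap_init() elsewhere in this file.
 */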
static struct bio_set iomap_ioend_bioset;

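/*
 * Attach a struct iomap_page to @page if the page spans more than one
 * filesystem block, so that sub-page uptodate state and in-flight I/O
 * can be tracked.  For block size >= page size no tracking is needed
 * and NULL is returned.
 */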
static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);
	unsigned int nr_blocks = i_blocks_per_page(inode, page);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (PageUptodate(page))
		bitmap_fill(iop->uptodate, nr_blocks);
	attach_page_private(page, iop);
	return iop;
}

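/*
 * Detach and free the per-page tracking structure.  By this point no
 * reads or writes may still be in flight, and the uptodate bitmap must
 * agree with the page-level uptodate flag; both are asserted below.
 */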
static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = detach_page_private(page);
	unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			PageUptodate(page));
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

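/*
 * The uptodate bitmap is updated both from process context (buffered
 * writes) and from bio completion, which may run in interrupt context,
 * hence the irq-safe locking below.
 */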
static void
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
		SetPageUptodate(page);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	if (PageError(page))
		return;

	if (page_has_private(page))
		iomap_iop_set_range_uptodate(page, off, len);
	else
		SetPageUptodate(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
		unlock_page(page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

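/*
 * Inline data only ever occupies the first page of a file, so copy it
 * into page 0 and zero the remainder of the page; the BUG_ONs below
 * assert both assumptions.
 */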
static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

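/*
 * Reads from any mapping that is not IOMAP_MAPPED (e.g. a hole or an
 * unwritten extent), from a freshly allocated (IOMAP_F_NEW) block, or
 * from beyond EOF must be satisfied with zeroes rather than with data
 * read from the device.
 */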
static inline bool iomap_block_needs_zeroing(struct inode *inode,
		struct iomap *iomap, loff_t pos)
{
	return iomap->type != IOMAP_MAPPED ||
		(iomap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(inode);
}

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;
	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);

	/* Try to merge into a previous segment if we can */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
		if (__bio_try_merge_page(ctx->bio, page, plen, poff,
				&same_page))
			goto done;
		is_contig = true;
	}

	if (!is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_readpage does.
		 */
		if (!ctx->bio)
			ctx->bio = bio_alloc(orig_gfp, 1);
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

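/*
 * iomap_readpage() implements the ->readpage address_space operation
 * for iomap based filesystems.
 */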
int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	trace_iomap_readpage(page->mapping->host, 1);

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readahead and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

static loff_t
iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = readahead_page(ctx->rac);
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap, srcmap);
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (e.g. to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct inode *inode = rac->mapping->host;
	loff_t pos = readahead_pos(rac);
	size_t length = readahead_length(rac);
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(inode, readahead_count(rac));

	while (length > 0) {
		ssize_t ret = iomap_apply(inode, pos, length, 0, ops,
				&ctx, iomap_readahead_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			break;
		}
		pos += ret;
		length -= ret;
	}

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);

/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks corresponding to the portion of the file
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	trace_iomap_releasepage(page->mapping->host, page_offset(page),
			PAGE_SIZE);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have
	 * had the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	trace_iomap_invalidatepage(page->mapping->host, offset, len);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

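	/* Transfer the per-page block tracking structure to the new page. */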
	if (page_has_private(page))
		attach_page_private(newpage, detach_page_private(page));

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

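/*
 * Flags for __iomap_write_begin().  IOMAP_WRITE_F_UNSHARE is passed in
 * by the unshare path to force reading blocks from the source mapping
 * even when a plain write would skip them because they are about to be
 * completely overwritten.
 */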
enum {
	IOMAP_WRITE_F_UNSHARE	= (1 << 0),
};

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

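/*
 * Synchronously read one block-aligned range of the page from the
 * mapping described by @iomap, using an on-stack bio.
 */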
static int
iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
		unsigned plen, struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

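/*
 * Prepare the page for a write: any block that will not be completely
 * overwritten has to be brought uptodate first, either by zeroing (for
 * holes, unwritten extents and ranges beyond EOF) or by a synchronous
 * read, so that the copy-in cannot expose stale data.
 */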
static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
		struct page *page, struct iomap *srcmap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;

	if (PageUptodate(page))
		return 0;
	ClearPageError(page);

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(flags & IOMAP_WRITE_F_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(inode, srcmap, block_start)) {
			if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
				return -EIO;
			zero_user_segments(page, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_page_sync(block_start, page,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(page, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);
	if (srcmap != iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
			AOP_FLAG_NOFS);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (srcmap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, srcmap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(inode, pos, len, flags, page,
				srcmap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page's memcg migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 668) static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 669) size_t copied, struct page *page)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 670) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 671) flush_dcache_page(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 672)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 673) /*
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 674) * The blocks that were entirely written will now be uptodate, so we
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 675) * don't have to worry about a readpage reading them and overwriting a
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 676) * partial write. However if we have encountered a short write and only
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 677) * partially written into a block, it will not be marked uptodate, so a
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 678) * readpage might come in and destroy our partial write.
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 679) *
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 680) 	 * Do the simplest thing, and just treat any short write to a
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 681) 	 * non-uptodate page as a zero-length write, and force the caller to redo
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 682) * the whole thing.
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 683) */
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 684) if (unlikely(copied < len && !PageUptodate(page)))
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 685) return 0;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 686) iomap_set_range_uptodate(page, offset_in_page(pos), len);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 687) iomap_set_page_dirty(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 688) return copied;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 689) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 690)
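/*
 * For inline mappings the file data lives inside the iomap itself, so copy
 * the bytes just written into the page cache back into iomap->inline_data
 * and dirty the inode; the filesystem writes the data out with the inode.
 */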
e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 691) static size_t iomap_write_end_inline(struct inode *inode, struct page *page,
e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 692) struct iomap *iomap, loff_t pos, size_t copied)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 693) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 694) void *addr;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 695)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 696) WARN_ON_ONCE(!PageUptodate(page));
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 697) BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 698)
7ed3cd1a69e3a (Matthew Wilcox (Oracle) 2020-09-21 08:58:38 -0700 699) flush_dcache_page(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 700) addr = kmap_atomic(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 701) memcpy(iomap->inline_data + pos, addr + pos, copied);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 702) kunmap_atomic(addr);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 703)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 704) mark_inode_dirty(inode);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 705) return copied;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 706) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 707)
e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 708) /* Returns the number of bytes copied. May be 0. Cannot be an errno. */
e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 709) static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 710) size_t copied, struct page *page, struct iomap *iomap,
e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 711) struct iomap *srcmap)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 712) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 713) const struct iomap_page_ops *page_ops = iomap->page_ops;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 714) loff_t old_size = inode->i_size;
e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 715) size_t ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 716)
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 717) if (srcmap->type == IOMAP_INLINE) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 718) ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 719) } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 720) ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 721) page, NULL);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 722) } else {
c12d6fa88d090 (Christoph Hellwig 2019-10-18 16:40:57 -0700 723) ret = __iomap_write_end(inode, pos, len, copied, page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 724) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 725)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 726) /*
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 727) * Update the in-memory inode size after copying the data into the page
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 728) * cache. It's up to the file system to write the updated size to disk,
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 729) * preferably after I/O completion so that no stale data is exposed.
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 730) */
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 731) if (pos + ret > old_size) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 732) i_size_write(inode, pos + ret);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 733) iomap->flags |= IOMAP_F_SIZE_CHANGED;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 734) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 735) unlock_page(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 736)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 737) if (old_size < pos)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 738) pagecache_isize_extended(inode, old_size, pos);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 739) if (page_ops && page_ops->page_done)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 740) page_ops->page_done(inode, pos, ret, page, iomap);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 741) put_page(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 742)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 743) if (ret < len)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 744) iomap_write_failed(inode, pos, len);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 745) return ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 746) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 747)
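/*
 * Write actor for buffered writes: walk the range a page at a time, fault
 * in the source user pages, copy them into the page cache with an atomic
 * usercopy and commit each chunk via iomap_write_end(), falling back to a
 * single-segment copy if no bytes could be copied at all.
 */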
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 748) static loff_t
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 749) iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 750) struct iomap *iomap, struct iomap *srcmap)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 751) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 752) struct iov_iter *i = data;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 753) long status = 0;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 754) ssize_t written = 0;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 755)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 756) do {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 757) struct page *page;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 758) unsigned long offset; /* Offset into pagecache page */
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 759) unsigned long bytes; /* Bytes to write to page */
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 760) size_t copied; /* Bytes copied from user */
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 761)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 762) offset = offset_in_page(pos);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 763) bytes = min_t(unsigned long, PAGE_SIZE - offset,
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 764) iov_iter_count(i));
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 765) again:
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 766) if (bytes > length)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 767) bytes = length;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 768)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 769) /*
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 770) * Bring in the user page that we will copy from _first_.
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 771) * Otherwise there's a nasty deadlock on copying from the
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 772) * same page as we're writing to, without it being marked
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 773) * up-to-date.
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 774) *
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 775) * Not only is this an optimisation, but it is also required
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 776) 		 * to check that the address is actually valid when atomic
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 777) 		 * usercopies are used below.
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 778) */
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 779) if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 780) status = -EFAULT;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 781) break;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 782) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 783)
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 784) status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 785) srcmap);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 786) if (unlikely(status))
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 787) break;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 788)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 789) if (mapping_writably_mapped(inode->i_mapping))
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 790) flush_dcache_page(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 791)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 792) copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 793)
e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 794) copied = iomap_write_end(inode, pos, bytes, copied, page, iomap,
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 795) srcmap);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 796)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 797) cond_resched();
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 798)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 799) iov_iter_advance(i, copied);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 800) if (unlikely(copied == 0)) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 801) /*
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 802) * If we were unable to copy any data at all, we must
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 803) * fall back to a single segment length write.
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 804) *
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 805) 			 * If we didn't fall back here, we could livelock
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 806) * because not all segments in the iov can be copied at
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 807) * once without a pagefault.
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 808) */
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 809) bytes = min_t(unsigned long, PAGE_SIZE - offset,
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 810) iov_iter_single_seg_count(i));
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 811) goto again;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 812) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 813) pos += copied;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 814) written += copied;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 815) length -= copied;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 816)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 817) balance_dirty_pages_ratelimited(inode->i_mapping);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 818) } while (iov_iter_count(i) && length);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 819)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 820) return written ? written : status;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 821) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 822)
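/*
 * Entry point for buffered writes.  A filesystem would typically call this
 * from its ->write_iter() method with its own locks held, e.g. (sketch,
 * where "myfs_iomap_ops" stands in for the filesystem's iomap_ops):
 *
 *	written = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
 *	if (written > 0)
 *		iocb->ki_pos += written;
 *
 * Note that iocb->ki_pos is not advanced here; that is left to the caller.
 */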
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 823) ssize_t
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 824) iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 825) const struct iomap_ops *ops)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 826) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 827) struct inode *inode = iocb->ki_filp->f_mapping->host;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 828) loff_t pos = iocb->ki_pos, ret = 0, written = 0;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 829)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 830) while (iov_iter_count(iter)) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 831) ret = iomap_apply(inode, pos, iov_iter_count(iter),
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 832) IOMAP_WRITE, ops, iter, iomap_write_actor);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 833) if (ret <= 0)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 834) break;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 835) pos += ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 836) written += ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 837) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 838)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 839) return written ? written : ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 840) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 841) EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 842)
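/*
 * Unshare actor: run each shared block through a normal
 * write_begin/write_end cycle without changing its contents, so the block
 * is redirtied and writeback copies it out of the shared extent.
 */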
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 843) static loff_t
3590c4d8979bc (Christoph Hellwig 2019-10-18 16:41:34 -0700 844) iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 845) struct iomap *iomap, struct iomap *srcmap)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 846) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 847) long status = 0;
d4ff3b2ef901c (Matthew Wilcox (Oracle) 2020-06-08 20:58:29 -0700 848) loff_t written = 0;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 849)
3590c4d8979bc (Christoph Hellwig 2019-10-18 16:41:34 -0700 850) /* don't bother with blocks that are not shared to start with */
3590c4d8979bc (Christoph Hellwig 2019-10-18 16:41:34 -0700 851) if (!(iomap->flags & IOMAP_F_SHARED))
3590c4d8979bc (Christoph Hellwig 2019-10-18 16:41:34 -0700 852) return length;
3590c4d8979bc (Christoph Hellwig 2019-10-18 16:41:34 -0700 853) /* don't bother with holes or unwritten extents */
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 854) if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
3590c4d8979bc (Christoph Hellwig 2019-10-18 16:41:34 -0700 855) return length;
3590c4d8979bc (Christoph Hellwig 2019-10-18 16:41:34 -0700 856)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 857) do {
32a38a4991043 (Christoph Hellwig 2019-10-18 16:42:50 -0700 858) unsigned long offset = offset_in_page(pos);
32a38a4991043 (Christoph Hellwig 2019-10-18 16:42:50 -0700 859) unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
32a38a4991043 (Christoph Hellwig 2019-10-18 16:42:50 -0700 860) struct page *page;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 861)
32a38a4991043 (Christoph Hellwig 2019-10-18 16:42:50 -0700 862) status = iomap_write_begin(inode, pos, bytes,
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 863) IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 864) if (unlikely(status))
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 865) return status;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 866)
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 867) status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 868) srcmap);
e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 869) if (WARN_ON_ONCE(status == 0))
e25ba8cbfd16b (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 870) return -EIO;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 871)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 872) cond_resched();
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 873)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 874) pos += status;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 875) written += status;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 876) length -= status;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 877)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 878) balance_dirty_pages_ratelimited(inode->i_mapping);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 879) } while (length);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 880)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 881) return written;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 882) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 883)
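/*
 * Ensure the given file range no longer shares any blocks with another
 * file (or with itself) after a reflink.  Holes and unwritten extents are
 * skipped since they carry no shared data.
 */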
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 884) int
3590c4d8979bc (Christoph Hellwig 2019-10-18 16:41:34 -0700 885) iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 886) const struct iomap_ops *ops)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 887) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 888) loff_t ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 889)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 890) while (len) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 891) ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
3590c4d8979bc (Christoph Hellwig 2019-10-18 16:41:34 -0700 892) iomap_unshare_actor);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 893) if (ret <= 0)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 894) return ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 895) pos += ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 896) len -= ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 897) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 898)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 899) return 0;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 900) }
3590c4d8979bc (Christoph Hellwig 2019-10-18 16:41:34 -0700 901) EXPORT_SYMBOL_GPL(iomap_file_unshare);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 902)
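/*
 * Zero a range within a single page through the page cache: lock the page
 * with iomap_write_begin(), clear the bytes, and commit the result like an
 * ordinary buffered write.
 */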
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 903) static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 904) struct iomap *iomap, struct iomap *srcmap)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 905) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 906) struct page *page;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 907) int status;
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 908) unsigned offset = offset_in_page(pos);
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 909) unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 910)
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 911) status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 912) if (status)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 913) return status;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 914)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 915) zero_user(page, offset, bytes);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 916) mark_page_accessed(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 917)
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 918) return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 919) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 920)
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 921) static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 922) loff_t length, void *data, struct iomap *iomap,
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 923) struct iomap *srcmap)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 924) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 925) bool *did_zero = data;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 926) loff_t written = 0;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 927)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 928) /* already zeroed? we're done. */
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 929) if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 930) return length;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 931)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 932) do {
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 933) s64 bytes;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 934)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 935) if (IS_DAX(inode))
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 936) bytes = dax_iomap_zero(pos, length, iomap);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 937) else
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 938) bytes = iomap_zero(inode, pos, length, iomap, srcmap);
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 939) if (bytes < 0)
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 940) return bytes;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 941)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 942) pos += bytes;
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 943) length -= bytes;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 944) written += bytes;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 945) if (did_zero)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 946) *did_zero = true;
81ee8e52a71c7 (Matthew Wilcox (Oracle) 2020-09-21 08:58:42 -0700 947) } while (length > 0);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 948)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 949) return written;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 950) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 951)
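/*
 * Zero a byte range of a file.  Holes and unwritten extents already read
 * back as zeroes and are skipped by the actor; only written extents are
 * dirtied in the page cache (or, on DAX, zeroed directly in the backing
 * store).
 */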
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 952) int
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 953) iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 954) const struct iomap_ops *ops)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 955) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 956) loff_t ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 957)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 958) while (len > 0) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 959) ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 960) ops, did_zero, iomap_zero_range_actor);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 961) if (ret <= 0)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 962) return ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 963)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 964) pos += ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 965) len -= ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 966) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 967)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 968) return 0;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 969) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 970) EXPORT_SYMBOL_GPL(iomap_zero_range);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 971)
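/*
 * Zero from @pos to the end of its block.  Used when truncating so that
 * stale data in the partial tail block is not exposed if the file is later
 * extended again.
 */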
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 972) int
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 973) iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 974) const struct iomap_ops *ops)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 975) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 976) unsigned int blocksize = i_blocksize(inode);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 977) unsigned int off = pos & (blocksize - 1);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 978)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 979) /* Block boundary? Nothing to do */
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 980) if (!off)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 981) return 0;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 982) return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 983) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 984) EXPORT_SYMBOL_GPL(iomap_truncate_page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 985)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 986) static loff_t
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 987) iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
c039b99792726 (Goldwyn Rodrigues 2019-10-18 16:44:10 -0700 988) void *data, struct iomap *iomap, struct iomap *srcmap)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 989) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 990) struct page *page = data;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 991) int ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 992)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 993) if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 994) ret = __block_write_begin_int(page, pos, length, NULL, iomap);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 995) if (ret)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 996) return ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 997) block_commit_write(page, 0, length);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 998) } else {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 999) WARN_ON_ONCE(!PageUptodate(page));
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1000) iomap_page_create(inode, page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1001) set_page_dirty(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1002) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1003)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1004) return length;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1005) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1006)
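/*
 * Handle a write fault on a page cache page.  A filesystem would typically
 * hook this up through its vm_operations_struct, e.g. (sketch, again with
 * "myfs_iomap_ops" as a stand-in):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *	}
 *
 * Freeze protection (sb_start_pagefault()) and any filesystem locking are
 * the caller's responsibility.  On success the page is returned locked
 * (VM_FAULT_LOCKED).
 */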
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1007) vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1008) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1009) struct page *page = vmf->page;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1010) struct inode *inode = file_inode(vmf->vma->vm_file);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1011) unsigned long length;
243145bc43366 (Andreas Gruenbacher 2020-01-06 08:58:23 -0800 1012) loff_t offset;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1013) ssize_t ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1014)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1015) lock_page(page);
243145bc43366 (Andreas Gruenbacher 2020-01-06 08:58:23 -0800 1016) ret = page_mkwrite_check_truncate(page, inode);
243145bc43366 (Andreas Gruenbacher 2020-01-06 08:58:23 -0800 1017) if (ret < 0)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1018) goto out_unlock;
243145bc43366 (Andreas Gruenbacher 2020-01-06 08:58:23 -0800 1019) length = ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1020)
243145bc43366 (Andreas Gruenbacher 2020-01-06 08:58:23 -0800 1021) offset = page_offset(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1022) while (length > 0) {
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1023) ret = iomap_apply(inode, offset, length,
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1024) IOMAP_WRITE | IOMAP_FAULT, ops, page,
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1025) iomap_page_mkwrite_actor);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1026) if (unlikely(ret <= 0))
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1027) goto out_unlock;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1028) offset += ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1029) length -= ret;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1030) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1031)
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1032) wait_for_stable_page(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1033) return VM_FAULT_LOCKED;
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1034) out_unlock:
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1035) unlock_page(page);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1036) return block_page_mkwrite_return(ret);
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1037) }
afc51aaa22f26 (Darrick J. Wong 2019-07-15 08:50:59 -0700 1038) EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1039)
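/*
 * End writeback on one page of a completed ioend.  With sub-page blocks
 * the page remains under writeback until write_bytes_pending drops to
 * zero, i.e. until every block-sized chunk submitted from this page has
 * finished.
 */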
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1040) static void
48d64cd18b337 (Christoph Hellwig 2019-10-17 13:12:22 -0700 1041) iomap_finish_page_writeback(struct inode *inode, struct page *page,
0fb2d7209d66a (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 1042) int error, unsigned int len)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1043) {
48d64cd18b337 (Christoph Hellwig 2019-10-17 13:12:22 -0700 1044) struct iomap_page *iop = to_iomap_page(page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1045)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1046) if (error) {
48d64cd18b337 (Christoph Hellwig 2019-10-17 13:12:22 -0700 1047) SetPageError(page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1048) mapping_set_error(inode->i_mapping, -EIO);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1049) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1050)
24addd848a457 (Matthew Wilcox (Oracle) 2020-09-21 08:58:39 -0700 1051) WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
0fb2d7209d66a (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 1052) WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1053)
0fb2d7209d66a (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 1054) if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
48d64cd18b337 (Christoph Hellwig 2019-10-17 13:12:22 -0700 1055) end_page_writeback(page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1056) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1057)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1058) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1059) * We're now finished for good with this ioend structure. Update the page
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1060) * state, release holds on bios, and finally free up memory. Do not use the
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1061) * ioend after this.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1062) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1063) static void
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1064) iomap_finish_ioend(struct iomap_ioend *ioend, int error)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1065) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1066) struct inode *inode = ioend->io_inode;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1067) struct bio *bio = &ioend->io_inline_bio;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1068) struct bio *last = ioend->io_bio, *next;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1069) u64 start = bio->bi_iter.bi_sector;
c275779ff2dd5 (Zorro Lang 2019-12-04 22:59:02 -0800 1070) loff_t offset = ioend->io_offset;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1071) bool quiet = bio_flagged(bio, BIO_QUIET);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1072)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1073) for (bio = &ioend->io_inline_bio; bio; bio = next) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1074) struct bio_vec *bv;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1075) struct bvec_iter_all iter_all;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1076)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1077) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1078) * For the last bio, bi_private points to the ioend, so we
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1079) * need to explicitly end the iteration here.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1080) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1081) if (bio == last)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1082) next = NULL;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1083) else
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1084) next = bio->bi_private;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1085)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1086) 		/* walk each page in the bio, ending page I/O on it */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1087) bio_for_each_segment_all(bv, bio, iter_all)
0fb2d7209d66a (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 1088) iomap_finish_page_writeback(inode, bv->bv_page, error,
0fb2d7209d66a (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 1089) bv->bv_len);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1090) bio_put(bio);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1091) }
c275779ff2dd5 (Zorro Lang 2019-12-04 22:59:02 -0800 1092) /* The ioend has been freed by bio_put() */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1093)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1094) if (unlikely(error && !quiet)) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1095) printk_ratelimited(KERN_ERR
9cd0ed63ca514 (Darrick J. Wong 2019-10-17 14:02:07 -0700 1096) 			"%s: writeback error on inode %lu, offset %lld, sector %llu\n",
c275779ff2dd5 (Zorro Lang 2019-12-04 22:59:02 -0800 1097) inode->i_sb->s_id, inode->i_ino, offset, start);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1098) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1099) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1100)
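/*
 * Finish a chain of merged ioends: complete the ioend passed in and then
 * every ioend queued on its io_list, applying the same error status to
 * all of them.
 */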
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1101) void
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1102) iomap_finish_ioends(struct iomap_ioend *ioend, int error)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1103) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1104) struct list_head tmp;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1105)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1106) list_replace_init(&ioend->io_list, &tmp);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1107) iomap_finish_ioend(ioend, error);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1108)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1109) while (!list_empty(&tmp)) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1110) ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1111) list_del_init(&ioend->io_list);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1112) iomap_finish_ioend(ioend, error);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1113) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1114) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1115) EXPORT_SYMBOL_GPL(iomap_finish_ioends);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1116)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1117) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1118) * We can merge two adjacent ioends if they have the same set of work to do.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1119) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1120) static bool
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1121) iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1122) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1123) if (ioend->io_bio->bi_status != next->io_bio->bi_status)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1124) return false;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1125) if ((ioend->io_flags & IOMAP_F_SHARED) ^
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1126) (next->io_flags & IOMAP_F_SHARED))
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1127) return false;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1128) if ((ioend->io_type == IOMAP_UNWRITTEN) ^
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1129) (next->io_type == IOMAP_UNWRITTEN))
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1130) return false;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1131) if (ioend->io_offset + ioend->io_size != next->io_offset)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1132) return false;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1133) return true;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1134) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1135)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1136) void
6e552494fb90a (Brian Foster 2021-05-04 08:54:29 -0700 1137) iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1138) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1139) struct iomap_ioend *next;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1140)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1141) INIT_LIST_HEAD(&ioend->io_list);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1142)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1143) while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1144) io_list))) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1145) if (!iomap_ioend_can_merge(ioend, next))
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1146) break;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1147) list_move_tail(&next->io_list, &ioend->io_list);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1148) ioend->io_size += next->io_size;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1149) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1150) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1151) EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1152)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1153) static int
4f0f586bf0c89 (Sami Tolvanen 2021-04-08 11:28:34 -0700 1154) iomap_ioend_compare(void *priv, const struct list_head *a,
4f0f586bf0c89 (Sami Tolvanen 2021-04-08 11:28:34 -0700 1155) const struct list_head *b)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1156) {
b3d423ec898ae (Christoph Hellwig 2019-10-17 13:12:20 -0700 1157) struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
b3d423ec898ae (Christoph Hellwig 2019-10-17 13:12:20 -0700 1158) struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1159)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1160) if (ia->io_offset < ib->io_offset)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1161) return -1;
b3d423ec898ae (Christoph Hellwig 2019-10-17 13:12:20 -0700 1162) if (ia->io_offset > ib->io_offset)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1163) return 1;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1164) return 0;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1165) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1166)
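/*
 * Sort a list of ioends by ascending file offset.  A completion worker
 * that batches ioends might drain them roughly like this (sketch, with
 * "error" being the completion status collected by the caller):
 *
 *	iomap_sort_ioends(&completion_list);
 *	while ((ioend = list_first_entry_or_null(&completion_list,
 *			struct iomap_ioend, io_list))) {
 *		list_del_init(&ioend->io_list);
 *		iomap_ioend_try_merge(ioend, &completion_list);
 *		iomap_finish_ioends(ioend, error);
 *	}
 */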
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1167) void
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1168) iomap_sort_ioends(struct list_head *ioend_list)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1169) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1170) list_sort(NULL, ioend_list, iomap_ioend_compare);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1171) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1172) EXPORT_SYMBOL_GPL(iomap_sort_ioends);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1173)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1174) static void iomap_writepage_end_bio(struct bio *bio)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1175) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1176) struct iomap_ioend *ioend = bio->bi_private;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1177)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1178) iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1179) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1180)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1181) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1182) * Submit the final bio for an ioend.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1183) *
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1184) * If @error is non-zero, it means that we have a situation where some part of
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1185)  * the submission process has failed after we have marked pages for writeback
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1186) * and unlocked them. In this situation, we need to fail the bio instead of
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1187) * submitting it. This typically only happens on a filesystem shutdown.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1188) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1189) static int
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1190) iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1191) int error)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1192) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1193) ioend->io_bio->bi_private = ioend;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1194) ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1195)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1196) if (wpc->ops->prepare_ioend)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1197) error = wpc->ops->prepare_ioend(ioend, error);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1198) if (error) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1199) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1200) * If we are failing the IO now, just mark the ioend with an
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1201) * error and finish it. This will run IO completion immediately
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1202) * as there is only one reference to the ioend at this point in
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1203) * time.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1204) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1205) ioend->io_bio->bi_status = errno_to_blk_status(error);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1206) bio_endio(ioend->io_bio);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1207) return error;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1208) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1209)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1210) submit_bio(ioend->io_bio);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1211) return 0;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1212) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1213)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1214) static struct iomap_ioend *
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1215) iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1216) loff_t offset, sector_t sector, struct writeback_control *wbc)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1217) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1218) struct iomap_ioend *ioend;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1219) struct bio *bio;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1220)
a8affc03a9b37 (Christoph Hellwig 2021-03-11 12:01:37 +0100 1221) bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1222) bio_set_dev(bio, wpc->iomap.bdev);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1223) bio->bi_iter.bi_sector = sector;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1224) bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1225) bio->bi_write_hint = inode->i_write_hint;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1226) wbc_init_bio(wbc, bio);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1227)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1228) ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1229) INIT_LIST_HEAD(&ioend->io_list);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1230) ioend->io_type = wpc->iomap.type;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1231) ioend->io_flags = wpc->iomap.flags;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1232) ioend->io_inode = inode;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1233) ioend->io_size = 0;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1234) ioend->io_offset = offset;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1235) ioend->io_bio = bio;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1236) return ioend;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1237) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1238)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1239) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1240) * Allocate a new bio, and chain the old bio to the new one.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1241) *
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1242)  * Note that we have to perform the chaining in this unintuitive order
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1243) * so that the bi_private linkage is set up in the right direction for the
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1244) * traversal in iomap_finish_ioend().
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1245) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1246) static struct bio *
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1247) iomap_chain_bio(struct bio *prev)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1248) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1249) struct bio *new;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1250)
a8affc03a9b37 (Christoph Hellwig 2021-03-11 12:01:37 +0100 1251) new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1252) 	bio_copy_dev(new, prev);	/* also copies over blkcg information */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1253) new->bi_iter.bi_sector = bio_end_sector(prev);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1254) new->bi_opf = prev->bi_opf;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1255) new->bi_write_hint = prev->bi_write_hint;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1256)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1257) bio_chain(prev, new);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1258) bio_get(prev); /* for iomap_finish_ioend */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1259) submit_bio(prev);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1260) return new;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1261) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1262)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1263) static bool
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1264) iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1265) sector_t sector)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1266) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1267) if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1268) (wpc->ioend->io_flags & IOMAP_F_SHARED))
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1269) return false;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1270) if (wpc->iomap.type != wpc->ioend->io_type)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1271) return false;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1272) if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1273) return false;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1274) if (sector != bio_end_sector(wpc->ioend->io_bio))
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1275) return false;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1276) return true;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1277) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1278)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1279) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1280) * Test to see if we have an existing ioend structure that we could append to
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1281) * first, otherwise finish off the current ioend and start another.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1282) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1283) static void
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1284) iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1285) struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1286) struct writeback_control *wbc, struct list_head *iolist)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1287) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1288) sector_t sector = iomap_sector(&wpc->iomap, offset);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1289) unsigned len = i_blocksize(inode);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1290) unsigned poff = offset & (PAGE_SIZE - 1);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1291) bool merged, same_page = false;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1292)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1293) if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1294) if (wpc->ioend)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1295) list_add(&wpc->ioend->io_list, iolist);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1296) wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1297) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1298)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1299) merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1300) &same_page);
0fb2d7209d66a (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 1301) if (iop)
0fb2d7209d66a (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 1302) atomic_add(len, &iop->write_bytes_pending);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1303)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1304) if (!merged) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1305) if (bio_full(wpc->ioend->io_bio, len)) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1306) wpc->ioend->io_bio =
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1307) iomap_chain_bio(wpc->ioend->io_bio);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1308) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1309) bio_add_page(wpc->ioend->io_bio, page, len, poff);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1310) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1311)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1312) wpc->ioend->io_size += len;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1313) wbc_account_cgroup_owner(wbc, page, len);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1314) }
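/*
 * Editor's note: the write_bytes_pending bump in iomap_add_to_ioend()
 * pairs with a matching decrement on the I/O completion side (see
 * iomap_finish_page_writeback() earlier in this file); writeback on the
 * page is only ended once the counter drains back to zero, i.e. once
 * every block submitted from the page has completed.
 */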
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1315)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1316) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1317) * We implement an immediate ioend submission policy here to avoid needing to
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1318) * chain multiple ioends and hence nest mempool allocations which can violate
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1319) * forward progress guarantees we need to provide. The current ioend we are
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1320) * adding blocks to is cached on the writepage context, and if the new block
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1321) * does not append to the cached ioend, we create a new ioend and cache that
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1322) * instead.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1323) *
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1324) * If a new ioend is created and cached, the old ioend is returned and queued
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1325) * locally for submission once the entire page is processed or an error has been
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1326) * detected. While each ioend is submitted as soon as it is fully built,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1327) * batching optimisations are provided by higher level block plugging.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1328) *
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1329) * At the end of a writeback pass, there will be a cached ioend remaining on the
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1330) * writepage context that the caller will need to submit.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1331) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1332) static int
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1333) iomap_writepage_map(struct iomap_writepage_ctx *wpc,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1334) struct writeback_control *wbc, struct inode *inode,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1335) struct page *page, u64 end_offset)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1336) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1337) struct iomap_page *iop = to_iomap_page(page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1338) struct iomap_ioend *ioend, *next;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1339) unsigned len = i_blocksize(inode);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1340) u64 file_offset; /* file offset of page */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1341) int error = 0, count = 0, i;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1342) LIST_HEAD(submit_list);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1343)
24addd848a457 (Matthew Wilcox (Oracle) 2020-09-21 08:58:39 -0700 1344) WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
0fb2d7209d66a (Matthew Wilcox (Oracle) 2020-09-21 08:58:41 -0700 1345) WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1346)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1347) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1348) * Walk through the page to find areas to write back. If we run off the
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1349) * end of the current map or find the current map invalid, grab a new
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1350) * one.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1351) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1352) for (i = 0, file_offset = page_offset(page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1353) i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1354) i++, file_offset += len) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1355) if (iop && !test_bit(i, iop->uptodate))
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1356) continue;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1357)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1358) error = wpc->ops->map_blocks(wpc, inode, file_offset);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1359) if (error)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1360) break;
3e19e6f3eeea2 (Christoph Hellwig 2019-10-17 13:12:17 -0700 1361) if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
3e19e6f3eeea2 (Christoph Hellwig 2019-10-17 13:12:17 -0700 1362) continue;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1363) if (wpc->iomap.type == IOMAP_HOLE)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1364) continue;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1365) iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1366) &submit_list);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1367) count++;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1368) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1369)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1370) WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1371) WARN_ON_ONCE(!PageLocked(page));
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1372) WARN_ON_ONCE(PageWriteback(page));
50e7d6c7a5210 (Brian Foster 2020-10-29 14:30:49 -0700 1373) WARN_ON_ONCE(PageDirty(page));
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1374)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1375) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1376) * We cannot cancel the ioend directly here on error. We may have
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1377) * already set other pages under writeback and hence we have to run I/O
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1378) * completion to mark the error state of the pages under writeback
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1379) * appropriately.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1380) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1381) if (unlikely(error)) {
763e4cdc0f6d5 (Brian Foster 2020-10-29 14:30:48 -0700 1382) /*
763e4cdc0f6d5 (Brian Foster 2020-10-29 14:30:48 -0700 1383) * Let the filesystem know what portion of the current page
763e4cdc0f6d5 (Brian Foster 2020-10-29 14:30:48 -0700 1384) * failed to map. If the page hasn't been added to an ioend, it
763e4cdc0f6d5 (Brian Foster 2020-10-29 14:30:48 -0700 1385) * won't be affected by I/O completion and we must unlock it
763e4cdc0f6d5 (Brian Foster 2020-10-29 14:30:48 -0700 1386) * now.
763e4cdc0f6d5 (Brian Foster 2020-10-29 14:30:48 -0700 1387) */
763e4cdc0f6d5 (Brian Foster 2020-10-29 14:30:48 -0700 1388) if (wpc->ops->discard_page)
763e4cdc0f6d5 (Brian Foster 2020-10-29 14:30:48 -0700 1389) wpc->ops->discard_page(page, file_offset);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1390) if (!count) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1391) ClearPageUptodate(page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1392) unlock_page(page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1393) goto done;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1394) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1395) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1396)
50e7d6c7a5210 (Brian Foster 2020-10-29 14:30:49 -0700 1397) set_page_writeback(page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1398) unlock_page(page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1399)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1400) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1401) * Preserve the original error if there was one, otherwise catch
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1402) * submission errors here and propagate into subsequent ioend
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1403) * submissions.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1404) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1405) list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1406) int error2;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1407)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1408) list_del_init(&ioend->io_list);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1409) error2 = iomap_submit_ioend(wpc, ioend, error);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1410) if (error2 && !error)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1411) error = error2;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1412) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1413)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1414) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1415) * We can end up here with no error and nothing to write only if we race
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1416) * with a partial page truncate on a sub-page block sized filesystem.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1417) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1418) if (!count)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1419) end_page_writeback(page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1420) done:
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1421) mapping_set_error(page->mapping, error);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1422) return error;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1423) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1424)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1425) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1426) * Write out a dirty page.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1427) *
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1428) * For delalloc space on the page we need to allocate space and flush it.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1429) * For unwritten space on the page we need to start the conversion to
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1430) * regular allocated space.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1431) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1432) static int
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1433) iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1434) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1435) struct iomap_writepage_ctx *wpc = data;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1436) struct inode *inode = page->mapping->host;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1437) pgoff_t end_index;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1438) u64 end_offset;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1439) loff_t offset;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1440)
1ac994525b9d3 (Matthew Wilcox (Oracle) 2020-03-05 07:21:43 -0800 1441) trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1442)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1443) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1444) * Refuse to write the page out if we are called from reclaim context.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1445) *
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1446) * This avoids stack overflows when called on deep stacks from random
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1447) * callers performing direct reclaim or memcg reclaim. We explicitly
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1448) * allow reclaim from kswapd as the stack usage there is relatively low.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1449) *
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1450) * This should never happen except in the case of a VM regression so
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1451) * warn about it.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1452) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1453) if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1454) PF_MEMALLOC))
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1455) goto redirty;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1456)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1457) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1458) * Is this page beyond the end of the file?
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1459) *
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1460) * If the page index is less than end_index, adjust end_offset down
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1461) * to the highest offset that this page should represent.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1462) * -----------------------------------------------------
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1463) * | file mapping | <EOF> |
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1464) * -----------------------------------------------------
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1465) * | Page ... | Page N-2 | Page N-1 | Page N | |
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1466) * ^--------------------------------^----------|--------
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1467) * | desired writeback range | see else |
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1468) * ---------------------------------^------------------|
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1469) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1470) offset = i_size_read(inode);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1471) end_index = offset >> PAGE_SHIFT;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1472) if (page->index < end_index)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1473) end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1474) else {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1475) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1476) * Check whether the page to write out is beyond or straddles
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1477) * i_size or not.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1478) * -------------------------------------------------------
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1479) * | file mapping | <EOF> |
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1480) * -------------------------------------------------------
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1481) * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1482) * ^--------------------------------^-----------|---------
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1483) * | | Straddles |
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1484) * ---------------------------------^-----------|--------|
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1485) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1486) unsigned offset_into_page = offset & (PAGE_SIZE - 1);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1487)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1488) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1489) * Skip the page if it is fully outside i_size, e.g. due to a
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1490) * truncate operation that is in progress. We must redirty the
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1491) * page so that reclaim stops reclaiming it. Otherwise
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1492) * iomap_vm_releasepage() is called on it and gets confused.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1493) *
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1494) * Note that end_index is an unsigned long.  It would overflow
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1495) * if the given offset is greater than 16TB on a 32-bit system,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1496) * and if we checked whether the page is fully outside i_size
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1497) * via "if (page->index >= end_index + 1)", "end_index + 1"
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1498) * would evaluate to 0.  The page would then be redirtied and
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1499) * written out repeatedly, resulting in an infinite loop that
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1500) * hangs the user program performing the operation.  Instead,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1501) * we verify this situation by checking whether the page to
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1502) * write is totally beyond the i_size or whether its offset is
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1503) * just equal to the EOF.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1504) */
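/*
 * Worked numbers (editor's addition): on a 32-bit system with 4k pages,
 * an i_size just below 16TB places EOF in the page with index 0xFFFFFFFF,
 * so end_index == ULONG_MAX and "end_index + 1" wraps to 0; the naive
 * "page->index >= end_index + 1" check would then flag every page as
 * beyond EOF.  The two explicit comparisons below avoid that wraparound.
 */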
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1505) if (page->index > end_index ||
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1506) (page->index == end_index && offset_into_page == 0))
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1507) goto redirty;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1508)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1509) /*
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1510) * The page straddles i_size. It must be zeroed out on each
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1511) * and every writepage invocation because it may be mmapped.
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1512) * "A file is mapped in multiples of the page size. For a file
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1513) * that is not a multiple of the page size, the remaining
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1514) * memory is zeroed when mapped, and writes to that region are
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1515) * not written out to the file."
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1516) */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1517) zero_user_segment(page, offset_into_page, PAGE_SIZE);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1518)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1519) /* Adjust the end_offset to the end of file */
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1520) end_offset = offset;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1521) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1522)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1523) return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1524)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1525) redirty:
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1526) redirty_page_for_writepage(wbc, page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1527) unlock_page(page);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1528) return 0;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1529) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1530)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1531) int
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1532) iomap_writepage(struct page *page, struct writeback_control *wbc,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1533) struct iomap_writepage_ctx *wpc,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1534) const struct iomap_writeback_ops *ops)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1535) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1536) int ret;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1537)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1538) wpc->ops = ops;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1539) ret = iomap_do_writepage(page, wbc, wpc);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1540) if (!wpc->ioend)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1541) return ret;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1542) return iomap_submit_ioend(wpc, wpc->ioend, ret);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1543) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1544) EXPORT_SYMBOL_GPL(iomap_writepage);
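/*
 * Editor's sketch (not part of the original source): how a filesystem
 * might wire its ->writepage address_space operation to
 * iomap_writepage().  The example_* names are hypothetical; a real user
 * such as XFS embeds the iomap_writepage_ctx in its own context
 * structure and supplies its own iomap_writeback_ops (sketched after
 * iomap_writepages() below).
 */
#if 0
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepage(page, wbc, &wpc, &example_writeback_ops);
}
#endif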
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1545)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1546) int
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1547) iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1548) struct iomap_writepage_ctx *wpc,
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1549) const struct iomap_writeback_ops *ops)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1550) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1551) int ret;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1552)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1553) wpc->ops = ops;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1554) ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1555) if (!wpc->ioend)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1556) return ret;
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1557) return iomap_submit_ioend(wpc, wpc->ioend, ret);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1558) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1559) EXPORT_SYMBOL_GPL(iomap_writepages);
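/*
 * Editor's sketch (not part of the original source): a minimal
 * iomap_writeback_ops and ->writepages wiring.  example_map_blocks() is
 * hypothetical; a real implementation validates any mapping cached in
 * @wpc and otherwise fills wpc->iomap with the extent covering @offset,
 * returning 0 or the negative errno that iomap_writepage_map() then
 * propagates.
 */
#if 0
static int example_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	/* Fill in wpc->iomap (type, flags, addr, offset, length) here. */
	return 0;
}

static const struct iomap_writeback_ops example_writeback_ops = {
	.map_blocks		= example_map_blocks,
};

static int example_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &example_writeback_ops);
}
#endif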
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1560)
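/*
 * Editor's note (worked numbers): with 4k pages and 512-byte sectors the
 * bioset below reserves a pool of 4 * (4096 / 512) = 32 bios.  The front
 * padding of offsetof(struct iomap_ioend, io_inline_bio) means each bio
 * allocated from the set sits at the tail of a struct iomap_ioend, so
 * allocating the embedded bio allocates the containing ioend as well.
 */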
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1561) static int __init iomap_init(void)
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1562) {
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1563) return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1564) offsetof(struct iomap_ioend, io_inline_bio),
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1565) BIOSET_NEED_BVECS);
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1566) }
598ecfbaa742a (Christoph Hellwig 2019-10-17 13:12:15 -0700 1567) fs_initcall(iomap_init);