VisionFive2 Linux kernel

StarFive Tech Linux Kernel for VisionFive (JH7110) boards (mirror)

0b61f8a4079d9 (Dave Chinner        2018-06-05 19:42:14 -0700    1) // SPDX-License-Identifier: GPL-2.0
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000    2) /*
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000    3)  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000    4)  * Copyright (c) 2012 Red Hat, Inc.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000    5)  * All Rights Reserved.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000    6)  */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000    7) #include "xfs.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000    8) #include "xfs_fs.h"
70a9883c5f34b (Dave Chinner        2013-10-23 10:36:05 +1100    9) #include "xfs_shared.h"
239880ef6454c (Dave Chinner        2013-10-23 10:50:10 +1100   10) #include "xfs_format.h"
239880ef6454c (Dave Chinner        2013-10-23 10:50:10 +1100   11) #include "xfs_log_format.h"
239880ef6454c (Dave Chinner        2013-10-23 10:50:10 +1100   12) #include "xfs_trans_resv.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   13) #include "xfs_bit.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   14) #include "xfs_mount.h"
3ab78df2a59a4 (Darrick J. Wong     2016-08-03 11:15:38 +1000   15) #include "xfs_defer.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   16) #include "xfs_inode.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   17) #include "xfs_btree.h"
239880ef6454c (Dave Chinner        2013-10-23 10:50:10 +1100   18) #include "xfs_trans.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   19) #include "xfs_alloc.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   20) #include "xfs_bmap.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   21) #include "xfs_bmap_util.h"
a4fbe6ab1e7ab (Dave Chinner        2013-10-23 10:51:50 +1100   22) #include "xfs_bmap_btree.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   23) #include "xfs_rtalloc.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   24) #include "xfs_error.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   25) #include "xfs_quota.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   26) #include "xfs_trans_space.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   27) #include "xfs_trace.h"
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000   28) #include "xfs_icache.h"
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700   29) #include "xfs_iomap.h"
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700   30) #include "xfs_reflink.h"
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   31) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   32) /* Kernel-only BMAP-related definitions and functions */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   33) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   34) /*
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   35)  * Convert the given file system block to a disk block.  We have to treat it
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   36)  * differently based on whether the file is a real time file or not, because the
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   37)  * bmap code does.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   38)  */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   39) xfs_daddr_t
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   40) xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   41) {
ecfc28a41cf10 (Christoph Hellwig   2019-08-30 08:56:55 -0700   42) 	if (XFS_IS_REALTIME_INODE(ip))
ecfc28a41cf10 (Christoph Hellwig   2019-08-30 08:56:55 -0700   43) 		return XFS_FSB_TO_BB(ip->i_mount, fsb);
ecfc28a41cf10 (Christoph Hellwig   2019-08-30 08:56:55 -0700   44) 	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   45) }
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   46) 
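/*
 * Worked example (a rough sketch, assuming a 4096-byte filesystem block
 * size): a realtime file's fsb is already a linear block number on the
 * realtime device, so XFS_FSB_TO_BB() only scales it to 512-byte basic
 * blocks, i.e. fsb << 3.  A data-device fsb instead encodes an
 * (agno, agbno) pair, so XFS_FSB_TO_DADDR() expands it to an absolute
 * block before scaling, roughly:
 *
 *	daddr = (agno * mp->m_sb.sb_agblocks + agbno) << mp->m_blkbb_log;
 */
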
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   47) /*
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   48)  * Routine to zero an extent on disk allocated to the specific inode.
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   49)  *
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   50)  * The VFS functions take a linearised filesystem block offset, so we have to
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   51)  * convert the sparse xfs fsb to the right format first.
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   52)  * VFS types are real funky, too.
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   53)  */
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   54) int
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   55) xfs_zero_extent(
30fa529e3b2e6 (Christoph Hellwig   2019-10-24 22:25:38 -0700   56) 	struct xfs_inode	*ip,
30fa529e3b2e6 (Christoph Hellwig   2019-10-24 22:25:38 -0700   57) 	xfs_fsblock_t		start_fsb,
30fa529e3b2e6 (Christoph Hellwig   2019-10-24 22:25:38 -0700   58) 	xfs_off_t		count_fsb)
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   59) {
30fa529e3b2e6 (Christoph Hellwig   2019-10-24 22:25:38 -0700   60) 	struct xfs_mount	*mp = ip->i_mount;
30fa529e3b2e6 (Christoph Hellwig   2019-10-24 22:25:38 -0700   61) 	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
30fa529e3b2e6 (Christoph Hellwig   2019-10-24 22:25:38 -0700   62) 	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
30fa529e3b2e6 (Christoph Hellwig   2019-10-24 22:25:38 -0700   63) 	sector_t		block = XFS_BB_TO_FSBT(mp, sector);
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   64) 
30fa529e3b2e6 (Christoph Hellwig   2019-10-24 22:25:38 -0700   65) 	return blkdev_issue_zeroout(target->bt_bdev,
3dc29161070ab (Matthew Wilcox      2016-03-15 11:20:41 -0600   66) 		block << (mp->m_super->s_blocksize_bits - 9),
3dc29161070ab (Matthew Wilcox      2016-03-15 11:20:41 -0600   67) 		count_fsb << (mp->m_super->s_blocksize_bits - 9),
ee472d835c264 (Christoph Hellwig   2017-04-05 19:21:08 +0200   68) 		GFP_NOFS, 0);
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   69) }
3fbbbea34bac0 (Dave Chinner        2015-11-03 12:27:22 +1100   70) 
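/*
 * A quick sanity check of the unit conversion above, assuming 4096-byte
 * blocks: s_blocksize_bits is 12, so the shift is 12 - 9 = 3 and every
 * filesystem block covers 8 sectors of 512 bytes.  Zeroing two blocks
 * starting at (linearised) block 100 therefore ends up as:
 *
 *	blkdev_issue_zeroout(bdev, 100 << 3, 2 << 3, GFP_NOFS, 0);
 *
 * i.e. 16 sectors starting at sector 800.
 */
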
bb9c2e5433250 (Dave Chinner        2017-10-09 11:37:22 -0700   71) #ifdef CONFIG_XFS_RT
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   72) int
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   73) xfs_bmap_rtalloc(
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   74) 	struct xfs_bmalloca	*ap)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   75) {
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   76) 	struct xfs_mount	*mp = ap->ip->i_mount;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   77) 	xfs_fileoff_t		orig_offset = ap->offset;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   78) 	xfs_rtblock_t		rtb;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   79) 	xfs_extlen_t		prod = 0;  /* product factor for allocators */
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   80) 	xfs_extlen_t		mod = 0;   /* extsz hint alignment remainder */
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   81) 	xfs_extlen_t		ralen = 0; /* realtime allocation length */
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   82) 	xfs_extlen_t		align;     /* minimum allocation alignment */
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   83) 	xfs_extlen_t		orig_length = ap->length;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   84) 	xfs_extlen_t		minlen = mp->m_sb.sb_rextsize;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   85) 	xfs_extlen_t		raminlen;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   86) 	bool			rtlocked = false;
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700   87) 	bool			ignore_locality = false;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   88) 	int			error;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   89) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   90) 	align = xfs_get_extsz_hint(ap->ip);
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700   91) retry:
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   92) 	prod = align / mp->m_sb.sb_rextsize;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   93) 	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   94) 					align, 1, ap->eof, 0,
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   95) 					ap->conv, &ap->offset, &ap->length);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   96) 	if (error)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   97) 		return error;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   98) 	ASSERT(ap->length);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000   99) 	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  100) 
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  101) 	/*
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  102) 	 * If we shifted the file offset downward to satisfy an extent size
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  103) 	 * hint, increase minlen by that amount so that the allocator won't
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  104) 	 * give us an allocation that's too short to cover at least one of the
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  105) 	 * blocks that the caller asked for.
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  106) 	 */
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  107) 	if (ap->offset != orig_offset)
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  108) 		minlen += orig_offset - ap->offset;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  109) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  110) 	/*
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  111) 	 * If the offset & length are not perfectly aligned
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  112) 	 * then kill prod, it will just get us in trouble.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  113) 	 */
0703a8e1c17e2 (Dave Chinner        2018-06-08 09:54:22 -0700  114) 	div_u64_rem(ap->offset, align, &mod);
0703a8e1c17e2 (Dave Chinner        2018-06-08 09:54:22 -0700  115) 	if (mod || ap->length % align)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  116) 		prod = 1;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  117) 	/*
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  118) 	 * Set ralen to be the actual requested length in rtextents.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  119) 	 */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  120) 	ralen = ap->length / mp->m_sb.sb_rextsize;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  121) 	/*
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  122) 	 * If the old value was close enough to MAXEXTLEN that
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  123) 	 * we rounded up to it, cut it back so it's valid again.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  124) 	 * Note that if it's a really large request (bigger than
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  125) 	 * MAXEXTLEN), we don't hear about that number, and can't
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  126) 	 * adjust the starting point to match it.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  127) 	 */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  128) 	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  129) 		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
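	/*
	 * Worked example of the clamp above, with illustrative numbers:
	 * for a realtime extent size of 16 blocks and MAXEXTLEN of
	 * 2097151 blocks ((1 << 21) - 1), ralen is cut back to 131071
	 * extents, so the final mapping of 131071 * 16 = 2097136 blocks
	 * stays below MAXEXTLEN.
	 */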
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  130) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  131) 	/*
4b680afb42b9e (Dave Chinner        2016-02-08 10:46:51 +1100  132) 	 * Lock out modifications to both the RT bitmap and summary inodes
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  133) 	 */
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  134) 	if (!rtlocked) {
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  135) 		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  136) 		xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  137) 		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  138) 		xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  139) 		rtlocked = true;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  140) 	}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  141) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  142) 	/*
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  143) 	 * If it's an allocation to an empty file at offset 0,
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  144) 	 * pick an extent that will space things out in the rt area.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  145) 	 */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  146) 	if (ap->eof && ap->offset == 0) {
3f649ab728cda (Kees Cook           2020-06-03 13:09:38 -0700  147) 		xfs_rtblock_t rtx; /* realtime extent no */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  148) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  149) 		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  150) 		if (error)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  151) 			return error;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  152) 		ap->blkno = rtx * mp->m_sb.sb_rextsize;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  153) 	} else {
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  154) 		ap->blkno = 0;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  155) 	}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  156) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  157) 	xfs_bmap_adjacent(ap);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  158) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  159) 	/*
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  160) 	 * Realtime allocation, done through xfs_rtallocate_extent.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  161) 	 */
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  162) 	if (ignore_locality)
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  163) 		ap->blkno = 0;
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  164) 	else
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  165) 		do_div(ap->blkno, mp->m_sb.sb_rextsize);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  166) 	rtb = ap->blkno;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  167) 	ap->length = ralen;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  168) 	raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  169) 	error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  170) 			&ralen, ap->wasdel, prod, &rtb);
089ec2f87578b (Christoph Hellwig   2017-02-17 08:21:06 -0800  171) 	if (error)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  172) 		return error;
089ec2f87578b (Christoph Hellwig   2017-02-17 08:21:06 -0800  173) 
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  174) 	if (rtb != NULLRTBLOCK) {
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  175) 		ap->blkno = rtb * mp->m_sb.sb_rextsize;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  176) 		ap->length = ralen * mp->m_sb.sb_rextsize;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  177) 		ap->ip->i_nblocks += ap->length;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  178) 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  179) 		if (ap->wasdel)
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  180) 			ap->ip->i_delayed_blks -= ap->length;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  181) 		/*
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  182) 		 * Adjust the disk quota also. This was reserved
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  183) 		 * earlier.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  184) 		 */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  185) 		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  186) 			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  187) 					XFS_TRANS_DQ_RTBCOUNT, ap->length);
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  188) 		return 0;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  189) 	}
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  190) 
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  191) 	if (align > mp->m_sb.sb_rextsize) {
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  192) 		/*
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  193) 		 * We previously enlarged the request length to try to satisfy
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  194) 		 * an extent size hint.  The allocator didn't return anything,
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  195) 		 * so reset the parameters to the original values and try again
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  196) 		 * without alignment criteria.
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  197) 		 */
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  198) 		ap->offset = orig_offset;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  199) 		ap->length = orig_length;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  200) 		minlen = align = mp->m_sb.sb_rextsize;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  201) 		goto retry;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  202) 	}
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  203) 
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  204) 	if (!ignore_locality && ap->blkno != 0) {
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  205) 		/*
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  206) 		 * If we can't allocate near a specific rt extent, try again
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  207) 		 * without locality criteria.
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  208) 		 */
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  209) 		ignore_locality = true;
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  210) 		goto retry;
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  211) 	}
676a659b60afb (Darrick J. Wong     2021-05-09 16:22:55 -0700  212) 
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  213) 	ap->blkno = NULLFSBLOCK;
9d5e8492eee01 (Darrick J. Wong     2021-05-09 16:22:54 -0700  214) 	ap->length = 0;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  215) 	return 0;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  216) }
bb9c2e5433250 (Dave Chinner        2017-10-09 11:37:22 -0700  217) #endif /* CONFIG_XFS_RT */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  218) 
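/*
 * The retry ladder in xfs_bmap_rtalloc() above, as rough pseudo-code (a
 * sketch of the control flow only):
 *
 *	try an allocation honouring both the extent size hint alignment
 *	    and the locality target in ap->blkno;
 *	if nothing was found and align > sb_rextsize:
 *		restore orig_offset/orig_length and retry unaligned;
 *	if still nothing and a locality target was set:
 *		set ignore_locality and retry anywhere on the rt device;
 *	otherwise:
 *		return ap->blkno = NULLFSBLOCK to signal no space.
 */
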
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  219) /*
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  220)  * Extent tree block counting routines.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  221)  */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  222) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  223) /*
d29cb3e45e923 (Darrick J. Wong     2017-06-16 11:00:12 -0700  224)  * Count leaf blocks given a range of extent records.  Delayed allocation
d29cb3e45e923 (Darrick J. Wong     2017-06-16 11:00:12 -0700  225)  * extents are not counted towards the totals.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  226)  */
e17a5c6f0e360 (Christoph Hellwig   2017-08-29 15:44:14 -0700  227) xfs_extnum_t
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  228) xfs_bmap_count_leaves(
d29cb3e45e923 (Darrick J. Wong     2017-06-16 11:00:12 -0700  229) 	struct xfs_ifork	*ifp,
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  230) 	xfs_filblks_t		*count)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  231) {
b2b1712a64082 (Christoph Hellwig   2017-11-03 10:34:43 -0700  232) 	struct xfs_iext_cursor	icur;
e17a5c6f0e360 (Christoph Hellwig   2017-08-29 15:44:14 -0700  233) 	struct xfs_bmbt_irec	got;
b2b1712a64082 (Christoph Hellwig   2017-11-03 10:34:43 -0700  234) 	xfs_extnum_t		numrecs = 0;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  235) 
b2b1712a64082 (Christoph Hellwig   2017-11-03 10:34:43 -0700  236) 	for_each_xfs_iext(ifp, &icur, &got) {
e17a5c6f0e360 (Christoph Hellwig   2017-08-29 15:44:14 -0700  237) 		if (!isnullstartblock(got.br_startblock)) {
e17a5c6f0e360 (Christoph Hellwig   2017-08-29 15:44:14 -0700  238) 			*count += got.br_blockcount;
e17a5c6f0e360 (Christoph Hellwig   2017-08-29 15:44:14 -0700  239) 			numrecs++;
d29cb3e45e923 (Darrick J. Wong     2017-06-16 11:00:12 -0700  240) 		}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  241) 	}
b2b1712a64082 (Christoph Hellwig   2017-11-03 10:34:43 -0700  242) 
e17a5c6f0e360 (Christoph Hellwig   2017-08-29 15:44:14 -0700  243) 	return numrecs;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  244) }
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  245) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  246) /*
d29cb3e45e923 (Darrick J. Wong     2017-06-16 11:00:12 -0700  247)  * Count fsblocks of the given fork.  Delayed allocation extents are
d29cb3e45e923 (Darrick J. Wong     2017-06-16 11:00:12 -0700  248)  * not counted towards the totals.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  249)  */
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  250) int
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  251) xfs_bmap_count_blocks(
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  252) 	struct xfs_trans	*tp,
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  253) 	struct xfs_inode	*ip,
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  254) 	int			whichfork,
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  255) 	xfs_extnum_t		*nextents,
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  256) 	xfs_filblks_t		*count)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  257) {
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  258) 	struct xfs_mount	*mp = ip->i_mount;
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  259) 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  260) 	struct xfs_btree_cur	*cur;
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  261) 	xfs_extlen_t		btblocks = 0;
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  262) 	int			error;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  263) 
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  264) 	*nextents = 0;
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  265) 	*count = 0;
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  266) 
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  267) 	if (!ifp)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  268) 		return 0;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  269) 
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700  270) 	switch (ifp->if_format) {
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  271) 	case XFS_DINODE_FMT_BTREE:
862a804aae303 (Christoph Hellwig   2021-04-13 11:15:09 -0700  272) 		error = xfs_iread_extents(tp, ip, whichfork);
862a804aae303 (Christoph Hellwig   2021-04-13 11:15:09 -0700  273) 		if (error)
862a804aae303 (Christoph Hellwig   2021-04-13 11:15:09 -0700  274) 			return error;
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  275) 
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  276) 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  277) 		error = xfs_btree_count_blocks(cur, &btblocks);
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  278) 		xfs_btree_del_cursor(cur, error);
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  279) 		if (error)
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  280) 			return error;
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  281) 
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  282) 		/*
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  283) 		 * xfs_btree_count_blocks includes the root block contained in
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  284) 		 * the inode fork in @btblocks, so subtract one because we're
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  285) 		 * only interested in allocated disk blocks.
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  286) 		 */
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  287) 		*count += btblocks - 1;
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700  288) 
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  289) 		/* fall through */
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  290) 	case XFS_DINODE_FMT_EXTENTS:
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  291) 		*nextents = xfs_bmap_count_leaves(ifp, count);
fec40e220ffcb (Darrick J. Wong     2019-10-28 16:12:35 -0700  292) 		break;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  293) 	}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  294) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  295) 	return 0;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  296) }
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  297) 
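/*
 * A minimal usage sketch (hypothetical caller, shown for illustration;
 * real callers include the quota accounting and extent swap code):
 *
 *	xfs_extnum_t	nextents;
 *	xfs_filblks_t	count;
 *	int		error;
 *
 *	error = xfs_bmap_count_blocks(tp, ip, XFS_DATA_FORK,
 *			&nextents, &count);
 */
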
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  298) static int
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  299) xfs_getbmap_report_one(
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  300) 	struct xfs_inode	*ip,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  301) 	struct getbmapx		*bmv,
232b51948b99d (Christoph Hellwig   2017-10-17 14:16:19 -0700  302) 	struct kgetbmap		*out,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  303) 	int64_t			bmv_end,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  304) 	struct xfs_bmbt_irec	*got)
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  305) {
232b51948b99d (Christoph Hellwig   2017-10-17 14:16:19 -0700  306) 	struct kgetbmap		*p = out + bmv->bmv_entries;
d392bc81bb7c2 (Christoph Hellwig   2018-10-18 17:19:48 +1100  307) 	bool			shared = false;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  308) 	int			error;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  309) 
d392bc81bb7c2 (Christoph Hellwig   2018-10-18 17:19:48 +1100  310) 	error = xfs_reflink_trim_around_shared(ip, got, &shared);
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  311) 	if (error)
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  312) 		return error;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  313) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  314) 	if (isnullstartblock(got->br_startblock) ||
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  315) 	    got->br_startblock == DELAYSTARTBLOCK) {
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  316) 		/*
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  317) 		 * Delalloc extents that start beyond EOF can occur due to
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  318) 		 * speculative EOF allocation when the delalloc extent is larger
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  319) 		 * than the largest freespace extent at conversion time.  These
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  320) 		 * extents cannot be converted by data writeback, so can exist
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  321) 		 * here even if we are not supposed to be finding delalloc
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  322) 		 * extents.
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  323) 		 */
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  324) 		if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  325) 			ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  326) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  327) 		p->bmv_oflags |= BMV_OF_DELALLOC;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  328) 		p->bmv_block = -2;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  329) 	} else {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  330) 		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  331) 	}
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  332) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  333) 	if (got->br_state == XFS_EXT_UNWRITTEN &&
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  334) 	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  335) 		p->bmv_oflags |= BMV_OF_PREALLOC;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  336) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  337) 	if (shared)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  338) 		p->bmv_oflags |= BMV_OF_SHARED;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  339) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  340) 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  341) 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  342) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  343) 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  344) 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  345) 	bmv->bmv_entries++;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  346) 	return 0;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  347) }
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  348) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  349) static void
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  350) xfs_getbmap_report_hole(
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  351) 	struct xfs_inode	*ip,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  352) 	struct getbmapx		*bmv,
232b51948b99d (Christoph Hellwig   2017-10-17 14:16:19 -0700  353) 	struct kgetbmap		*out,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  354) 	int64_t			bmv_end,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  355) 	xfs_fileoff_t		bno,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  356) 	xfs_fileoff_t		end)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  357) {
232b51948b99d (Christoph Hellwig   2017-10-17 14:16:19 -0700  358) 	struct kgetbmap		*p = out + bmv->bmv_entries;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  359) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  360) 	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  361) 		return;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  362) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  363) 	p->bmv_block = -1;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  364) 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  365) 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  366) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  367) 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  368) 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  369) 	bmv->bmv_entries++;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  370) }
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  371) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  372) static inline bool
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  373) xfs_getbmap_full(
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  374) 	struct getbmapx		*bmv)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  375) {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  376) 	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  377) }
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  378) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  379) static bool
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  380) xfs_getbmap_next_rec(
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  381) 	struct xfs_bmbt_irec	*rec,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  382) 	xfs_fileoff_t		total_end)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  383) {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  384) 	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  385) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  386) 	if (end == total_end)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  387) 		return false;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  388) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  389) 	rec->br_startoff += rec->br_blockcount;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  390) 	if (!isnullstartblock(rec->br_startblock) &&
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  391) 	    rec->br_startblock != DELAYSTARTBLOCK)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  392) 		rec->br_startblock += rec->br_blockcount;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  393) 	rec->br_blockcount = total_end - end;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  394) 	return true;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  395) }
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  396) 
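/*
 * A concrete example of the splitting done by xfs_getbmap_report_one()
 * and xfs_getbmap_next_rec() (illustrative numbers only): if a single
 * 100-block bmbt record has its first 40 blocks shared with another
 * file, the loop below emits two getbmapx records for it: a 40-block
 * record flagged BMV_OF_SHARED followed by a 60-block record without
 * the flag.
 */
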
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  397) /*
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  398)  * Get inode's extents as described in bmv, and format for output.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  399)  * Fills the out array with one record per extent or hole until all
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  400)  * extents are mapped, the passed-in bmv->bmv_count slots have been
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  401)  * filled, or the remaining length to map (bmv->bmv_length) reaches
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  402)  * zero.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  403)  */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  404) int						/* error code */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  405) xfs_getbmap(
232b51948b99d (Christoph Hellwig   2017-10-17 14:16:19 -0700  406) 	struct xfs_inode	*ip,
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  407) 	struct getbmapx		*bmv,		/* user bmap structure */
232b51948b99d (Christoph Hellwig   2017-10-17 14:16:19 -0700  408) 	struct kgetbmap		*out)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  409) {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  410) 	struct xfs_mount	*mp = ip->i_mount;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  411) 	int			iflags = bmv->bmv_iflags;
232b51948b99d (Christoph Hellwig   2017-10-17 14:16:19 -0700  412) 	int			whichfork, lock, error = 0;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  413) 	int64_t			bmv_end, max_len;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  414) 	xfs_fileoff_t		bno, first_bno;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  415) 	struct xfs_ifork	*ifp;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  416) 	struct xfs_bmbt_irec	got, rec;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  417) 	xfs_filblks_t		len;
b2b1712a64082 (Christoph Hellwig   2017-11-03 10:34:43 -0700  418) 	struct xfs_iext_cursor	icur;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  419) 
232b51948b99d (Christoph Hellwig   2017-10-17 14:16:19 -0700  420) 	if (bmv->bmv_iflags & ~BMV_IF_VALID)
232b51948b99d (Christoph Hellwig   2017-10-17 14:16:19 -0700  421) 		return -EINVAL;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  422) #ifndef DEBUG
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  423) 	/* Only allow CoW fork queries if we're debugging. */
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  424) 	if (iflags & BMV_IF_COWFORK)
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  425) 		return -EINVAL;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  426) #endif
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  427) 	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  428) 		return -EINVAL;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  429) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  430) 	if (bmv->bmv_length < -1)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  431) 		return -EINVAL;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  432) 	bmv->bmv_entries = 0;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  433) 	if (bmv->bmv_length == 0)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  434) 		return 0;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  435) 
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  436) 	if (iflags & BMV_IF_ATTRFORK)
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  437) 		whichfork = XFS_ATTR_FORK;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  438) 	else if (iflags & BMV_IF_COWFORK)
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  439) 		whichfork = XFS_COW_FORK;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  440) 	else
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  441) 		whichfork = XFS_DATA_FORK;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  442) 	ifp = XFS_IFORK_PTR(ip, whichfork);
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  443) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  444) 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  445) 	switch (whichfork) {
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  446) 	case XFS_ATTR_FORK:
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  447) 		if (!XFS_IFORK_Q(ip))
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  448) 			goto out_unlock_iolock;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  449) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  450) 		max_len = 1LL << 32;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  451) 		lock = xfs_ilock_attr_map_shared(ip);
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  452) 		break;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  453) 	case XFS_COW_FORK:
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  454) 		/* No CoW fork? Just return */
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  455) 		if (!ifp)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  456) 			goto out_unlock_iolock;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  457) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  458) 		if (xfs_get_cowextsz_hint(ip))
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  459) 			max_len = mp->m_super->s_maxbytes;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  460) 		else
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  461) 			max_len = XFS_ISIZE(ip);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  462) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  463) 		lock = XFS_ILOCK_SHARED;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  464) 		xfs_ilock(ip, lock);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  465) 		break;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  466) 	case XFS_DATA_FORK:
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  467) 		if (!(iflags & BMV_IF_DELALLOC) &&
13d2c10b05d8e (Christoph Hellwig   2021-03-29 11:11:40 -0700  468) 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000  469) 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  470) 			if (error)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  471) 				goto out_unlock_iolock;
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  472) 
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  473) 			/*
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  474) 			 * Even after flushing the inode, there can still be
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  475) 			 * delalloc blocks on the inode beyond EOF due to
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  476) 			 * speculative preallocation.  These are not removed
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  477) 			 * until the release function is called or the inode
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  478) 			 * is inactivated.  Hence we cannot assert here that
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  479) 			 * ip->i_delayed_blks == 0.
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  480) 			 */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  481) 		}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  482) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  483) 		if (xfs_get_extsz_hint(ip) ||
db07349da2f56 (Christoph Hellwig   2021-03-29 11:11:44 -0700  484) 		    (ip->i_diflags &
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  485) 		     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  486) 			max_len = mp->m_super->s_maxbytes;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  487) 		else
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  488) 			max_len = XFS_ISIZE(ip);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  489) 
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  490) 		lock = xfs_ilock_data_map_shared(ip);
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  491) 		break;
efa70be165497 (Christoph Hellwig   2013-12-18 02:14:39 -0800  492) 	}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  493) 
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700  494) 	switch (ifp->if_format) {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  495) 	case XFS_DINODE_FMT_EXTENTS:
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  496) 	case XFS_DINODE_FMT_BTREE:
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  497) 		break;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  498) 	case XFS_DINODE_FMT_LOCAL:
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  499) 		/* Local format inode forks report no extents. */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  500) 		goto out_unlock_ilock;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  501) 	default:
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  502) 		error = -EINVAL;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  503) 		goto out_unlock_ilock;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  504) 	}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  505) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  506) 	if (bmv->bmv_length == -1) {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  507) 		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  508) 		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  509) 	}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  510) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  511) 	bmv_end = bmv->bmv_offset + bmv->bmv_length;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  512) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  513) 	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  514) 	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  515) 
862a804aae303 (Christoph Hellwig   2021-04-13 11:15:09 -0700  516) 	error = xfs_iread_extents(NULL, ip, whichfork);
862a804aae303 (Christoph Hellwig   2021-04-13 11:15:09 -0700  517) 	if (error)
862a804aae303 (Christoph Hellwig   2021-04-13 11:15:09 -0700  518) 		goto out_unlock_ilock;
f86f403794b14 (Darrick J. Wong     2016-10-03 09:11:41 -0700  519) 
b2b1712a64082 (Christoph Hellwig   2017-11-03 10:34:43 -0700  520) 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  521) 		/*
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  522) 		 * Report a whole-file hole if the delalloc flag is set to
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  523) 		 * stay compatible with the old implementation.
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  524) 		 */
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  525) 		if (iflags & BMV_IF_DELALLOC)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  526) 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  527) 					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  528) 		goto out_unlock_ilock;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  529) 	}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  530) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  531) 	while (!xfs_getbmap_full(bmv)) {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  532) 		xfs_trim_extent(&got, first_bno, len);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  533) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  534) 		/*
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  535) 		 * Report an entry for a hole if this extent doesn't directly
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  536) 		 * follow the previous one.
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  537) 		 */
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  538) 		if (got.br_startoff > bno) {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  539) 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  540) 					got.br_startoff);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  541) 			if (xfs_getbmap_full(bmv))
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  542) 				break;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  543) 		}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  544) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  545) 		/*
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  546) 		 * In order to report shared extents accurately, we report each
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  547) 		 * distinct shared / unshared part of a single bmbt record with
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  548) 		 * an individual getbmapx record.
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  549) 		 */
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  550) 		bno = got.br_startoff + got.br_blockcount;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  551) 		rec = got;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  552) 		do {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  553) 			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  554) 					&rec);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  555) 			if (error || xfs_getbmap_full(bmv))
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  556) 				goto out_unlock_ilock;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  557) 		} while (xfs_getbmap_next_rec(&rec, bno));
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  558) 
b2b1712a64082 (Christoph Hellwig   2017-11-03 10:34:43 -0700  559) 		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  560) 			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  561) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  562) 			out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  563) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  564) 			if (whichfork != XFS_ATTR_FORK && bno < end &&
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  565) 			    !xfs_getbmap_full(bmv)) {
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  566) 				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  567) 						bno, end);
c364b6d0b6cda (Darrick J. Wong     2017-01-26 09:50:30 -0800  568) 			}
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  569) 			break;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  570) 		}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  571) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  572) 		if (bno >= first_bno + len)
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  573) 			break;
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  574) 	}
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  575) 
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  576) out_unlock_ilock:
01f4f3277556d (Christoph Hellwig   2013-12-06 12:30:08 -0800  577) 	xfs_iunlock(ip, lock);
abbf9e8a45074 (Christoph Hellwig   2017-10-17 14:16:18 -0700  578) out_unlock_iolock:
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  579) 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  580) 	return error;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  581) }
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  582) 
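/*
 * For context, a simplified sketch of how this is driven from the BMAP
 * ioctls (the exact caller lives in xfs_ioctl.c and is only paraphrased
 * here): the caller allocates bmv_count kgetbmap records, lets
 * xfs_getbmap() fill them, and then translates the filled records back
 * into struct getbmapx form for userspace:
 *
 *	struct kgetbmap	*buf;
 *
 *	buf = kvcalloc(bmx.bmv_count, sizeof(*buf), GFP_KERNEL);
 *	error = xfs_getbmap(ip, &bmx, buf);
 */
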
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  583) /*
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  584)  * Dead simple method of punching delayed allocation blocks from a range in
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  585)  * the inode.  This will always punch out both the start and end blocks, even
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  586)  * if the range only partially overlaps them, so it is up to the caller to
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  587)  * ensure that partial blocks are not passed in.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  588)  */
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  589) int
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  590) xfs_bmap_punch_delalloc_range(
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  591) 	struct xfs_inode	*ip,
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  592) 	xfs_fileoff_t		start_fsb,
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  593) 	xfs_fileoff_t		length)
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  594) {
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  595) 	struct xfs_ifork	*ifp = &ip->i_df;
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  596) 	xfs_fileoff_t		end_fsb = start_fsb + length;
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  597) 	struct xfs_bmbt_irec	got, del;
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  598) 	struct xfs_iext_cursor	icur;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  599) 	int			error = 0;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  600) 
b2197a36c0ef5 (Christoph Hellwig   2021-04-13 11:15:12 -0700  601) 	ASSERT(!xfs_need_iread_extents(ifp));
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  602) 
0065b54119973 (Christoph Hellwig   2018-09-29 13:47:46 +1000  603) 	xfs_ilock(ip, XFS_ILOCK_EXCL);
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  604) 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
d438017757669 (Christoph Hellwig   2018-07-11 22:25:57 -0700  605) 		goto out_unlock;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  606) 
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  607) 	while (got.br_startoff + got.br_blockcount > start_fsb) {
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  608) 		del = got;
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  609) 		xfs_trim_extent(&del, start_fsb, length);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  610) 
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  611) 		/*
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  612) 		 * A delete can push the cursor forward. Step back to the
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  613) 		 * previous extent if this one is not delalloc or lies
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  614) 		 * outside the target range.
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  615) 		 */
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  616) 		if (!del.br_blockcount ||
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  617) 		    !isnullstartblock(del.br_startblock)) {
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  618) 			if (!xfs_iext_prev_extent(ifp, &icur, &got))
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  619) 				break;
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  620) 			continue;
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  621) 		}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  622) 
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  623) 		error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  624) 						  &got, &del);
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  625) 		if (error || !xfs_iext_get_extent(ifp, &icur, &got))
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  626) 			break;
e2ac836307e34 (Christoph Hellwig   2018-06-21 23:24:38 -0700  627) 	}
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  628) 
d438017757669 (Christoph Hellwig   2018-07-11 22:25:57 -0700  629) out_unlock:
d438017757669 (Christoph Hellwig   2018-07-11 22:25:57 -0700  630) 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  631) 	return error;
6898811459ff5 (Dave Chinner        2013-08-12 20:49:42 +1000  632) }
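A minimal sketch of how a caller might satisfy the whole-block requirement described above: round the byte range outwards to filesystem blocks with the conversion macros used elsewhere in this file, then punch. The wrapper name below is hypothetical and shown for illustration only.

	/* Hypothetical wrapper: punch whole blocks covering [offset, offset + count). */
	static int
	punch_delalloc_bytes(struct xfs_inode *ip, xfs_off_t offset, xfs_off_t count)
	{
		struct xfs_mount	*mp = ip->i_mount;
		xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
		xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);

		/* Pass whole blocks only, as required by the comment above. */
		return xfs_bmap_punch_delalloc_range(ip, start_fsb,
				end_fsb - start_fsb);
	}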
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  633) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  634) /*
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  635)  * Test whether it is appropriate to check an inode for and free post EOF
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  636)  * blocks. The 'force' parameter determines whether we should also consider
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  637)  * regular files that are marked preallocated or append-only.
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  638)  */
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  639) bool
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  640) xfs_can_free_eofblocks(
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  641) 	struct xfs_inode	*ip,
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  642) 	bool			force)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  643) {
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  644) 	struct xfs_bmbt_irec	imap;
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  645) 	struct xfs_mount	*mp = ip->i_mount;
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  646) 	xfs_fileoff_t		end_fsb;
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  647) 	xfs_fileoff_t		last_fsb;
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  648) 	int			nimaps = 1;
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  649) 	int			error;
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  650) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  651) 	/*
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  652) 	 * Caller must either hold the exclusive io lock or be inactivating
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  653) 	 * the inode, which guarantees there are no other users of the inode.
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  654) 	 */
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  655) 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  656) 	       (VFS_I(ip)->i_state & I_FREEING));
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  657) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  658) 	/* prealloc/delalloc exists only on regular files */
c19b3b05ae440 (Dave Chinner        2016-02-09 16:54:58 +1100  659) 	if (!S_ISREG(VFS_I(ip)->i_mode))
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  660) 		return false;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  661) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  662) 	/*
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  663) 	 * Zero sized files with no cached pages and delalloc blocks will not
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  664) 	 * have speculative prealloc/delalloc blocks to remove.
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  665) 	 */
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  666) 	if (VFS_I(ip)->i_size == 0 &&
2667c6f935d97 (Dave Chinner        2014-08-04 13:23:15 +1000  667) 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  668) 	    ip->i_delayed_blks == 0)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  669) 		return false;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  670) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  671) 	/* If we haven't read in the extent list, then don't do it now. */
b2197a36c0ef5 (Christoph Hellwig   2021-04-13 11:15:12 -0700  672) 	if (xfs_need_iread_extents(&ip->i_df))
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  673) 		return false;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  674) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  675) 	/*
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  676) 	 * Do not free real preallocated or append-only files unless the file
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  677) 	 * has delalloc blocks and we are forced to remove them.
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  678) 	 */
db07349da2f56 (Christoph Hellwig   2021-03-29 11:11:44 -0700  679) 	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  680) 		if (!force || ip->i_delayed_blks == 0)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  681) 			return false;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  682) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  683) 	/*
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  684) 	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  685) 	 * range supported by the page cache, because the truncation will loop
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  686) 	 * forever.
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  687) 	 */
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  688) 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  689) 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  690) 	if (last_fsb <= end_fsb)
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  691) 		return false;
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  692) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  693) 	/*
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  694) 	 * Look up the mapping for the first block past EOF.  If we can't find
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  695) 	 * it, there's nothing to free.
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  696) 	 */
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  697) 	xfs_ilock(ip, XFS_ILOCK_SHARED);
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  698) 	error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  699) 			0);
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  700) 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  701) 	if (error || nimaps == 0)
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  702) 		return false;
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  703) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  704) 	/*
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  705) 	 * If there's a real mapping there or there are delayed allocation
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  706) 	 * reservations, then we have post-EOF blocks to try to free.
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  707) 	 */
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  708) 	return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  709) }
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  710) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  711) /*
3b4683c294095 (Brian Foster        2017-04-11 10:50:05 -0700  712)  * This is called to free any blocks beyond eof. The caller must hold
3b4683c294095 (Brian Foster        2017-04-11 10:50:05 -0700  713)  * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
3b4683c294095 (Brian Foster        2017-04-11 10:50:05 -0700  714)  * reference to the inode.
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  715)  */
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  716) int
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  717) xfs_free_eofblocks(
a36b926180cda (Brian Foster        2017-01-27 23:22:55 -0800  718) 	struct xfs_inode	*ip)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  719) {
a36b926180cda (Brian Foster        2017-01-27 23:22:55 -0800  720) 	struct xfs_trans	*tp;
a36b926180cda (Brian Foster        2017-01-27 23:22:55 -0800  721) 	struct xfs_mount	*mp = ip->i_mount;
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  722) 	int			error;
a36b926180cda (Brian Foster        2017-01-27 23:22:55 -0800  723) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  724) 	/* Attach the dquots to the inode up front. */
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  725) 	error = xfs_qm_dqattach(ip);
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  726) 	if (error)
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  727) 		return error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  728) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  729) 	/* Wait on dio to ensure i_size has settled. */
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  730) 	inode_dio_wait(VFS_I(ip));
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  731) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  732) 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  733) 	if (error) {
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  734) 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  735) 		return error;
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  736) 	}
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  737) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  738) 	xfs_ilock(ip, XFS_ILOCK_EXCL);
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  739) 	xfs_trans_ijoin(tp, ip, 0);
e4229d6b0bc92 (Brian Foster        2017-01-27 23:22:57 -0800  740) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  741) 	/*
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  742) 	 * Do not update the on-disk file size.  If we update the on-disk file
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  743) 	 * size and then the system crashes before the contents of the file are
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  744) 	 * flushed to disk, then the files may be full of holes (i.e. the NULL
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  745) 	 * files bug).
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  746) 	 */
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  747) 	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  748) 				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  749) 	if (error)
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  750) 		goto err_cancel;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  751) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  752) 	error = xfs_trans_commit(tp);
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  753) 	if (error)
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  754) 		goto out_unlock;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  755) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  756) 	xfs_inode_clear_eofblocks_tag(ip);
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  757) 	goto out_unlock;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  758) 
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  759) err_cancel:
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  760) 	/*
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  761) 	 * If we get an error at this point we simply don't
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  762) 	 * bother truncating the file.
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  763) 	 */
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  764) 	xfs_trans_cancel(tp);
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  765) out_unlock:
7d88329e5b0fe (Darrick J. Wong     2021-03-23 16:59:31 -0700  766) 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  767) 	return error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  768) }
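A minimal sketch of the intended calling pattern (the same predicate/action pairing appears in xfs_prepare_shift() further down in this file); whether 'force' is set is the caller's policy decision:

	/* Free speculative post-EOF preallocation only if this inode qualifies. */
	if (xfs_can_free_eofblocks(ip, false)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}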
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  769) 
83aee9e4c2976 (Christoph Hellwig   2013-10-12 00:55:07 -0700  770) int
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  771) xfs_alloc_file_space(
83aee9e4c2976 (Christoph Hellwig   2013-10-12 00:55:07 -0700  772) 	struct xfs_inode	*ip,
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  773) 	xfs_off_t		offset,
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  774) 	xfs_off_t		len,
5f8aca8b43f41 (Christoph Hellwig   2013-10-12 00:55:06 -0700  775) 	int			alloc_type)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  776) {
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  777) 	xfs_mount_t		*mp = ip->i_mount;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  778) 	xfs_off_t		count;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  779) 	xfs_filblks_t		allocated_fsb;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  780) 	xfs_filblks_t		allocatesize_fsb;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  781) 	xfs_extlen_t		extsz, temp;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  782) 	xfs_fileoff_t		startoffset_fsb;
e093c4be760eb (Max Reitz           2019-09-30 11:29:44 -0700  783) 	xfs_fileoff_t		endoffset_fsb;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  784) 	int			nimaps;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  785) 	int			rt;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  786) 	xfs_trans_t		*tp;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  787) 	xfs_bmbt_irec_t		imaps[1], *imapp;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  788) 	int			error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  789) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  790) 	trace_xfs_alloc_file_space(ip);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  791) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  792) 	if (XFS_FORCED_SHUTDOWN(mp))
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000  793) 		return -EIO;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  794) 
c14cfccabe2af (Darrick J. Wong     2018-05-04 15:30:21 -0700  795) 	error = xfs_qm_dqattach(ip);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  796) 	if (error)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  797) 		return error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  798) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  799) 	if (len <= 0)
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000  800) 		return -EINVAL;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  801) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  802) 	rt = XFS_IS_REALTIME_INODE(ip);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  803) 	extsz = xfs_get_extsz_hint(ip);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  804) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  805) 	count = len;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  806) 	imapp = &imaps[0];
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  807) 	nimaps = 1;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  808) 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
e093c4be760eb (Max Reitz           2019-09-30 11:29:44 -0700  809) 	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
e093c4be760eb (Max Reitz           2019-09-30 11:29:44 -0700  810) 	allocatesize_fsb = endoffset_fsb - startoffset_fsb;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  811) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  812) 	/*
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  813) 	 * Allocate file space until done or until there is an error
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  814) 	 */
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  815) 	while (allocatesize_fsb && !error) {
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  816) 		xfs_fileoff_t	s, e;
3de4eb106fcc9 (Darrick J. Wong     2021-01-26 16:44:07 -0800  817) 		unsigned int	dblocks, rblocks, resblks;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  818) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  819) 		/*
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  820) 		 * Determine space reservations for data/realtime.
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  821) 		 */
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  822) 		if (unlikely(extsz)) {
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  823) 			s = startoffset_fsb;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  824) 			do_div(s, extsz);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  825) 			s *= extsz;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  826) 			e = startoffset_fsb + allocatesize_fsb;
0703a8e1c17e2 (Dave Chinner        2018-06-08 09:54:22 -0700  827) 			div_u64_rem(startoffset_fsb, extsz, &temp);
0703a8e1c17e2 (Dave Chinner        2018-06-08 09:54:22 -0700  828) 			if (temp)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  829) 				e += temp;
0703a8e1c17e2 (Dave Chinner        2018-06-08 09:54:22 -0700  830) 			div_u64_rem(e, extsz, &temp);
0703a8e1c17e2 (Dave Chinner        2018-06-08 09:54:22 -0700  831) 			if (temp)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  832) 				e += extsz - temp;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  833) 		} else {
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  834) 			s = 0;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  835) 			e = allocatesize_fsb;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  836) 		}
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  837) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  838) 		/*
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  839) 		 * The transaction reservation is limited to a 32-bit block
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  840) 		 * count, hence we need to limit the number of blocks we are
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  841) 		 * trying to reserve to avoid an overflow. We can't allocate
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  842) 		 * more than @nimaps extents, and an extent is limited on disk
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  843) 		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  844) 		 */
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  845) 		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  846) 		if (unlikely(rt)) {
02b7ee4eb6132 (Darrick J. Wong     2021-01-26 17:20:42 -0800  847) 			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
02b7ee4eb6132 (Darrick J. Wong     2021-01-26 17:20:42 -0800  848) 			rblocks = resblks;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  849) 		} else {
02b7ee4eb6132 (Darrick J. Wong     2021-01-26 17:20:42 -0800  850) 			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
02b7ee4eb6132 (Darrick J. Wong     2021-01-26 17:20:42 -0800  851) 			rblocks = 0;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  852) 		}
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  853) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  854) 		/*
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  855) 		 * Allocate and setup the transaction.
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  856) 		 */
3de4eb106fcc9 (Darrick J. Wong     2021-01-26 16:44:07 -0800  857) 		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
3de4eb106fcc9 (Darrick J. Wong     2021-01-26 16:44:07 -0800  858) 				dblocks, rblocks, false, &tp);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  859) 		if (error)
3de4eb106fcc9 (Darrick J. Wong     2021-01-26 16:44:07 -0800  860) 			break;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  861) 
727e1acd297ca (Chandan Babu R      2021-01-22 16:48:11 -0800  862) 		error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
727e1acd297ca (Chandan Babu R      2021-01-22 16:48:11 -0800  863) 				XFS_IEXT_ADD_NOSPLIT_CNT);
727e1acd297ca (Chandan Babu R      2021-01-22 16:48:11 -0800  864) 		if (error)
35b1101099e85 (Darrick J. Wong     2021-01-26 17:23:30 -0800  865) 			goto error;
727e1acd297ca (Chandan Babu R      2021-01-22 16:48:11 -0800  866) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  867) 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
da781e64b28c1 (Brian Foster        2019-10-21 09:26:48 -0700  868) 					allocatesize_fsb, alloc_type, 0, imapp,
da781e64b28c1 (Brian Foster        2019-10-21 09:26:48 -0700  869) 					&nimaps);
f6106efae5f41 (Eric Sandeen        2016-01-11 11:34:01 +1100  870) 		if (error)
35b1101099e85 (Darrick J. Wong     2021-01-26 17:23:30 -0800  871) 			goto error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  872) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  873) 		/*
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  874) 		 * Complete the transaction
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  875) 		 */
70393313dd0b2 (Christoph Hellwig   2015-06-04 13:48:08 +1000  876) 		error = xfs_trans_commit(tp);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  877) 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
f6106efae5f41 (Eric Sandeen        2016-01-11 11:34:01 +1100  878) 		if (error)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  879) 			break;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  880) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  881) 		allocated_fsb = imapp->br_blockcount;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  882) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  883) 		if (nimaps == 0) {
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000  884) 			error = -ENOSPC;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  885) 			break;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  886) 		}
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  887) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  888) 		startoffset_fsb += allocated_fsb;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  889) 		allocatesize_fsb -= allocated_fsb;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  890) 	}
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  891) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  892) 	return error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  893) 
35b1101099e85 (Darrick J. Wong     2021-01-26 17:23:30 -0800  894) error:
4906e21545814 (Christoph Hellwig   2015-06-04 13:47:56 +1000  895) 	xfs_trans_cancel(tp);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  896) 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  897) 	return error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  898) }
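For context, this routine is what services ordinary fallocate(2) preallocation requests in this tree. A minimal userspace sketch (the mount point and file name are placeholders):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <linux/falloc.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/mnt/xfs/testfile", O_RDWR | O_CREAT, 0644);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Preallocate 16 MiB of space without changing the file size. */
		if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) < 0)
			perror("fallocate");
		close(fd);
		return 0;
	}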
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  899) 
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  900) static int
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  901) xfs_unmap_extent(
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  902) 	struct xfs_inode	*ip,
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  903) 	xfs_fileoff_t		startoffset_fsb,
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  904) 	xfs_filblks_t		len_fsb,
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  905) 	int			*done)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  906) {
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  907) 	struct xfs_mount	*mp = ip->i_mount;
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  908) 	struct xfs_trans	*tp;
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  909) 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  910) 	int			error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  911) 
3de4eb106fcc9 (Darrick J. Wong     2021-01-26 16:44:07 -0800  912) 	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
3a1af6c317d0a (Darrick J. Wong     2021-01-26 16:33:29 -0800  913) 			false, &tp);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  914) 	if (error)
3a1af6c317d0a (Darrick J. Wong     2021-01-26 16:33:29 -0800  915) 		return error;
4f317369d4695 (Christoph Hellwig   2013-12-06 12:30:12 -0800  916) 
85ef08b5a6676 (Chandan Babu R      2021-01-22 16:48:11 -0800  917) 	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
85ef08b5a6676 (Chandan Babu R      2021-01-22 16:48:11 -0800  918) 			XFS_IEXT_PUNCH_HOLE_CNT);
85ef08b5a6676 (Chandan Babu R      2021-01-22 16:48:11 -0800  919) 	if (error)
85ef08b5a6676 (Chandan Babu R      2021-01-22 16:48:11 -0800  920) 		goto out_trans_cancel;
85ef08b5a6676 (Chandan Babu R      2021-01-22 16:48:11 -0800  921) 
2af528425342d (Brian Foster        2018-07-11 22:26:25 -0700  922) 	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  923) 	if (error)
c8eac49ef798a (Brian Foster        2018-07-24 13:43:13 -0700  924) 		goto out_trans_cancel;
4f317369d4695 (Christoph Hellwig   2013-12-06 12:30:12 -0800  925) 
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  926) 	error = xfs_trans_commit(tp);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  927) out_unlock:
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  928) 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  929) 	return error;
4f69f578a87d3 (Dave Chinner        2015-06-04 09:19:08 +1000  930) 
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  931) out_trans_cancel:
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  932) 	xfs_trans_cancel(tp);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  933) 	goto out_unlock;
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  934) }
4f69f578a87d3 (Dave Chinner        2015-06-04 09:19:08 +1000  935) 
249bd9087a526 (Dave Chinner        2019-10-29 13:04:32 -0700  936) /* Caller must first wait for the completion of any pending DIOs if required. */
2c307174ab77e (Dave Chinner        2018-11-19 13:31:10 -0800  937) int
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  938) xfs_flush_unmap_range(
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  939) 	struct xfs_inode	*ip,
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  940) 	xfs_off_t		offset,
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  941) 	xfs_off_t		len)
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  942) {
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  943) 	struct xfs_mount	*mp = ip->i_mount;
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  944) 	struct inode		*inode = VFS_I(ip);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  945) 	xfs_off_t		rounding, start, end;
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  946) 	int			error;
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  947) 
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  948) 	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  949) 	start = round_down(offset, rounding);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  950) 	end = round_up(offset + len, rounding) - 1;
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  951) 
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  952) 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  953) 	if (error)
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  954) 		return error;
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  955) 	truncate_pagecache_range(inode, start, end);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  956) 	return 0;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  957) }
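A quick worked example of the rounding above, assuming 4096-byte blocks and 4096-byte pages: for offset = 5000 and len = 3000, rounding = 4096, start = round_down(5000, 4096) = 4096, and end = round_up(8000, 4096) - 1 = 8191, so the entire page containing the target range is written back and then dropped from the page cache.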
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  958) 
83aee9e4c2976 (Christoph Hellwig   2013-10-12 00:55:07 -0700  959) int
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  960) xfs_free_file_space(
83aee9e4c2976 (Christoph Hellwig   2013-10-12 00:55:07 -0700  961) 	struct xfs_inode	*ip,
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  962) 	xfs_off_t		offset,
5f8aca8b43f41 (Christoph Hellwig   2013-10-12 00:55:06 -0700  963) 	xfs_off_t		len)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  964) {
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  965) 	struct xfs_mount	*mp = ip->i_mount;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  966) 	xfs_fileoff_t		startoffset_fsb;
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  967) 	xfs_fileoff_t		endoffset_fsb;
3c2bdc912a1cc (Christoph Hellwig   2016-06-21 10:02:23 +1000  968) 	int			done = 0, error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  969) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  970) 	trace_xfs_free_file_space(ip);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  971) 
c14cfccabe2af (Darrick J. Wong     2018-05-04 15:30:21 -0700  972) 	error = xfs_qm_dqattach(ip);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  973) 	if (error)
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  974) 		return error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  975) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  976) 	if (len <= 0)	/* if nothing being freed */
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  977) 		return 0;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  978) 
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  979) 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
bdb0d04fa66d8 (Christoph Hellwig   2016-06-21 10:00:55 +1000  980) 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  981) 
fe341eb151ec0 (Darrick J. Wong     2020-09-09 14:21:06 -0700  982) 	/* We can only free complete realtime extents. */
25219dbfa734e (Darrick J. Wong     2020-10-09 16:42:59 -0700  983) 	if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
25219dbfa734e (Darrick J. Wong     2020-10-09 16:42:59 -0700  984) 		startoffset_fsb = roundup_64(startoffset_fsb,
25219dbfa734e (Darrick J. Wong     2020-10-09 16:42:59 -0700  985) 					     mp->m_sb.sb_rextsize);
25219dbfa734e (Darrick J. Wong     2020-10-09 16:42:59 -0700  986) 		endoffset_fsb = rounddown_64(endoffset_fsb,
25219dbfa734e (Darrick J. Wong     2020-10-09 16:42:59 -0700  987) 					     mp->m_sb.sb_rextsize);
fe341eb151ec0 (Darrick J. Wong     2020-09-09 14:21:06 -0700  988) 	}
fe341eb151ec0 (Darrick J. Wong     2020-09-09 14:21:06 -0700  989) 
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  990) 	/*
daa79baefc472 (Christoph Hellwig   2018-10-18 17:18:58 +1100  991) 	 * Need to zero the stuff we're not freeing, on disk.
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  992) 	 */
3c2bdc912a1cc (Christoph Hellwig   2016-06-21 10:02:23 +1000  993) 	if (endoffset_fsb > startoffset_fsb) {
3c2bdc912a1cc (Christoph Hellwig   2016-06-21 10:02:23 +1000  994) 		while (!done) {
3c2bdc912a1cc (Christoph Hellwig   2016-06-21 10:02:23 +1000  995) 			error = xfs_unmap_extent(ip, startoffset_fsb,
3c2bdc912a1cc (Christoph Hellwig   2016-06-21 10:02:23 +1000  996) 					endoffset_fsb - startoffset_fsb, &done);
3c2bdc912a1cc (Christoph Hellwig   2016-06-21 10:02:23 +1000  997) 			if (error)
3c2bdc912a1cc (Christoph Hellwig   2016-06-21 10:02:23 +1000  998) 				return error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000  999) 		}
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000 1000) 	}
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000 1001) 
3c2bdc912a1cc (Christoph Hellwig   2016-06-21 10:02:23 +1000 1002) 	/*
3c2bdc912a1cc (Christoph Hellwig   2016-06-21 10:02:23 +1000 1003) 	 * Now that we've unmapped all full blocks we'll have to zero out any
f5c54717bf2b9 (Christoph Hellwig   2018-03-13 23:15:32 -0700 1004) 	 * partial block at the beginning and/or end.  iomap_zero_range is smart
f5c54717bf2b9 (Christoph Hellwig   2018-03-13 23:15:32 -0700 1005) 	 * enough to skip any holes, including those we just created, but we
f5c54717bf2b9 (Christoph Hellwig   2018-03-13 23:15:32 -0700 1006) 	 * must take care not to zero beyond EOF and enlarge i_size.
3c2bdc912a1cc (Christoph Hellwig   2016-06-21 10:02:23 +1000 1007) 	 */
3dd09d5a8589c (Calvin Owens        2017-04-03 12:22:29 -0700 1008) 	if (offset >= XFS_ISIZE(ip))
3dd09d5a8589c (Calvin Owens        2017-04-03 12:22:29 -0700 1009) 		return 0;
3dd09d5a8589c (Calvin Owens        2017-04-03 12:22:29 -0700 1010) 	if (offset + len > XFS_ISIZE(ip))
3dd09d5a8589c (Calvin Owens        2017-04-03 12:22:29 -0700 1011) 		len = XFS_ISIZE(ip) - offset;
f150b42343974 (Christoph Hellwig   2019-10-19 09:09:46 -0700 1012) 	error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
f150b42343974 (Christoph Hellwig   2019-10-19 09:09:46 -0700 1013) 			&xfs_buffered_write_iomap_ops);
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1014) 	if (error)
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1015) 		return error;
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1016) 
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1017) 	/*
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1018) 	 * If we zeroed right up to EOF and EOF straddles a page boundary we
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1019) 	 * must make sure that the post-EOF area is also zeroed because the
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1020) 	 * page could be mmap'd and iomap_zero_range doesn't do that for us.
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1021) 	 * Writeback of the eof page will do this, albeit clumsily.
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1022) 	 */
a579121f94aba (Darrick J. Wong     2018-11-27 11:01:43 -0800 1023) 	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1024) 		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
a579121f94aba (Darrick J. Wong     2018-11-27 11:01:43 -0800 1025) 				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1026) 	}
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1027) 
e53c4b5983720 (Darrick J. Wong     2018-06-21 23:26:58 -0700 1028) 	return error;
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000 1029) }
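For context, hole punching from userspace reaches this routine via fallocate(2); FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE. A minimal sketch, assuming fd is an already-open XFS file descriptor:

	/* Deallocate bytes [1 MiB, 2 MiB) without changing the file size. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1 << 20, 1 << 20) < 0)
		perror("fallocate(PUNCH_HOLE)");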
c24b5dfadc4a4 (Dave Chinner        2013-08-12 20:49:45 +1000 1030) 
72c1a73993cfa (kbuild test robot   2015-04-13 11:25:04 +1000 1031) static int
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1032) xfs_prepare_shift(
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1033) 	struct xfs_inode	*ip,
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1034) 	loff_t			offset)
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1035) {
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1036) 	struct xfs_mount	*mp = ip->i_mount;
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1037) 	int			error;
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1038) 
f71721d061e87 (Brian Foster        2014-09-23 15:39:05 +1000 1039) 	/*
f71721d061e87 (Brian Foster        2014-09-23 15:39:05 +1000 1040) 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
f71721d061e87 (Brian Foster        2014-09-23 15:39:05 +1000 1041) 	 * into the accessible region of the file.
f71721d061e87 (Brian Foster        2014-09-23 15:39:05 +1000 1042) 	 */
41b9d7263ea1e (Brian Foster        2014-09-02 12:12:53 +1000 1043) 	if (xfs_can_free_eofblocks(ip, true)) {
a36b926180cda (Brian Foster        2017-01-27 23:22:55 -0800 1044) 		error = xfs_free_eofblocks(ip);
41b9d7263ea1e (Brian Foster        2014-09-02 12:12:53 +1000 1045) 		if (error)
41b9d7263ea1e (Brian Foster        2014-09-02 12:12:53 +1000 1046) 			return error;
41b9d7263ea1e (Brian Foster        2014-09-02 12:12:53 +1000 1047) 	}
1669a8ca21059 (Dave Chinner        2014-09-02 12:12:53 +1000 1048) 
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1049) 	/*
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1050) 	 * Shift operations must stabilize the start block offset boundary along
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1051) 	 * with the full range of the operation. If we don't, a COW writeback
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1052) 	 * completion could race with an insert, front merge with the start
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1053) 	 * extent (after split) during the shift and corrupt the file. Start
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1054) 	 * with the block just prior to the start to stabilize the boundary.
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1055) 	 */
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1056) 	offset = round_down(offset, 1 << mp->m_sb.sb_blocklog);
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1057) 	if (offset)
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1058) 		offset -= (1 << mp->m_sb.sb_blocklog);
d0c2204135a0c (Brian Foster        2019-12-11 13:18:38 -0800 1059) 
f71721d061e87 (Brian Foster        2014-09-23 15:39:05 +1000 1060) 	/*
f71721d061e87 (Brian Foster        2014-09-23 15:39:05 +1000 1061) 	 * Writeback and invalidate cache for the remainder of the file as we're
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1062) 	 * about to shift every extent from offset to EOF.
f71721d061e87 (Brian Foster        2014-09-23 15:39:05 +1000 1063) 	 */
7f9f71be84bca (Dave Chinner        2018-11-19 13:31:09 -0800 1064) 	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
1749d1ea89bdf (Brian Foster        2019-04-26 07:30:24 -0700 1065) 	if (error)
1749d1ea89bdf (Brian Foster        2019-04-26 07:30:24 -0700 1066) 		return error;
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1067) 
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1068) 	/*
3af423b03435c (Darrick J. Wong     2017-09-18 09:41:17 -0700 1069) 	 * Clean out anything hanging around in the cow fork now that
3af423b03435c (Darrick J. Wong     2017-09-18 09:41:17 -0700 1070) 	 * we've flushed all the dirty data out to disk to avoid having
3af423b03435c (Darrick J. Wong     2017-09-18 09:41:17 -0700 1071) 	 * CoW extents at the wrong offsets.
3af423b03435c (Darrick J. Wong     2017-09-18 09:41:17 -0700 1072) 	 */
51d626903083f (Christoph Hellwig   2018-07-17 16:51:51 -0700 1073) 	if (xfs_inode_has_cow_data(ip)) {
3af423b03435c (Darrick J. Wong     2017-09-18 09:41:17 -0700 1074) 		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
3af423b03435c (Darrick J. Wong     2017-09-18 09:41:17 -0700 1075) 				true);
3af423b03435c (Darrick J. Wong     2017-09-18 09:41:17 -0700 1076) 		if (error)
3af423b03435c (Darrick J. Wong     2017-09-18 09:41:17 -0700 1077) 			return error;
3af423b03435c (Darrick J. Wong     2017-09-18 09:41:17 -0700 1078) 	}
3af423b03435c (Darrick J. Wong     2017-09-18 09:41:17 -0700 1079) 
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1080) 	return 0;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1081) }
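A worked example of the boundary adjustment above, assuming a 4096-byte block size: an offset of 10000 rounds down to 8192 and then steps back one block to 4096, so writeback, invalidation and the COW fork cleanup all start one block before the shift range; an offset that rounds down to zero is left at zero.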
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1082) 
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1083) /*
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1084)  * xfs_collapse_file_space()
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1085)  *	This routine frees disk space and shifts extents for the given file.
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1086)  *	The first thing we do is free the data blocks in the specified range
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1087)  *	by calling xfs_free_file_space(). It also syncs dirty data and
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1088)  *	invalidates the page cache over the region on which the collapse
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1089)  *	range is working. Extent records are then shifted left to cover the hole.
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1090)  * RETURNS:
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1091)  *	0 on success
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1092)  *	errno on error
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1093)  *
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1094)  */
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1095) int
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1096) xfs_collapse_file_space(
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1097) 	struct xfs_inode	*ip,
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1098) 	xfs_off_t		offset,
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1099) 	xfs_off_t		len)
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1100) {
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1101) 	struct xfs_mount	*mp = ip->i_mount;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1102) 	struct xfs_trans	*tp;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1103) 	int			error;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1104) 	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1105) 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
ecfea3f0c8c64 (Christoph Hellwig   2017-10-19 11:07:11 -0700 1106) 	bool			done = false;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1107) 
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1108) 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
9ad1a23afb6c5 (Christoph Hellwig   2017-10-23 16:32:38 -0700 1109) 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
9ad1a23afb6c5 (Christoph Hellwig   2017-10-23 16:32:38 -0700 1110) 
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1111) 	trace_xfs_collapse_file_space(ip);
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1112) 
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1113) 	error = xfs_free_file_space(ip, offset, len);
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1114) 	if (error)
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1115) 		return error;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1116) 
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1117) 	error = xfs_prepare_shift(ip, offset);
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1118) 	if (error)
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1119) 		return error;
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1120) 
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1121) 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1122) 	if (error)
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1123) 		return error;
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1124) 
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1125) 	xfs_ilock(ip, XFS_ILOCK_EXCL);
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1126) 	xfs_trans_ijoin(tp, ip, 0);
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1127) 
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1128) 	while (!done) {
ecfea3f0c8c64 (Christoph Hellwig   2017-10-19 11:07:11 -0700 1129) 		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
333f950c89a17 (Brian Foster        2018-07-11 22:26:27 -0700 1130) 				&done);
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1131) 		if (error)
c8eac49ef798a (Brian Foster        2018-07-24 13:43:13 -0700 1132) 			goto out_trans_cancel;
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1133) 		if (done)
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1134) 			break;
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1135) 
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1136) 		/* finish any deferred frees and roll the transaction */
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1137) 		error = xfs_defer_finish(&tp);
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1138) 		if (error)
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1139) 			goto out_trans_cancel;
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1140) 	}
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1141) 
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1142) 	error = xfs_trans_commit(tp);
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1143) 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1144) 	return error;
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1145) 
d4a97a04227d5 (Brian Foster        2015-08-19 10:01:40 +1000 1146) out_trans_cancel:
4906e21545814 (Christoph Hellwig   2015-06-04 13:47:56 +1000 1147) 	xfs_trans_cancel(tp);
211683b21de95 (Brian Foster        2020-02-26 09:43:16 -0800 1148) 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1149) 	return error;
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1150) }
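For context, userspace reaches this routine through fallocate(2) with FALLOC_FL_COLLAPSE_RANGE; both offset and len must be multiples of the filesystem block size and the range must not extend to EOF. A minimal sketch, assuming fd is an open XFS file on a 4096-byte block filesystem:

	/* Remove bytes [4096, 8192) and shift the rest of the file down. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 4096) < 0)
		perror("fallocate(COLLAPSE_RANGE)");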
e1d8fb88a64c1 (Namjae Jeon         2014-02-24 10:58:19 +1100 1151) 
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1152) /*
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1153)  * xfs_insert_file_space()
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1154)  *	This routine creates hole space by shifting extents for the given file.
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1155)  *	The first thing we do is sync dirty data and invalidate the page cache
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1156)  *	over the region on which the insert range is working. We then split an
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1157)  *	extent in two at the given offset by calling xfs_bmap_split_extent, and
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1158)  *	shift all extent records lying between [offset, last allocated extent]
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1159)  *	to the right to make room for the hole.
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1160)  * RETURNS:
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1161)  *	0 on success
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1162)  *	errno on error
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1163)  */
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1164) int
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1165) xfs_insert_file_space(
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1166) 	struct xfs_inode	*ip,
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1167) 	loff_t			offset,
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1168) 	loff_t			len)
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1169) {
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1170) 	struct xfs_mount	*mp = ip->i_mount;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1171) 	struct xfs_trans	*tp;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1172) 	int			error;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1173) 	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1174) 	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1175) 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
ecfea3f0c8c64 (Christoph Hellwig   2017-10-19 11:07:11 -0700 1176) 	bool			done = false;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1177) 
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1178) 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
9ad1a23afb6c5 (Christoph Hellwig   2017-10-23 16:32:38 -0700 1179) 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
9ad1a23afb6c5 (Christoph Hellwig   2017-10-23 16:32:38 -0700 1180) 
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1181) 	trace_xfs_insert_file_space(ip);
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1182) 
f62cb48e43195 (Darrick J. Wong     2018-06-21 23:26:57 -0700 1183) 	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
f62cb48e43195 (Darrick J. Wong     2018-06-21 23:26:57 -0700 1184) 	if (error)
f62cb48e43195 (Darrick J. Wong     2018-06-21 23:26:57 -0700 1185) 		return error;
f62cb48e43195 (Darrick J. Wong     2018-06-21 23:26:57 -0700 1186) 
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1187) 	error = xfs_prepare_shift(ip, offset);
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1188) 	if (error)
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1189) 		return error;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1190) 
b73df17e4c5ba (Brian Foster        2020-02-26 09:43:15 -0800 1191) 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
b73df17e4c5ba (Brian Foster        2020-02-26 09:43:15 -0800 1192) 			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
b73df17e4c5ba (Brian Foster        2020-02-26 09:43:15 -0800 1193) 	if (error)
b73df17e4c5ba (Brian Foster        2020-02-26 09:43:15 -0800 1194) 		return error;
b73df17e4c5ba (Brian Foster        2020-02-26 09:43:15 -0800 1195) 
b73df17e4c5ba (Brian Foster        2020-02-26 09:43:15 -0800 1196) 	xfs_ilock(ip, XFS_ILOCK_EXCL);
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1197) 	xfs_trans_ijoin(tp, ip, 0);
b73df17e4c5ba (Brian Foster        2020-02-26 09:43:15 -0800 1198) 
85ef08b5a6676 (Chandan Babu R      2021-01-22 16:48:11 -0800 1199) 	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
85ef08b5a6676 (Chandan Babu R      2021-01-22 16:48:11 -0800 1200) 			XFS_IEXT_PUNCH_HOLE_CNT);
85ef08b5a6676 (Chandan Babu R      2021-01-22 16:48:11 -0800 1201) 	if (error)
85ef08b5a6676 (Chandan Babu R      2021-01-22 16:48:11 -0800 1202) 		goto out_trans_cancel;
85ef08b5a6676 (Chandan Babu R      2021-01-22 16:48:11 -0800 1203) 
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1204) 	/*
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1205) 	 * The extent shifting code works on extent granularity. So, if stop_fsb
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1206) 	 * is not the starting block of an extent, we need to split the extent at
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1207) 	 * stop_fsb.
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1208) 	 */
b73df17e4c5ba (Brian Foster        2020-02-26 09:43:15 -0800 1209) 	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
b73df17e4c5ba (Brian Foster        2020-02-26 09:43:15 -0800 1210) 	if (error)
b73df17e4c5ba (Brian Foster        2020-02-26 09:43:15 -0800 1211) 		goto out_trans_cancel;
b73df17e4c5ba (Brian Foster        2020-02-26 09:43:15 -0800 1212) 
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1213) 	do {
9c516e0e4554e (Brian Foster        2020-08-18 08:05:58 -0700 1214) 		error = xfs_defer_finish(&tp);
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1215) 		if (error)
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1216) 			goto out_trans_cancel;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1217) 
ecfea3f0c8c64 (Christoph Hellwig   2017-10-19 11:07:11 -0700 1218) 		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
333f950c89a17 (Brian Foster        2018-07-11 22:26:27 -0700 1219) 				&done, stop_fsb);
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1220) 		if (error)
c8eac49ef798a (Brian Foster        2018-07-24 13:43:13 -0700 1221) 			goto out_trans_cancel;
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1222) 	} while (!done);
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1223) 
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1224) 	error = xfs_trans_commit(tp);
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1225) 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1226) 	return error;
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1227) 
c8eac49ef798a (Brian Foster        2018-07-24 13:43:13 -0700 1228) out_trans_cancel:
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1229) 	xfs_trans_cancel(tp);
dd87f87d87fa4 (Brian Foster        2020-02-26 09:43:16 -0800 1230) 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
4ed36c6b09a53 (Christoph Hellwig   2017-10-19 11:07:10 -0700 1231) 	return error;
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1232) }
a904b1ca5751f (Namjae Jeon         2015-03-25 15:08:56 +1100 1233) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1234) /*
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1235)  * We need to check that the format of the data fork in the temporary inode is
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1236)  * valid for the target inode before doing the swap. This is not a problem with
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1237)  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1238)  * data fork depending on the space the attribute fork is taking, so we can get
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1239)  * invalid formats on the target inode.
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1240)  *
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1241)  * E.g. target has space for 7 extents in extent format, temp inode only has
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1242)  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1243)  * btree, but when swapped it needs to be in extent format. Hence we can't just
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1244)  * blindly swap data forks on attr2 filesystems.
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1245)  *
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1246)  * Note that we check the swap in both directions so that we don't end up with
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1247)  * a corrupt temporary inode, either.
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1248)  *
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1249)  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1250)  * inode will prevent this situation from occurring, so all we do here is
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1251)  * reject and log the attempt. Basically, we are putting the responsibility on
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1252)  * userspace to get this right.
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1253)  */
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1254) static int
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1255) xfs_swap_extents_check_format(
e06259aa08fbc (Darrick J. Wong     2016-10-03 09:11:52 -0700 1256) 	struct xfs_inode	*ip,	/* target inode */
e06259aa08fbc (Darrick J. Wong     2016-10-03 09:11:52 -0700 1257) 	struct xfs_inode	*tip)	/* tmp inode */
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1258) {
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1259) 	struct xfs_ifork	*ifp = &ip->i_df;
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1260) 	struct xfs_ifork	*tifp = &tip->i_df;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1261) 
765d3c393c222 (Darrick J. Wong     2020-05-12 16:42:51 -0700 1262) 	/* User/group/project quota ids must match if quotas are enforced. */
765d3c393c222 (Darrick J. Wong     2020-05-12 16:42:51 -0700 1263) 	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
765d3c393c222 (Darrick J. Wong     2020-05-12 16:42:51 -0700 1264) 	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
765d3c393c222 (Darrick J. Wong     2020-05-12 16:42:51 -0700 1265) 	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
ceaf603c7024d (Christoph Hellwig   2021-03-29 11:11:39 -0700 1266) 	     ip->i_projid != tip->i_projid))
765d3c393c222 (Darrick J. Wong     2020-05-12 16:42:51 -0700 1267) 		return -EINVAL;
765d3c393c222 (Darrick J. Wong     2020-05-12 16:42:51 -0700 1268) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1269) 	/* Should never get a local format */
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1270) 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1271) 	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000 1272) 		return -EINVAL;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1273) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1274) 	/*
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1275) 	 * if the target inode has fewer extents than the temporary inode then
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1276) 	 * why did userspace call us?
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1277) 	 */
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1278) 	if (ifp->if_nextents < tifp->if_nextents)
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000 1279) 		return -EINVAL;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1280) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1281) 	/*
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1282) 	 * If we have to use the (expensive) rmap swap method, we can
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1283) 	 * handle any number of extents and any format.
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1284) 	 */
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1285) 	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1286) 		return 0;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1287) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1288) 	/*
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1289) 	 * if the target inode is in extent form and the temp inode is in btree
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1290) 	 * form then we will end up with the target inode in the wrong format
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1291) 	 * as we already know there are fewer extents in the temp inode.
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1292) 	 */
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1293) 	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1294) 	    tifp->if_format == XFS_DINODE_FMT_BTREE)
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000 1295) 		return -EINVAL;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1296) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1297) 	/* Check temp in extent form to max in target */
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1298) 	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1299) 	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000 1300) 		return -EINVAL;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1301) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1302) 	/* Check target in extent form to max in temp */
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1303) 	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1304) 	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000 1305) 		return -EINVAL;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1306) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1307) 	/*
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1308) 	 * If we are in a btree format, check that the temp root block will fit
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1309) 	 * in the target and that it has enough extents to be in btree format
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1310) 	 * in the target.
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1311) 	 *
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1312) 	 * Note that we have to be careful to allow btree->extent conversions
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1313) 	 * (a common defrag case) which will occur when the temp inode is in
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1314) 	 * extent format...
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1315) 	 */
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1316) 	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
0cbe48cc5814a (Arnd Bergmann       2017-06-14 21:35:34 -0700 1317) 		if (XFS_IFORK_Q(ip) &&
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1318) 		    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000 1319) 			return -EINVAL;
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1320) 		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000 1321) 			return -EINVAL;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1322) 	}
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1323) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1324) 	/* Reciprocal target->temp btree format checks */
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1325) 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
0cbe48cc5814a (Arnd Bergmann       2017-06-14 21:35:34 -0700 1326) 		if (XFS_IFORK_Q(tip) &&
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1327) 		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000 1328) 			return -EINVAL;
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1329) 		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
2451337dd0439 (Dave Chinner        2014-06-25 14:58:08 +1000 1330) 			return -EINVAL;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1331) 	}
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1332) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1333) 	return 0;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1334) }
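
To make the defrag example in the comment above concrete, here is a minimal user-space style sketch (not kernel code) of the decisive temp-fork-in-btree check, using hypothetical inline-extent capacities of 7 slots on the target and 6 on the temporary inode; the helper name and the limits are illustrative only.

/* Illustrative sketch only; mirrors the tifp->if_format == BTREE branch above. */
#include <stdbool.h>
#include <stdio.h>

#define TARGET_MAXEXT	7	/* hypothetical inline capacity of the target */
#define TEMP_MAXEXT	6	/* hypothetical inline capacity of the temp inode */

enum fork_fmt { FMT_EXTENTS, FMT_BTREE };

/*
 * A temp data fork in btree format whose extent count would fit inline in
 * the target must be rejected: after the swap the target would be left in
 * btree format even though it should have been converted to extent format.
 */
static bool swap_leaves_wrong_format(enum fork_fmt tmp_fmt, int tmp_nextents)
{
	return tmp_fmt == FMT_BTREE && tmp_nextents <= TARGET_MAXEXT;
}

int main(void)
{
	/*
	 * Defragmenting down to 7 extents overflowed the temp inode
	 * (6 inline slots) into a btree, so the swap is refused (-EINVAL).
	 */
	printf("reject: %s\n",
	       swap_leaves_wrong_format(FMT_BTREE, 7) ? "yes" : "no");
	return 0;
}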
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1335) 
7abbb8f928e5b (Dave Chinner        2014-09-23 16:20:11 +1000 1336) static int
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1337) xfs_swap_extent_flush(
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1338) 	struct xfs_inode	*ip)
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1339) {
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1340) 	int	error;
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1341) 
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1342) 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1343) 	if (error)
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1344) 		return error;
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1345) 	truncate_pagecache_range(VFS_I(ip), 0, -1);
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1346) 
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1347) 	/* Verify O_DIRECT for ftmp */
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1348) 	if (VFS_I(ip)->i_mapping->nrpages)
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1349) 		return -EINVAL;
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1350) 	return 0;
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1351) }
4ef897a27543b (Dave Chinner        2014-08-04 13:44:08 +1000 1352) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1353) /*
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1354)  * Move extents from one file to another, when rmap is enabled.
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1355)  */
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1356) STATIC int
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1357) xfs_swap_extent_rmap(
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1358) 	struct xfs_trans		**tpp,
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1359) 	struct xfs_inode		*ip,
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1360) 	struct xfs_inode		*tip)
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1361) {
7a7943c7e0954 (Brian Foster        2018-07-11 22:26:17 -0700 1362) 	struct xfs_trans		*tp = *tpp;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1363) 	struct xfs_bmbt_irec		irec;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1364) 	struct xfs_bmbt_irec		uirec;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1365) 	struct xfs_bmbt_irec		tirec;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1366) 	xfs_fileoff_t			offset_fsb;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1367) 	xfs_fileoff_t			end_fsb;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1368) 	xfs_filblks_t			count_fsb;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1369) 	int				error;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1370) 	xfs_filblks_t			ilen;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1371) 	xfs_filblks_t			rlen;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1372) 	int				nimaps;
c8ce540db5f67 (Darrick J. Wong     2017-06-16 11:00:05 -0700 1373) 	uint64_t			tip_flags2;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1374) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1375) 	/*
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1376) 	 * If the source file has shared blocks, we must flag the donor
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1377) 	 * file as having shared blocks so that we get the shared-block
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1378) 	 * rmap functions when we go to fix up the rmaps.  The flags
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1379) 	 * will be switch for reals later.
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1380) 	 * will be switched for real later.
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1381) 	tip_flags2 = tip->i_diflags2;
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1382) 	if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1383) 		tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1384) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1385) 	offset_fsb = 0;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1386) 	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1387) 	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1388) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1389) 	while (count_fsb) {
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1390) 		/* Read extent from the donor file */
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1391) 		nimaps = 1;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1392) 		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1393) 				&nimaps, 0);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1394) 		if (error)
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1395) 			goto out;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1396) 		ASSERT(nimaps == 1);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1397) 		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1398) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1399) 		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1400) 		ilen = tirec.br_blockcount;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1401) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1402) 		/* Unmap the old blocks in the source file. */
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1403) 		while (tirec.br_blockcount) {
c8eac49ef798a (Brian Foster        2018-07-24 13:43:13 -0700 1404) 			ASSERT(tp->t_firstblock == NULLFSBLOCK);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1405) 			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1406) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1407) 			/* Read extent from the source file */
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1408) 			nimaps = 1;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1409) 			error = xfs_bmapi_read(ip, tirec.br_startoff,
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1410) 					tirec.br_blockcount, &irec,
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1411) 					&nimaps, 0);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1412) 			if (error)
d5a2e2893da0d (Brian Foster        2018-09-29 13:41:58 +1000 1413) 				goto out;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1414) 			ASSERT(nimaps == 1);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1415) 			ASSERT(tirec.br_startoff == irec.br_startoff);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1416) 			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1417) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1418) 			/* Trim the extent. */
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1419) 			uirec = tirec;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1420) 			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1421) 					tirec.br_blockcount,
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1422) 					irec.br_blockcount);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1423) 			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1424) 
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1425) 			if (xfs_bmap_is_real_extent(&uirec)) {
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1426) 				error = xfs_iext_count_may_overflow(ip,
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1427) 						XFS_DATA_FORK,
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1428) 						XFS_IEXT_SWAP_RMAP_CNT);
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1429) 				if (error)
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1430) 					goto out;
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1431) 			}
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1432) 
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1433) 			if (xfs_bmap_is_real_extent(&irec)) {
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1434) 				error = xfs_iext_count_may_overflow(tip,
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1435) 						XFS_DATA_FORK,
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1436) 						XFS_IEXT_SWAP_RMAP_CNT);
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1437) 				if (error)
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1438) 					goto out;
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1439) 			}
bcc561f21f115 (Chandan Babu R      2021-01-22 16:48:15 -0800 1440) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1441) 			/* Remove the mapping from the donor file. */
3e08f42ae7828 (Darrick J. Wong     2019-08-26 17:06:04 -0700 1442) 			xfs_bmap_unmap_extent(tp, tip, &uirec);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1443) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1444) 			/* Remove the mapping from the source file. */
3e08f42ae7828 (Darrick J. Wong     2019-08-26 17:06:04 -0700 1445) 			xfs_bmap_unmap_extent(tp, ip, &irec);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1446) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1447) 			/* Map the donor file's blocks into the source file. */
3e08f42ae7828 (Darrick J. Wong     2019-08-26 17:06:04 -0700 1448) 			xfs_bmap_map_extent(tp, ip, &uirec);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1449) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1450) 			/* Map the source file's blocks into the donor file. */
3e08f42ae7828 (Darrick J. Wong     2019-08-26 17:06:04 -0700 1451) 			xfs_bmap_map_extent(tp, tip, &irec);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1452) 
9e28a242be65b (Brian Foster        2018-07-24 13:43:15 -0700 1453) 			error = xfs_defer_finish(tpp);
7a7943c7e0954 (Brian Foster        2018-07-11 22:26:17 -0700 1454) 			tp = *tpp;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1455) 			if (error)
9b1f4e9831df2 (Brian Foster        2018-08-01 07:20:33 -0700 1456) 				goto out;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1457) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1458) 			tirec.br_startoff += rlen;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1459) 			if (tirec.br_startblock != HOLESTARTBLOCK &&
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1460) 			    tirec.br_startblock != DELAYSTARTBLOCK)
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1461) 				tirec.br_startblock += rlen;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1462) 			tirec.br_blockcount -= rlen;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1463) 		}
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1464) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1465) 		/* Roll on... */
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1466) 		count_fsb -= ilen;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1467) 		offset_fsb += ilen;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1468) 	}
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1469) 
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1470) 	tip->i_diflags2 = tip_flags2;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1471) 	return 0;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1472) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1473) out:
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1474) 	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1475) 	tip->i_diflags2 = tip_flags2;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1476) 	return error;
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1477) }
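
One pass through the inner remap loop above, traced with hypothetical extent geometry (all numbers invented for illustration):

/*
 * Worked example (hypothetical geometry) of a single inner-loop pass:
 *
 *   donor (tip) extent  tirec: file offset 0, startblock 500, 100 blocks
 *   source (ip) extent  irec:  file offset 0, startblock 900,  40 blocks
 *
 *   uirec = tirec trimmed to rlen = min(100, 40) = 40 blocks
 *
 *   unmap uirec from tip and irec from ip, then map uirec (blocks 500..539)
 *   into ip and irec (blocks 900..939) into tip, finish the deferred ops,
 *   and advance the donor extent:
 *
 *   tirec.br_startoff   += 40  ->  40
 *   tirec.br_startblock += 40  -> 540
 *   tirec.br_blockcount -= 40  ->  60 blocks left, to be matched against
 *                                  ip's next extent at file offset 40
 */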
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1478) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1479) /* Swap the extents of two files by swapping data forks. */
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1480) STATIC int
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1481) xfs_swap_extent_forks(
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1482) 	struct xfs_trans	*tp,
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1483) 	struct xfs_inode	*ip,
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1484) 	struct xfs_inode	*tip,
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1485) 	int			*src_log_flags,
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1486) 	int			*target_log_flags)
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1487) {
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700 1488) 	xfs_filblks_t		aforkblks = 0;
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700 1489) 	xfs_filblks_t		taforkblks = 0;
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700 1490) 	xfs_extnum_t		junk;
c8ce540db5f67 (Darrick J. Wong     2017-06-16 11:00:05 -0700 1491) 	uint64_t		tmp;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1492) 	int			error;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1493) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1494) 	/*
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1495) 	 * Count the number of extended attribute blocks
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1496) 	 */
daf83964a3681 (Christoph Hellwig   2020-05-18 10:27:22 -0700 1497) 	if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1498) 	    ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700 1499) 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1500) 				&aforkblks);
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1501) 		if (error)
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1502) 			return error;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1503) 	}
daf83964a3681 (Christoph Hellwig   2020-05-18 10:27:22 -0700 1504) 	if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1505) 	    tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
e7f5d5ca36e29 (Darrick J. Wong     2017-06-16 11:00:12 -0700 1506) 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1507) 				&taforkblks);
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1508) 		if (error)
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1509) 			return error;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1510) 	}
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1511) 
21b5c9784bceb (Dave Chinner        2013-08-30 10:23:44 +1000 1512) 	/*
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1513) 	 * Btree format (v3) inodes have the inode number stamped in the bmbt
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1514) 	 * block headers. We can't start changing the bmbt blocks until the
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1515) 	 * inode owner change is logged so recovery does the right thing in the
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1516) 	 * event of a crash. Set the owner change log flags now and leave the
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1517) 	 * bmbt scan as the last step.
21b5c9784bceb (Dave Chinner        2013-08-30 10:23:44 +1000 1518) 	 */
6471e9c5e7a10 (Christoph Hellwig   2020-03-18 08:15:11 -0700 1519) 	if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1520) 		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
6471e9c5e7a10 (Christoph Hellwig   2020-03-18 08:15:11 -0700 1521) 			(*target_log_flags) |= XFS_ILOG_DOWNER;
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1522) 		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
6471e9c5e7a10 (Christoph Hellwig   2020-03-18 08:15:11 -0700 1523) 			(*src_log_flags) |= XFS_ILOG_DOWNER;
6471e9c5e7a10 (Christoph Hellwig   2020-03-18 08:15:11 -0700 1524) 	}
21b5c9784bceb (Dave Chinner        2013-08-30 10:23:44 +1000 1525) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1526) 	/*
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1527) 	 * Swap the data forks of the inodes
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1528) 	 */
897992b7e3505 (Gustavo A. R. Silva 2018-07-11 22:26:38 -0700 1529) 	swap(ip->i_df, tip->i_df);
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1530) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1531) 	/*
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1532) 	 * Fix the on-disk inode values
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1533) 	 */
6e73a545f91e1 (Christoph Hellwig   2021-03-29 11:11:40 -0700 1534) 	tmp = (uint64_t)ip->i_nblocks;
6e73a545f91e1 (Christoph Hellwig   2021-03-29 11:11:40 -0700 1535) 	ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
6e73a545f91e1 (Christoph Hellwig   2021-03-29 11:11:40 -0700 1536) 	tip->i_nblocks = tmp + taforkblks - aforkblks;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1537) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1538) 	/*
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1539) 	 * The extents in the source inode could still contain speculative
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1540) 	 * preallocation beyond EOF (e.g. the file is open but not modified
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1541) 	 * while defrag is in progress). In that case, we need to copy over the
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1542) 	 * number of delalloc blocks the data fork in the source inode is
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1543) 	 * tracking beyond EOF so that when the fork is truncated away when the
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1544) 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1545) 	 * counter on that inode.
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1546) 	 */
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1547) 	ASSERT(tip->i_delayed_blks == 0);
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1548) 	tip->i_delayed_blks = ip->i_delayed_blks;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1549) 	ip->i_delayed_blks = 0;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1550) 
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1551) 	switch (ip->i_df.if_format) {
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1552) 	case XFS_DINODE_FMT_EXTENTS:
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1553) 		(*src_log_flags) |= XFS_ILOG_DEXT;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1554) 		break;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1555) 	case XFS_DINODE_FMT_BTREE:
6471e9c5e7a10 (Christoph Hellwig   2020-03-18 08:15:11 -0700 1556) 		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1557) 		       (*src_log_flags & XFS_ILOG_DOWNER));
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1558) 		(*src_log_flags) |= XFS_ILOG_DBROOT;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1559) 		break;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1560) 	}
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1561) 
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1562) 	switch (tip->i_df.if_format) {
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1563) 	case XFS_DINODE_FMT_EXTENTS:
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1564) 		(*target_log_flags) |= XFS_ILOG_DEXT;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1565) 		break;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1566) 	case XFS_DINODE_FMT_BTREE:
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1567) 		(*target_log_flags) |= XFS_ILOG_DBROOT;
6471e9c5e7a10 (Christoph Hellwig   2020-03-18 08:15:11 -0700 1568) 		ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1569) 		       (*target_log_flags & XFS_ILOG_DOWNER));
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1570) 		break;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1571) 	}
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1572) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1573) 	return 0;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1574) }
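
The block-count fix-up above only exchanges the data-fork blocks; each inode keeps its own attribute-fork blocks. A short worked example with hypothetical counts:

/*
 * Hypothetical block counts, for illustration only:
 *
 *   target (ip):  i_nblocks = 110  (100 data fork + 10 attr fork = aforkblks)
 *   temp   (tip): i_nblocks = 205  (200 data fork +  5 attr fork = taforkblks)
 *
 * After swap(ip->i_df, tip->i_df):
 *
 *   ip->i_nblocks  = 205 - 5 + 10 = 210   (temp's 200 data + own 10 attr)
 *   tip->i_nblocks = 110 + 5 - 10 = 105   (target's 100 data + own 5 attr)
 */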
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1575) 
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1576) /*
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1577)  * Fix up the owners of the bmbt blocks to refer to the current inode. The
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1578)  * change owner scan attempts to order all modified buffers in the current
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1579)  * transaction. In the event of ordered buffer failure, the offending buffer is
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1580)  * physically logged as a fallback and the scan returns -EAGAIN. We must roll
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1581)  * the transaction in this case to replenish the fallback log reservation and
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1582)  * restart the scan. This process repeats until the scan completes.
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1583)  */
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1584) static int
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1585) xfs_swap_change_owner(
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1586) 	struct xfs_trans	**tpp,
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1587) 	struct xfs_inode	*ip,
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1588) 	struct xfs_inode	*tmpip)
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1589) {
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1590) 	int			error;
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1591) 	struct xfs_trans	*tp = *tpp;
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1592) 
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1593) 	do {
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1594) 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1595) 					      NULL);
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1596) 		/* success or fatal error */
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1597) 		if (error != -EAGAIN)
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1598) 			break;
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1599) 
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1600) 		error = xfs_trans_roll(tpp);
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1601) 		if (error)
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1602) 			break;
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1603) 		tp = *tpp;
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1604) 
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1605) 		/*
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1606) 		 * Redirty both inodes so they can relog and keep the log tail
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1607) 		 * moving forward.
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1608) 		 */
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1609) 		xfs_trans_ijoin(tp, ip, 0);
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1610) 		xfs_trans_ijoin(tp, tmpip, 0);
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1611) 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1612) 		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1613) 	} while (true);
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1614) 
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1615) 	return error;
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1616) }
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1617) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1618) int
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1619) xfs_swap_extents(
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1620) 	struct xfs_inode	*ip,	/* target inode */
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1621) 	struct xfs_inode	*tip,	/* tmp inode */
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1622) 	struct xfs_swapext	*sxp)
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1623) {
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1624) 	struct xfs_mount	*mp = ip->i_mount;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1625) 	struct xfs_trans	*tp;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1626) 	struct xfs_bstat	*sbp = &sxp->sx_stat;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1627) 	int			src_log_flags, target_log_flags;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1628) 	int			error = 0;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1629) 	int			lock_flags;
c8ce540db5f67 (Darrick J. Wong     2017-06-16 11:00:05 -0700 1630) 	uint64_t		f;
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1631) 	int			resblks = 0;
f74681ba20064 (Brian Foster        2020-06-29 14:44:36 -0700 1632) 	unsigned int		flags = 0;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1633) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1634) 	/*
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1635) 	 * Lock the inodes against other IO, page faults and truncate to
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1636) 	 * begin with.  Then we can ensure the inodes are flushed and have no
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1637) 	 * page cache safely. Once we have done this we can take the ilocks and
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1638) 	 * do the rest of the checks.
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1639) 	 */
6552321831dce (Christoph Hellwig   2016-11-30 14:33:25 +1100 1640) 	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
6552321831dce (Christoph Hellwig   2016-11-30 14:33:25 +1100 1641) 	lock_flags = XFS_MMAPLOCK_EXCL;
7c2d238ac6c43 (Darrick J. Wong     2018-01-26 15:27:33 -0800 1642) 	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1643) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1644) 	/* Verify that both files have the same format */
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1645) 	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1646) 		error = -EINVAL;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1647) 		goto out_unlock;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1648) 	}
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1649) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1650) 	/* Verify both files are either real-time or non-realtime */
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1651) 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1652) 		error = -EINVAL;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1653) 		goto out_unlock;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1654) 	}
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1655) 
2713fefa5dd51 (Darrick J. Wong     2019-11-09 12:04:30 -0800 1656) 	error = xfs_qm_dqattach(ip);
2713fefa5dd51 (Darrick J. Wong     2019-11-09 12:04:30 -0800 1657) 	if (error)
2713fefa5dd51 (Darrick J. Wong     2019-11-09 12:04:30 -0800 1658) 		goto out_unlock;
2713fefa5dd51 (Darrick J. Wong     2019-11-09 12:04:30 -0800 1659) 
2713fefa5dd51 (Darrick J. Wong     2019-11-09 12:04:30 -0800 1660) 	error = xfs_qm_dqattach(tip);
2713fefa5dd51 (Darrick J. Wong     2019-11-09 12:04:30 -0800 1661) 	if (error)
2713fefa5dd51 (Darrick J. Wong     2019-11-09 12:04:30 -0800 1662) 		goto out_unlock;
2713fefa5dd51 (Darrick J. Wong     2019-11-09 12:04:30 -0800 1663) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1664) 	error = xfs_swap_extent_flush(ip);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1665) 	if (error)
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1666) 		goto out_unlock;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1667) 	error = xfs_swap_extent_flush(tip);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1668) 	if (error)
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1669) 		goto out_unlock;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1670) 
96987eea537d6 (Christoph Hellwig   2018-10-18 17:21:55 +1100 1671) 	if (xfs_inode_has_cow_data(tip)) {
96987eea537d6 (Christoph Hellwig   2018-10-18 17:21:55 +1100 1672) 		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
96987eea537d6 (Christoph Hellwig   2018-10-18 17:21:55 +1100 1673) 		if (error)
8bc3b5e4b70d2 (Darrick J. Wong     2020-05-04 14:06:27 -0700 1674) 			goto out_unlock;
96987eea537d6 (Christoph Hellwig   2018-10-18 17:21:55 +1100 1675) 	}
96987eea537d6 (Christoph Hellwig   2018-10-18 17:21:55 +1100 1676) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1677) 	/*
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1678) 	 * Extent "swapping" with rmap requires a permanent reservation and
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1679) 	 * a block reservation because it's really just a remap operation
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1680) 	 * performed with log redo items!
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1681) 	 */
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1682) 	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
daf83964a3681 (Christoph Hellwig   2020-05-18 10:27:22 -0700 1683) 		int		w = XFS_DATA_FORK;
daf83964a3681 (Christoph Hellwig   2020-05-18 10:27:22 -0700 1684) 		uint32_t	ipnext = ip->i_df.if_nextents;
daf83964a3681 (Christoph Hellwig   2020-05-18 10:27:22 -0700 1685) 		uint32_t	tipnext	= tip->i_df.if_nextents;
b3fed434822d0 (Brian Foster        2018-03-09 14:01:58 -0800 1686) 
b3fed434822d0 (Brian Foster        2018-03-09 14:01:58 -0800 1687) 		/*
b3fed434822d0 (Brian Foster        2018-03-09 14:01:58 -0800 1688) 		 * Conceptually this shouldn't affect the shape of either bmbt,
b3fed434822d0 (Brian Foster        2018-03-09 14:01:58 -0800 1689) 		 * but since we atomically move extents one by one, we reserve
b3fed434822d0 (Brian Foster        2018-03-09 14:01:58 -0800 1690) 		 * enough space to rebuild both trees.
b3fed434822d0 (Brian Foster        2018-03-09 14:01:58 -0800 1691) 		 */
b3fed434822d0 (Brian Foster        2018-03-09 14:01:58 -0800 1692) 		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
b3fed434822d0 (Brian Foster        2018-03-09 14:01:58 -0800 1693) 		resblks +=  XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
b3fed434822d0 (Brian Foster        2018-03-09 14:01:58 -0800 1694) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1695) 		/*
f74681ba20064 (Brian Foster        2020-06-29 14:44:36 -0700 1696) 		 * If either inode straddles a bmapbt block allocation boundary,
f74681ba20064 (Brian Foster        2020-06-29 14:44:36 -0700 1697) 		 * the rmapbt algorithm triggers repeated allocs and frees as
f74681ba20064 (Brian Foster        2020-06-29 14:44:36 -0700 1698) 		 * extents are remapped. This can exhaust the block reservation
f74681ba20064 (Brian Foster        2020-06-29 14:44:36 -0700 1699) 		 * prematurely and cause shutdown. Return freed blocks to the
f74681ba20064 (Brian Foster        2020-06-29 14:44:36 -0700 1700) 		 * transaction reservation to counter this behavior.
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1701) 		 */
f74681ba20064 (Brian Foster        2020-06-29 14:44:36 -0700 1702) 		flags |= XFS_TRANS_RES_FDBLKS;
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1703) 	}
f74681ba20064 (Brian Foster        2020-06-29 14:44:36 -0700 1704) 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
f74681ba20064 (Brian Foster        2020-06-29 14:44:36 -0700 1705) 				&tp);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1706) 	if (error)
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1707) 		goto out_unlock;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1708) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1709) 	/*
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1710) 	 * Lock and join the inodes to the transaction so that transaction commit
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1711) 	 * or cancel will unlock the inodes from this point onwards.
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1712) 	 */
7c2d238ac6c43 (Darrick J. Wong     2018-01-26 15:27:33 -0800 1713) 	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1714) 	lock_flags |= XFS_ILOCK_EXCL;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1715) 	xfs_trans_ijoin(tp, ip, 0);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1716) 	xfs_trans_ijoin(tp, tip, 0);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1717) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1718) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1719) 	/* Verify all data are being swapped */
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1720) 	if (sxp->sx_offset != 0 ||
13d2c10b05d8e (Christoph Hellwig   2021-03-29 11:11:40 -0700 1721) 	    sxp->sx_length != ip->i_disk_size ||
13d2c10b05d8e (Christoph Hellwig   2021-03-29 11:11:40 -0700 1722) 	    sxp->sx_length != tip->i_disk_size) {
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1723) 		error = -EFAULT;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1724) 		goto out_trans_cancel;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1725) 	}
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1726) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1727) 	trace_xfs_swap_extent_before(ip, 0);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1728) 	trace_xfs_swap_extent_before(tip, 1);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1729) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1730) 	/* check inode formats now that data is flushed */
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1731) 	error = xfs_swap_extents_check_format(ip, tip);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1732) 	if (error) {
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1733) 		xfs_notice(mp,
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1734) 		    "%s: inode 0x%llx format is incompatible for exchanging.",
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1735) 				__func__, ip->i_ino);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1736) 		goto out_trans_cancel;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1737) 	}
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1738) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1739) 	/*
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1740) 	 * Compare the current change & modify times with those
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1741) 	 * passed in.  If they differ, we abort this swap.
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1742) 	 * This is the mechanism used to assure the calling
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1743) 	 * process that the file was not changed out from
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1744) 	 * under it.
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1745) 	 */
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1746) 	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1747) 	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1748) 	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1749) 	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1750) 		error = -EBUSY;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1751) 		goto out_trans_cancel;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1752) 	}
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1753) 
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1754) 	/*
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1755) 	 * Note the trickiness in setting the log flags - we set the owner log
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1756) 	 * flag on the opposite inode (i.e. the inode we are setting the new
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1757) 	 * owner to be) because once we swap the forks and log that, log
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1758) 	 * recovery is going to see the fork as owned by the swapped inode,
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1759) 	 * not the pre-swapped inodes.
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1760) 	 */
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1761) 	src_log_flags = XFS_ILOG_CORE;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1762) 	target_log_flags = XFS_ILOG_CORE;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1763) 
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1764) 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1765) 		error = xfs_swap_extent_rmap(&tp, ip, tip);
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1766) 	else
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1767) 		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1f08af52e7c98 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1768) 				&target_log_flags);
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1769) 	if (error)
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1770) 		goto out_trans_cancel;
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1771) 
f0bc4d134b466 (Darrick J. Wong     2016-10-03 09:11:42 -0700 1772) 	/* Do we have to swap reflink flags? */
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1773) 	if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1774) 	    (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1775) 		f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1776) 		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1777) 		ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1778) 		tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
3e09ab8fdc4d4 (Christoph Hellwig   2021-03-29 11:11:45 -0700 1779) 		tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1780) 	}
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1781) 
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1782) 	/* Swap the cow forks. */
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1783) 	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1784) 		ASSERT(!ip->i_cowfp ||
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1785) 		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1786) 		ASSERT(!tip->i_cowfp ||
f7e67b20ecbbc (Christoph Hellwig   2020-05-18 10:28:05 -0700 1787) 		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1788) 
897992b7e3505 (Gustavo A. R. Silva 2018-07-11 22:26:38 -0700 1789) 		swap(ip->i_cowfp, tip->i_cowfp);
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1790) 
5bcffe300ca70 (Christoph Hellwig   2018-03-13 23:15:30 -0700 1791) 		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1792) 			xfs_inode_set_cowblocks_tag(ip);
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1793) 		else
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1794) 			xfs_inode_clear_cowblocks_tag(ip);
5bcffe300ca70 (Christoph Hellwig   2018-03-13 23:15:30 -0700 1795) 		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1796) 			xfs_inode_set_cowblocks_tag(tip);
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1797) 		else
52bfcdd7adbc2 (Darrick J. Wong     2017-09-18 09:41:18 -0700 1798) 			xfs_inode_clear_cowblocks_tag(tip);
f0bc4d134b466 (Darrick J. Wong     2016-10-03 09:11:42 -0700 1799) 	}
f0bc4d134b466 (Darrick J. Wong     2016-10-03 09:11:42 -0700 1800) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1801) 	xfs_trans_log_inode(tp, ip,  src_log_flags);
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1802) 	xfs_trans_log_inode(tp, tip, target_log_flags);
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1803) 
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1804) 	/*
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1805) 	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1806) 	 * have inode number owner values in the bmbt blocks that still refer to
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1807) 	 * the old inode. Scan each bmbt to fix up the owner values with the
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1808) 	 * inode number of the current inode.
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1809) 	 */
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1810) 	if (src_log_flags & XFS_ILOG_DOWNER) {
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1811) 		error = xfs_swap_change_owner(&tp, ip, tip);
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1812) 		if (error)
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1813) 			goto out_trans_cancel;
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1814) 	}
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1815) 	if (target_log_flags & XFS_ILOG_DOWNER) {
2dd3d709fc433 (Brian Foster        2017-08-29 10:08:40 -0700 1816) 		error = xfs_swap_change_owner(&tp, tip, ip);
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1817) 		if (error)
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1818) 			goto out_trans_cancel;
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1819) 	}
6fb10d6d22094 (Brian Foster        2017-08-29 10:08:39 -0700 1820) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1821) 	/*
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1822) 	 * If this is a synchronous mount, make sure that the
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1823) 	 * transaction goes to disk before returning to the user.
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1824) 	 */
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1825) 	if (mp->m_flags & XFS_MOUNT_WSYNC)
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1826) 		xfs_trans_set_sync(tp);
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1827) 
70393313dd0b2 (Christoph Hellwig   2015-06-04 13:48:08 +1000 1828) 	error = xfs_trans_commit(tp);
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1829) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1830) 	trace_xfs_swap_extent_after(ip, 0);
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1831) 	trace_xfs_swap_extent_after(tip, 1);
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1832) 
6552321831dce (Christoph Hellwig   2016-11-30 14:33:25 +1100 1833) out_unlock:
812176832169c (Dave Chinner        2014-08-04 13:29:32 +1000 1834) 	xfs_iunlock(ip, lock_flags);
812176832169c (Dave Chinner        2014-08-04 13:29:32 +1000 1835) 	xfs_iunlock(tip, lock_flags);
6552321831dce (Christoph Hellwig   2016-11-30 14:33:25 +1100 1836) 	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
39aff5fdb91e8 (Darrick J. Wong     2016-10-03 09:11:53 -0700 1837) 	return error;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1838) 
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1839) out_trans_cancel:
4906e21545814 (Christoph Hellwig   2015-06-04 13:47:56 +1000 1840) 	xfs_trans_cancel(tp);
6552321831dce (Christoph Hellwig   2016-11-30 14:33:25 +1100 1841) 	goto out_unlock;
a133d952b44ce (Dave Chinner        2013-08-12 20:49:48 +1000 1842) }
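
For context, a minimal user-space sketch of how a defragmenter in the style of xfs_fsr reaches xfs_swap_extents() via the XFS_IOC_SWAPEXT ioctl. It assumes the xfsprogs development headers are installed and that the temporary file already holds a fully written copy of the target's data at exactly the same length; the wrapper name is hypothetical, the donor-file preparation is omitted, and error handling is reduced to perror().

/* Sketch only: drives the legacy (v1) bulkstat + swapext interface used by xfs_fsr. */
#include <fcntl.h>
#include <linux/types.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <xfs/xfs.h>		/* XFS_IOC_SWAPEXT, XFS_IOC_FSBULKSTAT_SINGLE */

static int swap_with_donor(const char *target, const char *tmpfile)
{
	struct xfs_swapext sx;
	struct xfs_fsop_bulkreq breq;
	struct xfs_bstat bstat;
	struct stat st;
	__u64 ino;
	__s32 ocount;
	int fd, tfd;

	fd = open(target, O_RDWR);
	tfd = open(tmpfile, O_RDWR);
	if (fd < 0 || tfd < 0 || fstat(fd, &st) < 0) {
		perror("open/fstat");
		return -1;
	}

	/*
	 * Snapshot the target's timestamps via a single bulkstat so the
	 * kernel's ctime/mtime comparison can detect concurrent changes.
	 */
	ino = st.st_ino;
	memset(&breq, 0, sizeof(breq));
	breq.lastip = &ino;
	breq.icount = 1;
	breq.ubuffer = &bstat;
	breq.ocount = &ocount;
	if (ioctl(fd, XFS_IOC_FSBULKSTAT_SINGLE, &breq) < 0) {
		perror("XFS_IOC_FSBULKSTAT_SINGLE");
		return -1;
	}

	memset(&sx, 0, sizeof(sx));
	sx.sx_version = XFS_SX_VERSION;
	sx.sx_fdtarget = fd;
	sx.sx_fdtmp = tfd;
	sx.sx_offset = 0;		/* whole-file swap only */
	sx.sx_length = st.st_size;	/* must match both files' sizes */
	sx.sx_stat = bstat;

	if (ioctl(fd, XFS_IOC_SWAPEXT, &sx) < 0) {
		perror("XFS_IOC_SWAPEXT");
		return -1;
	}
	close(tfd);
	close(fd);
	return 0;
}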