VisionFive2 Linux kernel

StarFive Tech Linux Kernel for VisionFive (JH7110) boards (mirror)

// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/netfs.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/netfs.h>

MODULE_DESCRIPTION("Network fs support");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

unsigned netfs_debug;
module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");

static void netfs_rreq_work(struct work_struct *);
static void __netfs_put_subrequest(struct netfs_read_subrequest *, bool);

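/*
 * Drop a ref on a subrequest, freeing it if the refcount reaches zero.
 */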
static void netfs_put_subrequest(struct netfs_read_subrequest *subreq,
                                 bool was_async)
{
        if (refcount_dec_and_test(&subreq->usage))
                __netfs_put_subrequest(subreq, was_async);
}

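/*
 * Allocate and partially initialise a read request, giving it an initial ref
 * and marking it as in progress.
 */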
static struct netfs_read_request *netfs_alloc_read_request(
        const struct netfs_read_request_ops *ops, void *netfs_priv,
        struct file *file)
{
        static atomic_t debug_ids;
        struct netfs_read_request *rreq;

        rreq = kzalloc(sizeof(struct netfs_read_request), GFP_KERNEL);
        if (rreq) {
                rreq->netfs_ops = ops;
                rreq->netfs_priv = netfs_priv;
                rreq->inode     = file_inode(file);
                rreq->i_size    = i_size_read(rreq->inode);
                rreq->debug_id  = atomic_inc_return(&debug_ids);
                INIT_LIST_HEAD(&rreq->subrequests);
                INIT_WORK(&rreq->work, netfs_rreq_work);
                refcount_set(&rreq->usage, 1);
                __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
                ops->init_rreq(rreq, file);
                netfs_stat(&netfs_n_rh_rreq);
        }

        return rreq;
}

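/*
 * Get a ref on a read request.
 */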
static void netfs_get_read_request(struct netfs_read_request *rreq)
{
        refcount_inc(&rreq->usage);
}

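/*
 * Unhook and put all the subrequests attached to a read request.
 */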
static void netfs_rreq_clear_subreqs(struct netfs_read_request *rreq,
                                     bool was_async)
{
        struct netfs_read_subrequest *subreq;

        while (!list_empty(&rreq->subrequests)) {
                subreq = list_first_entry(&rreq->subrequests,
                                          struct netfs_read_subrequest, rreq_link);
                list_del(&subreq->rreq_link);
                netfs_put_subrequest(subreq, was_async);
        }
}

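/*
 * Free up a read request and the resources it holds.  This may be run as a
 * work item so that an asynchronous final put can defer freeing to process
 * context.
 */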
static void netfs_free_read_request(struct work_struct *work)
{
        struct netfs_read_request *rreq =
                container_of(work, struct netfs_read_request, work);
        netfs_rreq_clear_subreqs(rreq, false);
        if (rreq->netfs_priv)
                rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
        trace_netfs_rreq(rreq, netfs_rreq_trace_free);
        if (rreq->cache_resources.ops)
                rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
        kfree(rreq);
        netfs_stat_d(&netfs_n_rh_rreq);
}

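/*
 * Drop a ref on a read request.  If it was the last ref, freeing is punted to
 * a workqueue when we were called from an asynchronous (possibly softirq)
 * context.
 */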
static void netfs_put_read_request(struct netfs_read_request *rreq, bool was_async)
{
        if (refcount_dec_and_test(&rreq->usage)) {
                if (was_async) {
                        rreq->work.func = netfs_free_read_request;
                        if (!queue_work(system_unbound_wq, &rreq->work))
                                BUG();
                } else {
                        netfs_free_read_request(&rreq->work);
                }
        }
}

/*
 * Allocate and partially initialise an I/O subrequest structure.
 */
static struct netfs_read_subrequest *netfs_alloc_subrequest(
        struct netfs_read_request *rreq)
{
        struct netfs_read_subrequest *subreq;

        subreq = kzalloc(sizeof(struct netfs_read_subrequest), GFP_KERNEL);
        if (subreq) {
                INIT_LIST_HEAD(&subreq->rreq_link);
                refcount_set(&subreq->usage, 2);
                subreq->rreq = rreq;
                netfs_get_read_request(rreq);
                netfs_stat(&netfs_n_rh_sreq);
        }

        return subreq;
}

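/*
 * Get a ref on a subrequest.
 */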
static void netfs_get_read_subrequest(struct netfs_read_subrequest *subreq)
{
        refcount_inc(&subreq->usage);
}

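/*
 * Free a subrequest, dropping the ref it holds on its read request.
 */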
static void __netfs_put_subrequest(struct netfs_read_subrequest *subreq,
                                   bool was_async)
{
        struct netfs_read_request *rreq = subreq->rreq;

        trace_netfs_sreq(subreq, netfs_sreq_trace_free);
        kfree(subreq);
        netfs_stat_d(&netfs_n_rh_sreq);
        netfs_put_read_request(rreq, was_async);
}

/*
 * Clear the unread part of an I/O subrequest.
 */
static void netfs_clear_unread(struct netfs_read_subrequest *subreq)
{
        struct iov_iter iter;

        iov_iter_xarray(&iter, WRITE, &subreq->rreq->mapping->i_pages,
                        subreq->start + subreq->transferred,
                        subreq->len   - subreq->transferred);
        iov_iter_zero(iov_iter_count(&iter), &iter);
}

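/*
 * Handle the completion of a read from the cache by passing the result to the
 * common subrequest termination handler.
 */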
static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
                                        bool was_async)
{
        struct netfs_read_subrequest *subreq = priv;

        netfs_subreq_terminated(subreq, transferred_or_error, was_async);
}

/*
 * Issue a read against the cache.
 * - Eats the caller's ref on subreq.
 */
static void netfs_read_from_cache(struct netfs_read_request *rreq,
                                  struct netfs_read_subrequest *subreq,
                                  bool seek_data)
{
        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct iov_iter iter;

        netfs_stat(&netfs_n_rh_read);
        iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
                        subreq->start + subreq->transferred,
                        subreq->len   - subreq->transferred);

        cres->ops->read(cres, subreq->start, &iter, seek_data,
                        netfs_cache_read_terminated, subreq);
}

/*
 * Fill a subrequest region with zeroes.
 */
static void netfs_fill_with_zeroes(struct netfs_read_request *rreq,
                                   struct netfs_read_subrequest *subreq)
{
        netfs_stat(&netfs_n_rh_zero);
        __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
        netfs_subreq_terminated(subreq, 0, false);
}

/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from subreq->pos + subreq->transferred to
 * subreq->pos + subreq->len - 1.  It may not backtrack and write data into the
 * buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
 *   make progress.
 *
 * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void netfs_read_from_server(struct netfs_read_request *rreq,
                                   struct netfs_read_subrequest *subreq)
{
        netfs_stat(&netfs_n_rh_download);
        rreq->netfs_ops->issue_op(subreq);
}

/*
 * Release the resources held by a completed read request so that anyone
 * waiting on it can proceed.
 */
static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async)
{
        trace_netfs_rreq(rreq, netfs_rreq_trace_done);
        netfs_rreq_clear_subreqs(rreq, was_async);
        netfs_put_read_request(rreq, was_async);
}

/*
 * Deal with the completion of writing the data to the cache.  We have to clear
 * the PG_fscache bits on the pages involved and release the caller's ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */
static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
                                          bool was_async)
{
        struct netfs_read_subrequest *subreq;
        struct page *page;
        pgoff_t unlocked = 0;
        bool have_unlocked = false;

        rcu_read_lock();

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);

                xas_for_each(&xas, page, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
                        /* We might have multiple writes from the same huge
                         * page, but we mustn't unlock a page more than once.
                         */
                        if (have_unlocked && page->index <= unlocked)
                                continue;
                        unlocked = page->index;
                        end_page_fscache(page);
                        have_unlocked = true;
                }
        }

        rcu_read_unlock();
        netfs_rreq_completed(rreq, was_async);
}

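/*
 * Note the termination of a write to the cache.  The last outstanding write
 * to complete unmarks the pages and disposes of the read request.
 */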
static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
                                       bool was_async)
{
        struct netfs_read_subrequest *subreq = priv;
        struct netfs_read_request *rreq = subreq->rreq;

        if (IS_ERR_VALUE(transferred_or_error)) {
                netfs_stat(&netfs_n_rh_write_failed);
                trace_netfs_failure(rreq, subreq, transferred_or_error,
                                    netfs_fail_copy_to_cache);
        } else {
                netfs_stat(&netfs_n_rh_write_done);
        }

        trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);

        /* If we decrement nr_wr_ops to 0, the ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_wr_ops))
                netfs_rreq_unmark_after_write(rreq, was_async);

        netfs_put_subrequest(subreq, was_async);
}

/*
 * Perform any outstanding writes to the cache.  We inherit a ref from the
 * caller.
 */
static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
{
        struct netfs_cache_resources *cres = &rreq->cache_resources;
        struct netfs_read_subrequest *subreq, *next, *p;
        struct iov_iter iter;
        int ret;

        trace_netfs_rreq(rreq, netfs_rreq_trace_write);

        /* We don't want terminating writes trying to wake us up whilst we're
         * still going through the list.
         */
        atomic_inc(&rreq->nr_wr_ops);

        list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
                if (!test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags)) {
                        list_del_init(&subreq->rreq_link);
                        netfs_put_subrequest(subreq, false);
                }
        }

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                /* Amalgamate adjacent writes */
                while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
                        next = list_next_entry(subreq, rreq_link);
                        if (next->start != subreq->start + subreq->len)
                                break;
                        subreq->len += next->len;
                        list_del_init(&next->rreq_link);
                        netfs_put_subrequest(next, false);
                }

                ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
                                               rreq->i_size);
                if (ret < 0) {
                        trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
                        trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
                        continue;
                }

                iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
                                subreq->start, subreq->len);

                atomic_inc(&rreq->nr_wr_ops);
                netfs_stat(&netfs_n_rh_write);
                netfs_get_read_subrequest(subreq);
                trace_netfs_sreq(subreq, netfs_sreq_trace_write);
                cres->ops->write(cres, subreq->start, &iter,
                                 netfs_rreq_copy_terminated, subreq);
        }

        /* If we decrement nr_wr_ops to 0, the usage ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_wr_ops))
                netfs_rreq_unmark_after_write(rreq, false);
}

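/*
 * Work handler by which writing to the cache is deferred to process context.
 */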
static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
        struct netfs_read_request *rreq =
                container_of(work, struct netfs_read_request, work);

        netfs_rreq_do_write_to_cache(rreq);
}

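/*
 * Begin writing the retrieved data to the cache, punting to a workqueue if we
 * were called from an asynchronous (possibly softirq) context.
 */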
static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
                                      bool was_async)
{
        if (was_async) {
                rreq->work.func = netfs_rreq_write_to_cache_work;
                if (!queue_work(system_unbound_wq, &rreq->work))
                        BUG();
        } else {
                netfs_rreq_do_write_to_cache(rreq);
        }
}

/*
 * Unlock the pages in a read operation.  We need to set PG_fscache on any
 * pages we're going to write back before we unlock them.
 */
static void netfs_rreq_unlock(struct netfs_read_request *rreq)
{
        struct netfs_read_subrequest *subreq;
        struct page *page;
        unsigned int iopos, account = 0;
        pgoff_t start_page = rreq->start / PAGE_SIZE;
        pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
        bool subreq_failed = false;
        int i;

        XA_STATE(xas, &rreq->mapping->i_pages, start_page);

        if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
                __clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
                list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                        __clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
                }
        }

        /* Walk through the pagecache and the I/O request lists simultaneously.
         * We may have a mixture of cached and uncached sections and we only
         * really want to write out the uncached sections.  This is slightly
         * complicated by the possibility that we might have huge pages with a
         * mixture inside.
         */
        subreq = list_first_entry(&rreq->subrequests,
                                  struct netfs_read_subrequest, rreq_link);
        iopos = 0;
        subreq_failed = (subreq->error < 0);

        trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

        rcu_read_lock();
        xas_for_each(&xas, page, last_page) {
                unsigned int pgpos = (page->index - start_page) * PAGE_SIZE;
                unsigned int pgend = pgpos + thp_size(page);
                bool pg_failed = false;

                for (;;) {
                        if (!subreq) {
                                pg_failed = true;
                                break;
                        }
                        if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
                                set_page_fscache(page);
                        pg_failed |= subreq_failed;
                        if (pgend < iopos + subreq->len)
                                break;

                        account += subreq->transferred;
                        iopos += subreq->len;
                        if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
                                subreq = list_next_entry(subreq, rreq_link);
                                subreq_failed = (subreq->error < 0);
                        } else {
                                subreq = NULL;
                                subreq_failed = false;
                        }
                        if (pgend == iopos)
                                break;
                }

                if (!pg_failed) {
                        for (i = 0; i < thp_nr_pages(page); i++)
                                flush_dcache_page(page);
                        SetPageUptodate(page);
                }

                if (!test_bit(NETFS_RREQ_DONT_UNLOCK_PAGES, &rreq->flags)) {
                        if (page->index == rreq->no_unlock_page &&
                            test_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags))
                                _debug("no unlock");
                        else
                                unlock_page(page);
                }
        }
        rcu_read_unlock();

        task_io_account_read(account);
        if (rreq->netfs_ops->done)
                rreq->netfs_ops->done(rreq);
}

/*
 * Handle a short read.
 */
static void netfs_rreq_short_read(struct netfs_read_request *rreq,
                                  struct netfs_read_subrequest *subreq)
{
        __clear_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
        __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);

        netfs_stat(&netfs_n_rh_short_read);
        trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);

        netfs_get_read_subrequest(subreq);
        atomic_inc(&rreq->nr_rd_ops);
        if (subreq->source == NETFS_READ_FROM_CACHE)
                netfs_read_from_cache(rreq, subreq, true);
        else
                netfs_read_from_server(rreq, subreq);
}

/*
 * Resubmit any short or failed operations.  Returns true if we got the rreq
 * ref back.
 */
static bool netfs_rreq_perform_resubmissions(struct netfs_read_request *rreq)
{
        struct netfs_read_subrequest *subreq;

        WARN_ON(in_interrupt());

        trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);

        /* We don't want terminating submissions trying to wake us up whilst
         * we're still going through the list.
         */
        atomic_inc(&rreq->nr_rd_ops);

        __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                if (subreq->error) {
                        if (subreq->source != NETFS_READ_FROM_CACHE)
                                break;
                        subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
                        subreq->error = 0;
                        netfs_stat(&netfs_n_rh_download_instead);
                        trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
                        netfs_get_read_subrequest(subreq);
                        atomic_inc(&rreq->nr_rd_ops);
                        netfs_read_from_server(rreq, subreq);
                } else if (test_bit(NETFS_SREQ_SHORT_READ, &subreq->flags)) {
                        netfs_rreq_short_read(rreq, subreq);
                }
        }

        /* If we decrement nr_rd_ops to 0, the usage ref belongs to us. */
        if (atomic_dec_and_test(&rreq->nr_rd_ops))
                return true;

        wake_up_var(&rreq->nr_rd_ops);
        return false;
}

/*
 * Check to see if the data read is still valid.
 */
static void netfs_rreq_is_still_valid(struct netfs_read_request *rreq)
{
        struct netfs_read_subrequest *subreq;

        if (!rreq->netfs_ops->is_still_valid ||
            rreq->netfs_ops->is_still_valid(rreq))
                return;

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                if (subreq->source == NETFS_READ_FROM_CACHE) {
                        subreq->error = -ESTALE;
                        __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
                }
        }
}

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we could be in an ordinary kernel thread, on a workqueue or in
 * softirq context at this point.  We inherit a ref from the caller.
 */
static void netfs_rreq_assess(struct netfs_read_request *rreq, bool was_async)
{
        trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

again:
        netfs_rreq_is_still_valid(rreq);

        if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
            test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
                if (netfs_rreq_perform_resubmissions(rreq))
                        goto again;
                return;
        }

        netfs_rreq_unlock(rreq);

        clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
        wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

        if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
                return netfs_rreq_write_to_cache(rreq, was_async);

        netfs_rreq_completed(rreq, was_async);
}

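/*
 * Work handler by which the assessment of a read request is deferred to
 * process context.
 */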
static void netfs_rreq_work(struct work_struct *work)
{
        struct netfs_read_request *rreq =
                container_of(work, struct netfs_read_request, work);
        netfs_rreq_assess(rreq, false);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
static void netfs_rreq_terminated(struct netfs_read_request *rreq,
                                  bool was_async)
{
        if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
            was_async) {
                if (!queue_work(system_unbound_wq, &rreq->work))
                        BUG();
        } else {
                netfs_rreq_assess(rreq, was_async);
        }
}

/**
 * netfs_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O request that has terminated.
 * @transferred_or_error: The amount of data transferred or an error code.
 * @was_async: The termination was asynchronous
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates in @transferred_or_error the outcome of the operation,
 * supplying a positive value to indicate the number of bytes transferred, 0 to
 * indicate a failure to transfer anything that should be retried or a negative
 * error code.  The helper will look after reissuing I/O operations as
 * appropriate and writing downloaded data to the cache.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_subreq_terminated(struct netfs_read_subrequest *subreq,
                             ssize_t transferred_or_error,
                             bool was_async)
{
        struct netfs_read_request *rreq = subreq->rreq;
        int u;

        _enter("[%u]{%llx,%lx},%zd",
               subreq->debug_index, subreq->start, subreq->flags,
               transferred_or_error);

        switch (subreq->source) {
        case NETFS_READ_FROM_CACHE:
                netfs_stat(&netfs_n_rh_read_done);
                break;
        case NETFS_DOWNLOAD_FROM_SERVER:
                netfs_stat(&netfs_n_rh_download_done);
                break;
        default:
                break;
        }

        if (IS_ERR_VALUE(transferred_or_error)) {
                subreq->error = transferred_or_error;
                trace_netfs_failure(rreq, subreq, transferred_or_error,
                                    netfs_fail_read);
                goto failed;
        }

        if (WARN(transferred_or_error > subreq->len - subreq->transferred,
                 "Subreq overread: R%x[%x] %zd > %zu - %zu",
                 rreq->debug_id, subreq->debug_index,
                 transferred_or_error, subreq->len, subreq->transferred))
                transferred_or_error = subreq->len - subreq->transferred;

        subreq->error = 0;
        subreq->transferred += transferred_or_error;
        if (subreq->transferred < subreq->len)
                goto incomplete;

complete:
        __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
        if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
                set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);

out:
        trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

        /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
        u = atomic_dec_return(&rreq->nr_rd_ops);
        if (u == 0)
                netfs_rreq_terminated(rreq, was_async);
        else if (u == 1)
                wake_up_var(&rreq->nr_rd_ops);

        netfs_put_subrequest(subreq, was_async);
        return;

incomplete:
        if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
                netfs_clear_unread(subreq);
                subreq->transferred = subreq->len;
                goto complete;
        }

        if (transferred_or_error == 0) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  675) 		if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  676) 			subreq->error = -ENODATA;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  677) 			goto failed;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  678) 		}
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  679) 	} else {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  680) 		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  681) 	}
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  682) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  683) 	__set_bit(NETFS_SREQ_SHORT_READ, &subreq->flags);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  684) 	set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  685) 	goto out;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  686) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  687) failed:
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  688) 	if (subreq->source == NETFS_READ_FROM_CACHE) {
289af54cc67ac (David Howells 2020-11-03 11:32:41 +0000  689) 		netfs_stat(&netfs_n_rh_read_failed);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  690) 		set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  691) 	} else {
289af54cc67ac (David Howells 2020-11-03 11:32:41 +0000  692) 		netfs_stat(&netfs_n_rh_download_failed);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  693) 		set_bit(NETFS_RREQ_FAILED, &rreq->flags);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  694) 		rreq->error = subreq->error;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  695) 	}
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  696) 	goto out;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  697) }
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  698) EXPORT_SYMBOL(netfs_subreq_terminated);
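
As a usage note: a filesystem's transport completion handler normally reports
straight into netfs_subreq_terminated().  A minimal sketch, where struct my_io
and my_fs_read_done() are hypothetical transport-side names:

struct my_io {
	struct netfs_read_subrequest *subreq;
	/* ... transport-specific state ... */
};

/* Hypothetical completion callback invoked by the transport layer. */
static void my_fs_read_done(struct my_io *io, ssize_t result, bool was_async)
{
	/* Positive: bytes transferred; 0: retryable empty transfer;
	 * negative: hard error.
	 */
	netfs_subreq_terminated(io->subreq, result, was_async);
}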
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  699) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  700) static enum netfs_read_source netfs_cache_prepare_read(struct netfs_read_subrequest *subreq,
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  701) 						       loff_t i_size)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  702) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  703) 	struct netfs_read_request *rreq = subreq->rreq;
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  704) 	struct netfs_cache_resources *cres = &rreq->cache_resources;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  705) 
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  706) 	if (cres->ops)
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  707) 		return cres->ops->prepare_read(subreq, i_size);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  708) 	if (subreq->start >= rreq->i_size)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  709) 		return NETFS_FILL_WITH_ZEROES;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  710) 	return NETFS_DOWNLOAD_FROM_SERVER;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  711) }
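
When a cache backend is attached, its ->prepare_read() op makes this decision
instead.  A sketch of such an op under assumed names - my_cache_has_extent()
is a hypothetical helper that checks the backing store and may trim
subreq->len down to the extent it actually holds:

static enum netfs_read_source
my_cache_prepare_read(struct netfs_read_subrequest *subreq, loff_t i_size)
{
	if (subreq->start >= i_size)
		return NETFS_FILL_WITH_ZEROES;
	if (my_cache_has_extent(subreq->rreq, subreq->start, &subreq->len))
		return NETFS_READ_FROM_CACHE;
	return NETFS_DOWNLOAD_FROM_SERVER;
}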
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  712) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  713) /*
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  714)  * Work out what sort of subrequest the next one will be.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  715)  */
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  716) static enum netfs_read_source
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  717) netfs_rreq_prepare_read(struct netfs_read_request *rreq,
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  718) 			struct netfs_read_subrequest *subreq)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  719) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  720) 	enum netfs_read_source source;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  721) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  722) 	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  723) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  724) 	source = netfs_cache_prepare_read(subreq, rreq->i_size);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  725) 	if (source == NETFS_INVALID_READ)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  726) 		goto out;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  727) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  728) 	if (source == NETFS_DOWNLOAD_FROM_SERVER) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  729) 		/* Call out to the netfs to let it shrink the request to fit
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  730) 		 * its own I/O sizes and boundaries.  If it shrinks it here, it
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  731) 		 * will be called again to make simultaneous calls; if it wants
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  732) 		 * to make serial calls, it can indicate a short read and then
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  733) 		 * we will call it again.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  734) 		 */
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  735) 		if (subreq->len > rreq->i_size - subreq->start)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  736) 			subreq->len = rreq->i_size - subreq->start;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  737) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  738) 		if (rreq->netfs_ops->clamp_length &&
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  739) 		    !rreq->netfs_ops->clamp_length(subreq)) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  740) 			source = NETFS_INVALID_READ;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  741) 			goto out;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  742) 		}
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  743) 	}
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  744) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  745) 	if (WARN_ON(subreq->len == 0))
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  746) 		source = NETFS_INVALID_READ;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  747) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  748) out:
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  749) 	subreq->source = source;
77b4d2c6316ab (David Howells 2020-09-18 09:25:13 +0100  750) 	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  751) 	return source;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  752) }
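
A netfs's ->clamp_length() op typically just caps the slice at its wire
protocol's transfer limit.  A minimal sketch, with MY_FS_MAX_RPC_SIZE standing
in for the filesystem's real maximum RPC payload:

static bool my_fs_clamp_length(struct netfs_read_subrequest *subreq)
{
	subreq->len = min_t(size_t, subreq->len, MY_FS_MAX_RPC_SIZE);
	return true;	/* Returning false marks the slice NETFS_INVALID_READ */
}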
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  753) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  754) /*
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  755)  * Slice off a piece of a read request and submit an I/O request for it.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  756)  */
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  757) static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  758) 				    unsigned int *_debug_index)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  759) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  760) 	struct netfs_read_subrequest *subreq;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  761) 	enum netfs_read_source source;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  762) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  763) 	subreq = netfs_alloc_subrequest(rreq);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  764) 	if (!subreq)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  765) 		return false;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  766) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  767) 	subreq->debug_index	= (*_debug_index)++;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  768) 	subreq->start		= rreq->start + rreq->submitted;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  769) 	subreq->len		= rreq->len   - rreq->submitted;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  770) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  771) 	_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  772) 	list_add_tail(&subreq->rreq_link, &rreq->subrequests);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  773) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  774) 	/* Call out to the cache to find out what it can do with the remaining
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  775) 	 * subset.  It tells us in subreq->flags what it decided should be done
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  776) 	 * and adjusts subreq->len down if the subset crosses a cache boundary.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  777) 	 *
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  778) 	 * Then when we hand it the subset, it can choose to take a subset of that
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  779) 	 * (the starts must coincide), in which case, we go around the loop
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  780) 	 * again and ask it to download the next piece.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  781) 	 */
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  782) 	source = netfs_rreq_prepare_read(rreq, subreq);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  783) 	if (source == NETFS_INVALID_READ)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  784) 		goto subreq_failed;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  785) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  786) 	atomic_inc(&rreq->nr_rd_ops);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  787) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  788) 	rreq->submitted += subreq->len;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  789) 
77b4d2c6316ab (David Howells 2020-09-18 09:25:13 +0100  790) 	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  791) 	switch (source) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  792) 	case NETFS_FILL_WITH_ZEROES:
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  793) 		netfs_fill_with_zeroes(rreq, subreq);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  794) 		break;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  795) 	case NETFS_DOWNLOAD_FROM_SERVER:
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  796) 		netfs_read_from_server(rreq, subreq);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  797) 		break;
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  798) 	case NETFS_READ_FROM_CACHE:
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  799) 		netfs_read_from_cache(rreq, subreq, false);
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  800) 		break;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  801) 	default:
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  802) 		BUG();
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  803) 	}
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  804) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  805) 	return true;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  806) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  807) subreq_failed:
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  808) 	rreq->error = subreq->error;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  809) 	netfs_put_subrequest(subreq, false);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  810) 	return false;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  811) }
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  812) 
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  813) static void netfs_cache_expand_readahead(struct netfs_read_request *rreq,
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  814) 					 loff_t *_start, size_t *_len, loff_t i_size)
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  815) {
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  816) 	struct netfs_cache_resources *cres = &rreq->cache_resources;
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  817) 
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  818) 	if (cres->ops && cres->ops->expand_readahead)
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  819) 		cres->ops->expand_readahead(cres, _start, _len, i_size);
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  820) }
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  821) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  822) static void netfs_rreq_expand(struct netfs_read_request *rreq,
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  823) 			      struct readahead_control *ractl)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  824) {
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  825) 	/* Give the cache a chance to change the request parameters.  The
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  826) 	 * resultant request must contain the original region.
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  827) 	 */
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  828) 	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  829) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  830) 	/* Give the netfs a chance to change the request parameters.  The
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  831) 	 * resultant request must contain the original region.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  832) 	 */
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  833) 	if (rreq->netfs_ops->expand_readahead)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  834) 		rreq->netfs_ops->expand_readahead(rreq);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  835) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  836) 	/* Expand the request if the cache wants it to start earlier.  Note
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  837) 	 * that the expansion may get further extended if the VM wishes to
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  838) 	 * insert THPs and the preferred start and/or end wind up in the middle
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  839) 	 * of THPs.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  840) 	 *
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  841) 	 * If this is the case, however, the THP size should be an integer
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  842) 	 * multiple of the cache granule size, so we get a whole number of
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  843) 	 * granules to deal with.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  844) 	 */
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  845) 	if (rreq->start  != readahead_pos(ractl) ||
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  846) 	    rreq->len != readahead_length(ractl)) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  847) 		readahead_expand(ractl, rreq->start, rreq->len);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  848) 		rreq->start  = readahead_pos(ractl);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  849) 		rreq->len = readahead_length(ractl);
77b4d2c6316ab (David Howells 2020-09-18 09:25:13 +0100  850) 
77b4d2c6316ab (David Howells 2020-09-18 09:25:13 +0100  851) 		trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
77b4d2c6316ab (David Howells 2020-09-18 09:25:13 +0100  852) 				 netfs_read_trace_expanded);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  853) 	}
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  854) }
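
On the netfs side, ->expand_readahead() usually rounds the request out to an
I/O-friendly boundary.  A sketch assuming a hypothetical 256KiB granule; note
that the expanded range must still contain the original region:

static void my_fs_expand_readahead(struct netfs_read_request *rreq)
{
	loff_t start = round_down(rreq->start, 256 * 1024);

	/* Grow down to the granule start, then up to a whole granule. */
	rreq->len  += rreq->start - start;
	rreq->start = start;
	rreq->len   = round_up(rreq->len, 256 * 1024);
}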
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  855) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  856) /**
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  857)  * netfs_readahead - Helper to manage a read request
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  858)  * @ractl: The description of the readahead request
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  859)  * @ops: The network filesystem's operations for the helper to use
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  860)  * @netfs_priv: Private netfs data to be retained in the request
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  861)  *
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  862)  * Fulfil a readahead request by drawing data from the cache if possible, or
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  863)  * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  864)  * requests from different sources will get munged together.  If necessary, the
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  865)  * readahead window can be expanded in either direction to a more convenient
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  866)  * alignment for RPC efficiency or to make storage in the cache feasible.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  867)  *
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  868)  * The calling netfs must provide a table of operations, only one of which,
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  869)  * issue_op, is mandatory.  It may also be passed a private token, which will
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  870)  * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  871)  *
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  872)  * This is usable whether or not caching is enabled.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  873)  */
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  874) void netfs_readahead(struct readahead_control *ractl,
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  875) 		     const struct netfs_read_request_ops *ops,
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  876) 		     void *netfs_priv)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  877) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  878) 	struct netfs_read_request *rreq;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  879) 	struct page *page;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  880) 	unsigned int debug_index = 0;
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  881) 	int ret;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  882) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  883) 	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  884) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  885) 	if (readahead_count(ractl) == 0)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  886) 		goto cleanup;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  887) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  888) 	rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  889) 	if (!rreq)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  890) 		goto cleanup;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  891) 	rreq->mapping	= ractl->mapping;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  892) 	rreq->start	= readahead_pos(ractl);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  893) 	rreq->len	= readahead_length(ractl);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  894) 
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  895) 	if (ops->begin_cache_operation) {
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  896) 		ret = ops->begin_cache_operation(rreq);
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  897) 		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  898) 			goto cleanup_free;
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  899) 	}
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  900) 
289af54cc67ac (David Howells 2020-11-03 11:32:41 +0000  901) 	netfs_stat(&netfs_n_rh_readahead);
77b4d2c6316ab (David Howells 2020-09-18 09:25:13 +0100  902) 	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
77b4d2c6316ab (David Howells 2020-09-18 09:25:13 +0100  903) 			 netfs_read_trace_readahead);
77b4d2c6316ab (David Howells 2020-09-18 09:25:13 +0100  904) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  905) 	netfs_rreq_expand(rreq, ractl);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  906) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  907) 	atomic_set(&rreq->nr_rd_ops, 1);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  908) 	do {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  909) 		if (!netfs_rreq_submit_slice(rreq, &debug_index))
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  910) 			break;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  911) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  912) 	} while (rreq->submitted < rreq->len);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  913) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  914) 	/* Drop the refs on the pages here rather than in the cache or
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  915) 	 * filesystem.  The locks will be dropped in netfs_rreq_unlock().
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  916) 	 */
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  917) 	while ((page = readahead_page(ractl)))
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  918) 		put_page(page);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  919) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  920) 	/* If we decrement nr_rd_ops to 0, the ref belongs to us. */
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  921) 	if (atomic_dec_and_test(&rreq->nr_rd_ops))
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  922) 		netfs_rreq_assess(rreq, false);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  923) 	return;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  924) 
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  925) cleanup_free:
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  926) 	netfs_put_read_request(rreq, false);
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  927) 	return;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  928) cleanup:
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  929) 	if (netfs_priv)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  930) 		ops->cleanup(ractl->mapping, netfs_priv);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  931) 	return;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  932) }
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  933) EXPORT_SYMBOL(netfs_readahead);
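
Wiring this into a filesystem is a one-line wrapper in its
address_space_operations; my_fs_req_ops below is the filesystem's assumed
netfs_read_request_ops table:

static void my_fs_readahead(struct readahead_control *ractl)
{
	netfs_readahead(ractl, &my_fs_req_ops, NULL);
}

static const struct address_space_operations my_fs_aops = {
	.readahead	= my_fs_readahead,
	/* ... */
};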
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  934) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  935) /**
53b776c77aca9 (David Howells 2021-04-26 21:16:16 +0100  936)  * netfs_readpage - Helper to manage a readpage request
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  937)  * @file: The file to read from
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  938)  * @page: The page to read
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  939)  * @ops: The network filesystem's operations for the helper to use
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  940)  * @netfs_priv: Private netfs data to be retained in the request
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  941)  *
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  942)  * Fulfil a readpage request by drawing data from the cache if possible, or the
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  943)  * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  944)  * from different sources will get munged together.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  945)  *
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  946)  * The calling netfs must provide a table of operations, only one of which,
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  947)  * issue_op, is mandatory.  It may also be passed a private token, which will
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  948)  * be retained in rreq->netfs_priv and will be cleaned up by ops->cleanup().
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  949)  *
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  950)  * This is usable whether or not caching is enabled.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  951)  */
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  952) int netfs_readpage(struct file *file,
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  953) 		   struct page *page,
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  954) 		   const struct netfs_read_request_ops *ops,
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  955) 		   void *netfs_priv)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  956) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  957) 	struct netfs_read_request *rreq;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  958) 	unsigned int debug_index = 0;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  959) 	int ret;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  960) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  961) 	_enter("%lx", page_index(page));
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  962) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  963) 	rreq = netfs_alloc_read_request(ops, netfs_priv, file);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  964) 	if (!rreq) {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  965) 		if (netfs_priv)
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  966) 			ops->cleanup(page_file_mapping(page), netfs_priv);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  967) 		unlock_page(page);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  968) 		return -ENOMEM;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  969) 	}
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  970) 	rreq->mapping	= page_file_mapping(page);
53b776c77aca9 (David Howells 2021-04-26 21:16:16 +0100  971) 	rreq->start	= page_file_offset(page);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  972) 	rreq->len	= thp_size(page);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  973) 
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  974) 	if (ops->begin_cache_operation) {
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  975) 		ret = ops->begin_cache_operation(rreq);
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  976) 		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  977) 			unlock_page(page);
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  978) 			goto out;
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  979) 		}
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  980) 	}
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000  981) 
289af54cc67ac (David Howells 2020-11-03 11:32:41 +0000  982) 	netfs_stat(&netfs_n_rh_readpage);
77b4d2c6316ab (David Howells 2020-09-18 09:25:13 +0100  983) 	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
77b4d2c6316ab (David Howells 2020-09-18 09:25:13 +0100  984) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  985) 	netfs_get_read_request(rreq);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  986) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  987) 	atomic_set(&rreq->nr_rd_ops, 1);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  988) 	do {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  989) 		if (!netfs_rreq_submit_slice(rreq, &debug_index))
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  990) 			break;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  991) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  992) 	} while (rreq->submitted < rreq->len);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  993) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  994) 	/* Keep nr_rd_ops incremented so that the ref always belongs to us, and
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  995) 	 * the service code isn't punted off to a random thread pool to
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  996) 	 * process.
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  997) 	 */
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  998) 	do {
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100  999) 		wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100 1000) 		netfs_rreq_assess(rreq, false);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100 1001) 	} while (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags));
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100 1002) 
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100 1003) 	ret = rreq->error;
0246f3e5737d0 (David Howells 2021-04-06 17:31:54 +0100 1004) 	if (ret == 0 && rreq->submitted < rreq->len) {
0246f3e5737d0 (David Howells 2021-04-06 17:31:54 +0100 1005) 		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_readpage);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100 1006) 		ret = -EIO;
0246f3e5737d0 (David Howells 2021-04-06 17:31:54 +0100 1007) 	}
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000 1008) out:
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100 1009) 	netfs_put_read_request(rreq, false);
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100 1010) 	return ret;
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100 1011) }
3d3c95046742e (David Howells 2020-05-13 17:41:20 +0100 1012) EXPORT_SYMBOL(netfs_readpage);
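
The corresponding ->readpage wrapper is equally thin, reusing the same assumed
my_fs_req_ops table:

static int my_fs_readpage(struct file *file, struct page *page)
{
	return netfs_readpage(file, page, &my_fs_req_ops, NULL);
}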
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1013) 
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1014) /**
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1015)  * netfs_skip_page_read - prep a page for writing without reading first
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1016)  * @page: page being prepared
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1017)  * @pos: starting position for the write
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1018)  * @len: length of write
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1019)  *
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1020)  * In some cases, write_begin doesn't need to read at all:
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1021)  * - full page write
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1022)  * - write that lies in a page that is completely beyond EOF
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1023)  * - write that covers the page from start to EOF or beyond it
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1024)  *
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1025)  * If any of these criteria are met, then zero out the unwritten parts
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1026)  * of the page and return true. Otherwise, return false.
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1027)  */
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1028) static bool netfs_skip_page_read(struct page *page, loff_t pos, size_t len)
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1029) {
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1030) 	struct inode *inode = page->mapping->host;
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1031) 	loff_t i_size = i_size_read(inode);
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1032) 	size_t offset = offset_in_thp(page, pos);
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1033) 
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1034) 	/* Full page write */
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1035) 	if (offset == 0 && len >= thp_size(page))
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1036) 		return true;
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1037) 
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1038) 	/* Page is wholly beyond EOF */
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1039) 	if (pos - offset >= i_size)
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1040) 		goto zero_out;
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1041) 
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1042) 	/* Write that covers from the start of the page to EOF or beyond */
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1043) 	if (offset == 0 && (pos + len) >= i_size)
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1044) 		goto zero_out;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1045) 
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1046) 	return false;
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1047) zero_out:
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1048) 	zero_user_segments(page, 0, offset, offset + len, thp_size(page));
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1049) 	return true;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1050) }
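
To make the criteria concrete: with 4KiB pages and i_size = 10000, a 200-byte
write at pos 12288 lands in a page whose start (12288) is at or beyond EOF, so
the page is zeroed and the read is skipped; the same write at pos 4096 meets
none of the criteria and falls through to the normal read path.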
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1051) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1052) /**
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1053)  * netfs_write_begin - Helper to prepare for writing
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1054)  * @file: The file to read from
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1055)  * @mapping: The mapping to read from
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1056)  * @pos: File position at which the write will begin
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1057)  * @len: The length of the write (may extend beyond the end of the page chosen)
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1058)  * @flags: AOP_* flags
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1059)  * @_page: Where to put the resultant page
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1060)  * @_fsdata: Place for the netfs to store a cookie
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1061)  * @ops: The network filesystem's operations for the helper to use
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1062)  * @netfs_priv: Private netfs data to be retained in the request
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1063)  *
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1064)  * Pre-read data for a write-begin request by drawing data from the cache if
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1065)  * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1066)  * Multiple I/O requests from different sources will get munged together.  If
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1067)  * necessary, the readahead window can be expanded in either direction to a
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1068)  * more convenient alignment for RPC efficiency or to make storage in the cache
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1069)  * feasible.
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1070)  *
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1071)  * The calling netfs must provide a table of operations, only one of which,
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1072)  * issue_op, is mandatory.
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1073)  *
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1074)  * The check_write_begin() operation can be provided to check for and flush
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1075)  * conflicting writes once the page is grabbed and locked.  It is passed a
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1076)  * pointer to the fsdata cookie that gets returned to the VM to be passed to
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1077)  * write_end.  It is permitted to sleep.  It should return 0 if the request
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1078)  * should go ahead; unlock the page and return -EAGAIN to cause the page to be
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1079)  * re-obtained; or return an error.
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1080)  *
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1081)  * This is usable whether or not caching is enabled.
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1082)  */
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1083) int netfs_write_begin(struct file *file, struct address_space *mapping,
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1084) 		      loff_t pos, unsigned int len, unsigned int flags,
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1085) 		      struct page **_page, void **_fsdata,
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1086) 		      const struct netfs_read_request_ops *ops,
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1087) 		      void *netfs_priv)
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1088) {
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1089) 	struct netfs_read_request *rreq;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1090) 	struct page *page, *xpage;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1091) 	struct inode *inode = file_inode(file);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1092) 	unsigned int debug_index = 0;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1093) 	pgoff_t index = pos >> PAGE_SHIFT;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1094) 	int ret;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1095) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1096) 	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1097) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1098) retry:
19dee613816d5 (David Howells 2021-05-13 11:03:32 +0100 1099) 	page = grab_cache_page_write_begin(mapping, index, flags);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1100) 	if (!page)
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1101) 		return -ENOMEM;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1102) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1103) 	if (ops->check_write_begin) {
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1104) 		/* Allow the netfs (eg. ceph) to flush conflicts. */
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1105) 		ret = ops->check_write_begin(file, pos, len, page, _fsdata);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1106) 		if (ret < 0) {
0246f3e5737d0 (David Howells 2021-04-06 17:31:54 +0100 1107) 			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1108) 			if (ret == -EAGAIN) {
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1108) 				/* ->check_write_begin() unlocked the page;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1108) 				 * drop our ref before regrabbing it or the
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1108) 				 * ref would leak.
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1108) 				 */
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1108) 				put_page(page);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1109) 				goto retry;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1109) 			}
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1110) 			goto error;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1111) 		}
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1112) 	}
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1113) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1114) 	if (PageUptodate(page))
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1115) 		goto have_page;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1116) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1117) 	/* If the page is beyond the EOF, we want to clear it - unless it's
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1118) 	 * within the cache granule containing the EOF, in which case we need
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1119) 	 * to preload the granule.
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1120) 	 */
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1121) 	if (!ops->is_cache_enabled(inode) &&
827a746f405d2 (Jeff Layton   2021-06-13 19:33:45 -0400 1122) 	    netfs_skip_page_read(page, pos, len)) {
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1123) 		netfs_stat(&netfs_n_rh_write_zskip);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1124) 		goto have_page_no_wait;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1125) 	}
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1126) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1127) 	ret = -ENOMEM;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1128) 	rreq = netfs_alloc_read_request(ops, netfs_priv, file);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1129) 	if (!rreq)
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1130) 		goto error;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1131) 	rreq->mapping		= page->mapping;
53b776c77aca9 (David Howells 2021-04-26 21:16:16 +0100 1132) 	rreq->start		= page_offset(page);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1133) 	rreq->len		= thp_size(page);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1134) 	rreq->no_unlock_page	= page->index;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1135) 	__set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1136) 	netfs_priv = NULL;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1137) 
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000 1138) 	if (ops->begin_cache_operation) {
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000 1139) 		ret = ops->begin_cache_operation(rreq);
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000 1140) 		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000 1141) 			goto error_put;
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000 1142) 	}
726218fdc22c9 (David Howells 2020-02-06 14:22:24 +0000 1143) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1144) 	netfs_stat(&netfs_n_rh_write_begin);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1145) 	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1146) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1147) 	/* Expand the request to meet caching requirements and download
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1148) 	 * preferences.
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1149) 	 */
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1150) 	ractl._nr_pages = thp_nr_pages(page);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1151) 	netfs_rreq_expand(rreq, &ractl);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1152) 	netfs_get_read_request(rreq);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1153) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1154) 	/* We hold the page locks, so we can drop the references */
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1155) 	while ((xpage = readahead_page(&ractl)))
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1156) 		if (xpage != page)
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1157) 			put_page(xpage);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1158) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1159) 	atomic_set(&rreq->nr_rd_ops, 1);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1160) 	do {
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1161) 		if (!netfs_rreq_submit_slice(rreq, &debug_index))
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1162) 			break;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1163) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1164) 	} while (rreq->submitted < rreq->len);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1165) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1166) 	/* Keep nr_rd_ops incremented so that the ref always belongs to us, and
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1167) 	 * the service code isn't punted off to a random thread pool to
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1168) 	 * process.
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1169) 	 */
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1170) 	for (;;) {
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1171) 		wait_var_event(&rreq->nr_rd_ops, atomic_read(&rreq->nr_rd_ops) == 1);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1172) 		netfs_rreq_assess(rreq, false);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1173) 		if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1174) 			break;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1175) 		cond_resched();
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1176) 	}
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1177) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1178) 	ret = rreq->error;
0246f3e5737d0 (David Howells 2021-04-06 17:31:54 +0100 1179) 	if (ret == 0 && rreq->submitted < rreq->len) {
0246f3e5737d0 (David Howells 2021-04-06 17:31:54 +0100 1180) 		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1181) 		ret = -EIO;
0246f3e5737d0 (David Howells 2021-04-06 17:31:54 +0100 1182) 	}
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1183) 	netfs_put_read_request(rreq, false);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1184) 	if (ret < 0)
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1185) 		goto error;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1186) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1187) have_page:
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1188) 	ret = wait_on_page_fscache_killable(page);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1189) 	if (ret < 0)
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1190) 		goto error;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1191) have_page_no_wait:
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1192) 	if (netfs_priv)
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1193) 		ops->cleanup(mapping, netfs_priv);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1194) 	*_page = page;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1195) 	_leave(" = 0");
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1196) 	return 0;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1197) 
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1198) error_put:
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1199) 	netfs_put_read_request(rreq, false);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1200) error:
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1201) 	unlock_page(page);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1202) 	put_page(page);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1203) 	if (netfs_priv)
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1204) 		ops->cleanup(mapping, netfs_priv);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1205) 	_leave(" = %d", ret);
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1206) 	return ret;
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1207) }
e1b1240c1ff5f (David Howells 2020-09-22 11:06:07 +0100 1208) EXPORT_SYMBOL(netfs_write_begin);
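
Finally, a filesystem's ->write_begin simply forwards to the helper; again
my_fs_req_ops is the assumed ops table:

static int my_fs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned int len, unsigned int flags,
			     struct page **pagep, void **fsdata)
{
	return netfs_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, &my_fs_req_ops, NULL);
}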