Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_buf.h"
9 : #include "xfs_buf_xfile.h"
10 : #include "scrub/xfile.h"
11 :
12 : /* Perform a buffer IO to an xfile. Caller must be in process context. */
13 : int
14 91657 : xfile_buf_ioapply(
15 : struct xfs_buf *bp)
16 : {
17 91657 : struct xfile *xfile = bp->b_target->bt_xfile;
18 91657 : loff_t pos = BBTOB(xfs_buf_daddr(bp));
19 91657 : size_t size = BBTOB(bp->b_length);
20 :
21 91657 : if (bp->b_target->bt_flags & XFS_BUFTARG_DIRECT_MAP) {
22 : /* direct mapping means no io necessary */
23 : return 0;
24 : }
25 :
26 0 : if (bp->b_map_count > 1) {
27 : /* We don't need or support multi-map buffers. */
28 0 : ASSERT(0);
29 0 : return -EIO;
30 : }
31 :
32 0 : if (bp->b_flags & XBF_WRITE)
33 0 : return xfile_obj_store(xfile, bp->b_addr, size, pos);
34 0 : return xfile_obj_load(xfile, bp->b_addr, size, pos);
35 : }
36 :
37 : /* Allocate a buffer cache target for a memory-backed file. */
38 : int
39 47052 : xfile_alloc_buftarg(
40 : struct xfs_mount *mp,
41 : const char *descr,
42 : struct xfs_buftarg **btpp)
43 : {
44 47052 : struct xfs_buftarg *btp;
45 47052 : struct xfile *xfile;
46 47052 : int error;
47 :
48 47052 : error = xfile_create(descr, 0, &xfile);
49 47069 : if (error)
50 : return error;
51 :
52 : /*
53 : * We're hooking the xfile up to the buffer cache, so disable its
54 : * internal page caching because all callers should be using xfs_buf
55 : * functions.
56 : */
57 47058 : xfile_cache_disable(xfile);
58 :
59 47029 : error = xfs_buf_cache_init(&xfile->bcache);
60 47033 : if (error)
61 0 : goto out_xfile;
62 :
63 47033 : btp = xfs_alloc_buftarg_common(mp, descr);
64 47102 : if (!btp) {
65 0 : error = -ENOMEM;
66 0 : goto out_bcache;
67 : }
68 :
69 47102 : btp->bt_xfile = xfile;
70 47102 : btp->bt_dev = (dev_t)-1U;
71 47102 : btp->bt_flags |= XFS_BUFTARG_XFILE;
72 47102 : btp->bt_cache = &xfile->bcache;
73 :
74 47102 : btp->bt_meta_sectorsize = SECTOR_SIZE;
75 47102 : btp->bt_meta_sectormask = SECTOR_SIZE - 1;
76 47102 : btp->bt_logical_sectorsize = SECTOR_SIZE;
77 47102 : btp->bt_logical_sectormask = SECTOR_SIZE - 1;
78 :
79 47102 : *btpp = btp;
80 47102 : return 0;
81 :
82 : out_bcache:
83 0 : xfs_buf_cache_destroy(&xfile->bcache);
84 0 : out_xfile:
85 0 : xfile_destroy(xfile);
86 0 : return error;
87 : }
88 :
89 : /* Free a buffer cache target for a memory-backed file. */
90 : void
91 46965 : xfile_free_buftarg(
92 : struct xfs_buftarg *btp)
93 : {
94 46965 : struct xfile *xfile = btp->bt_xfile;
95 :
96 46965 : ASSERT(btp->bt_flags & XFS_BUFTARG_XFILE);
97 :
98 46965 : xfs_free_buftarg(btp);
99 47098 : xfs_buf_cache_destroy(&xfile->bcache);
100 46913 : xfile_destroy(xfile);
101 46955 : }
102 :
103 : /* Sector count for this xfile buftarg. */
104 : xfs_daddr_t
105 3341807782 : xfile_buftarg_nr_sectors(
106 : struct xfs_buftarg *btp)
107 : {
108 3341807782 : return xfile_size(btp->bt_xfile) >> SECTOR_SHIFT;
109 : }
110 :
111 : /* Free an xfile page that was directly mapped into the buffer cache. */
112 : static int
113 470485 : xfile_buf_put_page(
114 : struct xfile *xfile,
115 : loff_t pos,
116 : struct page *page)
117 : {
118 470485 : struct xfile_page xfpage = {
119 : .page = page,
120 470485 : .pos = round_down(pos, PAGE_SIZE),
121 : };
122 :
123 470485 : lock_page(xfpage.page);
124 :
125 470635 : return xfile_put_page(xfile, &xfpage);
126 : }
127 :
128 : /* Grab the xfile page for this part of the xfile. */
129 : static int
130 470570 : xfile_buf_get_page(
131 : struct xfile *xfile,
132 : loff_t pos,
133 : unsigned int len,
134 : struct page **pagep)
135 : {
136 470570 : struct xfile_page xfpage = { NULL };
137 470570 : int error;
138 :
139 470570 : error = xfile_get_page(xfile, pos, len, &xfpage);
140 470642 : if (error)
141 : return error;
142 :
143 : /*
144 : * Fall back to regular DRAM buffers if tmpfs gives us fsdata or the
145 : * page pos isn't what we were expecting.
146 : */
147 470642 : if (xfpage.fsdata || xfpage.pos != round_down(pos, PAGE_SIZE)) {
148 0 : xfile_put_page(xfile, &xfpage);
149 0 : return -ENOTBLK;
150 : }
151 :
152 : /* Unlock the page before we start using them for the buffer cache. */
153 470642 : ASSERT(PageUptodate(xfpage.page));
154 470556 : unlock_page(xfpage.page);
155 :
156 470662 : *pagep = xfpage.page;
157 470662 : return 0;
158 : }
159 :
/*
 * Try to map storage directly, if the target supports it.  Returns 0 for
 * success, -ENOTBLK to mean "not supported", or the usual negative errno.
 */
int
xfile_buf_map_pages(
	struct xfs_buf		*bp,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf_map	*map;
	gfp_t			gfp_mask = __GFP_NOWARN;
	const unsigned int	page_align_mask = PAGE_SIZE - 1;
	unsigned int		m, p, n;
	unsigned int		first_page_offset;
	int			error;

	ASSERT(xfile_buftarg_can_direct_map(bp->b_target));

	/*
	 * For direct-map buffer targets with multiple mappings, the first map
	 * must end on a page boundary and the rest of the mappings must start
	 * and end on a page boundary.  For single-mapping buffers, we don't
	 * care.
	 *
	 * NOTE(review): the loop below runs while m < b_map_count - 1, so
	 * the final map's start alignment is never checked -- confirm this
	 * is intentional (a buffer may legitimately end mid-page, but its
	 * last map presumably still needs a page-aligned start).
	 */
	if (bp->b_map_count > 1) {
		map = &bp->b_maps[0];
		if (BBTOB(map->bm_bn + map->bm_len) & page_align_mask)
			return -ENOTBLK;

		for (m = 1, map++; m < bp->b_map_count - 1; m++, map++)
			if (BBTOB(map->bm_bn | map->bm_len) & page_align_mask)
				return -ENOTBLK;
	}

	/* Readahead is best-effort, so don't retry its allocations hard. */
	if (flags & XBF_READ_AHEAD)
		gfp_mask |= __GFP_NORETRY;
	else
		gfp_mask |= GFP_NOFS;

	/* Allocate the page pointer array; the pages come from the xfile. */
	error = xfs_buf_alloc_page_array(bp, gfp_mask);
	if (error)
		return error;

	/* Map in the xfile pages. */
	first_page_offset = offset_in_page(BBTOB(xfs_buf_daddr(bp)));
	for (m = 0, p = 0, map = bp->b_maps; m < bp->b_map_count; m++, map++) {
		for (n = 0; n < map->bm_len; n += BTOBB(PAGE_SIZE)) {
			unsigned int	len;

			len = min_t(unsigned int, BBTOB(map->bm_len - n),
					PAGE_SIZE);

			/* One page pointer slot is filled per get. */
			error = xfile_buf_get_page(bp->b_target->bt_xfile,
					BBTOB(map->bm_bn + n), len,
					&bp->b_pages[p++]);
			if (error)
				goto fail;
		}
	}

	bp->b_flags |= _XBF_DIRECT_MAP;
	bp->b_offset = first_page_offset;
	return 0;

fail:
	/*
	 * Release all the xfile pages and free the page array, we're falling
	 * back to a DRAM buffer, which could be pages or a slab allocation.
	 *
	 * A NULL slot marks where the mapping loop stopped; p is advanced
	 * only past slots that actually hold a page, so each put below pairs
	 * with a successful get above.
	 */
	for (m = 0, p = 0, map = bp->b_maps; m < bp->b_map_count; m++, map++) {
		for (n = 0; n < map->bm_len; n += BTOBB(PAGE_SIZE)) {
			if (bp->b_pages[p] == NULL)
				continue;

			xfile_buf_put_page(bp->b_target->bt_xfile,
					BBTOB(map->bm_bn + n),
					bp->b_pages[p++]);
		}
	}

	xfs_buf_free_page_array(bp);
	return error;
}
243 :
244 : /* Unmap all the direct-mapped buffer pages. */
245 : void
246 470547 : xfile_buf_unmap_pages(
247 : struct xfs_buf *bp)
248 : {
249 470547 : struct xfs_buf_map *map;
250 470547 : unsigned int m, p, n;
251 470547 : int error = 0, err2;
252 :
253 470547 : ASSERT(xfile_buftarg_can_direct_map(bp->b_target));
254 :
255 941168 : for (m = 0, p = 0, map = bp->b_maps; m < bp->b_map_count; m++, map++) {
256 941174 : for (n = 0; n < map->bm_len; n += BTOBB(PAGE_SIZE)) {
257 470553 : err2 = xfile_buf_put_page(bp->b_target->bt_xfile,
258 470553 : BBTOB(map->bm_bn + n),
259 470553 : bp->b_pages[p++]);
260 470627 : if (!error && err2)
261 0 : error = err2;
262 : }
263 : }
264 :
265 470621 : if (error)
266 0 : xfs_err(bp->b_mount, "%s failed errno %d", __func__, error);
267 :
268 470621 : bp->b_flags &= ~_XBF_DIRECT_MAP;
269 470621 : xfs_buf_free_page_array(bp);
270 470405 : }
|