/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}

static int sync_buffer(void *word)
{
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	smp_mb();
	bd = bh->b_bdev;
	if (bd)
		blk_run_address_space(bd->bd_inode->i_mapping);
	io_schedule();
	return 0;
}

void fastcall __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void fastcall unlock_buffer(struct buffer_head *bh)
{
	smp_mb__before_clear_bit();
	clear_buffer_locked(bh);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
}

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
}
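
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * wants to examine or modify buffer state exclusively takes the buffer
 * lock around the access, while a caller that merely needs a prior I/O to
 * finish waits without locking.  example_touch_buffer() is a hypothetical
 * name.
 *
 *	static void example_touch_buffer(struct buffer_head *bh)
 *	{
 *		lock_buffer(bh);		// may sleep in __lock_buffer()
 *		// ... inspect or modify bh contents/state ...
 *		unlock_buffer(bh);		// wakes waiters on BH_Lock
 *
 *		wait_on_buffer(bh);		// just wait; no exclusion taken
 *	}
 */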

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens, due to failed READA attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
				       bdevname(bh->b_bdev, b));
		}
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
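
/*
 * Usage sketch (illustrative only): the classic pattern for a synchronous
 * buffer write pairs end_buffer_write_sync() with submit_bh().  The extra
 * reference taken with get_bh() is dropped by the completion handler via
 * put_bh().  example_write_buffer() is a hypothetical caller.
 *
 *	static int example_write_buffer(struct buffer_head *bh)
 *	{
 *		lock_buffer(bh);
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(WRITE, bh);
 *		wait_on_buffer(bh);
 *		return buffer_uptodate(bh) ? 0 : -EIO;
 *	}
 */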

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	int ret = 0;

	if (bdev)
		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.   Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = fsync_super(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * This takes the block device bd_mount_sem to make sure no new mounts
 * happen on bdev until thaw_bdev() is called.
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;

	down(&bdev->bd_mount_sem);
	sb = get_super(bdev);
	if (sb && !(sb->s_flags & MS_RDONLY)) {
		sb->s_frozen = SB_FREEZE_WRITE;
		smp_wmb();

		__fsync_super(sb);

		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();

		sync_blockdev(sb->s_bdev);

		if (sb->s_op->write_super_lockfs)
			sb->s_op->write_super_lockfs(sb);
	}

	sync_blockdev(bdev);
	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
}
EXPORT_SYMBOL(freeze_bdev);
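
/*
 * Usage sketch (illustrative only): freeze_bdev() and thaw_bdev() bracket
 * an operation that needs an on-disk-consistent image, such as taking a
 * device snapshot.  example_snapshot() is a hypothetical caller; the
 * snapshot step itself is elided.
 *
 *	static void example_snapshot(struct block_device *bdev)
 *	{
 *		struct super_block *sb = freeze_bdev(bdev);
 *		// ... copy or snapshot the now-quiescent device ...
 *		thaw_bdev(bdev, sb);	// must pass back freeze_bdev()'s result
 *	}
 */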

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	if (sb) {
		BUG_ON(sb->s_bdev != bdev);

		if (sb->s_op->unlockfs)
			sb->s_op->unlockfs(sb);
		sb->s_frozen = SB_UNFROZEN;
		smp_wmb();
		wake_up(&sb->s_wait_unfrozen);
		drop_super(sb);
	}

	up(&bdev->bd_mount_sem);
}
EXPORT_SYMBOL(thaw_bdev);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		if (!buffer_mapped(bh))
			all_mapped = 0;
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file io on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	if (all_mapped) {
		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}
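
/*
 * Worked example (illustrative only) of the index arithmetic above,
 * assuming PAGE_CACHE_SHIFT == 12 (4K pages) and a 1K blocksize
 * (i_blkbits == 10): each page covers 1 << (12 - 10) = 4 blocks, so
 * block 35 lives on pagecache page index 35 >> 2 == 8, as the fourth
 * buffer of that page (blocks 32..35).  The do/while walk over the
 * page's buffer ring then matches on b_blocknr == 35.
 */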

/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: In the case where the user removed a removable-media disk even if
   there's still dirty data not synced on disk (due to a bug in the device
   driver or to an error of the user), by not destroying the dirty buffers
   we could generate corruption also on the next media inserted; thus a
   parameter is necessary to handle this case in the safest way possible
   (trying not to corrupt the newly inserted disk as well with the data
   belonging to the old, now corrupted, disk). Also for the ramdisk the
   natural thing to do in order to release the ramdisk memory is to destroy
   dirty buffers.

   These are two special cases. Normal usage implies that the device driver
   issues a sync on the device (without waiting for I/O completion) and
   then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced. It is needed to re-read from disk any pinned
   buffer. NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)
		return;

	invalidate_bh_lrus();
	invalidate_mapping_pages(mapping, 0, -1);
}

/*
 * Kick pdflush then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone **zones;
	pg_data_t *pgdat;

	wakeup_pdflush(1024);
	yield();

	for_each_online_pgdat(pgdat) {
		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
		if (*zones)
			try_to_free_pages(zones, 0, GFP_NOFS);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		if (printk_ratelimit())
			buffer_io_error(bh);
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		if (printk_ratelimit()) {
			buffer_io_error(bh);
			printk(KERN_WARNING "lost page write due to "
					"I/O error on %s\n",
			       bdevname(bh->b_bdev, b));
		}
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}

/*
 * If a page's buffers are under async read (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_write;
	set_buffer_async_write(bh);
}
EXPORT_SYMBOL(mark_buffer_async_write);
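
/*
 * Usage sketch (illustrative only, heavily simplified from the real
 * block_write_full_page() path): a writepage-style caller marks every
 * buffer it intends to write as async_write in a first pass, and only
 * then submits them, so end_buffer_async_write() can tell exactly when
 * the last outstanding buffer finishes and end PageWriteback.
 * example_write_buffers() is a hypothetical helper and omits dirty-bit
 * handling and error paths.
 *
 *	static void example_write_buffers(struct page *page)
 *	{
 *		struct buffer_head *head = page_buffers(page);
 *		struct buffer_head *bh = head;
 *
 *		do {				// pass 1: mark
 *			lock_buffer(bh);
 *			mark_buffer_async_write(bh);
 *			bh = bh->b_this_page;
 *		} while (bh != head);
 *		set_page_writeback(page);
 *		unlock_page(page);
 *		do {				// pass 2: submit
 *			submit_bh(WRITE, bh);
 *			bh = bh->b_this_page;
 *		} while (bh != head);
 *	}
 */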

/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static inline void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC io.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}

/**
 * sync_mapping_buffers - write out and wait upon a mapping's "associated"
 *                        buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
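
/*
 * Usage sketch (illustrative only): a simple buffer-backed filesystem's
 * ->fsync method typically flushes the associated-buffer list.
 * example_fsync() is a hypothetical method; a real one would also write
 * the inode itself and honour @datasync.
 *
 *	static int example_fsync(struct file *file, struct dentry *dentry,
 *				 int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *
 *		return sync_mapping_buffers(inode->i_mapping);
 *	}
 */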

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
	} else {
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	}
	if (list_empty(&bh->b_assoc_buffers)) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
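
/*
 * Usage sketch (illustrative only): when a filesystem dirties a metadata
 * buffer (say, an ext2-style indirect block) on behalf of a regular file,
 * it uses mark_buffer_dirty_inode() instead of mark_buffer_dirty() so the
 * buffer is also queued on the file's private_list, and a later
 * sync_mapping_buffers() from fsync() will write and wait on it.
 *
 *	// hypothetical: bh holds an indirect block just modified for inode
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 */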

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static int __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (TestSetPageDirty(page))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));

		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			__inc_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	write_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return 1;
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	spin_unlock(&mapping->private_lock);

	return __set_page_dirty(page, mapping, 1);
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	int err = 0, err2;

	INIT_LIST_HEAD(&tmp);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		__remove_assoc_queue(bh);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * ll_rw_block() actually writes the current
				 * contents - it is a noop if I/O is still in
				 * flight on potentially older contents.
				 */
				ll_rw_block(SWRITE, 1, &bh);
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		list_del_init(&bh->b_assoc_buffers);
		get_bh(bh);
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_bdev = NULL;
		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_state = 0;
		atomic_set(&bh->b_count, 0);
		bh->b_private = NULL;
		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
	if (!page)
		return NULL;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
			return page;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);
	return page;

failed:
	BUG();
	unlock_page(page);
	page_cache_release(page);
	return NULL;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
	struct page *page;
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__FUNCTION__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}
	block = index << sizebits;
	/* Create a page with the proper size buffers. */
	page = grow_dev_page(bdev, block, index, size);
	if (!page)
		return 0;
	unlock_page(page);
	page_cache_release(page);
	return 1;
}
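
/*
 * Worked example (illustrative only) of the sizebits arithmetic above,
 * assuming PAGE_SIZE == 4096 and size == 512: the do/while stops at
 * sizebits == 3 since 512 << 3 == 4096, i.e. eight blocks per page.  For
 * block 100, index = 100 >> 3 == 12, and block is rounded down to
 * 12 << 3 == 96, so a whole page's worth of buffers (blocks 96..103) is
 * created in one go.  The index != block >> sizebits recheck catches
 * sector_t values whose page index would not fit in pgoff_t.
 */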

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "hardsect size: %d\n",
					bdev_hardsect_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head * bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void fastcall mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));
	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
}

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
	WARN_ON(1);
}

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (!list_empty(&bh->b_assoc_buffers)) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;
	struct bh_lru *lru;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(lru->bhs, bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	struct bh_lru *lru;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	lru = &__get_cpu_var(bh_lrus);
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = lru->bhs[i];

		if (bh && bh->b_bdev == bdev &&
				bh->b_blocknr == block && bh->b_size == size) {
			if (i) {
				while (i) {
					lru->bhs[i] = lru->bhs[i - 1];
					i--;
				}
				lru->bhs[0] = bh;
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	}
	if (bh)
		touch_buffer(bh);
	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk() cannot fail - it just keeps trying.  If you pass it an
 * illegal block number, __getblk() will happily return a buffer_head
 * which represents the non-existent block.  Very weird.
 *
 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
 * attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size);
	return bh;
}
EXPORT_SYMBOL(__getblk);
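
/*
 * Usage sketch (illustrative only): the normal metadata-update cycle built
 * on __getblk() is get, modify, mark dirty, release.  The block number and
 * size here are arbitrary example values; this fully-overwrites the block,
 * which is why setting uptodate without reading first is safe.
 *
 *	struct buffer_head *bh = __getblk(bdev, 42, 4096);
 *
 *	memset(bh->b_data, 0, bh->b_size);	// rewrite the block's contents
 *	set_buffer_uptodate(bh);		// contents now fully valid
 *	mark_buffer_dirty(bh);			// schedule it for writeback
 *	brelse(bh);				// drop our reference
 */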

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns buffer head that contains it.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread);
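
/*
 * Usage sketch (illustrative only): reading an on-disk structure, e.g. a
 * superblock living in block 1 of a device using a 1K blocksize.  Whether
 * __bread() actually touches the disk depends on whether the buffer was
 * already uptodate in the pagecache/LRU.
 *
 *	struct buffer_head *bh = __bread(bdev, 1, 1024);
 *
 *	if (!bh)
 *		return -EIO;		// the block was unreadable
 *	// ... parse bh->b_data (1024 valid bytes) ...
 *	brelse(bh);
 */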

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

void invalidate_bh_lrus(void)
{
	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
}

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head * bh)
{
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	unlock_buffer(bh);
}

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		goto out;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
			discard_buffer(bh);
		curr_off = next_off;
		bh = next;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
	if (offset == 0)
		try_to_release_page(page, 0);
out:
	return;
}
EXPORT_SYMBOL(block_invalidatepage);
|
1521 |
|
|
|
/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
{
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
	bh = head;
	do {
		bh->b_state |= b_state;
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		bh = head;
		do {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
EXPORT_SYMBOL(create_empty_buffers);

/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases from the moment this function returns until the
 * moment something explicitly marks the buffer dirty (hopefully that
 * will not happen until we free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway.  We used to use
 * unmap_buffer() for such invalidation, but that was wrong.  We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
{
	struct buffer_head *old_bh;

	might_sleep();

	old_bh = __find_get_block_slow(bdev, block);
	if (old_bh) {
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
		__brelse(old_bh);
	}
}
EXPORT_SYMBOL(unmap_underlying_metadata);

/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc)
{
	int err;
	sector_t block;
	sector_t last_block;
	struct buffer_head *bh, *head;
	const unsigned blocksize = 1 << inode->i_blkbits;
	int nr_underway = 0;

	BUG_ON(!PageLocked(page));

	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize,
					(1 << BH_Dirty)|(1 << BH_Uptodate));
	}

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	head = page_buffers(page);
	bh = head;

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
	do {
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.
			 */
			/*
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				goto recover;
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
			}
		}
		bh = bh->b_this_page;
		block++;
	} while (bh != head);

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from pdflush and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
			lock_buffer(bh);
		} else if (test_set_buffer_locked(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	err = 0;
done:
	if (nr_underway == 0) {
		/*
		 * The page was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * ll_rw_block/submit_bh.  A rare case.
		 */
		end_page_writeback(page);

		/*
		 * The page and buffer_heads can be released at any time from
		 * here on.
		 */
	}
	return err;

recover:
	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The page is currently locked and not marked for writeback
	 */
	bh = head;
	/* Recovery: lock and submit the mapped buffers */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			lock_buffer(bh);
			mark_buffer_async_write(bh);
		} else {
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty page.
			 */
			clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);
	SetPageError(page);
	BUG_ON(PageWriteback(page));
	mapping_set_error(page->mapping, err);
	set_page_writeback(page);
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh(WRITE, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);
	goto done;
}

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking). And clear the new bit.
 */
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
{
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
		return;

	bh = head = page_buffers(page);
	block_start = 0;
	do {
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user_page(page, start, size, KM_USER0);
					set_buffer_uptodate(bh);
				}

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);
			}
		}

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(page_zero_new_buffers);

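/*
 * Map the blocks touched by a write to (from, to), reading in any
 * partially-overwritten blocks that are not yet uptodate.  On error,
 * new buffers instantiated by get_block() are zeroed and dirtied via
 * page_zero_new_buffers() so stale disk data is never exposed.
 */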
static int __block_prepare_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to, get_block_t *get_block)
{
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_CACHE_SIZE);
	BUG_ON(to > PAGE_CACHE_SIZE);
	BUG_ON(from > to);

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	bbits = inode->i_blkbits;
	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from) {
					void *kaddr;

					kaddr = kmap_atomic(page, KM_USER0);
					if (block_end > to)
						memset(kaddr+to, 0,
							block_end-to);
					if (block_start < from)
						memset(kaddr+block_start,
							0, from-block_start);
					flush_dcache_page(page);
					kunmap_atomic(kaddr, KM_USER0);
				}
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			*wait_bh++=bh;
		}
	}
	/*
	 * If we issued read requests - let them complete.
	 */
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	return err;
}

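/*
 * Mark the buffers covered by (from, to) uptodate and dirty after a
 * successful copy into the page, and mark the whole page uptodate if
 * this write left no non-uptodate buffers behind.
 */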
static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	int partial = 0;
	unsigned blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;

	for(bh = head = page_buffers(page), block_start = 0;
	    bh != head || !block_start;
	    block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
				partial = 1;
		} else {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		}
		clear_buffer_new(bh);
	}

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read(). Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
	if (!partial)
		SetPageUptodate(page);
	return 0;
}

/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * If *pagep is not NULL, then block_write_begin uses the locked page
 * at *pagep rather than allocating its own. In this case, the page will
 * not be unlocked or deallocated on failure.
 */
int block_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	int status = 0;
	struct page *page;
	pgoff_t index;
	unsigned start, end;
	int ownpage = 0;

	index = pos >> PAGE_CACHE_SHIFT;
	start = pos & (PAGE_CACHE_SIZE - 1);
	end = start + len;

	page = *pagep;
	if (page == NULL) {
		ownpage = 1;
		page = __grab_cache_page(mapping, index);
		if (!page) {
			status = -ENOMEM;
			goto out;
		}
		*pagep = page;
	} else
		BUG_ON(!PageLocked(page));

	status = __block_prepare_write(inode, page, start, end, get_block);
	if (unlikely(status)) {
		ClearPageUptodate(page);

		if (ownpage) {
			unlock_page(page);
			page_cache_release(page);
			*pagep = NULL;

			/*
			 * prepare_write() may have instantiated a few blocks
			 * outside i_size.  Trim these off again. Don't need
			 * i_size_read because we hold i_mutex.
			 */
			if (pos + len > inode->i_size)
				vmtruncate(inode, inode->i_size);
		}
		goto out;
	}

out:
	return status;
}
EXPORT_SYMBOL(block_write_begin);

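/*
 * Commit the data copied into the page by a write_begin/write_end pair.
 * "copied" may be less than "len" if the caller faulted mid-copy; a
 * short copy into a non-uptodate page is treated as a zero-length write
 * so the caller must retry the whole thing.
 */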
int block_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start;

	start = pos & (PAGE_CACHE_SIZE - 1);

	if (unlikely(copied < len)) {
		/*
		 * The buffers that were written will now be uptodate, so we
		 * don't have to worry about a readpage reading them and
		 * overwriting a partial write. However if we have encountered
		 * a short write and only partially written into a buffer, it
		 * will not be marked uptodate, so a readpage might come in and
		 * destroy our partial write.
		 *
		 * Do the simplest thing, and just treat any short write to a
		 * non uptodate page as a zero-length write, and force the
		 * caller to redo the whole thing.
		 */
		if (!PageUptodate(page))
			copied = 0;

		page_zero_new_buffers(page, start+copied, start+len);
	}
	flush_dcache_page(page);

	/* This could be a short (even 0-length) commit */
	__block_commit_write(inode, page, start, start+copied);

	return copied;
}
EXPORT_SYMBOL(block_write_end);

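/*
 * Like block_write_end(), but also updates i_size and drops the page
 * lock and reference, for filesystems with no further commit work.
 */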
int generic_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	unlock_page(page);
	page_cache_release(page);

	return copied;
}
EXPORT_SYMBOL(generic_write_end);

/*
 * Generic "read page" function for block devices that have the normal
 * get_block functionality. This is most of the block device filesystems.
 * Reads the page asynchronously --- the unlock_buffer() and
 * set/clear_buffer_uptodate() functions propagate buffer state into the
 * page struct once IO has completed.
 */
int block_read_full_page(struct page *page, get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	sector_t iblock, lblock;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize;
	int nr, i;
	int fully_mapped = 1;

	BUG_ON(!PageLocked(page));
	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
	bh = head;
	nr = 0;
	i = 0;

	do {
		if (buffer_uptodate(bh))
			continue;

		if (!buffer_mapped(bh)) {
			int err = 0;

			fully_mapped = 0;
			if (iblock < lblock) {
				WARN_ON(bh->b_size != blocksize);
				err = get_block(inode, iblock, bh, 0);
				if (err)
					SetPageError(page);
			}
			if (!buffer_mapped(bh)) {
				zero_user_page(page, i * blocksize, blocksize,
						KM_USER0);
				if (!err)
					set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * get_block() might have updated the buffer
			 * synchronously
			 */
			if (buffer_uptodate(bh))
				continue;
		}
		arr[nr++] = bh;
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	if (fully_mapped)
		SetPageMappedToDisk(page);

	if (!nr) {
		/*
		 * All buffers are uptodate - we can set the page uptodate
		 * as well. But not if get_block() returned an error.
		 */
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	/* Stage two: lock the buffers */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		lock_buffer(bh);
		mark_buffer_async_read(bh);
	}

	/*
	 * Stage 3: start the IO.  Check for uptodateness
	 * inside the buffer lock in case another process reading
	 * the underlying blockdev brought it uptodate (the sct fix).
	 */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
		else
			submit_bh(READ, bh);
	}
	return 0;
}

/* utility function for filesystems that need to do work on expanding
 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
 * deal with the hole.
 */
int generic_cont_expand_simple(struct inode *inode, loff_t size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	unsigned long limit;
	int err;

	err = -EFBIG;
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
		send_sig(SIGXFSZ, current, 0);
		goto out;
	}
	if (size > inode->i_sb->s_maxbytes)
		goto out;

	err = pagecache_write_begin(NULL, mapping, size, 0,
				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
				&page, &fsdata);
	if (err)
		goto out;

	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
	BUG_ON(err > 0);

out:
	return err;
}

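/*
 * Zero-fill the region between the current on-disk EOF marker (*bytes)
 * and the new write position, one page at a time, rounding *bytes up to
 * a block boundary as it goes, so no stale data is ever exposed.
 */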
int cont_expand_zero(struct file *file, struct address_space *mapping,
			loff_t pos, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	struct page *page;
	void *fsdata;
	pgoff_t index, curidx;
	loff_t curpos;
	unsigned zerofrom, offset, len;
	int err = 0;

	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = PAGE_CACHE_SIZE - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user_page(page, zerofrom, len, KM_USER0);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}

	/* page covers the boundary, find the boundary offset */
	if (index == curidx) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		/* if we will expand the thing last block will be filled */
		if (offset <= zerofrom) {
			goto out;
		}
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = offset - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user_page(page, zerofrom, len, KM_USER0);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}
out:
	return err;
}

/*
 * For moronic filesystems that do not allow holes in files.
 * We may have to extend the file.
 */
int cont_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	unsigned zerofrom;
	int err;

	err = cont_expand_zero(file, mapping, pos, bytes);
	if (err)
		goto out;

	zerofrom = *bytes & ~PAGE_CACHE_MASK;
	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
		*bytes |= (blocksize-1);
		(*bytes)++;
	}

	*pagep = NULL;
	err = block_write_begin(file, mapping, pos, len,
				flags, pagep, fsdata, get_block);
out:
	return err;
}

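/*
 * Thin wrappers around __block_prepare_write()/__block_commit_write(),
 * kept for callers still using the older ->prepare_write/->commit_write
 * style of address_space operations.
 */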
int block_prepare_write(struct page *page, unsigned from, unsigned to,
			get_block_t *get_block)
{
	struct inode *inode = page->mapping->host;
	int err = __block_prepare_write(inode, page, from, to, get_block);
	if (err)
		ClearPageUptodate(page);
	return err;
}

int block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode,page,from,to);
	return 0;
}

int generic_commit_write(struct file *file, struct page *page,
		unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	__block_commit_write(inode,page,from,to);
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}

/*
 * block_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int
block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
		   get_block_t get_block)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	unsigned long end;
	loff_t size;
	int ret = -EINVAL;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
		end = size & ~PAGE_CACHE_MASK;
	else
		end = PAGE_CACHE_SIZE;

	ret = block_prepare_write(page, 0, end, get_block);
	if (!ret)
		ret = block_commit_write(page, 0, end);

out_unlock:
	unlock_page(page);
	return ret;
}

/*
 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock.  So it needs a special end_io
 * handler which does not touch the bh after unlocking it.
 */
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
}

/*
 * Attach the singly-linked list of buffers created by nobh_write_begin, to
 * the page (converting it to circular linked list and taking care of page
 * dirty races).
 */
static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh;

	BUG_ON(!PageLocked(page));

	spin_lock(&page->mapping->private_lock);
	bh = head;
	do {
		if (PageDirty(page))
			set_buffer_dirty(bh);
		if (!bh->b_this_page)
			bh->b_this_page = head;
		bh = bh->b_this_page;
	} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}

/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 */
int nobh_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *head, *bh;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	unsigned block_in_page;
	unsigned block_start, block_end;
	sector_t block_in_file;
	char *kaddr;
	int nr_reads = 0;
	int ret = 0;
	int is_mapped_to_disk = 1;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	page = __grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	*fsdata = NULL;

	if (page_has_buffers(page)) {
		unlock_page(page);
		page_cache_release(page);
		*pagep = NULL;
		return block_write_begin(file, mapping, pos, len, flags, pagep,
					fsdata, get_block);
	}

	if (PageMappedToDisk(page))
		return 0;

	/*
	 * Allocate buffers so that we can keep track of state, and potentially
	 * attach them to the page if an error occurs. In the common case of
	 * no error, they will just be freed again without ever being attached
	 * to the page (which is all OK, because we're under the page lock).
	 *
	 * Be careful: the buffer linked list is a NULL terminated one, rather
	 * than the circular one we're used to.
	 */
	head = alloc_page_buffers(page, blocksize, 0);
	if (!head) {
		ret = -ENOMEM;
		goto out_release;
	}

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
	 */
	for (block_start = 0, block_in_page = 0, bh = head;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
		int create;

		block_end = block_start + blocksize;
		bh->b_state = 0;
		create = 1;
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(bh))
			is_mapped_to_disk = 0;
		if (buffer_new(bh))
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (buffer_new(bh) || !buffer_mapped(bh)) {
			kaddr = kmap_atomic(page, KM_USER0);
			if (block_start < from)
				memset(kaddr+block_start, 0, from-block_start);
			if (block_end > to)
				memset(kaddr + to, 0, block_end - to);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
			continue;
		}
		if (buffer_uptodate(bh))
			continue;	/* reiserfs does this */
		if (block_start < from || block_end > to) {
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
			submit_bh(READ, bh);
			nr_reads++;
		}
	}

	if (nr_reads) {
		/*
		 * The page is locked, so these buffers are protected from
		 * any VM or truncate activity.  Hence we don't need to care
		 * for the buffer_head refcounts.
		 */
		for (bh = head; bh; bh = bh->b_this_page) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);

	*fsdata = head; /* to be released by nobh_write_end */

	return 0;

failed:
	BUG_ON(!ret);
	/*
	 * Error recovery is a bit difficult. We need to zero out blocks that
	 * were newly allocated, and dirty them to ensure they get written out.
	 * Buffers need to be attached to the page at this point, otherwise
	 * the handling of potential IO errors during writeout would be hard
	 * (could try doing synchronous writeout, but what if that fails too?)
	 */
	attach_nobh_buffers(page, head);
	page_zero_new_buffers(page, from, to);

out_release:
	unlock_page(page);
	page_cache_release(page);
	*pagep = NULL;

	if (pos + len > inode->i_size)
		vmtruncate(inode, inode->i_size);

	return ret;
}
EXPORT_SYMBOL(nobh_write_begin);

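/*
 * Tear-down counterpart of nobh_write_begin(): mark the page uptodate
 * and dirty, update i_size, and free the temporary buffer_head list
 * that was stashed in *fsdata.  Falls back to generic_write_end() if
 * buffers ended up attached to the page after all.
 */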
int nobh_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *head = fsdata;
	struct buffer_head *bh;

	if (!PageMappedToDisk(page)) {
		if (unlikely(copied < len) && !page_has_buffers(page))
			attach_nobh_buffers(page, head);
		if (page_has_buffers(page))
			return generic_write_end(file, mapping, pos, len,
						copied, page, fsdata);
	}

	SetPageUptodate(page);
	set_page_dirty(page);
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	unlock_page(page);
	page_cache_release(page);

	while (head) {
		bh = head;
		head = head->b_this_page;
		free_buffer_head(bh);
	}

	return copied;
}
EXPORT_SYMBOL(nobh_write_end);

/*
 * nobh_writepage() - based on block_write_full_page() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int ret;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
#if 0
		/* Not really sure about this - do we need this ? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = __block_write_full_page(inode, page, get_block, wbc);
	return ret;
}
EXPORT_SYMBOL(nobh_writepage);

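/*
 * Zero the partial block at the new end-of-file without attaching
 * buffer_heads to the page; falls back to block_truncate_page() if the
 * page turns out to have buffers already.
 */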
int nobh_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head map_bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (page_has_buffers(page)) {
has_buffers:
		unlock_page(page);
		page_cache_release(page);
		return block_truncate_page(mapping, from, get_block);
	}

	/* Find the buffer that contains "offset" */
	pos = blocksize;
	while (offset >= pos) {
		iblock++;
		pos += blocksize;
	}

	err = get_block(inode, iblock, &map_bh, 0);
	if (err)
		goto unlock;
	/* unmapped? It's a hole - nothing to do */
	if (!buffer_mapped(&map_bh))
		goto unlock;

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (!PageUptodate(page)) {
		err = mapping->a_ops->readpage(NULL, page);
		if (err) {
			page_cache_release(page);
			goto out;
		}
		lock_page(page);
		if (!PageUptodate(page)) {
			err = -EIO;
			goto unlock;
		}
		if (page_has_buffers(page))
			goto has_buffers;
	}
	zero_user_page(page, offset, length, KM_USER0);
	set_page_dirty(page);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
EXPORT_SYMBOL(nobh_truncate_page);

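/*
 * Zero out the partial block at the new end-of-file so that a later
 * extension of the file cannot expose stale on-disk contents.
 */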
int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	zero_user_page(page, offset, length, KM_USER0);
	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}

/*
 * The generic ->writepage function for buffer-backed address_spaces
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers.  For example,
		 * they may have been added in ext3_writepage().  Make them
		 * freeable here, so the page does not leak.
		 */
		do_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
	return __block_write_full_page(inode, page, get_block, wbc);
}

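/*
 * Implement the ->bmap() callback in terms of get_block(): map one file
 * block to its on-disk block number via a throwaway buffer_head.
 */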
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;
	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	tmp.b_size = 1 << inode->i_blkbits;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}

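/*
 * bio completion handler for submit_bh(): propagate the result to the
 * buffer_head's own b_end_io and drop the bio.
 */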
static void end_bio_bh_io_sync(struct bio *bio, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		set_bit(BH_Eopnotsupp, &bh->b_state);
	}

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
}

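/*
 * Wrap a locked, mapped buffer_head in a single-segment bio and submit
 * it to the block layer.  Returns -EOPNOTSUPP if the device rejected
 * the request (e.g. a barrier write it cannot honour).
 */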
int submit_bh(int rw, struct buffer_head * bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);

	if (buffer_ordered(bh) && (rw == WRITE))
		rw = WRITE_BARRIER;

	/*
	 * Only clear out a write error when rewriting, should this
	 * include WRITE_SYNC as well?
	 */
	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}

/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE.  The third
 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
 * are sent to disk.  The fourth %READA option is described in the documentation
 * for generic_make_request() which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
 * clean when doing a write request, and any buffer that appears to be
 * up-to-date when doing a read request.  Further it marks as clean buffers
 * that are processed for writing (the buffer cache won't assume that they are
 * actually clean until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (rw == SWRITE)
			lock_buffer(bh);
		else if (test_set_buffer_locked(bh))
			continue;

		if (rw == WRITE || rw == SWRITE) {
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
	}
}

2968 |
|
|
/*
|
2969 |
|
|
* For a data-integrity writeout, we need to wait upon any in-progress I/O
|
2970 |
|
|
* and then start new I/O and then wait upon it. The caller must have a ref on
|
2971 |
|
|
* the buffer_head.
|
2972 |
|
|
*/
|
2973 |
|
|
int sync_dirty_buffer(struct buffer_head *bh)
|
2974 |
|
|
{
|
2975 |
|
|
int ret = 0;
|
2976 |
|
|
|
2977 |
|
|
WARN_ON(atomic_read(&bh->b_count) < 1);
|
2978 |
|
|
lock_buffer(bh);
|
2979 |
|
|
if (test_clear_buffer_dirty(bh)) {
|
2980 |
|
|
get_bh(bh);
|
2981 |
|
|
bh->b_end_io = end_buffer_write_sync;
|
2982 |
|
|
ret = submit_bh(WRITE, bh);
|
2983 |
|
|
wait_on_buffer(bh);
|
2984 |
|
|
if (buffer_eopnotsupp(bh)) {
|
2985 |
|
|
clear_buffer_eopnotsupp(bh);
|
2986 |
|
|
ret = -EOPNOTSUPP;
|
2987 |
|
|
}
|
2988 |
|
|
if (!ret && !buffer_uptodate(bh))
|
2989 |
|
|
ret = -EIO;
|
2990 |
|
|
} else {
|
2991 |
|
|
unlock_buffer(bh);
|
2992 |
|
|
}
|
2993 |
|
|
return ret;
|
2994 |
|
|
}
|
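
/*
 * Editor's example (a sketch): synchronously writing modified metadata
 * with sync_dirty_buffer(), as a filesystem might do for its superblock.
 * 'sbh' is a hypothetical buffer_head the caller holds a reference on.
 *
 *	mark_buffer_dirty(sbh);
 *	if (sync_dirty_buffer(sbh))
 *		printk(KERN_ERR "superblock writeout failed\n");
 */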

/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty, which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (!list_empty(&bh->b_assoc_buffers))
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}

int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (e.g. ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
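
/*
 * Editor's example (a sketch): a minimal ->releasepage() method that
 * simply delegates to try_to_free_buffers(); filesystems with no extra
 * per-page state can use this pattern.  'myfs_releasepage' is a
 * hypothetical name.
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */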

void block_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();
	mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
}
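
/*
 * Editor's example (a sketch): block_sync_page() is normally wired into
 * a filesystem's address_space_operations so that processes sleeping on
 * a locked page can unplug the underlying request queue.  'myfs_aops'
 * and 'myfs_readpage' are hypothetical; other methods are elided.
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.sync_page	= block_sync_page,
 *	};
 */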

/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `pdflush' kernel threads fully replace bdflush daemons and this call.
 */
asmlinkage long sys_bdflush(int func, long data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}
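
/*
 * Editor's example (a sketch): how a legacy userspace flush daemon
 * reaches this path.  libc no longer wraps bdflush, so the call goes
 * through the raw syscall interface; func == 1 makes the now-superfluous
 * daemon process exit via do_exit(0) above, never returning to userspace.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_bdflush, 1, 0);
 */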

/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};

static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep,
			set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		put_cpu_var(bh_accounting);
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	put_cpu_var(bh_accounting);
}
EXPORT_SYMBOL(free_buffer_head);
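
/*
 * Editor's example (a sketch): the raw alloc/free pairing.  Buffer heads
 * attached to pages normally come from alloc_page_buffers(), but a bare
 * allocation looks like this.  Note that free_buffer_head() BUG()s if
 * the bh is still on an association list.
 *
 *	struct buffer_head *bh = alloc_buffer_head(GFP_NOFS);
 *	if (!bh)
 *		return -ENOMEM;
 *	...
 *	free_buffer_head(bh);
 */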

static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
	per_cpu(bh_accounting, cpu).nr = 0;
	put_cpu_var(bh_accounting);
}

static int buffer_cpu_notify(struct notifier_block *self,
			     unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}

void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = KMEM_CACHE(buffer_head,
			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}
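
/*
 * Editor's note (illustrative figures, not from the original source):
 * with 4 KiB pages and a buffer_head of roughly 100 bytes, each page
 * accounts for about 40 buffer heads.  If nr_free_buffer_pages() were
 * to report 250,000 pages (~1 GB of ZONE_NORMAL), nrpages would be
 * 25,000 and max_buffer_heads about 25,000 * 40 = 1,000,000; beyond
 * that total, buffer_heads_over_limit flips to 1 and writeback starts
 * stripping buffer heads from clean pages.
 */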

EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__wait_on_buffer);
EXPORT_SYMBOL(block_commit_write);
EXPORT_SYMBOL(block_prepare_write);
EXPORT_SYMBOL(block_page_mkwrite);
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(cont_write_begin);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_commit_write);
EXPORT_SYMBOL(generic_cont_expand_simple);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(ll_rw_block);
EXPORT_SYMBOL(mark_buffer_dirty);
EXPORT_SYMBOL(submit_bh);
EXPORT_SYMBOL(sync_dirty_buffer);
EXPORT_SYMBOL(unlock_buffer);