/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>

static atomic_t inotify_cookie;

/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_mutex (synchronize shrink_icache_memory())
 * inode_lock (protects the super_block->s_inodes list)
 * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
 * inotify_handle->mutex (protects inotify_handle and watches->h_list)
 *
 * The inode->inotify_mutex and inotify_handle->mutex are held during execution
 * of a caller's event handler.  Thus, the caller must not hold any locks
 * taken in their event handler while calling any of the published inotify
 * interfaces.
 */

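/*
 * An illustrative sketch of that ordering as it applies to the two inotify
 * mutexes (the same pattern inotify_find_watch() and inotify_rm_wd() below
 * follow):
 *
 *	mutex_lock(&inode->inotify_mutex);
 *	mutex_lock(&ih->mutex);
 *	...
 *	mutex_unlock(&ih->mutex);
 *	mutex_unlock(&inode->inotify_mutex);
 */
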
/*
 * Lifetimes of the three main data structures--inotify_handle, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
 * Additional references can bump the count via get_inotify_handle() and drop
 * the count via put_inotify_handle().
 *
 * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
 * to remove_watch_no_event().  Additional references can bump the count via
 * get_inotify_watch() and drop the count via put_inotify_watch().  The caller
 * is responsible for the final put after receiving IN_IGNORED, or when using
 * IN_ONESHOT after receiving the first event.  Inotify does the final put if
 * inotify_destroy() is called.
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * inotify_add_watch() to the final put_inotify_watch().
 */

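/*
 * A consumer-side sketch of those inotify_watch lifetime rules, assuming a
 * caller-defined structure that embeds struct inotify_watch (my_watch and
 * the field name wdata are placeholders, not part of this API):
 *
 *	struct my_watch {
 *		struct inotify_watch wdata;
 *		...
 *	};
 *
 * After the caller's handle_event() receives IN_IGNORED for a watch, it
 * makes the final put:
 *
 *	put_inotify_watch(watch);
 *
 * and the caller's destroy_watch() op then frees the embedding structure,
 * e.g. with kfree(container_of(watch, struct my_watch, wdata)).
 */
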
/*
 * struct inotify_handle - represents an inotify instance
 *
 * This structure is protected by the mutex 'mutex'.
 */
struct inotify_handle {
	struct idr		idr;		/* idr mapping wd -> watch */
	struct mutex		mutex;		/* protects this bad boy */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	u32			last_wd;	/* the last wd allocated */
	const struct inotify_operations *in_ops; /* inotify caller operations */
};

static inline void get_inotify_handle(struct inotify_handle *ih)
{
	atomic_inc(&ih->count);
}

static inline void put_inotify_handle(struct inotify_handle *ih)
{
	if (atomic_dec_and_test(&ih->count)) {
		idr_destroy(&ih->idr);
		kfree(ih);
	}
}

/**
 * get_inotify_watch - grab a reference to an inotify_watch
 * @watch: watch to grab
 */
void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}
EXPORT_SYMBOL_GPL(get_inotify_watch);

/**
 * put_inotify_watch - decrements the ref count on a given watch and cleans
 * up the watch if the count reaches zero.  The inotify_watch itself is freed
 * by the inotify caller via the destroy_watch() op.
 * @watch: watch to release
 */
void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		struct inotify_handle *ih = watch->ih;

		iput(watch->inode);
		ih->in_ops->destroy_watch(watch);
		put_inotify_handle(ih);
	}
}
EXPORT_SYMBOL_GPL(put_inotify_watch);

/*
 * inotify_handle_get_wd - returns the next WD for use by the given handle
 *
 * Callers must hold ih->mutex.  This function can sleep.
 */
static int inotify_handle_get_wd(struct inotify_handle *ih,
				 struct inotify_watch *watch)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL)))
			return -ENOSPC;
		ret = idr_get_new_above(&ih->idr, watch, ih->last_wd + 1,
					&watch->wd);
	} while (ret == -EAGAIN);

	if (likely(!ret))
		ih->last_wd = watch->wd;

	return ret;
}

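/*
 * A note on the retry loop above (a reading of the old idr API as used
 * here, not new behavior): idr_pre_get() only preallocates memory, and
 * idr_get_new_above() returns -EAGAIN when that preallocation turns out
 * to be insufficient, so the allocation is retried.  Allocating above
 * last_wd keeps recently freed watch descriptors from being reused
 * immediately.
 */
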
/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless, we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}

/*
 * Get child dentry flag into sync with parent inode.
 * Flag should always be clear for negative dentries.
 */
static void set_dentry_child_flags(struct inode *inode, int watched)
{
	struct dentry *alias;

	spin_lock(&dcache_lock);
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct dentry *child;

		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
			if (!child->d_inode) {
				WARN_ON(child->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
				continue;
			}
			spin_lock(&child->d_lock);
			if (watched) {
				WARN_ON(child->d_flags &
						DCACHE_INOTIFY_PARENT_WATCHED);
				child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
			} else {
				WARN_ON(!(child->d_flags &
						DCACHE_INOTIFY_PARENT_WATCHED));
				child->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
			}
			spin_unlock(&child->d_lock);
		}
	}
	spin_unlock(&dcache_lock);
}

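/*
 * DCACHE_INOTIFY_PARENT_WATCHED, toggled above, lets event producers test a
 * dentry flag instead of chasing d_parent: see
 * inotify_dentry_parent_queue_event(), which returns early when the flag is
 * clear.
 */
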
/*
 * inode_find_handle - find the watch associated with the given inode and
 * handle
 *
 * Callers must hold inode->inotify_mutex.
 */
static struct inotify_watch *inode_find_handle(struct inode *inode,
					       struct inotify_handle *ih)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->ih == ih)
			return watch;
	}

	return NULL;
}

/*
 * remove_watch_no_event - remove watch without the IN_IGNORED event.
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_handle *ih)
{
	list_del(&watch->i_list);
	list_del(&watch->h_list);

	if (!inotify_inode_watched(watch->inode))
		set_dentry_child_flags(watch->inode, 0);

	idr_remove(&ih->idr, watch->wd);
}

/**
 * inotify_remove_watch_locked - Remove a watch from both the handle and the
 * inode.  Sends the IN_IGNORED event signifying that the inode is no longer
 * watched.  May be invoked from a caller's event handler.
 * @ih: inotify handle associated with watch
 * @watch: watch to remove
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
void inotify_remove_watch_locked(struct inotify_handle *ih,
				 struct inotify_watch *watch)
{
	remove_watch_no_event(watch, ih);
	ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);

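/*
 * Note the ordering above: the watch is unhooked before IN_IGNORED is
 * delivered, so the caller's handle_event() can safely make the final
 * put_inotify_watch() when it sees IN_IGNORED (cf. the lifetime rules at
 * the top of this file).
 */
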
/* Kernel API for producing events */

/*
 * inotify_d_instantiate - instantiate dcache entry for inode
 */
void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
{
	struct dentry *parent;

	if (!inode)
		return;

	WARN_ON(entry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
	spin_lock(&entry->d_lock);
	parent = entry->d_parent;
	if (parent->d_inode && inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	spin_unlock(&entry->d_lock);
}

/*
 * inotify_d_move - dcache entry has been moved
 */
void inotify_d_move(struct dentry *entry)
{
	struct dentry *parent;

	parent = entry->d_parent;
	if (inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	else
		entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
}

/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 * @n_inode: inode associated with name
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name, struct inode *n_inode)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_handle *ih = watch->ih;
			mutex_lock(&ih->mutex);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, ih);
			ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
						 name, n_inode);
			mutex_unlock(&ih->mutex);
		}
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);

/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
		return;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name,
					  dentry->d_inode);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);

/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);

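/*
 * Cookies pair related events.  An illustrative producer-side sketch for a
 * rename (old_dir, new_dir, old_name, new_name and source are placeholders,
 * not names used in this file): both halves of the move share one cookie so
 * consumers can correlate them.
 *
 *	u32 cookie = inotify_get_cookie();
 *
 *	inotify_inode_queue_event(old_dir, IN_MOVED_FROM, cookie, old_name,
 *				  source);
 *	inotify_inode_queue_event(new_dir, IN_MOVED_TO, cookie, new_name,
 *				  source);
 */
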
/**
 * inotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
		 * I_WILL_FREE which is fine because by that point the inode
		 * cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case inotify_remove_watch_locked() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
				atomic_read(&next_i->i_count) &&
				!(next_i->i_state & (I_CLEAR | I_FREEING |
					I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_mutex keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		mutex_lock(&inode->inotify_mutex);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_handle *ih = watch->ih;
			mutex_lock(&ih->mutex);
			ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
						 NULL, NULL);
			inotify_remove_watch_locked(ih, watch);
			mutex_unlock(&ih->mutex);
		}
		mutex_unlock(&inode->inotify_mutex);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);

/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_handle *ih = watch->ih;
		mutex_lock(&ih->mutex);
		inotify_remove_watch_locked(ih, watch);
		mutex_unlock(&ih->mutex);
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);

/* Kernel Consumer API */

/**
 * inotify_init - allocate and initialize an inotify instance
 * @ops: caller's inotify operations
 */
struct inotify_handle *inotify_init(const struct inotify_operations *ops)
{
	struct inotify_handle *ih;

	ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
	if (unlikely(!ih))
		return ERR_PTR(-ENOMEM);

	idr_init(&ih->idr);
	INIT_LIST_HEAD(&ih->watches);
	mutex_init(&ih->mutex);
	ih->last_wd = 0;
	ih->in_ops = ops;
	atomic_set(&ih->count, 0);
	get_inotify_handle(ih);

	return ih;
}
EXPORT_SYMBOL_GPL(inotify_init);

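/*
 * A minimal consumer sketch, with error handling elided (everything with a
 * my_ prefix is a placeholder, not part of this API):
 *
 *	static const struct inotify_operations my_ops = {
 *		.handle_event	= my_handle_event,
 *		.destroy_watch	= my_destroy_watch,
 *	};
 *
 *	ih = inotify_init(&my_ops);
 *	inotify_init_watch(&my_watch->wdata);
 *	wd = inotify_add_watch(ih, &my_watch->wdata, inode, IN_MODIFY);
 *	...
 *	inotify_rm_wd(ih, wd);
 *	inotify_destroy(ih);
 */
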
/**
 * inotify_init_watch - initialize an inotify watch
 * @watch: watch to initialize
 */
void inotify_init_watch(struct inotify_watch *watch)
{
	INIT_LIST_HEAD(&watch->h_list);
	INIT_LIST_HEAD(&watch->i_list);
	atomic_set(&watch->count, 0);
	get_inotify_watch(watch); /* initial get */
}
EXPORT_SYMBOL_GPL(inotify_init_watch);

/**
 * inotify_destroy - clean up and destroy an inotify instance
 * @ih: inotify handle
 */
void inotify_destroy(struct inotify_handle *ih)
{
	/*
	 * Destroy all of the watches for this handle.  Unfortunately, not very
	 * pretty.  We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch.  But we need to
	 * hold inode->inotify_mutex before ih->mutex.  The following works.
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct inode *inode;

		mutex_lock(&ih->mutex);
		watches = &ih->watches;
		if (list_empty(watches)) {
			mutex_unlock(&ih->mutex);
			break;
		}
		watch = list_first_entry(watches, struct inotify_watch, h_list);
		get_inotify_watch(watch);
		mutex_unlock(&ih->mutex);

		inode = watch->inode;
		mutex_lock(&inode->inotify_mutex);
		mutex_lock(&ih->mutex);

		/* make sure we didn't race with another list removal */
		if (likely(idr_find(&ih->idr, watch->wd))) {
			remove_watch_no_event(watch, ih);
			put_inotify_watch(watch);
		}

		mutex_unlock(&ih->mutex);
		mutex_unlock(&inode->inotify_mutex);
		put_inotify_watch(watch);
	}

	/* free this handle: the put matching the get in inotify_init() */
	put_inotify_handle(ih);
}
EXPORT_SYMBOL_GPL(inotify_destroy);

/**
 * inotify_find_watch - find an existing watch for an (ih,inode) pair
 * @ih: inotify handle
 * @inode: inode to watch
 * @watchp: pointer to existing inotify_watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
		       struct inotify_watch **watchp)
{
	struct inotify_watch *old;
	int ret = -ENOENT;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	old = inode_find_handle(inode, ih);
	if (unlikely(old)) {
		get_inotify_watch(old); /* caller must put watch */
		*watchp = old;
		ret = old->wd;
	}

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_watch);

/**
 * inotify_find_update_watch - find and update the mask of an existing watch
 * @ih: inotify handle
 * @inode: inode's watch to update
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
			      u32 mask)
{
	struct inotify_watch *old;
	int mask_add = 0;
	int ret;

	if (mask & IN_MASK_ADD)
		mask_add = 1;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/*
	 * Handle the case of re-adding a watch on an (inode,ih) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_handle(inode, ih);
	if (unlikely(!old)) {
		ret = -ENOENT;
		goto out;
	}

	if (mask_add)
		old->mask |= mask;
	else
		old->mask = mask;
	ret = old->wd;
out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_update_watch);

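/*
 * Example of the IN_MASK_ADD semantics above: if an existing watch has mask
 * IN_CREATE, then
 *
 *	inotify_find_update_watch(ih, inode, IN_DELETE | IN_MASK_ADD);
 *
 * leaves it watching IN_CREATE | IN_DELETE, whereas passing IN_DELETE alone
 * would replace the mask outright.
 */
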
/**
 * inotify_add_watch - add a watch to an inotify instance
 * @ih: inotify handle
 * @watch: caller allocated watch structure
 * @inode: inode to watch
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 * Caller must ensure it only calls inotify_add_watch() once per watch.
 * Calls inotify_handle_get_wd() so may sleep.
 */
s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
		      struct inode *inode, u32 mask)
{
	int ret = 0;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;
	watch->mask = mask;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* Initialize a new watch */
	ret = inotify_handle_get_wd(ih, watch);
	if (unlikely(ret))
		goto out;
	ret = watch->wd;

	/* save a reference to handle and bump the count to make it official */
	get_inotify_handle(ih);
	watch->ih = ih;

	/*
	 * Save a reference to the inode and bump the ref count to make it
	 * official.  We hold a reference to nameidata, which makes this safe.
	 */
	watch->inode = igrab(inode);

	if (!inotify_inode_watched(inode))
		set_dentry_child_flags(inode, 1);

	/* Add the watch to the handle's and the inode's list */
	list_add(&watch->h_list, &ih->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_add_watch);

/**
 * inotify_clone_watch - put the watch next to existing one
 * @old: already installed watch
 * @new: new watch
 *
 * Caller must hold the inotify_mutex of inode we are dealing with;
 * it is expected to remove the old watch before unlocking the inode.
 */
s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new)
{
	struct inotify_handle *ih = old->ih;
	int ret = 0;

	new->mask = old->mask;
	new->ih = ih;

	mutex_lock(&ih->mutex);

	/* Initialize a new watch */
	ret = inotify_handle_get_wd(ih, new);
	if (unlikely(ret))
		goto out;
	ret = new->wd;

	get_inotify_handle(ih);

	new->inode = igrab(old->inode);

	list_add(&new->h_list, &ih->watches);
	list_add(&new->i_list, &old->inode->inotify_watches);
out:
	mutex_unlock(&ih->mutex);
	return ret;
}

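/*
 * inotify_evict_watch - remove the watch and send IN_IGNORED, while keeping
 * the watch pinned: the reference taken below is the caller's to drop with
 * put_inotify_watch().
 * @watch: watch to remove
 */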
void inotify_evict_watch(struct inotify_watch *watch)
{
	get_inotify_watch(watch);
	mutex_lock(&watch->ih->mutex);
	inotify_remove_watch_locked(watch->ih, watch);
	mutex_unlock(&watch->ih->mutex);
}

/**
 * inotify_rm_wd - remove a watch from an inotify instance
 * @ih: inotify handle
 * @wd: watch descriptor to remove
 *
 * Can sleep.
 */
int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
{
	struct inotify_watch *watch;
	struct inode *inode;

	mutex_lock(&ih->mutex);
	watch = idr_find(&ih->idr, wd);
	if (unlikely(!watch)) {
		mutex_unlock(&ih->mutex);
		return -EINVAL;
	}
	get_inotify_watch(watch);
	inode = watch->inode;
	mutex_unlock(&ih->mutex);

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* make sure that we did not race */
	if (likely(idr_find(&ih->idr, wd) == watch))
		inotify_remove_watch_locked(ih, watch);

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(watch);

	return 0;
}
EXPORT_SYMBOL_GPL(inotify_rm_wd);

/**
 * inotify_rm_watch - remove a watch from an inotify instance
 * @ih: inotify handle
 * @watch: watch to remove
 *
 * Can sleep.
 */
int inotify_rm_watch(struct inotify_handle *ih,
		     struct inotify_watch *watch)
{
	return inotify_rm_wd(ih, watch->wd);
}
EXPORT_SYMBOL_GPL(inotify_rm_watch);

/*
 * inotify_setup - core initialization function
 */
static int __init inotify_setup(void)
{
	atomic_set(&inotify_cookie, 0);

	return 0;
}

module_init(inotify_setup);