/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2007
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"

static inline struct cifsFileInfo *cifs_init_private(
        struct cifsFileInfo *private_data, struct inode *inode,
        struct file *file, __u16 netfid)
{
        memset(private_data, 0, sizeof(struct cifsFileInfo));
        private_data->netfid = netfid;
        private_data->pid = current->tgid;
        init_MUTEX(&private_data->fh_sem);
        mutex_init(&private_data->lock_mutex);
        INIT_LIST_HEAD(&private_data->llist);
        private_data->pfile = file; /* needed for writepage */
        private_data->pInode = inode;
        private_data->invalidHandle = FALSE;
        private_data->closePend = FALSE;
        /* we have to track num writers to the inode, since writepages
        does not tell us which handle the write is for so there can
        be a close (overlapping with write) of the filehandle that
        cifs_writepages chose to use */
        atomic_set(&private_data->wrtPending, 0);

        return private_data;
}

static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request
                   can cause unnecessary access denied on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return 0x20197;
}

static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}

/* all arguments to this function must be checked for validity in caller */
static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
        struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
        struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
        char *full_path, int xid)
{
        struct timespec temp;
        int rc;

        /* want handles we can use to read with first
           in the list so we do not have to walk the
           list to search for one in prepare_write */
        if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
                list_add_tail(&pCifsFile->flist,
                              &pCifsInode->openFileList);
        } else {
                list_add(&pCifsFile->flist,
                         &pCifsInode->openFileList);
        }
        write_unlock(&GlobalSMBSeslock);
        if (pCifsInode->clientCanCacheRead) {
                /* we have the inode open somewhere else
                   no need to discard cache data */
                goto client_can_cache;
        }

        /* BB need same check in cifs_create too? */
        /* if not oplocked, invalidate inode pages if mtime or file
           size changed */
        temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
        if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
                           (file->f_path.dentry->d_inode->i_size ==
                            (loff_t)le64_to_cpu(buf->EndOfFile))) {
                cFYI(1, ("inode unchanged on server"));
        } else {
                if (file->f_path.dentry->d_inode->i_mapping) {
                /* BB no need to lock inode until after invalidate
                   since namei code should already have it locked? */
                        rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
                        if (rc != 0)
                                CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
                }
                cFYI(1, ("invalidating remote inode since open detected it "
                         "changed"));
                invalidate_remote_inode(file->f_path.dentry->d_inode);
        }

client_can_cache:
        if (pTcon->unix_ext)
                rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
                        full_path, inode->i_sb, xid);
        else
                rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
                        full_path, buf, inode->i_sb, xid);

        if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
                pCifsInode->clientCanCacheAll = TRUE;
                pCifsInode->clientCanCacheRead = TRUE;
                cFYI(1, ("Exclusive Oplock granted on inode %p",
                         file->f_path.dentry->d_inode));
        } else if ((*oplock & 0xF) == OPLOCK_READ)
                pCifsInode->clientCanCacheRead = TRUE;

        return rc;
}

int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        int xid, oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        struct cifsFileInfo *pCifsFile;
        struct cifsInodeInfo *pCifsInode;
        struct list_head *tmp;
        char *full_path = NULL;
        int desiredAccess;
        int disposition;
        __u16 netfid;
        FILE_ALL_INFO *buf = NULL;

        xid = GetXid();

        cifs_sb = CIFS_SB(inode->i_sb);
        pTcon = cifs_sb->tcon;

        if (file->f_flags & O_CREAT) {
                /* search inode for this file and fill in file->private_data */
                pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
                read_lock(&GlobalSMBSeslock);
                list_for_each(tmp, &pCifsInode->openFileList) {
                        pCifsFile = list_entry(tmp, struct cifsFileInfo,
                                               flist);
                        if ((pCifsFile->pfile == NULL) &&
                            (pCifsFile->pid == current->tgid)) {
                                /* mode set in cifs_create */

                                /* needed for writepage */
                                pCifsFile->pfile = file;

                                file->private_data = pCifsFile;
                                break;
                        }
                }
                read_unlock(&GlobalSMBSeslock);
                if (file->private_data != NULL) {
                        rc = 0;
                        FreeXid(xid);
                        return rc;
                } else {
                        if (file->f_flags & O_EXCL)
                                cERROR(1, ("could not find file instance for "
                                           "new file %p", file));
                }
        }

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                FreeXid(xid);
                return -ENOMEM;
        }

        cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
                 inode, file->f_flags, full_path));
        desiredAccess = cifs_convert_flags(file->f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is not a direct match between disposition
 *      FILE_SUPERSEDE (ie create whether or not file exists although
 *      O_CREAT | O_TRUNC is similar but truncates the existing
 *      file rather than creating a new file as FILE_SUPERSEDE does
 *      (which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *       O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

        disposition = cifs_get_disposition(file->f_flags);

        if (oplockEnabled)
                oplock = REQ_OPLOCK;
        else
                oplock = FALSE;

        /* BB pass O_SYNC flag through on file attributes .. BB */

        /* Also refresh inode by passing in file_info buf returned by SMBOpen
           and calling get_inode_info with returned buf (at least helps
           non-Unix server case) */

        /* BB we can not do this if this is the second open of a file
           and the first handle has writebehind data, we might be
           able to simply do a filemap_fdatawrite/filemap_fdatawait first */
        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf) {
                rc = -ENOMEM;
                goto out;
        }

        if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
                rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
                         desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
        else
                rc = -EIO; /* no NT SMB support fall into legacy open below */

        if (rc == -EIO) {
                /* Old server, try legacy style OpenX */
                rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
                        desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
                        cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
                                & CIFS_MOUNT_MAP_SPECIAL_CHR);
        }
        if (rc) {
                cFYI(1, ("cifs_open returned 0x%x", rc));
                goto out;
        }
        file->private_data =
                kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (file->private_data == NULL) {
                rc = -ENOMEM;
                goto out;
        }
        pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
        write_lock(&GlobalSMBSeslock);
        list_add(&pCifsFile->tlist, &pTcon->openFileList);

        pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
        if (pCifsInode) {
                rc = cifs_open_inode_helper(inode, file, pCifsInode,
                                            pCifsFile, pTcon,
                                            &oplock, buf, full_path, xid);
        } else {
                write_unlock(&GlobalSMBSeslock);
        }

        if (oplock & CIFS_CREATE_ACTION) {
                /* time to set mode which we can not set earlier due to
                   problems creating new read-only files */
                if (pTcon->unix_ext) {
                        CIFSSMBUnixSetPerms(xid, pTcon, full_path,
                                            inode->i_mode,
                                            (__u64)-1, (__u64)-1, 0 /* dev */,
                                            cifs_sb->local_nls,
                                            cifs_sb->mnt_cifs_flags &
                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
                } else {
                        /* BB implement via Windows security descriptors eg
                           CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
                                              -1, -1, local_nls);
                           in the meantime could set r/o dos attribute when
                           perms are eg: mode & 0222 == 0 */
                }
        }

out:
        kfree(buf);
        kfree(full_path);
        FreeXid(xid);
        return rc;
}

/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
        int rc = 0;

/* BB list all locks open on this file and relock */

        return rc;
}

static int cifs_reopen_file(struct file *file, int can_flush)
{
        int rc = -EACCES;
        int xid, oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        struct cifsFileInfo *pCifsFile;
        struct cifsInodeInfo *pCifsInode;
        struct inode *inode;
        char *full_path = NULL;
        int desiredAccess;
        int disposition = FILE_OPEN;
        __u16 netfid;

        if (file->private_data) {
                pCifsFile = (struct cifsFileInfo *)file->private_data;
        } else
                return -EBADF;

        xid = GetXid();
        down(&pCifsFile->fh_sem);
        if (pCifsFile->invalidHandle == FALSE) {
                up(&pCifsFile->fh_sem);
                FreeXid(xid);
                return 0;
        }

        if (file->f_path.dentry == NULL) {
                cERROR(1, ("no valid name if dentry freed"));
                dump_stack();
                rc = -EBADF;
                goto reopen_error_exit;
        }

        inode = file->f_path.dentry->d_inode;
        if (inode == NULL) {
                cERROR(1, ("inode not valid"));
                dump_stack();
                rc = -EBADF;
                goto reopen_error_exit;
        }

        cifs_sb = CIFS_SB(inode->i_sb);
        pTcon = cifs_sb->tcon;

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
reopen_error_exit:
                up(&pCifsFile->fh_sem);
                FreeXid(xid);
                return rc;
        }

        cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
                 inode, file->f_flags, full_path));
        desiredAccess = cifs_convert_flags(file->f_flags);

        if (oplockEnabled)
                oplock = REQ_OPLOCK;
        else
                oplock = FALSE;

        /* Can not refresh inode by passing in file_info buf to be returned
           by SMBOpen and then calling get_inode_info with returned buf
           since file might have write behind data that needs to be flushed
           and server version of file size can be stale. If we knew for sure
           that inode was not dirty locally we could do this */

        rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
                         CREATE_NOT_DIR, &netfid, &oplock, NULL,
                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        if (rc) {
                up(&pCifsFile->fh_sem);
                cFYI(1, ("cifs_open returned 0x%x", rc));
                cFYI(1, ("oplock: %d", oplock));
        } else {
                pCifsFile->netfid = netfid;
                pCifsFile->invalidHandle = FALSE;
                up(&pCifsFile->fh_sem);
                pCifsInode = CIFS_I(inode);
                if (pCifsInode) {
                        if (can_flush) {
                                rc = filemap_write_and_wait(inode->i_mapping);
                                if (rc != 0)
                                        CIFS_I(inode)->write_behind_rc = rc;
                        /* temporarily disable caching while we
                           go to server to get inode info */
                                pCifsInode->clientCanCacheAll = FALSE;
                                pCifsInode->clientCanCacheRead = FALSE;
                                if (pTcon->unix_ext)
                                        rc = cifs_get_inode_info_unix(&inode,
                                                full_path, inode->i_sb, xid);
                                else
                                        rc = cifs_get_inode_info(&inode,
                                                full_path, NULL, inode->i_sb,
                                                xid);
                        } /* else we are writing out data to server already
                             and could deadlock if we tried to flush data, and
                             since we do not know if we have data that would
                             invalidate the current end of file on the server
                             we can not go to the server to get the new inod
                             info */
                        if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
                                pCifsInode->clientCanCacheAll = TRUE;
                                pCifsInode->clientCanCacheRead = TRUE;
                                cFYI(1, ("Exclusive Oplock granted on inode %p",
                                         file->f_path.dentry->d_inode));
                        } else if ((oplock & 0xF) == OPLOCK_READ) {
                                pCifsInode->clientCanCacheRead = TRUE;
                                pCifsInode->clientCanCacheAll = FALSE;
                        } else {
                                pCifsInode->clientCanCacheRead = FALSE;
                                pCifsInode->clientCanCacheAll = FALSE;
                        }
                        cifs_relock_file(pCifsFile);
                }
        }

        kfree(full_path);
        FreeXid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        int rc = 0;
        int xid, timeout;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        struct cifsFileInfo *pSMBFile =
                (struct cifsFileInfo *)file->private_data;

        xid = GetXid();

        cifs_sb = CIFS_SB(inode->i_sb);
        pTcon = cifs_sb->tcon;
        if (pSMBFile) {
                struct cifsLockInfo *li, *tmp;

                pSMBFile->closePend = TRUE;
                if (pTcon) {
                        /* no sense reconnecting to close a file that is
                           already closed */
                        if (pTcon->tidStatus != CifsNeedReconnect) {
                                timeout = 2;
                                while ((atomic_read(&pSMBFile->wrtPending) != 0)
                                        && (timeout <= 2048)) {
                                        /* Give write a better chance to get to
                                        server ahead of the close.  We do not
                                        want to add a wait_q here as it would
                                        increase the memory utilization as
                                        the struct would be in each open file,
                                        but this should give enough time to
                                        clear the socket */
#ifdef CONFIG_CIFS_DEBUG2
                                        cFYI(1, ("close delay, write pending"));
#endif /* DEBUG2 */
                                        msleep(timeout);
                                        timeout *= 4;
                                }
                                if (atomic_read(&pSMBFile->wrtPending))
                                        cERROR(1,
                                                ("close with pending writes"));
                                rc = CIFSSMBClose(xid, pTcon,
                                                  pSMBFile->netfid);
                        }
                }

                /* Delete any outstanding lock records.
                   We'll lose them when the file is closed anyway. */
                mutex_lock(&pSMBFile->lock_mutex);
                list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
                        list_del(&li->llist);
                        kfree(li);
                }
                mutex_unlock(&pSMBFile->lock_mutex);

                write_lock(&GlobalSMBSeslock);
                list_del(&pSMBFile->flist);
                list_del(&pSMBFile->tlist);
                write_unlock(&GlobalSMBSeslock);
                timeout = 10;
                /* We waited above to give the SMBWrite a chance to issue
                   on the wire (so we do not get SMBWrite returning EBADF
                   if writepages is racing with close.  Note that writepages
                   does not specify a file handle, so it is possible for a file
                   to be opened twice, and the application close the "wrong"
                   file handle - in these cases we delay long enough to allow
                   the SMBWrite to get on the wire before the SMB Close.
                   We allow total wait here over 45 seconds, more than
                   oplock break time, and more than enough to allow any write
                   to complete on the server, or to time out on the client */
                while ((atomic_read(&pSMBFile->wrtPending) != 0)
                                && (timeout <= 50000)) {
                        cERROR(1, ("writes pending, delay free of handle"));
                        msleep(timeout);
                        timeout *= 8;
                }
                kfree(pSMBFile->search_resume_name);
                kfree(file->private_data);
                file->private_data = NULL;
        } else
                rc = -EBADF;

        read_lock(&GlobalSMBSeslock);
        if (list_empty(&(CIFS_I(inode)->openFileList))) {
                cFYI(1, ("closing last open instance for inode %p", inode));
                /* if the file is not open we do not know if we can cache info
                   on this inode, much less write behind and read ahead */
                CIFS_I(inode)->clientCanCacheRead = FALSE;
                CIFS_I(inode)->clientCanCacheAll  = FALSE;
        }
        read_unlock(&GlobalSMBSeslock);
        if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
                rc = CIFS_I(inode)->write_behind_rc;
        FreeXid(xid);
        return rc;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        int xid;
        struct cifsFileInfo *pCFileStruct =
            (struct cifsFileInfo *)file->private_data;
        char *ptmp;

        cFYI(1, ("Closedir inode = 0x%p", inode));

        xid = GetXid();

        if (pCFileStruct) {
                struct cifsTconInfo *pTcon;
                struct cifs_sb_info *cifs_sb =
                        CIFS_SB(file->f_path.dentry->d_sb);

                pTcon = cifs_sb->tcon;

                cFYI(1, ("Freeing private data in close dir"));
                if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
                   (pCFileStruct->invalidHandle == FALSE)) {
                        pCFileStruct->invalidHandle = TRUE;
                        rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
                        cFYI(1, ("Closing uncompleted readdir with rc %d",
                                 rc));
                        /* not much we can do if it fails anyway, ignore rc */
                        rc = 0;
                }
                ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
                if (ptmp) {
                        cFYI(1, ("closedir free smb buf in srch struct"));
                        pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
                        if (pCFileStruct->srch_inf.smallBuf)
                                cifs_small_buf_release(ptmp);
                        else
                                cifs_buf_release(ptmp);
                }
                ptmp = pCFileStruct->search_resume_name;
                if (ptmp) {
                        cFYI(1, ("closedir free resume name"));
                        pCFileStruct->search_resume_name = NULL;
                        kfree(ptmp);
                }
                kfree(file->private_data);
                file->private_data = NULL;
        }
        /* BB can we lock the filestruct while this is going on? */
        FreeXid(xid);
        return rc;
}

static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
                                __u64 offset, __u8 lockType)
{
        struct cifsLockInfo *li =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (li == NULL)
                return -ENOMEM;
        li->offset = offset;
        li->length = len;
        li->type = lockType;
        mutex_lock(&fid->lock_mutex);
        list_add(&li->llist, &fid->llist);
        mutex_unlock(&fid->lock_mutex);
        return 0;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
        int rc, xid;
        __u32 numLock = 0;
        __u32 numUnlock = 0;
        __u64 length;
        int wait_flag = FALSE;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        __u16 netfid;
        __u8 lockType = LOCKING_ANDX_LARGE_FILES;
        int posix_locking;

        length = 1 + pfLock->fl_end - pfLock->fl_start;
        rc = -EACCES;
        xid = GetXid();

        cFYI(1, ("Lock parm: 0x%x flockflags: "
                 "0x%x flocktype: 0x%x start: %lld end: %lld",
                cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
                pfLock->fl_end));

        if (pfLock->fl_flags & FL_POSIX)
                cFYI(1, ("Posix"));
        if (pfLock->fl_flags & FL_FLOCK)
                cFYI(1, ("Flock"));
        if (pfLock->fl_flags & FL_SLEEP) {
                cFYI(1, ("Blocking lock"));
                wait_flag = TRUE;
        }
        if (pfLock->fl_flags & FL_ACCESS)
                cFYI(1, ("Process suspended by mandatory locking - "
                         "not implemented yet"));
        if (pfLock->fl_flags & FL_LEASE)
                cFYI(1, ("Lease on file - not implemented yet"));
        if (pfLock->fl_flags &
            (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
                cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));

        if (pfLock->fl_type == F_WRLCK) {
                cFYI(1, ("F_WRLCK "));
                numLock = 1;
        } else if (pfLock->fl_type == F_UNLCK) {
                cFYI(1, ("F_UNLCK"));
                numUnlock = 1;
                /* Check if unlock includes more than
                one lock range */
        } else if (pfLock->fl_type == F_RDLCK) {
                cFYI(1, ("F_RDLCK"));
                lockType |= LOCKING_ANDX_SHARED_LOCK;
                numLock = 1;
        } else if (pfLock->fl_type == F_EXLCK) {
                cFYI(1, ("F_EXLCK"));
                numLock = 1;
        } else if (pfLock->fl_type == F_SHLCK) {
                cFYI(1, ("F_SHLCK"));
                lockType |= LOCKING_ANDX_SHARED_LOCK;
                numLock = 1;
        } else
                cFYI(1, ("Unknown type of lock"));

        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        pTcon = cifs_sb->tcon;

        if (file->private_data == NULL) {
                FreeXid(xid);
                return -EBADF;
        }
        netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

        posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
                        (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));

        /* BB add code here to normalize offset and length to
        account for negative length which we can not accept over the
        wire */
        if (IS_GETLK(cmd)) {
                if (posix_locking) {
                        int posix_lock_type;
                        if (lockType & LOCKING_ANDX_SHARED_LOCK)
                                posix_lock_type = CIFS_RDLCK;
                        else
                                posix_lock_type = CIFS_WRLCK;
                        rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
                                        length, pfLock,
                                        posix_lock_type, wait_flag);
                        FreeXid(xid);
                        return rc;
                }

                /* BB we could chain these into one lock request BB */
                rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
                                 0, 1, lockType, 0 /* wait flag */ );
                if (rc == 0) {
                        rc = CIFSSMBLock(xid, pTcon, netfid, length,
                                         pfLock->fl_start, 1 /* numUnlock */ ,
                                         0 /* numLock */ , lockType,
                                         0 /* wait flag */ );
                        pfLock->fl_type = F_UNLCK;
                        if (rc != 0)
                                cERROR(1, ("Error unlocking previously locked "
                                           "range %d during test of lock", rc));
                        rc = 0;

                } else {
                        /* if rc == ERR_SHARING_VIOLATION ? */
                        rc = 0;  /* do not change lock type to unlock
                                   since range in use */
                }

                FreeXid(xid);
                return rc;
        }

        if (!numLock && !numUnlock) {
                /* if no lock or unlock then nothing
                to do since we do not know what it is */
                FreeXid(xid);
                return -EOPNOTSUPP;
        }

        if (posix_locking) {
                int posix_lock_type;
                if (lockType & LOCKING_ANDX_SHARED_LOCK)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;

                if (numUnlock == 1)
                        posix_lock_type = CIFS_UNLCK;

                rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
                                      length, pfLock,
                                      posix_lock_type, wait_flag);
        } else {
                struct cifsFileInfo *fid =
                        (struct cifsFileInfo *)file->private_data;

                if (numLock) {
                        rc = CIFSSMBLock(xid, pTcon, netfid, length,
                                        pfLock->fl_start,
                                        0, numLock, lockType, wait_flag);

                        if (rc == 0) {
                                /* For Windows locks we must store them. */
                                rc = store_file_lock(fid, length,
                                                pfLock->fl_start, lockType);
                        }
                } else if (numUnlock) {
                        /* For each stored lock that this unlock overlaps
                           completely, unlock it. */
                        int stored_rc = 0;
                        struct cifsLockInfo *li, *tmp;

                        rc = 0;
                        mutex_lock(&fid->lock_mutex);
                        list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
                                if (pfLock->fl_start <= li->offset &&
                                                (pfLock->fl_start + length) >=
                                                (li->offset + li->length)) {
                                        stored_rc = CIFSSMBLock(xid, pTcon,
                                                        netfid,
                                                        li->length, li->offset,
                                                        1, 0, li->type, FALSE);
                                        if (stored_rc)
                                                rc = stored_rc;

                                        list_del(&li->llist);
                                        kfree(li);
                                }
                        }
                        mutex_unlock(&fid->lock_mutex);
                }
        }

        if (pfLock->fl_flags & FL_POSIX)
                posix_lock_file_wait(file, pfLock);
        FreeXid(xid);
        return rc;
}

ssize_t cifs_user_write(struct file *file, const char __user *write_data,
        size_t write_size, loff_t *poffset)
{
        int rc = 0;
        unsigned int bytes_written = 0;
        unsigned int total_written;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        int xid, long_op;
        struct cifsFileInfo *open_file;

        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

        pTcon = cifs_sb->tcon;

        /* cFYI(1,
           (" write %d bytes to offset %lld of %s", write_size,
           *poffset, file->f_path.dentry->d_name.name)); */

        if (file->private_data == NULL)
                return -EBADF;
        open_file = (struct cifsFileInfo *) file->private_data;

        xid = GetXid();

        if (*poffset > file->f_path.dentry->d_inode->i_size)
                long_op = CIFS_VLONG_OP; /* writes past EOF take long time */
        else
                long_op = CIFS_LONG_OP;

        for (total_written = 0; write_size > total_written;
             total_written += bytes_written) {
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        if (file->private_data == NULL) {
                                /* file has been closed on us */
                                FreeXid(xid);
                        /* if we have gotten here we have written some data
                           and blocked, and the file has been freed on us while
                           we blocked so return what we managed to write */
                                return total_written;
                        }
                        if (open_file->closePend) {
                                FreeXid(xid);
                                if (total_written)
                                        return total_written;
                                else
                                        return -EBADF;
                        }
                        if (open_file->invalidHandle) {
                                /* we could deadlock if we called
                                   filemap_fdatawait from here so tell
                                   reopen_file not to flush data to server
                                   now */
                                rc = cifs_reopen_file(file, FALSE);
                                if (rc != 0)
                                        break;
                        }

                        rc = CIFSSMBWrite(xid, pTcon,
                                open_file->netfid,
                                min_t(const int, cifs_sb->wsize,
                                      write_size - total_written),
                                *poffset, &bytes_written,
                                NULL, write_data + total_written, long_op);
                }
                if (rc || (bytes_written == 0)) {
                        if (total_written)
                                break;
                        else {
                                FreeXid(xid);
                                return rc;
                        }
                } else
                        *poffset += bytes_written;
                long_op = CIFS_STD_OP; /* subsequent writes fast -
                                    15 seconds is plenty */
        }

        cifs_stats_bytes_written(pTcon, total_written);

        /* since the write may have blocked check these pointers again */
        if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
                struct inode *inode = file->f_path.dentry->d_inode;
/* Do not update local mtime - server will set its actual value on write
 *              inode->i_ctime = inode->i_mtime =
 *                      current_fs_time(inode->i_sb);*/
                if (total_written > 0) {
                        spin_lock(&inode->i_lock);
                        if (*poffset > file->f_path.dentry->d_inode->i_size)
                                i_size_write(file->f_path.dentry->d_inode,
                                        *poffset);
                        spin_unlock(&inode->i_lock);
                }
                mark_inode_dirty_sync(file->f_path.dentry->d_inode);
        }
        FreeXid(xid);
        return total_written;
}

static ssize_t cifs_write(struct file *file, const char *write_data,
        size_t write_size, loff_t *poffset)
{
        int rc = 0;
        unsigned int bytes_written = 0;
        unsigned int total_written;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        int xid, long_op;
        struct cifsFileInfo *open_file;

        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

        pTcon = cifs_sb->tcon;

        cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
           *poffset, file->f_path.dentry->d_name.name));

        if (file->private_data == NULL)
                return -EBADF;
        open_file = (struct cifsFileInfo *)file->private_data;

        xid = GetXid();

        if (*poffset > file->f_path.dentry->d_inode->i_size)
                long_op = CIFS_VLONG_OP; /* writes past EOF can be slow */
        else
                long_op = CIFS_LONG_OP;

        for (total_written = 0; write_size > total_written;
             total_written += bytes_written) {
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        if (file->private_data == NULL) {
                                /* file has been closed on us */
                                FreeXid(xid);
                        /* if we have gotten here we have written some data
                           and blocked, and the file has been freed on us
                           while we blocked so return what we managed to
                           write */
                                return total_written;
                        }
                        if (open_file->closePend) {
                                FreeXid(xid);
                                if (total_written)
                                        return total_written;
                                else
                                        return -EBADF;
                        }
                        if (open_file->invalidHandle) {
                                /* we could deadlock if we called
                                   filemap_fdatawait from here so tell
                                   reopen_file not to flush data to
                                   server now */
                                rc = cifs_reopen_file(file, FALSE);
                                if (rc != 0)
                                        break;
                        }
                        if (experimEnabled || (pTcon->ses->server &&
                                ((pTcon->ses->server->secMode &
                                (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                                == 0))) {
                                struct kvec iov[2];
                                unsigned int len;

                                len = min((size_t)cifs_sb->wsize,
                                          write_size - total_written);
                                /* iov[0] is reserved for smb header */
                                iov[1].iov_base = (char *)write_data +
                                                  total_written;
                                iov[1].iov_len = len;
                                rc = CIFSSMBWrite2(xid, pTcon,
                                                open_file->netfid, len,
                                                *poffset, &bytes_written,
                                                iov, 1, long_op);
                        } else
                                rc = CIFSSMBWrite(xid, pTcon,
                                         open_file->netfid,
                                         min_t(const int, cifs_sb->wsize,
                                               write_size - total_written),
                                         *poffset, &bytes_written,
                                         write_data + total_written,
                                         NULL, long_op);
                }
                if (rc || (bytes_written == 0)) {
                        if (total_written)
                                break;
                        else {
                                FreeXid(xid);
                                return rc;
                        }
                } else
                        *poffset += bytes_written;
                long_op = CIFS_STD_OP; /* subsequent writes fast -
                                    15 seconds is plenty */
        }

        cifs_stats_bytes_written(pTcon, total_written);

        /* since the write may have blocked check these pointers again */
        if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
/*BB We could make this contingent on superblock ATIME flag too */
/*              file->f_path.dentry->d_inode->i_ctime =
                file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
                if (total_written > 0) {
                        spin_lock(&file->f_path.dentry->d_inode->i_lock);
                        if (*poffset > file->f_path.dentry->d_inode->i_size)
                                i_size_write(file->f_path.dentry->d_inode,
                                             *poffset);
                        spin_unlock(&file->f_path.dentry->d_inode->i_lock);
                }
                mark_inode_dirty_sync(file->f_path.dentry->d_inode);
        }
        FreeXid(xid);
        return total_written;
}

#ifdef CONFIG_CIFS_EXPERIMENTAL
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
{
        struct cifsFileInfo *open_file = NULL;

        read_lock(&GlobalSMBSeslock);
        /* we could simply get the first_list_entry since write-only entries
           are always at the end of the list but since the first entry might
           have a close pending, we go through the whole list */
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (open_file->closePend)
                        continue;
                if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
                    (open_file->pfile->f_flags & O_RDONLY))) {
                        if (!open_file->invalidHandle) {
                                /* found a good file */
                                /* lock it so it will not be closed on us */
                                atomic_inc(&open_file->wrtPending);
                                read_unlock(&GlobalSMBSeslock);
                                return open_file;
                        } /* else might as well continue, and look for
                             another, or simply have the caller reopen it
                             again rather than trying to fix this handle */
                } else /* write only file */
                        break; /* write only files are last so must be done */
        }
        read_unlock(&GlobalSMBSeslock);
        return NULL;
}
#endif

struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
{
        struct cifsFileInfo *open_file;
        int rc;

        /* Having a null inode here (because mapping->host was set to zero by
        the VFS or MM) should not happen but we had reports of on oops (due to
        it being zero) during stress testcases so we need to check for it */

        if (cifs_inode == NULL) {
                cERROR(1, ("Null inode passed to cifs_writeable_file"));
                dump_stack();
                return NULL;
        }

        read_lock(&GlobalSMBSeslock);
refind_writable:
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (open_file->closePend)
                        continue;
                if (open_file->pfile &&
                    ((open_file->pfile->f_flags & O_RDWR) ||
                     (open_file->pfile->f_flags & O_WRONLY))) {
                        atomic_inc(&open_file->wrtPending);

                        if (!open_file->invalidHandle) {
                                /* found a good writable file */
                                read_unlock(&GlobalSMBSeslock);
                                return open_file;
                        }

                        read_unlock(&GlobalSMBSeslock);
                        /* Had to unlock since following call can block */
                        rc = cifs_reopen_file(open_file->pfile, FALSE);
                        if (!rc) {
                                if (!open_file->closePend)
                                        return open_file;
                                else { /* start over in case this was deleted */
                                       /* since the list could be modified */
                                        read_lock(&GlobalSMBSeslock);
                                        atomic_dec(&open_file->wrtPending);
                                        goto refind_writable;
                                }
                        }

                        /* if it fails, try another handle if possible -
                        (we can not do this if closePending since
                        loop could be modified - in which case we
                        have to start at the beginning of the list
                        again. Note that it would be bad
                        to hold up writepages here (rather than
                        in caller) with continuous retries */
                        cFYI(1, ("wp failed on reopen file"));
                        read_lock(&GlobalSMBSeslock);
                        /* can not use this handle, no write
                           pending on this one after all */
                        atomic_dec(&open_file->wrtPending);

                        if (open_file->closePend) /* list could have changed */
                                goto refind_writable;
                        /* else we simply continue to the next entry. Thus
                           we do not loop on reopen errors.  If we
                           can not reopen the file, for example if we
                           reconnected to a server with another client
                           racing to delete or lock the file we would not
                           make progress if we restarted before the beginning
                           of the loop here. */
                }
        }
        read_unlock(&GlobalSMBSeslock);
        return NULL;
}

static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
        struct address_space *mapping = page->mapping;
        loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        char *write_data;
        int rc = -EFAULT;
        int bytes_written = 0;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        struct inode *inode;
        struct cifsFileInfo *open_file;

        if (!mapping || !mapping->host)
                return -EFAULT;

        inode = page->mapping->host;
        cifs_sb = CIFS_SB(inode->i_sb);
        pTcon = cifs_sb->tcon;

        offset += (loff_t)from;
        write_data = kmap(page);
        write_data += from;

        if ((to > PAGE_CACHE_SIZE) || (from > to)) {
                kunmap(page);
                return -EIO;
        }

        /* racing with truncate? */
        if (offset > mapping->host->i_size) {
                kunmap(page);
                return 0; /* don't care */
        }

        /* check to make sure that we are not extending the file */
        if (mapping->host->i_size - offset < (loff_t)to)
                to = (unsigned)(mapping->host->i_size - offset);

        open_file = find_writable_file(CIFS_I(mapping->host));
        if (open_file) {
                bytes_written = cifs_write(open_file->pfile, write_data,
                                           to-from, &offset);
                atomic_dec(&open_file->wrtPending);
                /* Does mm or vfs already set times? */
                inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
                if ((bytes_written > 0) && (offset)) {
                        rc = 0;
                } else if (bytes_written < 0) {
                        if (rc != -EBADF)
                                rc = bytes_written;
                }
        } else {
                cFYI(1, ("No writeable filehandles for inode"));
                rc = -EIO;
        }

        kunmap(page);
        return rc;
}

static int cifs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        unsigned int bytes_to_write;
        unsigned int bytes_written;
        struct cifs_sb_info *cifs_sb;
        int done = 0;
        pgoff_t end;
        pgoff_t index;
        int range_whole = 0;
        struct kvec *iov;
        int len;
        int n_iov = 0;
        pgoff_t next;
        int nr_pages;
        __u64 offset = 0;
        struct cifsFileInfo *open_file;
        struct page *page;
        struct pagevec pvec;
        int rc = 0;
        int scanned = 0;
        int xid;

        cifs_sb = CIFS_SB(mapping->host->i_sb);

        /*
         * If wsize is smaller than the page cache size, default to writing
         * one page at a time via cifs_writepage
         */
        if (cifs_sb->wsize < PAGE_CACHE_SIZE)
                return generic_writepages(mapping, wbc);

        if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
                if (cifs_sb->tcon->ses->server->secMode &
                                (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                        if (!experimEnabled)
                                return generic_writepages(mapping, wbc);

        iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
        if (iov == NULL)
                return generic_writepages(mapping, wbc);

        /*
         * BB: Is this meaningful for a non-block-device file system?
         * If it is, we should test it again after we do I/O
         */
        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
                kfree(iov);
                return 0;
        }

        xid = GetXid();

        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                scanned = 1;
        }
retry:
        while (!done && (index <= end) &&
               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                        PAGECACHE_TAG_DIRTY,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
                int first;
                unsigned int i;

                first = -1;
                next = 0;
                n_iov = 0;
                bytes_to_write = 0;

                for (i = 0; i < nr_pages; i++) {
                        page = pvec.pages[i];
                        /*
                         * At this point we hold neither mapping->tree_lock nor
                         * lock on the page itself: the page may be truncated or
                         * invalidated (changing page->mapping to NULL), or even
                         * swizzled back from swapper_space to tmpfs file
                         * mapping
                         */

                        if (first < 0)
                                lock_page(page);
                        else if (TestSetPageLocked(page))
                                break;

                        if (unlikely(page->mapping != mapping)) {
                                unlock_page(page);
                                break;
                        }

                        if (!wbc->range_cyclic && page->index > end) {
                                done = 1;
                                unlock_page(page);
                                break;
                        }

                        if (next && (page->index != next)) {
                                /* Not next consecutive page */
                                unlock_page(page);
                                break;
                        }

                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);

                        if (PageWriteback(page) ||
                                        !clear_page_dirty_for_io(page)) {
                                unlock_page(page);
                                break;
                        }

                        /*
                         * This actually clears the dirty bit in the radix tree.
                         * See cifs_writepage() for more commentary.
                         */
                        set_page_writeback(page);

                        if (page_offset(page) >= mapping->host->i_size) {
                                done = 1;
                                unlock_page(page);
                                end_page_writeback(page);
                                break;
                        }

                        /*
                         * BB can we get rid of this?  pages are held by pvec
                         */
                        page_cache_get(page);

                        len = min(mapping->host->i_size - page_offset(page),
                                  (loff_t)PAGE_CACHE_SIZE);

                        /* reserve iov[0] for the smb header */
                        n_iov++;
                        iov[n_iov].iov_base = kmap(page);
                        iov[n_iov].iov_len = len;
                        bytes_to_write += len;

                        if (first < 0) {
                                first = i;
                                offset = page_offset(page);
                        }
                        next = page->index + 1;
                        if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
                                break;
                }
                if (n_iov) {
                        /* Search for a writable handle every time we call
                         * CIFSSMBWrite2.  We can't rely on the last handle
                         * we used to still be valid
                         */
                        open_file = find_writable_file(CIFS_I(mapping->host));
                        if (!open_file) {
                                cERROR(1, ("No writable handles for inode"));
                                rc = -EBADF;
                        } else {
                                rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
                                                   open_file->netfid,
                                                   bytes_to_write, offset,
                                                   &bytes_written, iov, n_iov,
                                                   CIFS_LONG_OP);
                                atomic_dec(&open_file->wrtPending);
                                if (rc || bytes_written < bytes_to_write) {
                                        cERROR(1, ("Write2 ret %d, wrote %d",
                                                   rc, bytes_written));
                                        /* BB what if continued retry is
                                           requested via mount flags? */
                                        if (rc == -ENOSPC)
                                                set_bit(AS_ENOSPC, &mapping->flags);
                                        else
                                                set_bit(AS_EIO, &mapping->flags);
                                } else {
                                        cifs_stats_bytes_written(cifs_sb->tcon,
                                                                 bytes_written);
                                }
                        }
                        for (i = 0; i < n_iov; i++) {
                                page = pvec.pages[first + i];
                                /* Should we also set page error on
                                success rc but too little data written? */
                                /* BB investigate retry logic on temporary
                                server crash cases and how recovery works
                                when page marked as error */
                                if (rc)
                                        SetPageError(page);
                                kunmap(page);
                                unlock_page(page);
                                end_page_writeback(page);
                                page_cache_release(page);
                        }
                        if ((wbc->nr_to_write -= n_iov) <= 0)
                                done = 1;
                        index = next;
                }
                pagevec_release(&pvec);
        }
        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = 1;
                index = 0;
                goto retry;
        }
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;

        FreeXid(xid);
        kfree(iov);
        return rc;
}

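/*
 * Write a single dirty page back to the server by way of
 * cifs_partialpagewrite(), tagging the page for writeback around the call.
 */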
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
        int rc = -EFAULT;
        int xid;

        xid = GetXid();
/* BB add check for wbc flags */
        page_cache_get(page);
        if (!PageUptodate(page)) {
                cFYI(1, ("ppw - page not up to date"));
        }

        /*
         * Set the "writeback" flag, and clear "dirty" in the radix tree.
         *
         * A writepage() implementation always needs to do either this,
         * or re-dirty the page with "redirty_page_for_writepage()" in
         * the case of a failure.
         *
         * Just unlocking the page will cause the radix tree tag-bits
         * to fail to update with the state of the page correctly.
         */
        set_page_writeback(page);
        rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
        unlock_page(page);
        end_page_writeback(page);
        page_cache_release(page);
        FreeXid(xid);
        return rc;
}

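/*
 * Complete a buffered write to a page: extend i_size if the write went past
 * the end of the file, then either push non-uptodate data to the server
 * immediately via cifs_write() or simply mark the page dirty.
 */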
static int cifs_commit_write(struct file *file, struct page *page,
        unsigned offset, unsigned to)
{
        int xid;
        int rc = 0;
        struct inode *inode = page->mapping->host;
        loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        char *page_data;

        xid = GetXid();
        cFYI(1, ("commit write for page %p up to position %lld for %d",
                 page, position, to));
        spin_lock(&inode->i_lock);
        if (position > inode->i_size) {
                i_size_write(inode, position);
        }
        spin_unlock(&inode->i_lock);
        if (!PageUptodate(page)) {
                position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
                /* can not rely on (or let) writepage write this data */
                if (to < offset) {
                        cFYI(1, ("Illegal offsets, can not copy from %d to %d",
                                offset, to));
                        FreeXid(xid);
                        return rc;
                }
                /* this is probably better than directly calling
                   partialpage_write since in this function the file handle is
                   known which we might as well leverage */
                /* BB check if anything else missing out of ppw
                   such as updating last write time */
                page_data = kmap(page);
                rc = cifs_write(file, page_data + offset, to-offset,
                                &position);
                if (rc > 0)
                        rc = 0;
                /* else if (rc < 0) should we set writebehind rc? */
                kunmap(page);
        } else {
                set_page_dirty(page);
        }

        FreeXid(xid);
        return rc;
}

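/*
 * fsync: flush all dirty pages for the inode and report any write-behind
 * error recorded earlier, clearing it once it has been returned.
 */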
int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        int xid;
        int rc = 0;
        struct inode *inode = file->f_path.dentry->d_inode;

        xid = GetXid();

        cFYI(1, ("Sync file - name: %s datasync: 0x%x",
                dentry->d_name.name, datasync));

        rc = filemap_write_and_wait(inode->i_mapping);
        if (rc == 0) {
                rc = CIFS_I(inode)->write_behind_rc;
                CIFS_I(inode)->write_behind_rc = 0;
        }
        FreeXid(xid);
        return rc;
}

/* static void cifs_sync_page(struct page *page)
{
        struct address_space *mapping;
        struct inode *inode;
        unsigned long index = page->index;
        unsigned int rpages = 0;
        int rc = 0;

        cFYI(1, ("sync page %p", page));
        mapping = page->mapping;
        if (!mapping)
                return 0;
        inode = mapping->host;
        if (!inode)
                return; */

/*      fill in rpages then
        result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */

/*      cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));

#if 0
        if (rc < 0)
                return rc;
        return 0;
#endif
} */

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        int rc = 0;

        /* Rather than do the steps manually:
           lock the inode for writing
           loop through pages looking for write behind data (dirty pages)
           coalesce into contiguous 16K (or smaller) chunks to write to server
           send to server (prefer in parallel)
           deal with writebehind errors
           unlock inode for writing
           filemapfdatawrite appears easier for the time being */

        rc = filemap_fdatawrite(inode->i_mapping);
        /* reset wb rc if we were able to write out dirty pages */
        if (!rc) {
                rc = CIFS_I(inode)->write_behind_rc;
                CIFS_I(inode)->write_behind_rc = 0;
        }

        cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));

        return rc;
}

ssize_t cifs_user_read(struct file *file, char __user *read_data,
        size_t read_size, loff_t *poffset)
{
        int rc = -EACCES;
        unsigned int bytes_read = 0;
        unsigned int total_read = 0;
        unsigned int current_read_size;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        int xid;
        struct cifsFileInfo *open_file;
        char *smb_read_data;
        char __user *current_offset;
        struct smb_com_read_rsp *pSMBr;

        xid = GetXid();
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        pTcon = cifs_sb->tcon;

        if (file->private_data == NULL) {
                FreeXid(xid);
                return -EBADF;
        }
        open_file = (struct cifsFileInfo *)file->private_data;

        if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
                cFYI(1, ("attempting read on write only file instance"));
        }
        for (total_read = 0, current_offset = read_data;
             read_size > total_read;
             total_read += bytes_read, current_offset += bytes_read) {
                current_read_size = min_t(const int, read_size - total_read,
                                          cifs_sb->rsize);
                rc = -EAGAIN;
                smb_read_data = NULL;
                while (rc == -EAGAIN) {
                        int buf_type = CIFS_NO_BUFFER;
                        if ((open_file->invalidHandle) &&
                            (!open_file->closePend)) {
                                rc = cifs_reopen_file(file, TRUE);
                                if (rc != 0)
                                        break;
                        }
                        rc = CIFSSMBRead(xid, pTcon,
                                         open_file->netfid,
                                         current_read_size, *poffset,
                                         &bytes_read, &smb_read_data,
                                         &buf_type);
                        pSMBr = (struct smb_com_read_rsp *)smb_read_data;
                        if (smb_read_data) {
                                if (copy_to_user(current_offset,
                                                smb_read_data +
                                                4 /* RFC1001 length field */ +
                                                le16_to_cpu(pSMBr->DataOffset),
                                                bytes_read)) {
                                        rc = -EFAULT;
                                }

                                if (buf_type == CIFS_SMALL_BUFFER)
                                        cifs_small_buf_release(smb_read_data);
                                else if (buf_type == CIFS_LARGE_BUFFER)
                                        cifs_buf_release(smb_read_data);
                                smb_read_data = NULL;
                        }
                }
                if (rc || (bytes_read == 0)) {
                        if (total_read) {
                                break;
                        } else {
                                FreeXid(xid);
                                return rc;
                        }
                } else {
                        cifs_stats_bytes_read(pTcon, bytes_read);
                        *poffset += bytes_read;
                }
        }
        FreeXid(xid);
        return total_read;
}

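/*
 * Kernel-buffer read used by readpage: like cifs_user_read() but the SMB
 * response data is received directly into the caller's buffer, capping the
 * request size for servers that did not negotiate large read support.
 */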
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
        loff_t *poffset)
{
        int rc = -EACCES;
        unsigned int bytes_read = 0;
        unsigned int total_read;
        unsigned int current_read_size;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        int xid;
        char *current_offset;
        struct cifsFileInfo *open_file;
        int buf_type = CIFS_NO_BUFFER;

        xid = GetXid();
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        pTcon = cifs_sb->tcon;

        if (file->private_data == NULL) {
                FreeXid(xid);
                return -EBADF;
        }
        open_file = (struct cifsFileInfo *)file->private_data;

        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
                cFYI(1, ("attempting read on write only file instance"));

        for (total_read = 0, current_offset = read_data;
             read_size > total_read;
             total_read += bytes_read, current_offset += bytes_read) {
                current_read_size = min_t(const int, read_size - total_read,
                                          cifs_sb->rsize);
                /* For Windows ME and 9x we do not want to request more
                than was negotiated, since the server will refuse the read */
                if ((pTcon->ses) &&
                        !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
                        current_read_size = min_t(const int, current_read_size,
                                        pTcon->ses->server->maxBuf - 128);
                }
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        if ((open_file->invalidHandle) &&
                            (!open_file->closePend)) {
                                rc = cifs_reopen_file(file, TRUE);
                                if (rc != 0)
                                        break;
                        }
                        rc = CIFSSMBRead(xid, pTcon,
                                         open_file->netfid,
                                         current_read_size, *poffset,
                                         &bytes_read, &current_offset,
                                         &buf_type);
                }
                if (rc || (bytes_read == 0)) {
                        if (total_read) {
                                break;
                        } else {
                                FreeXid(xid);
                                return rc;
                        }
                } else {
                        cifs_stats_bytes_read(pTcon, total_read);
                        *poffset += bytes_read;
                }
        }
        FreeXid(xid);
        return total_read;
}

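/* Revalidate the inode, then fall through to generic_file_mmap(). */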
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct dentry *dentry = file->f_path.dentry;
        int rc, xid;

        xid = GetXid();
        rc = cifs_revalidate(dentry);
        if (rc) {
                cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
                FreeXid(xid);
                return rc;
        }
        rc = generic_file_mmap(file, vma);
        FreeXid(xid);
        return rc;
}

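/*
 * Copy data returned by a read into the pages on the readahead list,
 * adding each page to the page cache, zero-filling any partial tail page
 * and queueing the pages for addition to the LRU.
 */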
static void cifs_copy_cache_pages(struct address_space *mapping,
        struct list_head *pages, int bytes_read, char *data,
        struct pagevec *plru_pvec)
{
        struct page *page;
        char *target;

        while (bytes_read > 0) {
                if (list_empty(pages))
                        break;

                page = list_entry(pages->prev, struct page, lru);
                list_del(&page->lru);

                if (add_to_page_cache(page, mapping, page->index,
                                      GFP_KERNEL)) {
                        page_cache_release(page);
                        cFYI(1, ("Add page cache failed"));
                        data += PAGE_CACHE_SIZE;
                        bytes_read -= PAGE_CACHE_SIZE;
                        continue;
                }

                target = kmap_atomic(page, KM_USER0);

                if (PAGE_CACHE_SIZE > bytes_read) {
                        memcpy(target, data, bytes_read);
                        /* zero the tail end of this partial page */
                        memset(target + bytes_read, 0,
                               PAGE_CACHE_SIZE - bytes_read);
                        bytes_read = 0;
                } else {
                        memcpy(target, data, PAGE_CACHE_SIZE);
                        bytes_read -= PAGE_CACHE_SIZE;
                }
                kunmap_atomic(target, KM_USER0);

                flush_dcache_page(page);
                SetPageUptodate(page);
                unlock_page(page);
                if (!pagevec_add(plru_pvec, page))
                        __pagevec_lru_add(plru_pvec);
                data += PAGE_CACHE_SIZE;
        }
        return;
}

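/*
 * Readahead entry point: read runs of contiguous pages from the server in
 * rsize-bounded requests and populate the page cache via
 * cifs_copy_cache_pages().
 */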
static int cifs_readpages(struct file *file, struct address_space *mapping,
        struct list_head *page_list, unsigned num_pages)
{
        int rc = -EACCES;
        int xid;
        loff_t offset;
        struct page *page;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        unsigned int bytes_read = 0;
        unsigned int read_size, i;
        char *smb_read_data = NULL;
        struct smb_com_read_rsp *pSMBr;
        struct pagevec lru_pvec;
        struct cifsFileInfo *open_file;
        int buf_type = CIFS_NO_BUFFER;

        xid = GetXid();
        if (file->private_data == NULL) {
                FreeXid(xid);
                return -EBADF;
        }
        open_file = (struct cifsFileInfo *)file->private_data;
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        pTcon = cifs_sb->tcon;

        pagevec_init(&lru_pvec, 0);
#ifdef CONFIG_CIFS_DEBUG2
                cFYI(1, ("rpages: num pages %d", num_pages));
#endif
        for (i = 0; i < num_pages; ) {
                unsigned contig_pages;
                struct page *tmp_page;
                unsigned long expected_index;

                if (list_empty(page_list))
                        break;

                page = list_entry(page_list->prev, struct page, lru);
                offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

                /* count adjacent pages that we will read into */
                contig_pages = 0;
                expected_index =
                        list_entry(page_list->prev, struct page, lru)->index;
                list_for_each_entry_reverse(tmp_page, page_list, lru) {
                        if (tmp_page->index == expected_index) {
                                contig_pages++;
                                expected_index++;
                        } else
                                break;
                }
                if (contig_pages + i > num_pages)
                        contig_pages = num_pages - i;

                /* for reads over a certain size could initiate async
                   read ahead */

                read_size = contig_pages * PAGE_CACHE_SIZE;
                /* Read size needs to be in multiples of one page */
                read_size = min_t(const unsigned int, read_size,
                                  cifs_sb->rsize & PAGE_CACHE_MASK);
#ifdef CONFIG_CIFS_DEBUG2
                cFYI(1, ("rpages: read size 0x%x  contiguous pages %d",
                                read_size, contig_pages));
#endif
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        if ((open_file->invalidHandle) &&
                            (!open_file->closePend)) {
                                rc = cifs_reopen_file(file, TRUE);
                                if (rc != 0)
                                        break;
                        }

                        rc = CIFSSMBRead(xid, pTcon,
                                         open_file->netfid,
                                         read_size, offset,
                                         &bytes_read, &smb_read_data,
                                         &buf_type);
                        /* BB more RC checks ? */
                        if (rc == -EAGAIN) {
                                if (smb_read_data) {
                                        if (buf_type == CIFS_SMALL_BUFFER)
                                                cifs_small_buf_release(smb_read_data);
                                        else if (buf_type == CIFS_LARGE_BUFFER)
                                                cifs_buf_release(smb_read_data);
                                        smb_read_data = NULL;
                                }
                        }
                }
                if ((rc < 0) || (smb_read_data == NULL)) {
                        cFYI(1, ("Read error in readpages: %d", rc));
                        break;
                } else if (bytes_read > 0) {
                        task_io_account_read(bytes_read);
                        pSMBr = (struct smb_com_read_rsp *)smb_read_data;
                        cifs_copy_cache_pages(mapping, page_list, bytes_read,
                                smb_read_data + 4 /* RFC1001 hdr */ +
                                le16_to_cpu(pSMBr->DataOffset), &lru_pvec);

                        i += bytes_read >> PAGE_CACHE_SHIFT;
                        cifs_stats_bytes_read(pTcon, bytes_read);
                        if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
                                i++; /* account for partial page */

                                /* server copy of file can have smaller size
                                   than client */
                                /* BB do we need to verify this common case ?
                                   this case is ok - if we are at server EOF
                                   we will hit it on next read */

                                /* break; */
                        }
                } else {
                        cFYI(1, ("No bytes read (%d) at offset %lld. "
                                 "Cleaning remaining pages from readahead list",
                                 bytes_read, offset));
                        /* BB turn off caching and do new lookup on
                           file size at server? */
                        break;
                }
                if (smb_read_data) {
                        if (buf_type == CIFS_SMALL_BUFFER)
                                cifs_small_buf_release(smb_read_data);
                        else if (buf_type == CIFS_LARGE_BUFFER)
                                cifs_buf_release(smb_read_data);
                        smb_read_data = NULL;
                }
                bytes_read = 0;
        }

        pagevec_lru_add(&lru_pvec);

/* need to free smb_read_data buf before exit */
        if (smb_read_data) {
                if (buf_type == CIFS_SMALL_BUFFER)
                        cifs_small_buf_release(smb_read_data);
                else if (buf_type == CIFS_LARGE_BUFFER)
                        cifs_buf_release(smb_read_data);
                smb_read_data = NULL;
        }

        FreeXid(xid);
        return rc;
}

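/*
 * Fill a single page from the server and mark it uptodate; short reads are
 * zero-padded to the end of the page.
 */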
static int cifs_readpage_worker(struct file *file, struct page *page,
        loff_t *poffset)
{
        char *read_data;
        int rc;

        page_cache_get(page);
        read_data = kmap(page);
        /* for reads over a certain size could initiate async read ahead */

        rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

        if (rc < 0)
                goto io_error;
        else
                cFYI(1, ("Bytes read %d", rc));

        file->f_path.dentry->d_inode->i_atime =
                current_fs_time(file->f_path.dentry->d_inode->i_sb);

        if (PAGE_CACHE_SIZE > rc)
                memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

        flush_dcache_page(page);
        SetPageUptodate(page);
        rc = 0;

io_error:
        kunmap(page);
        page_cache_release(page);
        return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
        loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        int rc = -EACCES;
        int xid;

        xid = GetXid();

        if (file->private_data == NULL) {
                FreeXid(xid);
                return -EBADF;
        }

        cFYI(1, ("readpage %p at offset %d 0x%x\n",
                 page, (int)offset, (int)offset));

        rc = cifs_readpage_worker(file, page, &offset);

        unlock_page(page);

        FreeXid(xid);
        return rc;
}

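/* Return 1 if any non-closing open handle on this inode allows writing. */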
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
        struct cifsFileInfo *open_file;

        read_lock(&GlobalSMBSeslock);
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (open_file->closePend)
                        continue;
                if (open_file->pfile &&
                    ((open_file->pfile->f_flags & O_RDWR) ||
                     (open_file->pfile->f_flags & O_WRONLY))) {
                        read_unlock(&GlobalSMBSeslock);
                        return 1;
                }
        }
        read_unlock(&GlobalSMBSeslock);
        return 0;
}

/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
        if (!cifsInode)
                return 1;

        if (is_inode_writable(cifsInode)) {
                /* This inode is open for write at least once */
                struct cifs_sb_info *cifs_sb;

                cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
                        /* since no page cache to corrupt on directio
                        we can change size safely */
                        return 1;
                }

                if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
                        return 1;

                return 0;
        } else
                return 1;
}

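/*
 * Prepare a page for a partial write.  A full-page write needs no read;
 * writes at or beyond EOF just zero the page; otherwise the page is read
 * in (when the file was not opened write-only) so the later commit_write
 * sees up-to-date data.
 */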
static int cifs_prepare_write(struct file *file, struct page *page,
        unsigned from, unsigned to)
{
        int rc = 0;
        loff_t i_size;
        loff_t offset;

        cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
        if (PageUptodate(page))
                return 0;

        /* If we are writing a full page it will be up to date,
           no need to read from the server */
        if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
                SetPageUptodate(page);
                return 0;
        }

        offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        i_size = i_size_read(page->mapping->host);

        if ((offset >= i_size) ||
            ((from == 0) && (offset + to) >= i_size)) {
                /*
                 * We don't need to read data beyond the end of the file.
                 * zero it, and set the page uptodate
                 */
                simple_prepare_write(file, page, from, to);
                SetPageUptodate(page);
        } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
                /* might as well read a page, it is fast enough */
                rc = cifs_readpage_worker(file, page, &offset);
        } else {
                /* we could try using another file handle if there is one -
                   but how would we lock it to prevent close of that handle
                   racing with this read? In any case
                   this will be written out by commit_write so is fine */
        }

        /* we do not need to pass errors back
           e.g. if we do not have read access to the file
           because cifs_commit_write will do the right thing.  -- shaggy */

        return 0;
}

const struct address_space_operations cifs_addr_ops = {
        .readpage = cifs_readpage,
        .readpages = cifs_readpages,
        .writepage = cifs_writepage,
        .writepages = cifs_writepages,
        .prepare_write = cifs_prepare_write,
        .commit_write = cifs_commit_write,
        .set_page_dirty = __set_page_dirty_nobuffers,
        /* .sync_page = cifs_sync_page, */
        /* .direct_IO = */
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
        .readpage = cifs_readpage,
        .writepage = cifs_writepage,
        .writepages = cifs_writepages,
        .prepare_write = cifs_prepare_write,
        .commit_write = cifs_commit_write,
        .set_page_dirty = __set_page_dirty_nobuffers,
        /* .sync_page = cifs_sync_page, */
        /* .direct_IO = */
};