OpenCores
URL https://opencores.org/ocsvn/c0or1k/c0or1k/trunk

Subversion Repositories c0or1k

[/] [c0or1k/] [trunk/] [conts/] [posix/] [mm0/] [mm/] [file.c] - Blame information for rev 7

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 2 drasko
/*
2
 * File read, write, open and close.
3
 *
4
 * Copyright (C) 2008 Bahadir Balban
5
 */
6
#include <init.h>
7
#include <vm_area.h>
8
#include <malloc/malloc.h>
9
#include <mm/alloc_page.h>
10
#include <l4/macros.h>
11
#include <l4/api/errno.h>
12
#include <l4lib/types.h>
13
#include L4LIB_INC_ARCH(syscalls.h)
14
#include L4LIB_INC_ARCH(syslib.h)
15
#include <l4lib/ipcdefs.h>
16
#include <l4/api/kip.h>
17
#include <posix/sys/types.h>
18
#include <string.h>
19
#include <globals.h>
20
#include <file.h>
21
#include <user.h>
22
#include <test.h>
23
 
24
#include <lib/pathstr.h>
25
#include <malloc/malloc.h>
26
#include <stdio.h>
27
#include <task.h>
28
#include <stat.h>
29
#include <vfs.h>
30
#include <alloca.h>
31
#include <path.h>
32
#include <syscalls.h>
33
 
34
#include INC_GLUE(message.h)
35
 
36
/* Copy from one page's buffer into another page */
37
int page_copy(struct page *dst, struct page *src,
38
              unsigned long dst_offset, unsigned long src_offset,
39
              unsigned long size)
40
{
41
        void *dstvaddr, *srcvaddr;
42
 
43
        BUG_ON(dst_offset + size > PAGE_SIZE);
44
        BUG_ON(src_offset + size > PAGE_SIZE);
45
 
46
        dstvaddr = page_to_virt(dst);
47
        srcvaddr = page_to_virt(src);
48
/*
49
        printf("%s: Copying from page with offset %lx to page with offset %lx\n"
50
               "src copy offset: 0x%lx, dst copy offset: 0x%lx, copy size: %lx\n",
51
               __FUNCTION__, src->offset, dst->offset, src_offset, dst_offset,
52
               size);
53
*/
54
//      printf("%s: Copying string: %s, source: %lx\n", __FUNCTION__,
55
//                     (char *)(srcvaddr + src_offset), (unsigned long)srcvaddr+src_offset);
56
 
57
        memcpy(dstvaddr + dst_offset, srcvaddr + src_offset, size);
58
 
59
        return 0;
60
}
61
 
62
int vfs_read(struct vnode *v, unsigned long file_offset,
63
             unsigned long npages, void *pagebuf)
64
{
65
        /* Ensure vnode is not a directory */
66
        if (vfs_isdir(v))
67
                return -EISDIR;
68
 
69
        return v->fops.read(v, file_offset, npages, pagebuf);
70
}
71
 
72
/* Directories only for now */
73
void print_vnode(struct vnode *v)
74
{
75
        struct dentry *d, *c;
76
 
77
        printf("Vnode names:\n");
78
        list_foreach_struct(d, &v->dentries, vref) {
79
                printf("%s\n", d->name);
80
                printf("Children dentries:\n");
81
                list_foreach_struct(c, &d->children, child)
82
                        printf("%s\n", c->name);
83
        }
84
}
85
 
86
 
87
/* Creates a node under a directory, e.g. a file, directory. */
88
struct vnode *vfs_vnode_create(struct tcb *task, struct pathdata *pdata,
89
                               unsigned int mode)
90
{
91
        struct vnode *vparent, *newnode;
92
        const char *nodename;
93
 
94
        /* The last component is to be created */
95
        nodename = pathdata_last_component(pdata);
96
 
97
        /* Check that the parent directory exists. */
98
        if (IS_ERR(vparent = vfs_vnode_lookup_bypath(pdata)))
99
                return vparent;
100
 
101
        /* The parent vnode must be a directory. */
102
        if (!vfs_isdir(vparent))
103
                return PTR_ERR(-ENOENT);
104
 
105
        /* Create new directory under the parent */
106
        if (IS_ERR(newnode = vparent->ops.mknod(vparent, nodename, mode)))
107
                return newnode;
108
 
109
        // print_vnode(vparent);
110
        return newnode;
111
}
112
 
113
int sys_mkdir(struct tcb *task, const char *pathname, unsigned int mode)
114
{
115
        struct pathdata *pdata;
116
        struct vnode *v;
117
        int ret = 0;
118
 
119
        /* Parse path data */
120
        if (IS_ERR(pdata = pathdata_parse(pathname,
121
                                          alloca(strlen(pathname) + 1),
122
                                          task)))
123
                return (int)pdata;
124
 
125
        /* Make sure we create a directory */
126
        mode |= S_IFDIR;
127
 
128
        /* Create the directory or fail */
129
        if (IS_ERR(v = vfs_vnode_create(task, pdata, mode)))
130
                ret = (int)v;
131
 
132
        /* Destroy extracted path data */
133
        pathdata_destroy(pdata);
134
        return ret;
135
}
136
 
137
int sys_chdir(struct tcb *task, const char *pathname)
138
{
139
        struct vnode *v;
140
        struct pathdata *pdata;
141
        int ret = 0;
142
 
143
        /* Parse path data */
144
        if (IS_ERR(pdata = pathdata_parse(pathname,
145
                                          alloca(strlen(pathname) + 1),
146
                                          task)))
147
                return (int)pdata;
148
 
149
        /* Get the vnode */
150
        if (IS_ERR(v = vfs_vnode_lookup_bypath(pdata))) {
151
                ret = (int)v;
152
                goto out;
153
        }
154
 
155
        /* Ensure it's a directory */
156
        if (!vfs_isdir(v)) {
157
                ret = -ENOTDIR;
158
                goto out;
159
        }
160
 
161
        /* Assign the current directory pointer */
162
        task->fs_data->curdir = v;
163
 
164
out:
165
        /* Destroy extracted path data */
166
        pathdata_destroy(pdata);
167
        return ret;
168
}
169
 
170
void fill_kstat(struct vnode *v, struct kstat *ks)
171
{
172
        ks->vnum = (u64)v->vnum;
173
        ks->mode = v->mode;
174
        ks->links = v->links;
175
        ks->uid = v->owner & 0xFFFF;
176
        ks->gid = (v->owner >> 16) & 0xFFFF;
177
        ks->size = v->size;
178
        ks->blksize = v->sb->blocksize;
179
        ks->atime = v->atime;
180
        ks->mtime = v->mtime;
181
        ks->ctime = v->ctime;
182
}
183
 
184
int sys_fstat(struct tcb *task, int fd, void *statbuf)
185
{
186
        /* Check that fd is valid */
187
        if (fd < 0 || fd > TASK_FILES_MAX ||
188
            !task->files->fd[fd].vmfile)
189
                return -EBADF;
190
 
191
        /* Fill in the c0-style stat structure */
192
        fill_kstat(task->files->fd[fd].vmfile->vnode, statbuf);
193
 
194
        return 0;
195
}
196
 
197
/*
 * stat syscall handler.
 *
 * Returns a codezero-style stat structure which in turn is converted
 * to a posix-style stat structure via the libposix library in
 * userspace.
 */
int sys_stat(struct tcb *task, const char *pathname, void *statbuf)
{
        struct pathdata *pdata;
        struct vnode *v;
        int err = 0;

        /* Extract path data, or propagate the parse error */
        pdata = pathdata_parse(pathname, alloca(strlen(pathname) + 1), task);
        if (IS_ERR(pdata))
                return (int)pdata;

        /* Resolve the path and fill the c0-style stat structure */
        v = vfs_vnode_lookup_bypath(pdata);
        if (IS_ERR(v))
                err = (int)v;
        else
                fill_kstat(v, statbuf);

        pathdata_destroy(pdata);
        return err;
}
228
 
229
 
230
/*
231
 * Inserts the page to vmfile's list in order of page frame offset.
232
 * We use an ordered list instead of a better data structure for now.
233
 */
234
int insert_page_olist(struct page *this, struct vm_object *vmo)
235
{
236
        struct page *before, *after;
237
 
238
        /* Add if list is empty */
239
        if (list_empty(&vmo->page_cache)) {
240
                list_insert_tail(&this->list, &vmo->page_cache);
241
                return 0;
242
        }
243
 
244
        /* Else find the right interval */
245
        list_foreach_struct(before, &vmo->page_cache, list) {
246
                after = link_to_struct(before->list.next, struct page, list);
247
 
248
                /* If there's only one in list */
249
                if (before->list.next == &vmo->page_cache) {
250
                        /* Add as next if greater */
251
                        if (this->offset > before->offset)
252
                                list_insert(&this->list, &before->list);
253
                        /* Add  as previous if smaller */
254
                        else if (this->offset < before->offset)
255
                                list_insert_tail(&this->list, &before->list);
256
                        else
257
                                BUG();
258
                        return 0;
259
                }
260
 
261
                /* If this page is in-between two other, insert it there */
262
                if (before->offset < this->offset &&
263
                    after->offset > this->offset) {
264
                        list_insert(&this->list, &before->list);
265
                        return 0;
266
                }
267
                BUG_ON(this->offset == before->offset);
268
                BUG_ON(this->offset == after->offset);
269
        }
270
        BUG();
271
}
272
 
273
/*
274
 * This reads-in a range of pages from a file and populates the page cache
275
 * just like a page fault, but its not in the page fault path.
276
 */
277
int read_file_pages(struct vm_file *vmfile, unsigned long pfn_start,
278
                    unsigned long pfn_end)
279
{
280
        struct page *page;
281
 
282
        for (int f_offset = pfn_start; f_offset < pfn_end; f_offset++) {
283
                page = vmfile->vm_obj.pager->ops.page_in(&vmfile->vm_obj,
284
                                                         f_offset);
285
                if (IS_ERR(page)) {
286
                        printf("%s: %s:Could not read page %d "
287
                               "from file with vnum: 0x%lu\n", __TASKNAME__,
288
                               __FUNCTION__, f_offset, vmfile->vnode->vnum);
289
                        return (int)page;
290
                }
291
        }
292
 
293
        return 0;
294
}
295
 
296
/*
297
 * The buffer must be contiguous by page, if npages > 1.
298
 */
299
int vfs_write(struct vnode *v, unsigned long file_offset,
300
              unsigned long npages, void *pagebuf)
301
{
302
        int fwrite_end;
303
        int ret;
304
 
305
        // printf("%s/%s\n", __TASKNAME__, __FUNCTION__);
306
 
307
        /* Ensure vnode is not a directory */
308
        if (vfs_isdir(v))
309
                return -EISDIR;
310
 
311
        //printf("%s/%s: Writing to vnode %lu, at pgoff 0x%x, %d pages, buf at 0x%x\n",
312
        //      __TASKNAME__, __FUNCTION__, vnum, f_offset, npages, pagebuf);
313
 
314
        if ((ret = v->fops.write(v, file_offset, npages, pagebuf)) < 0)
315
                return ret;
316
 
317
        /*
318
         * If the file is extended, write silently extends it.
319
         * We update the extended size here. Otherwise subsequent write's
320
         * may fail by relying on wrong file size.
321
         */
322
        fwrite_end = __pfn_to_addr(file_offset) + ret;
323
        if (v->size < fwrite_end) {
324
                v->size = fwrite_end;
325
                v->sb->ops->write_vnode(v->sb, v);
326
        }
327
 
328
        return ret;
329
}
330
 
331
/* Writes updated file stats back to vfs. (e.g. new file size) */
332
int vfs_update_file_stats(struct vm_file *f)
333
{
334
        struct vnode *v = f->vnode;
335
 
336
        v->size = f->length;
337
        v->sb->ops->write_vnode(v->sb, v);
338
 
339
        return 0;
340
}
341
 
342
/* Writes pages in cache back to their file */
343
int write_file_pages(struct vm_file *f, unsigned long pfn_start,
344
                     unsigned long pfn_end)
345
{
346
        int err;
347
 
348
        /* We have only thought of vfs files for this */
349
        BUG_ON(f->type != VM_FILE_VFS);
350
 
351
        /* Need not flush files that haven't been written */
352
        if (!(f->vm_obj.flags & VM_DIRTY))
353
                return 0;
354
 
355
        BUG_ON(pfn_end != __pfn(page_align_up(f->length)));
356
        for (int f_offset = pfn_start; f_offset < pfn_end; f_offset++) {
357
                err = f->vm_obj.pager->ops.page_out(&f->vm_obj, f_offset);
358
                if (err < 0) {
359
                        printf("%s: %s:Could not write page %d "
360
                               "to file with vnum: 0x%lu\n", __TASKNAME__,
361
                               __FUNCTION__, f_offset, f->vnode->vnum);
362
                        return err;
363
                }
364
        }
365
 
366
        return 0;
367
}
368
 
369
/* Flush all dirty file pages and update file stats */
370
int flush_file_pages(struct vm_file *f)
371
{
372
        int err;
373
 
374
        if ((err = write_file_pages(f, 0, __pfn(page_align_up(f->length)))) < 0)
375
                return err;
376
 
377
        if ((err = vfs_update_file_stats(f)) < 0)
378
                return err;
379
 
380
        return 0;
381
}
382
 
383
/* Given a task and fd, syncs all IO on it */
384
int fsync_common(struct tcb *task, int fd)
385
{
386
        int err;
387
 
388
        /* Check fd validity */
389
        if (fd < 0 || fd > TASK_FILES_MAX)
390
                return -EINVAL;
391
 
392
        /*
393
         * If we don't know about the file, even if it was
394
         * opened by the vfs, it is sure that there's no
395
         * pending IO on it. We simply return.
396
         */
397
        if (!task->files->fd[fd].vmfile)
398
                return 0;
399
 
400
        /*
401
        printf("Thread %d flushing fd: %d, vnum: 0x%lx, vnode: %p\n",
402
               task->tid, fd, task->files->fd[fd].vmfile->vnode->vnum,
403
               task->files->fd[fd].vmfile->vnode);
404
        */
405
 
406
        /* Finish I/O on file */
407
        if ((err = flush_file_pages(task->files->fd[fd].vmfile)) < 0)
408
                return err;
409
 
410
        return 0;
411
}
412
 
413
void vm_file_put(struct vm_file *file)
414
{
415
        /* Reduce file's opener count */
416
        if (!(file->openers--))
417
                /* No openers left, check any mappers */
418
                if (!file->vm_obj.nlinks)
419
                        /* No links or openers, delete the file */
420
                        vm_file_delete(file);
421
 
422
        /* FIXME:
423
         * Shall we delete the cached vnode here as well???
424
         */
425
}
426
 
427
/*
428
 * FIXME: fsync + close could be done under a single "close" ipc
429
 * from pager. Currently there are 2 ipcs: 1 fsync + 1 fd close.
430
 */
431
 
432
/* Closes the file descriptor and notifies vfs */
433
int do_close(struct tcb *task, int fd)
434
{
435
        int err;
436
 
437
         //printf("%s: Closing fd: %d on task %d\n", __FUNCTION__,
438
         //      fd, task->tid);
439
 
440
        if ((err = id_del(task->files->fdpool, fd)) < 0) {
441
                printf("%s: Error releasing fd identifier.\n",
442
                       __FUNCTION__);
443
                return err;
444
        }
445
 
446
        if (!task->files->fd[fd].vmfile)
447
                return 0;
448
 
449
        /* Reduce file refcount etc. */
450
        vm_file_put(task->files->fd[fd].vmfile);
451
 
452
        task->files->fd[fd].cursor = 0;
453
        task->files->fd[fd].vmfile = 0;
454
 
455
        return 0;
456
}
457
 
458
/* close syscall handler: sync the file, then close its descriptor */
int sys_close(struct tcb *task, int fd)
{
        int err;

        /* Flush pending IO and update file stats first */
        if ((err = fsync_common(task, fd)) < 0)
                return err;

        /* Then release the descriptor itself */
        return do_close(task, fd);
}
469
 
470
/* fsync syscall handler: flush all pending IO on @fd and sync stats */
int sys_fsync(struct tcb *task, int fd)
{
        return fsync_common(task, fd);
}
475
 
476
/* FIXME: Add error handling to this */
477
/* Extends a file's size by adding it new pages */
478
int new_file_pages(struct vm_file *f, unsigned long start, unsigned long end)
479
{
480
        unsigned long npages = end - start;
481
        struct page *page;
482
        void *paddr;
483
 
484
        /* Allocate the memory for new pages */
485
        if (!(paddr = alloc_page(npages)))
486
                return -ENOMEM;
487
 
488
        /* Process each page */
489
        for (unsigned long i = 0; i < npages; i++) {
490
                page = phys_to_page(paddr + PAGE_SIZE * i);
491
                page_init(page);
492
                page->refcnt++;
493
                page->owner = &f->vm_obj;
494
                page->offset = start + i;
495
                page->virtual = 0;
496
 
497
                /* Add the page to file's vm object */
498
                BUG_ON(!list_empty(&page->list));
499
                insert_page_olist(page, &f->vm_obj);
500
        }
501
 
502
        /* Update vm object */
503
        f->vm_obj.npages += npages;
504
 
505
        return 0;
506
}
507
 
508
#define page_offset(x)  ((unsigned long)(x) & PAGE_MASK)
509
 
510
 
511
/*
 * Reads a page range from an ordered list of pages into a buffer,
 * from those pages, or from the buffer, into those pages, depending on
 * the read flag (read != 0: cache -> task buffer; read == 0: task
 * buffer -> cache).
 *
 * @vmfile: file whose page cache is walked
 * @task:   task owning the user buffer (prefaulted page by page)
 * @buf:    user buffer address in the task's address space
 * @pfn_start/@pfn_end: file page frame range [pfn_start, pfn_end)
 * @cursor_offset: byte offset in the file where the copy starts
 * @count:  number of bytes to copy
 *
 * Returns @count (BUGs if fewer bytes could be copied).
 *
 * NOTE:
 * This assumes the page range is consecutively available in the cache
 * and count bytes are available. To ensure this,
 * read/write/new_file_pages must have been called first and count
 * must have been checked. Since it has these checking assumptions,
 * count must be satisfied.
 */
int copy_cache_pages(struct vm_file *vmfile, struct tcb *task, void *buf,
                     unsigned long pfn_start, unsigned long pfn_end,
                     unsigned long cursor_offset, int count, int read)
{
        struct page *file_page;
        unsigned long task_offset; /* Current copy offset on the task buffer */
        unsigned long file_offset; /* Current copy offset on the file */
        int copysize, left;
        int empty;

        task_offset = (unsigned long)buf;
        file_offset = cursor_offset;
        left = count;

        /* Find the head of consecutive pages */
        list_foreach_struct(file_page, &vmfile->vm_obj.page_cache, list) {
                /* Skip cached pages before the requested range */
                if (file_page->offset < pfn_start)
                        continue;
                else if (file_page->offset == pfn_end || left == 0)
                        break;

                /* Bytes remaining in the current cache page */
                empty = PAGE_SIZE - page_offset(file_offset);

                /* Copy until a single page cache page is filled */
                while (empty && left) {
                        /*
                         * Clamp the chunk so it crosses neither a
                         * file-page nor a task-page boundary.
                         */
                        copysize = min(PAGE_SIZE - page_offset(file_offset), left);
                        copysize = min(copysize, PAGE_SIZE - page_offset(task_offset));

                        /*
                         * Prefault the task page (writable for a read
                         * into the task, readable for a write out of
                         * it), then copy the chunk between pages.
                         */
                        if (read)
                                page_copy(task_prefault_smart(task, task_offset,
                                                              VM_READ | VM_WRITE),
                                          file_page,
                                          page_offset(task_offset),
                                          page_offset(file_offset),
                                          copysize);
                        else
                                page_copy(file_page,
                                          task_prefault_smart(task, task_offset,
                                                              VM_READ),
                                          page_offset(file_offset),
                                          page_offset(task_offset),
                                          copysize);

                        /* Advance both cursors by the chunk just copied */
                        empty -= copysize;
                        left -= copysize;
                        task_offset += copysize;
                        file_offset += copysize;
                }
        }
        /* The preconditions guarantee everything was copied */
        BUG_ON(left != 0);

        return count - left;
}
576
 
577
/*
 * read syscall handler: copies up to @count bytes from the file open
 * on @fd into the user buffer @buf, advancing the fd's cursor.
 *
 * Returns the number of bytes read (0 at or past end of file),
 * -EBADF for a bad fd, -EINVAL for a negative count, -EFAULT when
 * the user buffer range does not validate.
 */
int sys_read(struct tcb *task, int fd, void *buf, int count)
{
        unsigned long pfn_start, pfn_end;
        unsigned long cursor;
        struct vm_file *vmfile;
        int ret = 0;

        /*
         * Check that fd is valid.
         * NOTE(review): `fd > TASK_FILES_MAX` permits indexing slot
         * TASK_FILES_MAX — looks off by one; confirm the table size.
         */
        if (fd < 0 || fd > TASK_FILES_MAX ||
            !task->files->fd[fd].vmfile)
                return -EBADF;


        /* Check count validity */
        if (count < 0)
                return -EINVAL;
        else if (!count)
                return 0;

        /* Check user buffer validity. */
        if ((ret = pager_validate_user_range(task, buf,
                                       (unsigned long)count,
                                       VM_READ)) < 0)
                return -EFAULT;

        vmfile = task->files->fd[fd].vmfile;
        cursor = task->files->fd[fd].cursor;

        /* If cursor is beyond file end, simply return 0 */
        if (cursor >= vmfile->length)
                return 0;

        /* Start and end pages expected to be read by user */
        pfn_start = __pfn(cursor);
        pfn_end = __pfn(page_align_up(cursor + count));

        /* But we can read up to maximum file size */
        pfn_end = __pfn(page_align_up(vmfile->length)) < pfn_end ?
                  __pfn(page_align_up(vmfile->length)) : pfn_end;

        /* If trying to read more than end of file, reduce it to max possible */
        if (cursor + count > vmfile->length)
                count = vmfile->length - cursor;

        /* Read the page range into the cache from file */
        if ((ret = read_file_pages(vmfile, pfn_start, pfn_end)) < 0)
                return ret;

        /* Read it into the user buffer from the cache */
        if ((count = copy_cache_pages(vmfile, task, buf, pfn_start, pfn_end,
                                      cursor, count, 1)) < 0)
                return count;

        /* Update cursor on success */
        task->files->fd[fd].cursor += count;

        return count;
}
635
 
636
/* FIXME:
 *
 * Error:
 * We find the page buffer is in, and then copy from the *start* of the page
 * rather than buffer's offset in that page. - I think this is fixed.
 */
/*
 * write syscall handler: copies @count bytes from the user buffer
 * @buf into the file open on @fd at its current cursor, extending the
 * file length when writing past its end, and advances the cursor.
 *
 * Returns the number of bytes written, -EBADF for a bad fd, -EINVAL
 * for a negative count or an invalid user buffer range.
 * NOTE(review): sys_read returns -EFAULT for the same buffer
 * validation failure — the -EINVAL here looks inconsistent; confirm.
 */
int sys_write(struct tcb *task, int fd, void *buf, int count)
{
        unsigned long pfn_wstart, pfn_wend;     /* Write start/end */
        unsigned long pfn_fstart, pfn_fend;     /* File start/end */
        unsigned long pfn_nstart, pfn_nend;     /* New pages start/end */
        unsigned long cursor;
        struct vm_file *vmfile;
        int ret = 0;

        /*
         * Check that fd is valid.
         * NOTE(review): `fd > TASK_FILES_MAX` permits indexing slot
         * TASK_FILES_MAX — looks off by one; confirm the table size.
         */
        if (fd < 0 || fd > TASK_FILES_MAX ||
            !task->files->fd[fd].vmfile)
                return -EBADF;

        /* Check count validity */
        if (count < 0)
                return -EINVAL;
        else if (!count)
                return 0;

        /* Check user buffer validity. */
        if ((ret = pager_validate_user_range(task, buf,
                                             (unsigned long)count,
                                             VM_WRITE | VM_READ)) < 0)
                return -EINVAL;

        vmfile = task->files->fd[fd].vmfile;
        cursor = task->files->fd[fd].cursor;

        /* See what pages user wants to write */
        pfn_wstart = __pfn(cursor);
        pfn_wend = __pfn(page_align_up(cursor + count));

        /* Get file start and end pages */
        pfn_fstart = 0;
        pfn_fend = __pfn(page_align_up(vmfile->length));

        /*
         * Find the intersection to determine which pages are
         * already part of the file, and which ones are new.
         */
        if (pfn_wstart < pfn_fend) {
                pfn_fstart = pfn_wstart;

                /*
                 * Shorten the end if end page is
                 * less than file size
                 */
                if (pfn_wend < pfn_fend) {
                        pfn_fend = pfn_wend;

                        /* This also means no new pages in file */
                        pfn_nstart = 0;
                        pfn_nend = 0;
                } else {

                        /* The new pages start from file end,
                         * and end by write end. */
                        pfn_nstart = pfn_fend;
                        pfn_nend = pfn_wend;
                }

        } else {
                /* No intersection, its all new pages */
                pfn_fstart = 0;
                pfn_fend = 0;
                pfn_nstart = pfn_wstart;
                pfn_nend = pfn_wend;
        }

        /*
         * Read in the portion that's already part of the file.
         */
        if ((ret = read_file_pages(vmfile, pfn_fstart, pfn_fend)) < 0)
                return ret;

        /* Create new pages for the part that's new in the file */
        if ((ret = new_file_pages(vmfile, pfn_nstart, pfn_nend)) < 0)
                return ret;

        /*
         * At this point be it new or existing file pages, all pages
         * to be written are expected to be in the page cache. Write.
         */
        //byte_offset = PAGE_MASK & cursor;
        if ((ret = copy_cache_pages(vmfile, task, buf, pfn_wstart,
                                     pfn_wend, cursor, count, 0)) < 0)
                return ret;

        /*
         * Update the file size, and cursor. vfs will be notified
         * of this change when the file is flushed (e.g. via fflush()
         * or close())
         */
        if (task->files->fd[fd].cursor + count > vmfile->length)
                vmfile->length = task->files->fd[fd].cursor + count;

        task->files->fd[fd].cursor += count;

        return count;
}
746
 
747
/* FIXME: Check for invalid cursor values. Check for total, sometimes negative. */
748
int sys_lseek(struct tcb *task, int fd, off_t offset, int whence)
749
{
750
        int retval = 0;
751
        unsigned long long total, cursor;
752
 
753
        /* Check that fd is valid */
754
        if (fd < 0 || fd > TASK_FILES_MAX ||
755
            !task->files->fd[fd].vmfile)
756
                return -EBADF;
757
 
758
        /* Offset validity */
759
        if (offset < 0)
760
                return -EINVAL;
761
 
762
        switch (whence) {
763
        case SEEK_SET:
764
                retval = task->files->fd[fd].cursor = offset;
765
                break;
766
        case SEEK_CUR:
767
                cursor = (unsigned long long)task->files->fd[fd].cursor;
768
                if (cursor + offset > 0xFFFFFFFF)
769
                        retval = -EINVAL;
770
                else
771
                        retval = task->files->fd[fd].cursor += offset;
772
                break;
773
        case SEEK_END:
774
                cursor = (unsigned long long)task->files->fd[fd].cursor;
775
                total = (unsigned long long)task->files->fd[fd].vmfile->length;
776
                if (cursor + total > 0xFFFFFFFF)
777
                        retval = -EINVAL;
778
                else {
779
                        retval = task->files->fd[fd].cursor =
780
                                task->files->fd[fd].vmfile->length + offset;
781
                }
782
        default:
783
                retval = -EINVAL;
784
                break;
785
        }
786
 
787
        return retval;
788
}
789
 
790
/*
791
 * FIXME: Here's how this should have been:
792
 * v->ops.readdir() -> Reads fs-specific directory contents. i.e. reads
793
 * the directory buffer, doesn't care however contained vnode details are
794
 * stored.
795
 *
796
 * After reading, it converts the fs-spceific contents into generic vfs
797
 * dentries and populates the dentries of those vnodes.
798
 *
799
 * If vfs_readdir() is issued, those generic dentries are converted into
800
 * the posix-defined directory record structure. During this on-the-fly
801
 * generation, pseudo-entries such as . and .. are also added.
802
 *
803
 * If this layering is not done, i.e. the low-level dentry buffer already
804
 * keeps this record structure and we try to return that, then we wont
805
 * have a chance to add the pseudo-entries . and .. These record entries
806
 * are essentially created from parent vnode and current vnode but using
807
 * the names . and ..
808
 */
809
 
810
int fill_dirent(void *buf, unsigned long vnum, int offset, char *name)
811
{
812
        struct dirent *d = buf;
813
 
814
        d->inum = (unsigned int)vnum;
815
        d->offset = offset;
816
        d->rlength = sizeof(struct dirent);
817
        strncpy((char *)d->name, name, DIRENT_NAME_MAX);
818
 
819
        return d->rlength;
820
}
821
 
822
 
823
/*
824
 * Reads @count bytes of posix struct dirents into @buf. This implements
825
 * the raw dirent read syscall upon which readdir() etc. posix calls
826
 * can be built in userspace.
827
 *
828
 * FIXME: Ensure buf is in shared utcb, and count does not exceed it.
829
 */
830
int sys_readdir(struct tcb *t, int fd, int count, char *dirbuf)
831
{
832
        int dirent_size = sizeof(struct dirent);
833
        int total = 0, nbytes = 0;
834
        struct vnode *v;
835
        struct dentry *d;
836
        char *buf = dirbuf;
837
 
838
        // printf("%s/%s\n", __TASKNAME__, __FUNCTION__);
839
 
840
        /*
841
         * FIXME:
842
         * Add dirbuf overflow checking
843
         */
844
 
845
        /* Check address is in task's utcb */
846
 
847
        if (fd < 0 || fd > TASK_FILES_MAX ||
848
            !t->files->fd[fd].vmfile->vnode)
849
                return -EBADF;
850
 
851
        v = t->files->fd[fd].vmfile->vnode;
852
 
853
        d = link_to_struct(v->dentries.next, struct dentry, vref);
854
 
855
        /* Ensure vnode is a directory */
856
        if (!vfs_isdir(v))
857
                return -ENOTDIR;
858
 
859
        /* Write pseudo-entries . and .. to user buffer */
860
        if (count < dirent_size)
861
                return 0;
862
 
863
        fill_dirent(buf, v->vnum, nbytes, VFS_STR_CURDIR);
864
        nbytes += dirent_size;
865
        buf += dirent_size;
866
        count -= dirent_size;
867
 
868
        if (count < dirent_size)
869
                return 0;
870
 
871
        fill_dirent(buf, d->parent->vnode->vnum, nbytes, VFS_STR_PARDIR);
872
        nbytes += dirent_size;
873
        buf += dirent_size;
874
        count -= dirent_size;
875
 
876
        /* Copy fs-specific dir to buf in struct dirent format */
877
        if ((total = v->ops.filldir(buf, v, count)) < 0)
878
                return total;
879
 
880
        return nbytes + total;
881
}
882
 
883
/* FIXME:
 * - Is it already open?
 * - Check flags and mode.
 */
/*
 * open syscall handler: resolves (or with O_CREAT, creates) the vnode
 * for @pathname, allocates a new fd in @task, and binds the fd to the
 * vm_file wrapping that vnode (reusing an existing vm_file when one
 * already wraps it, otherwise creating and registering a new one).
 *
 * Returns the new fd, or a negative error code.
 * NOTE(review): if vfs_file_create() fails, the fd allocated from the
 * id pool above is never released — looks like an fd leak; confirm.
 */
int sys_open(struct tcb *task, const char *pathname,
             int flags, unsigned int mode)
{
        struct pathdata *pdata;
        struct vnode *v;
        struct vm_file *vmfile;
        int retval;
        int fd;


        /* Parse path data */
        if (IS_ERR(pdata = pathdata_parse(pathname,
                                          alloca(strlen(pathname) + 1),
                                          task)))
                return (int)pdata;

        /* Creating new file */
        if (flags & O_CREAT) {
                /* Make sure mode identifies a file */
                mode |= S_IFREG;

                /* Create new vnode */
                if (IS_ERR(v = vfs_vnode_create(task, pdata, mode))) {
                        retval = (int)v;
                        goto out;
                }
        } else {
                /* Not creating. Get the existing vnode */
                if (IS_ERR(v = vfs_vnode_lookup_bypath(pdata))) {
                        retval = (int)v;
                        goto out;
                }
        }

        /* Get a new fd */
        BUG_ON((fd = id_new(task->files->fdpool)) < 0);
        retval = fd;

        /* Check if that vm_file is already in the list */
        list_foreach_struct(vmfile, &global_vm_files.list, list) {

                /* Compare vnode pointer */
                if (vmfile->vnode == v) {
                        /* Add a reference to it from the task */
                        task->files->fd[fd].vmfile = vmfile;

                        vmfile->openers++;
                        goto out;
                }
        }

        /* Create a new vm_file for this vnode */
        if (IS_ERR(vmfile = vfs_file_create())) {
                retval = (int)vmfile;
                goto out;
        }

        /* Assign file information */
        vmfile->vnode = v;
        vmfile->length = vmfile->vnode->size;

        /* Add a reference to it from the task */
        vmfile->vm_obj.pager = &file_pager;
        task->files->fd[fd].vmfile = vmfile;
        vmfile->openers++;

        /* Add to file list */
        global_add_vm_file(vmfile);

out:
        pathdata_destroy(pdata);
        return retval;
}
960
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.