VirtualBox

Ticket #3805: regops.c

File regops.c, 17.8 KB (added by Frank Mehnert, 15 years ago)

Replacement for /usr/src/vboxvfs-3.0.10/regops.c that should fix this issue

/** @file
 *
 * vboxvfs -- VirtualBox Guest Additions for Linux:
 * Regular file inode and file operations
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*
 * Limitations: only COW memory mapping is supported
 */

#include "vfsmod.h"

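/* Size of the kernel bounce buffer used below for read/write requests to the host. */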
#define CHUNK_SIZE 4096

/* fops */
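/**
 * Read up to *nread bytes from the host file at offset pos into buf via
 * vboxCallRead; on success *nread holds the number of bytes actually read,
 * on failure the VBox status code is logged and -EPROTO is returned.
 */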
static int
sf_reg_read_aux (const char *caller, struct sf_glob_info *sf_g,
                 struct sf_reg_info *sf_r, void *buf, uint32_t *nread,
                 uint64_t pos)
{
    int rc = vboxCallRead (&client_handle, &sf_g->map, sf_r->handle,
                           pos, nread, buf, false /* already locked? */);
    if (RT_FAILURE (rc)) {
        LogFunc(("vboxCallRead failed. caller=%s, rc=%Rrc\n",
                 caller, rc));
        return -EPROTO;
    }
    return 0;
}

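/**
 * Write *nwritten bytes from buf to the host file at offset pos via
 * vboxCallWrite; on success *nwritten holds the number of bytes actually
 * written, on failure the VBox status code is logged and -EPROTO is returned.
 */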
static int
sf_reg_write_aux (const char *caller, struct sf_glob_info *sf_g,
                  struct sf_reg_info *sf_r, void *buf, uint32_t *nwritten,
                  uint64_t pos)
{
    int rc = vboxCallWrite (&client_handle, &sf_g->map, sf_r->handle,
                            pos, nwritten, buf, false /* already locked? */);
    if (RT_FAILURE (rc)) {
        LogFunc(("vboxCallWrite failed. caller=%s, rc=%Rrc\n",
                 caller, rc));
        return -EPROTO;
    }
    return 0;
}

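/**
 * Read handler for regular files: transfers the requested range from the
 * host in CHUNK_SIZE pieces through a kernel bounce buffer, copies each
 * piece to user space and advances the file offset by the total number of
 * bytes read.
 */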
static ssize_t
sf_reg_read (struct file *file, char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    size_t left = size;
    ssize_t total_bytes_read = 0;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos = *off;

    TRACE ();
    if (!S_ISREG (inode->i_mode)) {
        LogFunc(("read from non-regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    /** XXX Check read permission according to inode->i_mode! */

    if (!size) {
        return 0;
    }

    tmp = kmalloc (CHUNK_SIZE, GFP_KERNEL);
    if (!tmp) {
        LogRelFunc(("could not allocate bounce buffer memory %d bytes\n", CHUNK_SIZE));
        return -ENOMEM;
    }

    while (left) {
        uint32_t to_read, nread;

        to_read = CHUNK_SIZE;
        if (to_read > left) {
            to_read = (uint32_t) left;
        }
        nread = to_read;

        err = sf_reg_read_aux (__func__, sf_g, sf_r, tmp, &nread, pos);
        if (err)
            goto fail;

        if (copy_to_user (buf, tmp, nread)) {
            err = -EFAULT;
            goto fail;
        }

        pos += nread;
        left -= nread;
        buf += nread;
        total_bytes_read += nread;
        if (nread != to_read) {
            break;
        }
    }

    *off += total_bytes_read;
    kfree (tmp);
    return total_bytes_read;

 fail:
    kfree (tmp);
    return err;
}

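/**
 * Write handler for regular files: copies the user data into a kernel
 * bounce buffer in CHUNK_SIZE pieces and pushes each piece to the host,
 * honouring O_APPEND, advancing the file offset and growing i_size as
 * needed; sf_i->force_restat is set so stale attributes are re-read later.
 */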
static ssize_t
sf_reg_write (struct file *file, const char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    size_t left = size;
    ssize_t total_bytes_written = 0;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_inode_info *sf_i = GET_INODE_INFO (inode);
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos;

    TRACE ();
    BUG_ON (!sf_i);
    BUG_ON (!sf_g);
    BUG_ON (!sf_r);

    if (!S_ISREG (inode->i_mode)) {
        LogFunc(("write to non-regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    pos = *off;
    if (file->f_flags & O_APPEND)
    {
        pos = inode->i_size;
        *off = pos;
    }

    /** XXX Check write permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = kmalloc (CHUNK_SIZE, GFP_KERNEL);
    if (!tmp) {
        LogRelFunc(("could not allocate bounce buffer memory %d bytes\n", CHUNK_SIZE));
        return -ENOMEM;
    }

    while (left) {
        uint32_t to_write, nwritten;

        to_write = CHUNK_SIZE;
        if (to_write > left) {
            to_write = (uint32_t) left;
        }
        nwritten = to_write;

        if (copy_from_user (tmp, buf, to_write)) {
            err = -EFAULT;
            goto fail;
        }

        err = sf_reg_write_aux (__func__, sf_g, sf_r, tmp, &nwritten, pos);
        if (err)
            goto fail;

        pos += nwritten;
        left -= nwritten;
        buf += nwritten;
        total_bytes_written += nwritten;
        if (nwritten != to_write)
            break;
    }

    *off += total_bytes_written;
    if (*off > inode->i_size)
        inode->i_size = *off;

    sf_i->force_restat = 1;
    kfree (tmp);
    return total_bytes_written;

 fail:
    kfree (tmp);
    return err;
}

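/**
 * Open handler: maps the Linux open flags (O_CREAT, O_TRUNC, O_ACCMODE) to
 * SHFL_CF_* create flags, opens or creates the file on the host with
 * vboxCallCreate and stores the resulting handle in a freshly allocated
 * sf_reg_info hung off file->private_data.
 */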
static int
sf_reg_open (struct inode *inode, struct file *file)
{
    int rc, rc_linux = 0;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO (inode);
    struct sf_reg_info *sf_r;
    SHFLCREATEPARMS params;

    TRACE ();
    BUG_ON (!sf_g);
    BUG_ON (!sf_i);

    LogFunc(("open %s\n", sf_i->path->String.utf8));

    sf_r = kmalloc (sizeof (*sf_r), GFP_KERNEL);
    if (!sf_r) {
        LogRelFunc(("could not allocate reg info\n"));
        return -ENOMEM;
    }

    memset(&params, 0, sizeof(params));
    params.Handle = SHFL_HANDLE_NIL;
    /* We check the value of params.Handle afterwards to find out if
     * the call succeeded or failed, as the API does not seem to cleanly
     * distinguish error and informational messages.
     *
     * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
     * make the shared folders host service use our fMode parameter */

    if (file->f_flags & O_CREAT) {
        LogFunc(("O_CREAT set\n"));
        params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
        /* We ignore O_EXCL, as the Linux kernel seems to call create
           beforehand itself, so O_EXCL should always fail. */
        if (file->f_flags & O_TRUNC) {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
        else {
            params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
        }
    }
    else {
        params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
        if (file->f_flags & O_TRUNC) {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
    }

    if (!(params.CreateFlags & SHFL_CF_ACCESS_READWRITE)) {
        switch (file->f_flags & O_ACCMODE) {
            case O_RDONLY:
                params.CreateFlags |= SHFL_CF_ACCESS_READ;
                break;

            case O_WRONLY:
                params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
                break;

            case O_RDWR:
                params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
                break;

            default:
                BUG ();
        }
    }

    params.Info.Attr.fMode = inode->i_mode;
    LogFunc(("sf_reg_open: calling vboxCallCreate, file %s, flags=%d, %#x\n",
             sf_i->path->String.utf8, file->f_flags, params.CreateFlags));
    rc = vboxCallCreate (&client_handle, &sf_g->map, sf_i->path, &params);

    if (RT_FAILURE (rc)) {
        LogFunc(("vboxCallCreate failed flags=%d,%#x rc=%Rrc\n",
                 file->f_flags, params.CreateFlags, rc));
        kfree (sf_r);
        return -RTErrConvertToErrno(rc);
    }

    if (SHFL_HANDLE_NIL == params.Handle) {
        switch (params.Result) {
            case SHFL_PATH_NOT_FOUND:
            case SHFL_FILE_NOT_FOUND:
                rc_linux = -ENOENT;
                break;
            case SHFL_FILE_EXISTS:
                rc_linux = -EEXIST;
                break;
            default:
                break;
        }
    }

    sf_i->force_restat = 1;
    sf_r->handle = params.Handle;
    sf_i->file = file;
    file->private_data = sf_r;
    return rc_linux;
}

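/**
 * Release handler: closes the host file handle (logging but otherwise
 * ignoring failures) and frees the per-file sf_reg_info.
 */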
static int
sf_reg_release (struct inode *inode, struct file *file)
{
    int rc;
    struct sf_reg_info *sf_r;
    struct sf_glob_info *sf_g;
    struct sf_inode_info *sf_i = GET_INODE_INFO (inode);

    TRACE ();
    sf_g = GET_GLOB_INFO (inode->i_sb);
    sf_r = file->private_data;

    BUG_ON (!sf_g);
    BUG_ON (!sf_r);

    rc = vboxCallClose (&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE (rc)) {
        LogFunc(("vboxCallClose failed rc=%Rrc\n", rc));
    }

    kfree (sf_r);
    sf_i->file = NULL;
    file->private_data = NULL;
    return 0;
}

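/**
 * Page fault handler for private (COW) mappings: allocates a fresh page,
 * fills it from the host file at the faulting offset and zero-fills any
 * remainder. The prototype and return conventions differ between kernel
 * generations, hence the version checks below.
 */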
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
static int
sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
static struct page *
sf_reg_nopage (struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION (2, 6, 0) */
static struct page *
sf_reg_nopage (struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
    struct file *file = vma->vm_file;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE ();
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end) {
        SET_TYPE (VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    page = alloc_page (GFP_HIGHUSER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE (VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap (page);
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux (__func__, sf_g, sf_r, buf, &nread, off);
    if (err) {
        kunmap (page);
        put_page (page);
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE (VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON (nread > PAGE_SIZE);
    if (!nread) {
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
        clear_user_page (page_address (page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
        clear_user_page (page_address (page), vaddr, page);
#else
        clear_user_page (page_address (page), vaddr);
#endif
    }
    else {
        memset (buf + nread, 0, PAGE_SIZE - nread);
    }

    flush_dcache_page (page);
    kunmap (page);
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE (VM_FAULT_MAJOR);
    return page;
#endif
}

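/* VM operations for mmap'ed regular files; only a fault/nopage handler is provided. */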
static struct vm_operations_struct sf_vma_ops = {
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    .fault = sf_reg_fault
#else
    .nopage = sf_reg_nopage
#endif
};

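/**
 * mmap handler: only private (copy-on-write) mappings are supported;
 * shared mappings are rejected with -EINVAL.
 */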
static int
sf_reg_mmap (struct file *file, struct vm_area_struct *vma)
{
    TRACE ();
    if (vma->vm_flags & VM_SHARED) {
        LogFunc(("shared mapping not available\n"));
        return -EINVAL;
    }

    vma->vm_ops = &sf_vma_ops;
    return 0;
}

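/** File operations for regular files on a shared folder. */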
struct file_operations sf_reg_fops = {
    .read = sf_reg_read,
    .open = sf_reg_open,
    .write = sf_reg_write,
    .release = sf_reg_release,
    .mmap = sf_reg_mmap,
#if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
# if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 23)
    .splice_read = generic_file_splice_read,
# else
    .sendfile = generic_file_sendfile,
# endif
    .aio_read = generic_file_aio_read,
    .aio_write = generic_file_aio_write,
    .fsync = simple_sync_file,
    .llseek = generic_file_llseek,
#endif
};


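/** Inode operations for regular files. */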
struct inode_operations sf_reg_iops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION (2, 6, 0)
    .revalidate = sf_inode_revalidate
#else
    .getattr = sf_getattr,
    .setattr = sf_setattr
#endif
};


#if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
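/**
 * Read one page cache page from the host file; a short read is padded with
 * zeroes and the page is marked up to date before being unlocked.
 */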
static int
sf_readpage(struct file *file, struct page *page)
{
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    uint32_t nread = PAGE_SIZE;
    char *buf;
    loff_t off = ((loff_t)page->index) << PAGE_SHIFT;
    int ret;

    TRACE ();

    buf = kmap(page);
    ret = sf_reg_read_aux (__func__, sf_g, sf_r, buf, &nread, off);
    if (ret) {
        kunmap (page);
        if (PageLocked(page))
            unlock_page(page);
        return ret;
    }
    BUG_ON (nread > PAGE_SIZE);
    memset(&buf[nread], 0, PAGE_SIZE - nread);
    flush_dcache_page (page);
    kunmap (page);
    SetPageUptodate(page);
    unlock_page(page);
    return 0;
}

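/**
 * Write one dirty page cache page back to the host file; for a page at or
 * beyond the current end of file only the bytes up to i_size are written.
 */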
static int
sf_writepage(struct page *page, struct writeback_control *wbc)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO (inode);
    struct file *file = sf_i->file;
    struct sf_reg_info *sf_r = file->private_data;
    char *buf;
    uint32_t nwritten = PAGE_SIZE;
    int end_index = inode->i_size >> PAGE_SHIFT;
    loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
    int err;

    TRACE ();

    if (page->index >= end_index)
        nwritten = inode->i_size & (PAGE_SIZE-1);

    buf = kmap(page);

    err = sf_reg_write_aux (__func__, sf_g, sf_r, buf, &nwritten, off);
    if (err < 0) {
        ClearPageUptodate(page);
        goto out;
    }

    if (off > inode->i_size)
        inode->i_size = off;

    if (PageError(page))
        ClearPageError(page);
    err = 0;
out:
    kunmap(page);

    unlock_page(page);
    return err;
}

# if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 24)
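/* write_begin: defer to simple_write_begin() to grab and prepare the page. */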
int
sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
               unsigned len, unsigned flags, struct page **pagep, void **fsdata)
{
    TRACE ();

    return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
}

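/**
 * write_end: push the bytes just copied into the page straight through to
 * the host file, update i_size if the file grew, then unlock and release
 * the page.
 */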
int
sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
             unsigned len, unsigned copied, struct page *page, void *fsdata)
{
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    void *buf;
    unsigned from = pos & (PAGE_SIZE - 1);
    uint32_t nwritten = len;
    int err;

    TRACE ();

    buf = kmap(page);
    err = sf_reg_write_aux (__func__, sf_g, sf_r, buf+from, &nwritten, pos);
    kunmap(page);

    if (!PageUptodate(page) && err == PAGE_SIZE)
        SetPageUptodate(page);

    if (err >= 0) {
        pos += nwritten;
        if (pos > inode->i_size)
            inode->i_size = pos;
    }

    unlock_page(page);
    page_cache_release(page);

    return nwritten;
}

# endif /* KERNEL_VERSION >= 2.6.24 */

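/** Address space operations (page cache I/O) for regular files. */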
struct address_space_operations sf_reg_aops = {
    .readpage = sf_readpage,
    .writepage = sf_writepage,
# if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 24)
    .write_begin = sf_write_begin,
    .write_end = sf_write_end,
# else
    .prepare_write = simple_prepare_write,
    .commit_write = simple_commit_write,
# endif
};
#endif
