libfuse
fuse_lowlevel.c
1/*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu>
4
5 Implementation of (most of) the low-level FUSE API. The session loop
6 functions are implemented in separate files.
7
8 This program can be distributed under the terms of the GNU LGPLv2.
9 See the file COPYING.LIB
10*/
11
12#include <stdbool.h>
13#define _GNU_SOURCE
14
15#include "fuse_config.h"
16#include "fuse_i.h"
17#include "fuse_kernel.h"
18#include "fuse_opt.h"
19#include "fuse_misc.h"
20#include "mount_util.h"
21#include "util.h"
22
23#include <stdio.h>
24#include <stdlib.h>
25#include <stddef.h>
26#include <stdalign.h>
27#include <string.h>
28#include <unistd.h>
29#include <limits.h>
30#include <errno.h>
31#include <assert.h>
32#include <sys/file.h>
33#include <sys/ioctl.h>
34
35#ifndef F_LINUX_SPECIFIC_BASE
36#define F_LINUX_SPECIFIC_BASE 1024
37#endif
38#ifndef F_SETPIPE_SZ
39#define F_SETPIPE_SZ (F_LINUX_SPECIFIC_BASE + 7)
40#endif
41
42
43#define PARAM(inarg) (((char *)(inarg)) + sizeof(*(inarg)))
44#define OFFSET_MAX 0x7fffffffffffffffLL
45
46#define container_of(ptr, type, member) ({ \
47 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
48 (type *)( (char *)__mptr - offsetof(type,member) );})
49
50struct fuse_pollhandle {
51 uint64_t kh;
52 struct fuse_session *se;
53};
54
55static size_t pagesize;
56
57static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
58{
59 pagesize = getpagesize();
60}
61
62static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
63{
64 attr->ino = stbuf->st_ino;
65 attr->mode = stbuf->st_mode;
66 attr->nlink = stbuf->st_nlink;
67 attr->uid = stbuf->st_uid;
68 attr->gid = stbuf->st_gid;
69 attr->rdev = stbuf->st_rdev;
70 attr->size = stbuf->st_size;
71 attr->blksize = stbuf->st_blksize;
72 attr->blocks = stbuf->st_blocks;
73 attr->atime = stbuf->st_atime;
74 attr->mtime = stbuf->st_mtime;
75 attr->ctime = stbuf->st_ctime;
76 attr->atimensec = ST_ATIM_NSEC(stbuf);
77 attr->mtimensec = ST_MTIM_NSEC(stbuf);
78 attr->ctimensec = ST_CTIM_NSEC(stbuf);
79}
80
81static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
82{
83 stbuf->st_mode = attr->mode;
84 stbuf->st_uid = attr->uid;
85 stbuf->st_gid = attr->gid;
86 stbuf->st_size = attr->size;
87 stbuf->st_atime = attr->atime;
88 stbuf->st_mtime = attr->mtime;
89 stbuf->st_ctime = attr->ctime;
90 ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
91 ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
92 ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
93}
94
95static size_t iov_length(const struct iovec *iov, size_t count)
96{
97 size_t seg;
98 size_t ret = 0;
99
100 for (seg = 0; seg < count; seg++)
101 ret += iov[seg].iov_len;
102 return ret;
103}
104
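/* Requests are linked into circular doubly-linked lists; the session keeps
   one list of outstanding (interruptible) requests and one of pending
   INTERRUPT requests that arrived before their target request. */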
105static void list_init_req(struct fuse_req *req)
106{
107 req->next = req;
108 req->prev = req;
109}
110
111static void list_del_req(struct fuse_req *req)
112{
113 struct fuse_req *prev = req->prev;
114 struct fuse_req *next = req->next;
115 prev->next = next;
116 next->prev = prev;
117}
118
119static void list_add_req(struct fuse_req *req, struct fuse_req *next)
120{
121 struct fuse_req *prev = next->prev;
122 req->next = next;
123 req->prev = prev;
124 prev->next = req;
125 next->prev = req;
126}
127
128static void destroy_req(fuse_req_t req)
129{
130 assert(req->ch == NULL);
131 pthread_mutex_destroy(&req->lock);
132 free(req);
133}
134
135void fuse_free_req(fuse_req_t req)
136{
137 int ctr;
138 struct fuse_session *se = req->se;
139
140 if (se->conn.no_interrupt) {
141 ctr = --req->ref_cnt;
142 fuse_chan_put(req->ch);
143 req->ch = NULL;
144 } else {
145 pthread_mutex_lock(&se->lock);
146 req->u.ni.func = NULL;
147 req->u.ni.data = NULL;
148 list_del_req(req);
149 ctr = --req->ref_cnt;
150 fuse_chan_put(req->ch);
151 req->ch = NULL;
152 pthread_mutex_unlock(&se->lock);
153 }
154 if (!ctr)
155 destroy_req(req);
156}
157
158static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
159{
160 struct fuse_req *req;
161
162 req = (struct fuse_req *) calloc(1, sizeof(struct fuse_req));
163 if (req == NULL) {
164 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
165 } else {
166 req->se = se;
167 req->ref_cnt = 1;
168 list_init_req(req);
169 pthread_mutex_init(&req->lock, NULL);
170 }
171
172 return req;
173}
174
175/* Send data. If *ch* is NULL, send via session master fd */
176static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
177 struct iovec *iov, int count)
178{
179 struct fuse_out_header *out = iov[0].iov_base;
180
181 assert(se != NULL);
182 out->len = iov_length(iov, count);
183 if (se->debug) {
184 if (out->unique == 0) {
185 fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n",
186 out->error, out->len);
187 } else if (out->error) {
188 fuse_log(FUSE_LOG_DEBUG,
189 " unique: %llu, error: %i (%s), outsize: %i\n",
190 (unsigned long long) out->unique, out->error,
191 strerror(-out->error), out->len);
192 } else {
193 fuse_log(FUSE_LOG_DEBUG,
194 " unique: %llu, success, outsize: %i\n",
195 (unsigned long long) out->unique, out->len);
196 }
197 }
198
199 ssize_t res;
200 if (se->io != NULL)
201 /* se->io->writev is never NULL if se->io is not NULL as
202 specified by fuse_session_custom_io()*/
203 res = se->io->writev(ch ? ch->fd : se->fd, iov, count,
204 se->userdata);
205 else
206 res = writev(ch ? ch->fd : se->fd, iov, count);
207
208 int err = errno;
209
210 if (res == -1) {
211 /* ENOENT means the operation was interrupted */
212 if (!fuse_session_exited(se) && err != ENOENT)
213 perror("fuse: writing device");
214 return -err;
215 }
216
217 return 0;
218}
219
220
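/* Fill in the fuse_out_header (iov[0]) and send the reply without freeing
   the request. The kernel expects 0 or a negative errno value; anything
   else is logged and replaced with -ERANGE. */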
221int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
222 int count)
223{
224 struct fuse_out_header out;
225
226#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 32
227 const char *str = strerrordesc_np(error * -1);
228 if ((str == NULL && error != 0) || error > 0) {
229#else
230 if (error <= -1000 || error > 0) {
231#endif
232 fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
233 error = -ERANGE;
234 }
235
236 out.unique = req->unique;
237 out.error = error;
238
239 iov[0].iov_base = &out;
240 iov[0].iov_len = sizeof(struct fuse_out_header);
241
242 return fuse_send_msg(req->se, req->ch, iov, count);
243}
244
245static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
246 int count)
247{
248 int res;
249
250 res = fuse_send_reply_iov_nofree(req, error, iov, count);
251 fuse_free_req(req);
252 return res;
253}
254
255static int send_reply(fuse_req_t req, int error, const void *arg,
256 size_t argsize)
257{
258 struct iovec iov[2];
259 int count = 1;
260 if (argsize) {
261 iov[1].iov_base = (void *) arg;
262 iov[1].iov_len = argsize;
263 count++;
264 }
265 return send_reply_iov(req, error, iov, count);
266}
267
268int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
269{
270 int res;
271 struct iovec *padded_iov;
272
273 padded_iov = malloc((count + 1) * sizeof(struct iovec));
274 if (padded_iov == NULL)
275 return fuse_reply_err(req, ENOMEM);
276
277 memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
278 count++;
279
280 res = send_reply_iov(req, 0, padded_iov, count);
281 free(padded_iov);
282
283 return res;
284}
285
286
287/* `buf` is allowed to be empty so that the proper size may be
288 allocated by the caller */
289size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
290 const char *name, const struct stat *stbuf, off_t off)
291{
292 (void)req;
293 size_t namelen;
294 size_t entlen;
295 size_t entlen_padded;
296 struct fuse_dirent *dirent;
297
298 namelen = strlen(name);
299 entlen = FUSE_NAME_OFFSET + namelen;
300 entlen_padded = FUSE_DIRENT_ALIGN(entlen);
301
302 if ((buf == NULL) || (entlen_padded > bufsize))
303 return entlen_padded;
304
305 dirent = (struct fuse_dirent*) buf;
306 dirent->ino = stbuf->st_ino;
307 dirent->off = off;
308 dirent->namelen = namelen;
309 dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
310 memcpy(dirent->name, name, namelen);
311 memset(dirent->name + namelen, 0, entlen_padded - entlen);
312
313 return entlen_padded;
314}
315
316static void convert_statfs(const struct statvfs *stbuf,
317 struct fuse_kstatfs *kstatfs)
318{
319 kstatfs->bsize = stbuf->f_bsize;
320 kstatfs->frsize = stbuf->f_frsize;
321 kstatfs->blocks = stbuf->f_blocks;
322 kstatfs->bfree = stbuf->f_bfree;
323 kstatfs->bavail = stbuf->f_bavail;
324 kstatfs->files = stbuf->f_files;
325 kstatfs->ffree = stbuf->f_ffree;
326 kstatfs->namelen = stbuf->f_namemax;
327}
328
329static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
330{
331 return send_reply(req, 0, arg, argsize);
332}
333
334int fuse_reply_err(fuse_req_t req, int err)
335{
336 return send_reply(req, -err, NULL, 0);
337}
338
339void fuse_reply_none(fuse_req_t req)
340{
341 fuse_free_req(req);
342}
343
344static unsigned long calc_timeout_sec(double t)
345{
346 if (t > (double) ULONG_MAX)
347 return ULONG_MAX;
348 else if (t < 0.0)
349 return 0;
350 else
351 return (unsigned long) t;
352}
353
354static unsigned int calc_timeout_nsec(double t)
355{
356 double f = t - (double) calc_timeout_sec(t);
357 if (f < 0.0)
358 return 0;
359 else if (f >= 0.999999999)
360 return 999999999;
361 else
362 return (unsigned int) (f * 1.0e9);
363}
364
365static void fill_entry(struct fuse_entry_out *arg,
366 const struct fuse_entry_param *e)
367{
368 arg->nodeid = e->ino;
369 arg->generation = e->generation;
370 arg->entry_valid = calc_timeout_sec(e->entry_timeout);
371 arg->entry_valid_nsec = calc_timeout_nsec(e->entry_timeout);
372 arg->attr_valid = calc_timeout_sec(e->attr_timeout);
373 arg->attr_valid_nsec = calc_timeout_nsec(e->attr_timeout);
374 convert_stat(&e->attr, &arg->attr);
375}
376
377/* `buf` is allowed to be empty so that the proper size may be
378 allocated by the caller */
379size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
380 const char *name,
381 const struct fuse_entry_param *e, off_t off)
382{
383 (void)req;
384 size_t namelen;
385 size_t entlen;
386 size_t entlen_padded;
387
388 namelen = strlen(name);
389 entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
390 entlen_padded = FUSE_DIRENT_ALIGN(entlen);
391 if ((buf == NULL) || (entlen_padded > bufsize))
392 return entlen_padded;
393
394 struct fuse_direntplus *dp = (struct fuse_direntplus *) buf;
395 memset(&dp->entry_out, 0, sizeof(dp->entry_out));
396 fill_entry(&dp->entry_out, e);
397
398 struct fuse_dirent *dirent = &dp->dirent;
399 dirent->ino = e->attr.st_ino;
400 dirent->off = off;
401 dirent->namelen = namelen;
402 dirent->type = (e->attr.st_mode & S_IFMT) >> 12;
403 memcpy(dirent->name, name, namelen);
404 memset(dirent->name + namelen, 0, entlen_padded - entlen);
405
406 return entlen_padded;
407}
408
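/* Translate the fuse_file_info flags set by the filesystem in open/create
   into FOPEN_* bits for the kernel; a positive backing_id (obtained via
   fuse_passthrough_open()) additionally requests passthrough mode. */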
409static void fill_open(struct fuse_open_out *arg,
410 const struct fuse_file_info *f)
411{
412 arg->fh = f->fh;
413 if (f->backing_id > 0) {
414 arg->backing_id = f->backing_id;
415 arg->open_flags |= FOPEN_PASSTHROUGH;
416 }
417 if (f->direct_io)
418 arg->open_flags |= FOPEN_DIRECT_IO;
419 if (f->keep_cache)
420 arg->open_flags |= FOPEN_KEEP_CACHE;
421 if (f->cache_readdir)
422 arg->open_flags |= FOPEN_CACHE_DIR;
423 if (f->nonseekable)
424 arg->open_flags |= FOPEN_NONSEEKABLE;
425 if (f->noflush)
426 arg->open_flags |= FOPEN_NOFLUSH;
427 if (f->parallel_direct_writes)
428 arg->open_flags |= FOPEN_PARALLEL_DIRECT_WRITES;
429}
430
431int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
432{
433 struct fuse_entry_out arg;
434 size_t size = req->se->conn.proto_minor < 9 ?
435 FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(arg);
436
437 /* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant
438 negative entry */
439 if (!e->ino && req->se->conn.proto_minor < 4)
440 return fuse_reply_err(req, ENOENT);
441
442 memset(&arg, 0, sizeof(arg));
443 fill_entry(&arg, e);
444 return send_reply_ok(req, &arg, size);
445}
446
447int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
448 const struct fuse_file_info *f)
449{
450 alignas(uint64_t) char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
451 size_t entrysize = req->se->conn.proto_minor < 9 ?
452 FUSE_COMPAT_ENTRY_OUT_SIZE : sizeof(struct fuse_entry_out);
453 struct fuse_entry_out *earg = (struct fuse_entry_out *) buf;
454 struct fuse_open_out *oarg = (struct fuse_open_out *) (buf + entrysize);
455
456 memset(buf, 0, sizeof(buf));
457 fill_entry(earg, e);
458 fill_open(oarg, f);
459 return send_reply_ok(req, buf,
460 entrysize + sizeof(struct fuse_open_out));
461}
462
463int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
464 double attr_timeout)
465{
466 struct fuse_attr_out arg;
467 size_t size = req->se->conn.proto_minor < 9 ?
468 FUSE_COMPAT_ATTR_OUT_SIZE : sizeof(arg);
469
470 memset(&arg, 0, sizeof(arg));
471 arg.attr_valid = calc_timeout_sec(attr_timeout);
472 arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
473 convert_stat(attr, &arg.attr);
474
475 return send_reply_ok(req, &arg, size);
476}
477
478int fuse_reply_readlink(fuse_req_t req, const char *linkname)
479{
480 return send_reply_ok(req, linkname, strlen(linkname));
481}
482
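/* Register a backing file descriptor with the kernel for passthrough I/O.
   Returns a positive backing id (to be stored in fi->backing_id) on
   success, or 0 on failure. */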
483int fuse_passthrough_open(fuse_req_t req, int fd)
484{
485 struct fuse_backing_map map = { .fd = fd };
486 int ret;
487
488 ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_OPEN, &map);
489 if (ret <= 0) {
490 fuse_log(FUSE_LOG_ERR, "fuse: passthrough_open: %s\n", strerror(errno));
491 return 0;
492 }
493
494 return ret;
495}
496
497int fuse_passthrough_close(fuse_req_t req, int backing_id)
498{
499 int ret;
500
501 ret = ioctl(req->se->fd, FUSE_DEV_IOC_BACKING_CLOSE, &backing_id);
502 if (ret < 0)
503 fuse_log(FUSE_LOG_ERR, "fuse: passthrough_close: %s\n", strerror(errno));
504
505 return ret;
506}
507
508int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
509{
510 struct fuse_open_out arg;
511
512 memset(&arg, 0, sizeof(arg));
513 fill_open(&arg, f);
514 return send_reply_ok(req, &arg, sizeof(arg));
515}
516
517int fuse_reply_write(fuse_req_t req, size_t count)
518{
519 struct fuse_write_out arg;
520
521 memset(&arg, 0, sizeof(arg));
522 arg.size = count;
523
524 return send_reply_ok(req, &arg, sizeof(arg));
525}
526
527int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
528{
529 return send_reply_ok(req, buf, size);
530}
531
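/* Fallback for replying with a fuse_bufvec when splice cannot be used:
   a single in-memory buffer is sent as-is; anything else is first copied
   into a page-aligned bounce buffer and sent with one writev(). */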
532static int fuse_send_data_iov_fallback(struct fuse_session *se,
533 struct fuse_chan *ch,
534 struct iovec *iov, int iov_count,
535 struct fuse_bufvec *buf,
536 size_t len)
537{
538 struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
539 void *mbuf;
540 int res;
541
542 /* Optimize common case */
543 if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
544 !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
545 /* FIXME: also avoid memory copy if there are multiple buffers
546 but none of them contain an fd */
547
548 iov[iov_count].iov_base = buf->buf[0].mem;
549 iov[iov_count].iov_len = len;
550 iov_count++;
551 return fuse_send_msg(se, ch, iov, iov_count);
552 }
553
554 res = posix_memalign(&mbuf, pagesize, len);
555 if (res != 0)
556 return res;
557
558 mem_buf.buf[0].mem = mbuf;
559 res = fuse_buf_copy(&mem_buf, buf, 0);
560 if (res < 0) {
561 free(mbuf);
562 return -res;
563 }
564 len = res;
565
566 iov[iov_count].iov_base = mbuf;
567 iov[iov_count].iov_len = len;
568 iov_count++;
569 res = fuse_send_msg(se, ch, iov, iov_count);
570 free(mbuf);
571
572 return res;
573}
574
575struct fuse_ll_pipe {
576 size_t size;
577 int can_grow;
578 int pipe[2];
579};
580
581static void fuse_ll_pipe_free(struct fuse_ll_pipe *llp)
582{
583 close(llp->pipe[0]);
584 close(llp->pipe[1]);
585 free(llp);
586}
587
588#ifdef HAVE_SPLICE
589#if !defined(HAVE_PIPE2) || !defined(O_CLOEXEC)
590static int fuse_pipe(int fds[2])
591{
592 int rv = pipe(fds);
593
594 if (rv == -1)
595 return rv;
596
597 if (fcntl(fds[0], F_SETFL, O_NONBLOCK) == -1 ||
598 fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1 ||
599 fcntl(fds[0], F_SETFD, FD_CLOEXEC) == -1 ||
600 fcntl(fds[1], F_SETFD, FD_CLOEXEC) == -1) {
601 close(fds[0]);
602 close(fds[1]);
603 rv = -1;
604 }
605 return rv;
606}
607#else
608static int fuse_pipe(int fds[2])
609{
610 return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
611}
612#endif
613
614static struct fuse_ll_pipe *fuse_ll_get_pipe(struct fuse_session *se)
615{
616 struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
617 if (llp == NULL) {
618 int res;
619
620 llp = malloc(sizeof(struct fuse_ll_pipe));
621 if (llp == NULL)
622 return NULL;
623
624 res = fuse_pipe(llp->pipe);
625 if (res == -1) {
626 free(llp);
627 return NULL;
628 }
629
630 /*
631 *the default size is 16 pages on linux
632 */
633 llp->size = pagesize * 16;
634 llp->can_grow = 1;
635
636 pthread_setspecific(se->pipe_key, llp);
637 }
638
639 return llp;
640}
641#endif
642
643static void fuse_ll_clear_pipe(struct fuse_session *se)
644{
645 struct fuse_ll_pipe *llp = pthread_getspecific(se->pipe_key);
646 if (llp) {
647 pthread_setspecific(se->pipe_key, NULL);
648 fuse_ll_pipe_free(llp);
649 }
650}
651
652#if defined(HAVE_SPLICE) && defined(HAVE_VMSPLICE)
653static int read_back(int fd, char *buf, size_t len)
654{
655 int res;
656
657 res = read(fd, buf, len);
658 if (res == -1) {
659 fuse_log(FUSE_LOG_ERR, "fuse: internal error: failed to read back from pipe: %s\n", strerror(errno));
660 return -EIO;
661 }
662 if (res != len) {
663 fuse_log(FUSE_LOG_ERR, "fuse: internal error: short read back from pipe: %i from %zi\n", res, len);
664 return -EIO;
665 }
666 return 0;
667}
668
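/* Try to grow the pipe to the system-wide maximum read from
   /proc/sys/fs/pipe-max-size; returns the new size or a negative errno. */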
669static int grow_pipe_to_max(int pipefd)
670{
671 int res;
672 long max;
673 long maxfd;
674 char buf[32];
675
676 maxfd = open("/proc/sys/fs/pipe-max-size", O_RDONLY);
677 if (maxfd < 0)
678 return -errno;
679
680 res = read(maxfd, buf, sizeof(buf) - 1);
681 if (res < 0) {
682 int saved_errno;
683
684 saved_errno = errno;
685 close(maxfd);
686 return -saved_errno;
687 }
688 close(maxfd);
689 buf[res] = '\0';
690
691 res = libfuse_strtol(buf, &max);
692 if (res)
693 return res;
694 res = fcntl(pipefd, F_SETPIPE_SZ, max);
695 if (res < 0)
696 return -errno;
697 return max;
698}
699
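/* Zero-copy reply path: the reply headers are vmsplice()d into a
   per-thread pipe, the file data is spliced in behind them by
   fuse_buf_copy(), and the whole reply is then spliced to the fuse device.
   Any problem along the way falls back to the copying path in
   fuse_send_data_iov_fallback(). */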
700static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
701 struct iovec *iov, int iov_count,
702 struct fuse_bufvec *buf, unsigned int flags)
703{
704 int res;
705 size_t len = fuse_buf_size(buf);
706 struct fuse_out_header *out = iov[0].iov_base;
707 struct fuse_ll_pipe *llp;
708 int splice_flags;
709 size_t pipesize;
710 size_t total_buf_size;
711 size_t idx;
712 size_t headerlen;
713 struct fuse_bufvec pipe_buf = FUSE_BUFVEC_INIT(len);
714
715 if (se->broken_splice_nonblock)
716 goto fallback;
717
718 if (flags & FUSE_BUF_NO_SPLICE)
719 goto fallback;
720
721 total_buf_size = 0;
722 for (idx = buf->idx; idx < buf->count; idx++) {
723 total_buf_size += buf->buf[idx].size;
724 if (idx == buf->idx)
725 total_buf_size -= buf->off;
726 }
727 if (total_buf_size < 2 * pagesize)
728 goto fallback;
729
730 if (se->conn.proto_minor < 14 ||
731 !(se->conn.want_ext & FUSE_CAP_SPLICE_WRITE))
732 goto fallback;
733
734 llp = fuse_ll_get_pipe(se);
735 if (llp == NULL)
736 goto fallback;
737
738
739 headerlen = iov_length(iov, iov_count);
740
741 out->len = headerlen + len;
742
743 /*
744 * Heuristic for the required pipe size, does not work if the
745 * source contains less than page size fragments
746 */
747 pipesize = pagesize * (iov_count + buf->count + 1) + out->len;
748
749 if (llp->size < pipesize) {
750 if (llp->can_grow) {
751 res = fcntl(llp->pipe[0], F_SETPIPE_SZ, pipesize);
752 if (res == -1) {
753 res = grow_pipe_to_max(llp->pipe[0]);
754 if (res > 0)
755 llp->size = res;
756 llp->can_grow = 0;
757 goto fallback;
758 }
759 llp->size = res;
760 }
761 if (llp->size < pipesize)
762 goto fallback;
763 }
764
765
766 res = vmsplice(llp->pipe[1], iov, iov_count, SPLICE_F_NONBLOCK);
767 if (res == -1)
768 goto fallback;
769
770 if (res != headerlen) {
771 res = -EIO;
772 fuse_log(FUSE_LOG_ERR, "fuse: short vmsplice to pipe: %u/%zu\n", res,
773 headerlen);
774 goto clear_pipe;
775 }
776
777 pipe_buf.buf[0].flags = FUSE_BUF_IS_FD;
778 pipe_buf.buf[0].fd = llp->pipe[1];
779
780 res = fuse_buf_copy(&pipe_buf, buf,
781 FUSE_BUF_FORCE_SPLICE | FUSE_BUF_SPLICE_NONBLOCK);
782 if (res < 0) {
783 if (res == -EAGAIN || res == -EINVAL) {
784 /*
785 * Should only get EAGAIN on kernels with
786 * broken SPLICE_F_NONBLOCK support (<=
787 * 2.6.35) where this error or a short read is
788 * returned even if the pipe itself is not
789 * full
790 *
791 * EINVAL might mean that splice can't handle
792 * this combination of input and output.
793 */
794 if (res == -EAGAIN)
795 se->broken_splice_nonblock = 1;
796
797 pthread_setspecific(se->pipe_key, NULL);
798 fuse_ll_pipe_free(llp);
799 goto fallback;
800 }
801 res = -res;
802 goto clear_pipe;
803 }
804
805 if (res != 0 && res < len) {
806 struct fuse_bufvec mem_buf = FUSE_BUFVEC_INIT(len);
807 void *mbuf;
808 size_t now_len = res;
809 /*
810 * For regular files a short count is either
811 * 1) due to EOF, or
812 * 2) because of broken SPLICE_F_NONBLOCK (see above)
813 *
814 * For other inputs it's possible that we overflowed
815 * the pipe because of small buffer fragments.
816 */
817
818 res = posix_memalign(&mbuf, pagesize, len);
819 if (res != 0)
820 goto clear_pipe;
821
822 mem_buf.buf[0].mem = mbuf;
823 mem_buf.off = now_len;
824 res = fuse_buf_copy(&mem_buf, buf, 0);
825 if (res > 0) {
826 char *tmpbuf;
827 size_t extra_len = res;
828 /*
829 * Trickiest case: got more data. Need to get
830 * back the data from the pipe and then fall
831 * back to regular write.
832 */
833 tmpbuf = malloc(headerlen);
834 if (tmpbuf == NULL) {
835 free(mbuf);
836 res = ENOMEM;
837 goto clear_pipe;
838 }
839 res = read_back(llp->pipe[0], tmpbuf, headerlen);
840 free(tmpbuf);
841 if (res != 0) {
842 free(mbuf);
843 goto clear_pipe;
844 }
845 res = read_back(llp->pipe[0], mbuf, now_len);
846 if (res != 0) {
847 free(mbuf);
848 goto clear_pipe;
849 }
850 len = now_len + extra_len;
851 iov[iov_count].iov_base = mbuf;
852 iov[iov_count].iov_len = len;
853 iov_count++;
854 res = fuse_send_msg(se, ch, iov, iov_count);
855 free(mbuf);
856 return res;
857 }
858 free(mbuf);
859 res = now_len;
860 }
861 len = res;
862 out->len = headerlen + len;
863
864 if (se->debug) {
865 fuse_log(FUSE_LOG_DEBUG,
866 " unique: %llu, success, outsize: %i (splice)\n",
867 (unsigned long long) out->unique, out->len);
868 }
869
870 splice_flags = 0;
871 if ((flags & FUSE_BUF_SPLICE_MOVE) &&
872 (se->conn.want_ext & FUSE_CAP_SPLICE_MOVE))
873 splice_flags |= SPLICE_F_MOVE;
874
875 if (se->io != NULL && se->io->splice_send != NULL) {
876 res = se->io->splice_send(llp->pipe[0], NULL,
877 ch ? ch->fd : se->fd, NULL, out->len,
878 splice_flags, se->userdata);
879 } else {
880 res = splice(llp->pipe[0], NULL, ch ? ch->fd : se->fd, NULL,
881 out->len, splice_flags);
882 }
883 if (res == -1) {
884 res = -errno;
885 perror("fuse: splice from pipe");
886 goto clear_pipe;
887 }
888 if (res != out->len) {
889 res = -EIO;
890 fuse_log(FUSE_LOG_ERR, "fuse: short splice from pipe: %u/%u\n",
891 res, out->len);
892 goto clear_pipe;
893 }
894 return 0;
895
896clear_pipe:
897 fuse_ll_clear_pipe(se);
898 return res;
899
900fallback:
901 return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
902}
903#else
904static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
905 struct iovec *iov, int iov_count,
906 struct fuse_bufvec *buf, unsigned int flags)
907{
908 size_t len = fuse_buf_size(buf);
909 (void) flags;
910
911 return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
912}
913#endif
914
915int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv,
916 enum fuse_buf_copy_flags flags)
917{
918 struct iovec iov[2];
919 struct fuse_out_header out;
920 int res;
921
922 iov[0].iov_base = &out;
923 iov[0].iov_len = sizeof(struct fuse_out_header);
924
925 out.unique = req->unique;
926 out.error = 0;
927
928 res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv, flags);
929 if (res <= 0) {
930 fuse_free_req(req);
931 return res;
932 } else {
933 return fuse_reply_err(req, res);
934 }
935}
936
937int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
938{
939 struct fuse_statfs_out arg;
940 size_t size = req->se->conn.proto_minor < 4 ?
941 FUSE_COMPAT_STATFS_SIZE : sizeof(arg);
942
943 memset(&arg, 0, sizeof(arg));
944 convert_statfs(stbuf, &arg.st);
945
946 return send_reply_ok(req, &arg, size);
947}
948
949int fuse_reply_xattr(fuse_req_t req, size_t count)
950{
951 struct fuse_getxattr_out arg;
952
953 memset(&arg, 0, sizeof(arg));
954 arg.size = count;
955
956 return send_reply_ok(req, &arg, sizeof(arg));
957}
958
959int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
960{
961 struct fuse_lk_out arg;
962
963 memset(&arg, 0, sizeof(arg));
964 arg.lk.type = lock->l_type;
965 if (lock->l_type != F_UNLCK) {
966 arg.lk.start = lock->l_start;
967 if (lock->l_len == 0)
968 arg.lk.end = OFFSET_MAX;
969 else
970 arg.lk.end = lock->l_start + lock->l_len - 1;
971 }
972 arg.lk.pid = lock->l_pid;
973 return send_reply_ok(req, &arg, sizeof(arg));
974}
975
976int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
977{
978 struct fuse_bmap_out arg;
979
980 memset(&arg, 0, sizeof(arg));
981 arg.block = idx;
982
983 return send_reply_ok(req, &arg, sizeof(arg));
984}
985
986static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
987 size_t count)
988{
989 struct fuse_ioctl_iovec *fiov;
990 size_t i;
991
992 fiov = malloc(sizeof(fiov[0]) * count);
993 if (!fiov)
994 return NULL;
995
996 for (i = 0; i < count; i++) {
997 fiov[i].base = (uintptr_t) iov[i].iov_base;
998 fiov[i].len = iov[i].iov_len;
999 }
1000
1001 return fiov;
1002}
1003
1004int fuse_reply_ioctl_retry(fuse_req_t req,
1005 const struct iovec *in_iov, size_t in_count,
1006 const struct iovec *out_iov, size_t out_count)
1007{
1008 struct fuse_ioctl_out arg;
1009 struct fuse_ioctl_iovec *in_fiov = NULL;
1010 struct fuse_ioctl_iovec *out_fiov = NULL;
1011 struct iovec iov[4];
1012 size_t count = 1;
1013 int res;
1014
1015 memset(&arg, 0, sizeof(arg));
1016 arg.flags |= FUSE_IOCTL_RETRY;
1017 arg.in_iovs = in_count;
1018 arg.out_iovs = out_count;
1019 iov[count].iov_base = &arg;
1020 iov[count].iov_len = sizeof(arg);
1021 count++;
1022
1023 if (req->se->conn.proto_minor < 16) {
1024 if (in_count) {
1025 iov[count].iov_base = (void *)in_iov;
1026 iov[count].iov_len = sizeof(in_iov[0]) * in_count;
1027 count++;
1028 }
1029
1030 if (out_count) {
1031 iov[count].iov_base = (void *)out_iov;
1032 iov[count].iov_len = sizeof(out_iov[0]) * out_count;
1033 count++;
1034 }
1035 } else {
1036 /* Can't handle non-compat 64bit ioctls on 32bit */
1037 if (sizeof(void *) == 4 && req->ioctl_64bit) {
1038 res = fuse_reply_err(req, EINVAL);
1039 goto out;
1040 }
1041
1042 if (in_count) {
1043 in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
1044 if (!in_fiov)
1045 goto enomem;
1046
1047 iov[count].iov_base = (void *)in_fiov;
1048 iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
1049 count++;
1050 }
1051 if (out_count) {
1052 out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
1053 if (!out_fiov)
1054 goto enomem;
1055
1056 iov[count].iov_base = (void *)out_fiov;
1057 iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
1058 count++;
1059 }
1060 }
1061
1062 res = send_reply_iov(req, 0, iov, count);
1063out:
1064 free(in_fiov);
1065 free(out_fiov);
1066
1067 return res;
1068
1069enomem:
1070 res = fuse_reply_err(req, ENOMEM);
1071 goto out;
1072}
1073
1074int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
1075{
1076 struct fuse_ioctl_out arg;
1077 struct iovec iov[3];
1078 size_t count = 1;
1079
1080 memset(&arg, 0, sizeof(arg));
1081 arg.result = result;
1082 iov[count].iov_base = &arg;
1083 iov[count].iov_len = sizeof(arg);
1084 count++;
1085
1086 if (size) {
1087 iov[count].iov_base = (char *) buf;
1088 iov[count].iov_len = size;
1089 count++;
1090 }
1091
1092 return send_reply_iov(req, 0, iov, count);
1093}
1094
1095int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
1096 int count)
1097{
1098 struct iovec *padded_iov;
1099 struct fuse_ioctl_out arg;
1100 int res;
1101
1102 padded_iov = malloc((count + 2) * sizeof(struct iovec));
1103 if (padded_iov == NULL)
1104 return fuse_reply_err(req, ENOMEM);
1105
1106 memset(&arg, 0, sizeof(arg));
1107 arg.result = result;
1108 padded_iov[1].iov_base = &arg;
1109 padded_iov[1].iov_len = sizeof(arg);
1110
1111 memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
1112
1113 res = send_reply_iov(req, 0, padded_iov, count + 2);
1114 free(padded_iov);
1115
1116 return res;
1117}
1118
1119int fuse_reply_poll(fuse_req_t req, unsigned revents)
1120{
1121 struct fuse_poll_out arg;
1122
1123 memset(&arg, 0, sizeof(arg));
1124 arg.revents = revents;
1125
1126 return send_reply_ok(req, &arg, sizeof(arg));
1127}
1128
1129int fuse_reply_lseek(fuse_req_t req, off_t off)
1130{
1131 struct fuse_lseek_out arg;
1132
1133 memset(&arg, 0, sizeof(arg));
1134 arg.offset = off;
1135
1136 return send_reply_ok(req, &arg, sizeof(arg));
1137}
1138
1139static void do_lookup(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1140{
1141 char *name = (char *) inarg;
1142
1143 if (req->se->op.lookup)
1144 req->se->op.lookup(req, nodeid, name);
1145 else
1146 fuse_reply_err(req, ENOSYS);
1147}
1148
1149static void do_forget(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1150{
1151 struct fuse_forget_in *arg = (struct fuse_forget_in *) inarg;
1152
1153 if (req->se->op.forget)
1154 req->se->op.forget(req, nodeid, arg->nlookup);
1155 else
1156 fuse_reply_none(req);
1157}
1158
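/* BATCH_FORGET: prefer forget_multi(); otherwise synthesize a dummy
   request per entry and call forget() for each. Forget requests get no
   reply, so fuse_reply_none() only releases the request. */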
1159static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
1160 const void *inarg)
1161{
1162 struct fuse_batch_forget_in *arg = (void *) inarg;
1163 struct fuse_forget_one *param = (void *) PARAM(arg);
1164 unsigned int i;
1165
1166 (void) nodeid;
1167
1168 if (req->se->op.forget_multi) {
1169 req->se->op.forget_multi(req, arg->count,
1170 (struct fuse_forget_data *) param);
1171 } else if (req->se->op.forget) {
1172 for (i = 0; i < arg->count; i++) {
1173 struct fuse_forget_one *forget = &param[i];
1174 struct fuse_req *dummy_req;
1175
1176 dummy_req = fuse_ll_alloc_req(req->se);
1177 if (dummy_req == NULL)
1178 break;
1179
1180 dummy_req->unique = req->unique;
1181 dummy_req->ctx = req->ctx;
1182 dummy_req->ch = NULL;
1183
1184 req->se->op.forget(dummy_req, forget->nodeid,
1185 forget->nlookup);
1186 }
1187 fuse_reply_none(req);
1188 } else {
1189 fuse_reply_none(req);
1190 }
1191}
1192
1193static void do_getattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1194{
1195 struct fuse_file_info *fip = NULL;
1196 struct fuse_file_info fi;
1197
1198 if (req->se->conn.proto_minor >= 9) {
1199 struct fuse_getattr_in *arg = (struct fuse_getattr_in *) inarg;
1200
1201 if (arg->getattr_flags & FUSE_GETATTR_FH) {
1202 memset(&fi, 0, sizeof(fi));
1203 fi.fh = arg->fh;
1204 fip = &fi;
1205 }
1206 }
1207
1208 if (req->se->op.getattr)
1209 req->se->op.getattr(req, nodeid, fip);
1210 else
1211 fuse_reply_err(req, ENOSYS);
1212}
1213
1214static void do_setattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1215{
1216 struct fuse_setattr_in *arg = (struct fuse_setattr_in *) inarg;
1217
1218 if (req->se->op.setattr) {
1219 struct fuse_file_info *fi = NULL;
1220 struct fuse_file_info fi_store;
1221 struct stat stbuf;
1222 memset(&stbuf, 0, sizeof(stbuf));
1223 convert_attr(arg, &stbuf);
1224 if (arg->valid & FATTR_FH) {
1225 arg->valid &= ~FATTR_FH;
1226 memset(&fi_store, 0, sizeof(fi_store));
1227 fi = &fi_store;
1228 fi->fh = arg->fh;
1229 }
1230 arg->valid &=
1231 FUSE_SET_ATTR_MODE |
1232 FUSE_SET_ATTR_UID |
1233 FUSE_SET_ATTR_GID |
1234 FUSE_SET_ATTR_SIZE |
1235 FUSE_SET_ATTR_ATIME |
1236 FUSE_SET_ATTR_MTIME |
1237 FUSE_SET_ATTR_KILL_SUID |
1238 FUSE_SET_ATTR_KILL_SGID |
1239 FUSE_SET_ATTR_ATIME_NOW |
1240 FUSE_SET_ATTR_MTIME_NOW |
1241 FUSE_SET_ATTR_CTIME;
1242
1243 req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
1244 } else
1245 fuse_reply_err(req, ENOSYS);
1246}
1247
1248static void do_access(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1249{
1250 struct fuse_access_in *arg = (struct fuse_access_in *) inarg;
1251
1252 if (req->se->op.access)
1253 req->se->op.access(req, nodeid, arg->mask);
1254 else
1255 fuse_reply_err(req, ENOSYS);
1256}
1257
1258static void do_readlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1259{
1260 (void) inarg;
1261
1262 if (req->se->op.readlink)
1263 req->se->op.readlink(req, nodeid);
1264 else
1265 fuse_reply_err(req, ENOSYS);
1266}
1267
1268static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1269{
1270 struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
1271 char *name = PARAM(arg);
1272
1273 if (req->se->conn.proto_minor >= 12)
1274 req->ctx.umask = arg->umask;
1275 else
1276 name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;
1277
1278 if (req->se->op.mknod)
1279 req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
1280 else
1281 fuse_reply_err(req, ENOSYS);
1282}
1283
1284static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1285{
1286 struct fuse_mkdir_in *arg = (struct fuse_mkdir_in *) inarg;
1287
1288 if (req->se->conn.proto_minor >= 12)
1289 req->ctx.umask = arg->umask;
1290
1291 if (req->se->op.mkdir)
1292 req->se->op.mkdir(req, nodeid, PARAM(arg), arg->mode);
1293 else
1294 fuse_reply_err(req, ENOSYS);
1295}
1296
1297static void do_unlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1298{
1299 char *name = (char *) inarg;
1300
1301 if (req->se->op.unlink)
1302 req->se->op.unlink(req, nodeid, name);
1303 else
1304 fuse_reply_err(req, ENOSYS);
1305}
1306
1307static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1308{
1309 char *name = (char *) inarg;
1310
1311 if (req->se->op.rmdir)
1312 req->se->op.rmdir(req, nodeid, name);
1313 else
1314 fuse_reply_err(req, ENOSYS);
1315}
1316
1317static void do_symlink(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1318{
1319 char *name = (char *) inarg;
1320 char *linkname = ((char *) inarg) + strlen((char *) inarg) + 1;
1321
1322 if (req->se->op.symlink)
1323 req->se->op.symlink(req, linkname, nodeid, name);
1324 else
1325 fuse_reply_err(req, ENOSYS);
1326}
1327
1328static void do_rename(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1329{
1330 struct fuse_rename_in *arg = (struct fuse_rename_in *) inarg;
1331 char *oldname = PARAM(arg);
1332 char *newname = oldname + strlen(oldname) + 1;
1333
1334 if (req->se->op.rename)
1335 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1336 0);
1337 else
1338 fuse_reply_err(req, ENOSYS);
1339}
1340
1341static void do_rename2(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1342{
1343 struct fuse_rename2_in *arg = (struct fuse_rename2_in *) inarg;
1344 char *oldname = PARAM(arg);
1345 char *newname = oldname + strlen(oldname) + 1;
1346
1347 if (req->se->op.rename)
1348 req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1349 arg->flags);
1350 else
1351 fuse_reply_err(req, ENOSYS);
1352}
1353
1354static void do_link(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1355{
1356 struct fuse_link_in *arg = (struct fuse_link_in *) inarg;
1357
1358 if (req->se->op.link)
1359 req->se->op.link(req, arg->oldnodeid, nodeid, PARAM(arg));
1360 else
1361 fuse_reply_err(req, ENOSYS);
1362}
1363
1364static void do_tmpfile(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1365{
1366 struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1367
1368 if (req->se->op.tmpfile) {
1369 struct fuse_file_info fi;
1370
1371 memset(&fi, 0, sizeof(fi));
1372 fi.flags = arg->flags;
1373
1374 if (req->se->conn.proto_minor >= 12)
1375 req->ctx.umask = arg->umask;
1376
1377 req->se->op.tmpfile(req, nodeid, arg->mode, &fi);
1378 } else
1379 fuse_reply_err(req, ENOSYS);
1380}
1381
1382static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1383{
1384 struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
1385
1386 if (req->se->op.create) {
1387 struct fuse_file_info fi;
1388 char *name = PARAM(arg);
1389
1390 memset(&fi, 0, sizeof(fi));
1391 fi.flags = arg->flags;
1392
1393 if (req->se->conn.proto_minor >= 12)
1394 req->ctx.umask = arg->umask;
1395 else
1396 name = (char *) inarg + sizeof(struct fuse_open_in);
1397
1398 req->se->op.create(req, nodeid, name, arg->mode, &fi);
1399 } else
1400 fuse_reply_err(req, ENOSYS);
1401}
1402
1403static void do_open(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1404{
1405 struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1406 struct fuse_file_info fi;
1407
1408 memset(&fi, 0, sizeof(fi));
1409 fi.flags = arg->flags;
1410
1411 if (req->se->op.open)
1412 req->se->op.open(req, nodeid, &fi);
1413 else if (req->se->conn.want_ext & FUSE_CAP_NO_OPEN_SUPPORT)
1414 fuse_reply_err(req, ENOSYS);
1415 else
1416 fuse_reply_open(req, &fi);
1417}
1418
1419static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1420{
1421 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1422
1423 if (req->se->op.read) {
1424 struct fuse_file_info fi;
1425
1426 memset(&fi, 0, sizeof(fi));
1427 fi.fh = arg->fh;
1428 if (req->se->conn.proto_minor >= 9) {
1429 fi.lock_owner = arg->lock_owner;
1430 fi.flags = arg->flags;
1431 }
1432 req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1433 } else
1434 fuse_reply_err(req, ENOSYS);
1435}
1436
1437static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1438{
1439 struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1440 struct fuse_file_info fi;
1441 char *param;
1442
1443 memset(&fi, 0, sizeof(fi));
1444 fi.fh = arg->fh;
1445 fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1446
1447 if (req->se->conn.proto_minor < 9) {
1448 param = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1449 } else {
1450 fi.lock_owner = arg->lock_owner;
1451 fi.flags = arg->flags;
1452 param = PARAM(arg);
1453 }
1454
1455 if (req->se->op.write)
1456 req->se->op.write(req, nodeid, param, arg->size,
1457 arg->offset, &fi);
1458 else
1459 fuse_reply_err(req, ENOSYS);
1460}
1461
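/* Variant of do_write() used when the filesystem implements write_buf():
   the data may still sit in the splice pipe (FUSE_BUF_IS_FD), in which
   case the pipe must be reset if the handler did not consume it all. */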
1462static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid, const void *inarg,
1463 const struct fuse_buf *ibuf)
1464{
1465 struct fuse_session *se = req->se;
1466 struct fuse_bufvec bufv = {
1467 .buf[0] = *ibuf,
1468 .count = 1,
1469 };
1470 struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
1471 struct fuse_file_info fi;
1472
1473 memset(&fi, 0, sizeof(fi));
1474 fi.fh = arg->fh;
1475 fi.writepage = arg->write_flags & FUSE_WRITE_CACHE;
1476
1477 if (se->conn.proto_minor < 9) {
1478 bufv.buf[0].mem = ((char *) arg) + FUSE_COMPAT_WRITE_IN_SIZE;
1479 bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1480 FUSE_COMPAT_WRITE_IN_SIZE;
1481 assert(!(bufv.buf[0].flags & FUSE_BUF_IS_FD));
1482 } else {
1483 fi.lock_owner = arg->lock_owner;
1484 fi.flags = arg->flags;
1485 if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
1486 bufv.buf[0].mem = PARAM(arg);
1487
1488 bufv.buf[0].size -= sizeof(struct fuse_in_header) +
1489 sizeof(struct fuse_write_in);
1490 }
1491 if (bufv.buf[0].size < arg->size) {
1492 fuse_log(FUSE_LOG_ERR, "fuse: do_write_buf: buffer size too small\n");
1493 fuse_reply_err(req, EIO);
1494 goto out;
1495 }
1496 bufv.buf[0].size = arg->size;
1497
1498 se->op.write_buf(req, nodeid, &bufv, arg->offset, &fi);
1499
1500out:
1501 /* Need to reset the pipe if ->write_buf() didn't consume all data */
1502 if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
1503 fuse_ll_clear_pipe(se);
1504}
1505
1506static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1507{
1508 struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
1509 struct fuse_file_info fi;
1510
1511 memset(&fi, 0, sizeof(fi));
1512 fi.fh = arg->fh;
1513 fi.flush = 1;
1514 if (req->se->conn.proto_minor >= 7)
1515 fi.lock_owner = arg->lock_owner;
1516
1517 if (req->se->op.flush)
1518 req->se->op.flush(req, nodeid, &fi);
1519 else
1520 fuse_reply_err(req, ENOSYS);
1521}
1522
1523static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1524{
1525 struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1526 struct fuse_file_info fi;
1527
1528 memset(&fi, 0, sizeof(fi));
1529 fi.flags = arg->flags;
1530 fi.fh = arg->fh;
1531 if (req->se->conn.proto_minor >= 8) {
1532 fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1533 fi.lock_owner = arg->lock_owner;
1534 }
1535 if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1536 fi.flock_release = 1;
1537 fi.lock_owner = arg->lock_owner;
1538 }
1539
1540 if (req->se->op.release)
1541 req->se->op.release(req, nodeid, &fi);
1542 else
1543 fuse_reply_err(req, 0);
1544}
1545
1546static void do_fsync(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1547{
1548 struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1549 struct fuse_file_info fi;
1550 int datasync = arg->fsync_flags & 1;
1551
1552 memset(&fi, 0, sizeof(fi));
1553 fi.fh = arg->fh;
1554
1555 if (req->se->op.fsync)
1556 req->se->op.fsync(req, nodeid, datasync, &fi);
1557 else
1558 fuse_reply_err(req, ENOSYS);
1559}
1560
1561static void do_opendir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1562{
1563 struct fuse_open_in *arg = (struct fuse_open_in *) inarg;
1564 struct fuse_file_info fi;
1565
1566 memset(&fi, 0, sizeof(fi));
1567 fi.flags = arg->flags;
1568
1569 if (req->se->op.opendir)
1570 req->se->op.opendir(req, nodeid, &fi);
1571 else if (req->se->conn.want_ext & FUSE_CAP_NO_OPENDIR_SUPPORT)
1572 fuse_reply_err(req, ENOSYS);
1573 else
1574 fuse_reply_open(req, &fi);
1575}
1576
1577static void do_readdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1578{
1579 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1580 struct fuse_file_info fi;
1581
1582 memset(&fi, 0, sizeof(fi));
1583 fi.fh = arg->fh;
1584
1585 if (req->se->op.readdir)
1586 req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1587 else
1588 fuse_reply_err(req, ENOSYS);
1589}
1590
1591static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1592{
1593 struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
1594 struct fuse_file_info fi;
1595
1596 memset(&fi, 0, sizeof(fi));
1597 fi.fh = arg->fh;
1598
1599 if (req->se->op.readdirplus)
1600 req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1601 else
1602 fuse_reply_err(req, ENOSYS);
1603}
1604
1605static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1606{
1607 struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
1608 struct fuse_file_info fi;
1609
1610 memset(&fi, 0, sizeof(fi));
1611 fi.flags = arg->flags;
1612 fi.fh = arg->fh;
1613
1614 if (req->se->op.releasedir)
1615 req->se->op.releasedir(req, nodeid, &fi);
1616 else
1617 fuse_reply_err(req, 0);
1618}
1619
1620static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1621{
1622 struct fuse_fsync_in *arg = (struct fuse_fsync_in *) inarg;
1623 struct fuse_file_info fi;
1624 int datasync = arg->fsync_flags & 1;
1625
1626 memset(&fi, 0, sizeof(fi));
1627 fi.fh = arg->fh;
1628
1629 if (req->se->op.fsyncdir)
1630 req->se->op.fsyncdir(req, nodeid, datasync, &fi);
1631 else
1632 fuse_reply_err(req, ENOSYS);
1633}
1634
1635static void do_statfs(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1636{
1637 (void) nodeid;
1638 (void) inarg;
1639
1640 if (req->se->op.statfs)
1641 req->se->op.statfs(req, nodeid);
1642 else {
1643 struct statvfs buf = {
1644 .f_namemax = 255,
1645 .f_bsize = 512,
1646 };
1647 fuse_reply_statfs(req, &buf);
1648 }
1649}
1650
1651static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1652{
1653 struct fuse_session *se = req->se;
1654 unsigned int xattr_ext = !!(se->conn.want_ext & FUSE_CAP_SETXATTR_EXT);
1655 struct fuse_setxattr_in *arg = (struct fuse_setxattr_in *) inarg;
1656 char *name = xattr_ext ? PARAM(arg) :
1657 (char *)arg + FUSE_COMPAT_SETXATTR_IN_SIZE;
1658 char *value = name + strlen(name) + 1;
1659
1660 /* XXX:The API should be extended to support extra_flags/setxattr_flags */
1661 if (req->se->op.setxattr)
1662 req->se->op.setxattr(req, nodeid, name, value, arg->size,
1663 arg->flags);
1664 else
1665 fuse_reply_err(req, ENOSYS);
1666}
1667
1668static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1669{
1670 struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1671
1672 if (req->se->op.getxattr)
1673 req->se->op.getxattr(req, nodeid, PARAM(arg), arg->size);
1674 else
1675 fuse_reply_err(req, ENOSYS);
1676}
1677
1678static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1679{
1680 struct fuse_getxattr_in *arg = (struct fuse_getxattr_in *) inarg;
1681
1682 if (req->se->op.listxattr)
1683 req->se->op.listxattr(req, nodeid, arg->size);
1684 else
1685 fuse_reply_err(req, ENOSYS);
1686}
1687
1688static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1689{
1690 char *name = (char *) inarg;
1691
1692 if (req->se->op.removexattr)
1693 req->se->op.removexattr(req, nodeid, name);
1694 else
1695 fuse_reply_err(req, ENOSYS);
1696}
1697
1698static void convert_fuse_file_lock(struct fuse_file_lock *fl,
1699 struct flock *flock)
1700{
1701 memset(flock, 0, sizeof(struct flock));
1702 flock->l_type = fl->type;
1703 flock->l_whence = SEEK_SET;
1704 flock->l_start = fl->start;
1705 if (fl->end == OFFSET_MAX)
1706 flock->l_len = 0;
1707 else
1708 flock->l_len = fl->end - fl->start + 1;
1709 flock->l_pid = fl->pid;
1710}
1711
1712static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1713{
1714 struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1715 struct fuse_file_info fi;
1716 struct flock flock;
1717
1718 memset(&fi, 0, sizeof(fi));
1719 fi.fh = arg->fh;
1720 fi.lock_owner = arg->owner;
1721
1722 convert_fuse_file_lock(&arg->lk, &flock);
1723 if (req->se->op.getlk)
1724 req->se->op.getlk(req, nodeid, &fi, &flock);
1725 else
1726 fuse_reply_err(req, ENOSYS);
1727}
1728
1729static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
1730 const void *inarg, int sleep)
1731{
1732 struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
1733 struct fuse_file_info fi;
1734 struct flock flock;
1735
1736 memset(&fi, 0, sizeof(fi));
1737 fi.fh = arg->fh;
1738 fi.lock_owner = arg->owner;
1739
1740 if (arg->lk_flags & FUSE_LK_FLOCK) {
1741 int op = 0;
1742
1743 switch (arg->lk.type) {
1744 case F_RDLCK:
1745 op = LOCK_SH;
1746 break;
1747 case F_WRLCK:
1748 op = LOCK_EX;
1749 break;
1750 case F_UNLCK:
1751 op = LOCK_UN;
1752 break;
1753 }
1754 if (!sleep)
1755 op |= LOCK_NB;
1756
1757 if (req->se->op.flock)
1758 req->se->op.flock(req, nodeid, &fi, op);
1759 else
1760 fuse_reply_err(req, ENOSYS);
1761 } else {
1762 convert_fuse_file_lock(&arg->lk, &flock);
1763 if (req->se->op.setlk)
1764 req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
1765 else
1766 fuse_reply_err(req, ENOSYS);
1767 }
1768}
1769
1770static void do_setlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1771{
1772 do_setlk_common(req, nodeid, inarg, 0);
1773}
1774
1775static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1776{
1777 do_setlk_common(req, nodeid, inarg, 1);
1778}
1779
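/* Called with se->lock held. Look for the request targeted by an
   INTERRUPT: if it is still outstanding, mark it interrupted and invoke
   its registered interrupt callback. Returns nonzero if the interrupt was
   matched or is already queued. */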
1780static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
1781{
1782 struct fuse_req *curr;
1783
1784 for (curr = se->list.next; curr != &se->list; curr = curr->next) {
1785 if (curr->unique == req->u.i.unique) {
1786 void (*func)(fuse_req_t, void *);
1787 void *data;
1788
1789 curr->ref_cnt++;
1790 pthread_mutex_unlock(&se->lock);
1791
1792 /* Ugh, ugly locking */
1793 pthread_mutex_lock(&curr->lock);
1794 pthread_mutex_lock(&se->lock);
1795 curr->interrupted = 1;
1796 func = curr->u.ni.func;
1797 data = curr->u.ni.data;
1798 pthread_mutex_unlock(&se->lock);
1799 if (func)
1800 func(curr, data);
1801 pthread_mutex_unlock(&curr->lock);
1802
1803 pthread_mutex_lock(&se->lock);
1804 curr->ref_cnt--;
1805 if (!curr->ref_cnt) {
1806 destroy_req(curr);
1807 }
1808
1809 return 1;
1810 }
1811 }
1812 for (curr = se->interrupts.next; curr != &se->interrupts;
1813 curr = curr->next) {
1814 if (curr->u.i.unique == req->u.i.unique)
1815 return 1;
1816 }
1817 return 0;
1818}
1819
1820static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1821{
1822 struct fuse_interrupt_in *arg = (struct fuse_interrupt_in *) inarg;
1823 struct fuse_session *se = req->se;
1824
1825 (void) nodeid;
1826 if (se->debug)
1827 fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
1828 (unsigned long long) arg->unique);
1829
1830 req->u.i.unique = arg->unique;
1831
1832 pthread_mutex_lock(&se->lock);
1833 if (find_interrupted(se, req)) {
1834 fuse_chan_put(req->ch);
1835 req->ch = NULL;
1836 destroy_req(req);
1837 } else
1838 list_add_req(req, &se->interrupts);
1839 pthread_mutex_unlock(&se->lock);
1840}
1841
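/* Called for each incoming request: if a queued INTERRUPT matches it, mark
   the request interrupted and drop the queued entry; otherwise hand back
   the next still-unmatched interrupt so the caller can retry it. */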
1842static struct fuse_req *check_interrupt(struct fuse_session *se,
1843 struct fuse_req *req)
1844{
1845 struct fuse_req *curr;
1846
1847 for (curr = se->interrupts.next; curr != &se->interrupts;
1848 curr = curr->next) {
1849 if (curr->u.i.unique == req->unique) {
1850 req->interrupted = 1;
1851 list_del_req(curr);
1852 fuse_chan_put(curr->ch);
1853 curr->ch = NULL;
1854 destroy_req(curr);
1855 return NULL;
1856 }
1857 }
1858 curr = se->interrupts.next;
1859 if (curr != &se->interrupts) {
1860 list_del_req(curr);
1861 list_init_req(curr);
1862 return curr;
1863 } else
1864 return NULL;
1865}
1866
1867static void do_bmap(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1868{
1869 struct fuse_bmap_in *arg = (struct fuse_bmap_in *) inarg;
1870
1871 if (req->se->op.bmap)
1872 req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
1873 else
1874 fuse_reply_err(req, ENOSYS);
1875}
1876
1877static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1878{
1879 struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
1880 unsigned int flags = arg->flags;
1881 void *in_buf = arg->in_size ? PARAM(arg) : NULL;
1882 struct fuse_file_info fi;
1883
1884 if (flags & FUSE_IOCTL_DIR &&
1885 !(req->se->conn.want_ext & FUSE_CAP_IOCTL_DIR)) {
1886 fuse_reply_err(req, ENOTTY);
1887 return;
1888 }
1889
1890 memset(&fi, 0, sizeof(fi));
1891 fi.fh = arg->fh;
1892
1893 if (sizeof(void *) == 4 && req->se->conn.proto_minor >= 16 &&
1894 !(flags & FUSE_IOCTL_32BIT)) {
1895 req->ioctl_64bit = 1;
1896 }
1897
1898 if (req->se->op.ioctl)
1899 req->se->op.ioctl(req, nodeid, arg->cmd,
1900 (void *)(uintptr_t)arg->arg, &fi, flags,
1901 in_buf, arg->in_size, arg->out_size);
1902 else
1903 fuse_reply_err(req, ENOSYS);
1904}
1905
1906void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
1907{
1908 free(ph);
1909}
1910
1911static void do_poll(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1912{
1913 struct fuse_poll_in *arg = (struct fuse_poll_in *) inarg;
1914 struct fuse_file_info fi;
1915
1916 memset(&fi, 0, sizeof(fi));
1917 fi.fh = arg->fh;
1918 fi.poll_events = arg->events;
1919
1920 if (req->se->op.poll) {
1921 struct fuse_pollhandle *ph = NULL;
1922
1923 if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
1924 ph = malloc(sizeof(struct fuse_pollhandle));
1925 if (ph == NULL) {
1926 fuse_reply_err(req, ENOMEM);
1927 return;
1928 }
1929 ph->kh = arg->kh;
1930 ph->se = req->se;
1931 }
1932
1933 req->se->op.poll(req, nodeid, &fi, ph);
1934 } else {
1935 fuse_reply_err(req, ENOSYS);
1936 }
1937}
1938
1939static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1940{
1941 struct fuse_fallocate_in *arg = (struct fuse_fallocate_in *) inarg;
1942 struct fuse_file_info fi;
1943
1944 memset(&fi, 0, sizeof(fi));
1945 fi.fh = arg->fh;
1946
1947 if (req->se->op.fallocate)
1948 req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length, &fi);
1949 else
1950 fuse_reply_err(req, ENOSYS);
1951}
1952
1953static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in, const void *inarg)
1954{
1955 struct fuse_copy_file_range_in *arg = (struct fuse_copy_file_range_in *) inarg;
1956 struct fuse_file_info fi_in, fi_out;
1957
1958 memset(&fi_in, 0, sizeof(fi_in));
1959 fi_in.fh = arg->fh_in;
1960
1961 memset(&fi_out, 0, sizeof(fi_out));
1962 fi_out.fh = arg->fh_out;
1963
1964
1965 if (req->se->op.copy_file_range)
1966 req->se->op.copy_file_range(req, nodeid_in, arg->off_in,
1967 &fi_in, arg->nodeid_out,
1968 arg->off_out, &fi_out, arg->len,
1969 arg->flags);
1970 else
1971 fuse_reply_err(req, ENOSYS);
1972}
1973
1974static void do_lseek(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
1975{
1976 struct fuse_lseek_in *arg = (struct fuse_lseek_in *) inarg;
1977 struct fuse_file_info fi;
1978
1979 memset(&fi, 0, sizeof(fi));
1980 fi.fh = arg->fh;
1981
1982 if (req->se->op.lseek)
1983 req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
1984 else
1985 fuse_reply_err(req, ENOSYS);
1986}
1987
1988static bool want_flags_valid(uint64_t capable, uint64_t want)
1989{
1990 uint64_t unknown_flags = want & (~capable);
1991 if (unknown_flags != 0) {
1992 fuse_log(FUSE_LOG_ERR,
1993 "fuse: unknown connection 'want' flags: 0x%08lx\n",
1994 unknown_flags);
1995 return false;
1996 }
1997 return true;
1998}
1999
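/* Merge the deprecated 32-bit conn->want into conn->want_ext; it is an
   error for the filesystem's init() handler to have set both fields. */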
2003static inline int convert_to_conn_want_ext(struct fuse_conn_info *conn,
2004 uint64_t want_ext_default)
2005{
2006 /* Convert want to want_ext if necessary */
2007 if (conn->want != 0) {
2008 if (conn->want_ext != want_ext_default) {
2009 fuse_log(FUSE_LOG_ERR,
2010 "fuse: both 'want' and 'want_ext' are set\n");
2011 return -EINVAL;
2012 }
2013 conn->want_ext |= conn->want;
2014 }
2015
2016 return 0;
2017}
2018
2019/* Prevent bogus data races (bogus since "init" is called before
2020 * multi-threading becomes relevant) */
2021static __attribute__((no_sanitize("thread")))
2022void do_init(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2023{
2024 struct fuse_init_in *arg = (struct fuse_init_in *) inarg;
2025 struct fuse_init_out outarg;
2026 struct fuse_session *se = req->se;
2027 size_t bufsize = se->bufsize;
2028 size_t outargsize = sizeof(outarg);
2029 uint64_t inargflags = 0;
2030 uint64_t outargflags = 0;
2031 bool buf_reallocable = se->buf_reallocable;
2032 (void) nodeid;
2033 if (se->debug) {
2034 fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
2035 if (arg->major == 7 && arg->minor >= 6) {
2036 fuse_log(FUSE_LOG_DEBUG, "flags=0x%08x\n", arg->flags);
2037 fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n",
2038 arg->max_readahead);
2039 }
2040 }
2041 se->conn.proto_major = arg->major;
2042 se->conn.proto_minor = arg->minor;
2043 se->conn.capable_ext = 0;
2044 se->conn.want_ext = 0;
2045
2046 memset(&outarg, 0, sizeof(outarg));
2047 outarg.major = FUSE_KERNEL_VERSION;
2048 outarg.minor = FUSE_KERNEL_MINOR_VERSION;
2049
2050 if (arg->major < 7) {
2051 fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
2052 arg->major, arg->minor);
2053 fuse_reply_err(req, EPROTO);
2054 return;
2055 }
2056
2057 if (arg->major > 7) {
2058 /* Wait for a second INIT request with a 7.X version */
2059 send_reply_ok(req, &outarg, sizeof(outarg));
2060 return;
2061 }
2062
2063 if (arg->minor >= 6) {
2064 if (arg->max_readahead < se->conn.max_readahead)
2065 se->conn.max_readahead = arg->max_readahead;
2066 inargflags = arg->flags;
2067 if (inargflags & FUSE_INIT_EXT)
2068 inargflags = inargflags | (uint64_t) arg->flags2 << 32;
2069 if (inargflags & FUSE_ASYNC_READ)
2070 se->conn.capable_ext |= FUSE_CAP_ASYNC_READ;
2071 if (inargflags & FUSE_POSIX_LOCKS)
2072 se->conn.capable_ext |= FUSE_CAP_POSIX_LOCKS;
2073 if (inargflags & FUSE_ATOMIC_O_TRUNC)
2074 se->conn.capable_ext |= FUSE_CAP_ATOMIC_O_TRUNC;
2075 if (inargflags & FUSE_EXPORT_SUPPORT)
2076 se->conn.capable_ext |= FUSE_CAP_EXPORT_SUPPORT;
2077 if (inargflags & FUSE_DONT_MASK)
2078 se->conn.capable_ext |= FUSE_CAP_DONT_MASK;
2079 if (inargflags & FUSE_FLOCK_LOCKS)
2080 se->conn.capable_ext |= FUSE_CAP_FLOCK_LOCKS;
2081 if (inargflags & FUSE_AUTO_INVAL_DATA)
2082 se->conn.capable_ext |= FUSE_CAP_AUTO_INVAL_DATA;
2083 if (inargflags & FUSE_DO_READDIRPLUS)
2084 se->conn.capable_ext |= FUSE_CAP_READDIRPLUS;
2085 if (inargflags & FUSE_READDIRPLUS_AUTO)
2086 se->conn.capable_ext |= FUSE_CAP_READDIRPLUS_AUTO;
2087 if (inargflags & FUSE_ASYNC_DIO)
2088 se->conn.capable_ext |= FUSE_CAP_ASYNC_DIO;
2089 if (inargflags & FUSE_WRITEBACK_CACHE)
2090 se->conn.capable_ext |= FUSE_CAP_WRITEBACK_CACHE;
2091 if (inargflags & FUSE_NO_OPEN_SUPPORT)
2092 se->conn.capable_ext |= FUSE_CAP_NO_OPEN_SUPPORT;
2093 if (inargflags & FUSE_PARALLEL_DIROPS)
2094 se->conn.capable_ext |= FUSE_CAP_PARALLEL_DIROPS;
2095 if (inargflags & FUSE_POSIX_ACL)
2096 se->conn.capable_ext |= FUSE_CAP_POSIX_ACL;
2097 if (inargflags & FUSE_HANDLE_KILLPRIV)
2098 se->conn.capable_ext |= FUSE_CAP_HANDLE_KILLPRIV;
2099 if (inargflags & FUSE_HANDLE_KILLPRIV_V2)
2100 se->conn.capable_ext |= FUSE_CAP_HANDLE_KILLPRIV_V2;
2101 if (inargflags & FUSE_CACHE_SYMLINKS)
2102 se->conn.capable_ext |= FUSE_CAP_CACHE_SYMLINKS;
2103 if (inargflags & FUSE_NO_OPENDIR_SUPPORT)
2104 se->conn.capable_ext |= FUSE_CAP_NO_OPENDIR_SUPPORT;
2105 if (inargflags & FUSE_EXPLICIT_INVAL_DATA)
2106 se->conn.capable_ext |= FUSE_CAP_EXPLICIT_INVAL_DATA;
2107 if (inargflags & FUSE_SETXATTR_EXT)
2108 se->conn.capable_ext |= FUSE_CAP_SETXATTR_EXT;
2109 if (!(inargflags & FUSE_MAX_PAGES)) {
2110 size_t max_bufsize =
2111 FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize()
2112 + FUSE_BUFFER_HEADER_SIZE;
2113 if (bufsize > max_bufsize) {
2114 bufsize = max_bufsize;
2115 }
2116 buf_reallocable = false;
2117 }
2118 if (inargflags & FUSE_DIRECT_IO_ALLOW_MMAP)
2119 se->conn.capable_ext |= FUSE_CAP_DIRECT_IO_ALLOW_MMAP;
2120 if (arg->minor >= 38 || (inargflags & FUSE_HAS_EXPIRE_ONLY))
2121 se->conn.capable_ext |= FUSE_CAP_EXPIRE_ONLY;
2122 if (inargflags & FUSE_PASSTHROUGH)
2123 se->conn.capable_ext |= FUSE_CAP_PASSTHROUGH;
2124 if (inargflags & FUSE_NO_EXPORT_SUPPORT)
2125 se->conn.capable_ext |= FUSE_CAP_NO_EXPORT_SUPPORT;
2126 } else {
2127 se->conn.max_readahead = 0;
2128 }
2129
2130 if (se->conn.proto_minor >= 14) {
2131#ifdef HAVE_SPLICE
2132#ifdef HAVE_VMSPLICE
2133 if ((se->io == NULL) || (se->io->splice_send != NULL)) {
2134			se->conn.capable_ext |= FUSE_CAP_SPLICE_WRITE |
2135						FUSE_CAP_SPLICE_MOVE;
2136 }
2137#endif
2138 if ((se->io == NULL) || (se->io->splice_receive != NULL)) {
2139 se->conn.capable_ext |= FUSE_CAP_SPLICE_READ;
2140 }
2141#endif
2142 }
2143 if (se->conn.proto_minor >= 18)
2144 se->conn.capable_ext |= FUSE_CAP_IOCTL_DIR;
2145
2146 /* Default settings for modern filesystems.
2147 *
2148 * Most of these capabilities were disabled by default in
2149 * libfuse2 for backwards compatibility reasons. In libfuse3,
2150 * we can finally enable them by default (as long as they're
2151 * supported by the kernel).
2152 */
2153#define LL_SET_DEFAULT(cond, cap) \
2154 if ((cond)) \
2155 fuse_set_feature_flag(&se->conn, cap)
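	/* Expansion example (added for illustration, not in the original
	 * source):
	 *     LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
	 * becomes
	 *     if ((se->op.flock))
	 *             fuse_set_feature_flag(&se->conn, FUSE_CAP_FLOCK_LOCKS);
	 * so a capability is only enabled by default when the filesystem
	 * actually implements the corresponding handler(s). */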
2156
2157 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
2158 LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
2159 LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
2160 LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
2161 LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
2162 LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
2163	LL_SET_DEFAULT(se->op.getlk && se->op.setlk,
2164		       FUSE_CAP_POSIX_LOCKS);
2165 LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
2166 LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
2167	LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
2168		       FUSE_CAP_READDIRPLUS_AUTO);
2169
2170 /* This could safely become default, but libfuse needs an API extension
2171 * to support it
2172 * LL_SET_DEFAULT(1, FUSE_CAP_SETXATTR_EXT);
2173 */
2174
2175 se->conn.time_gran = 1;
2176
2177 se->got_init = 1;
2178 if (se->op.init) {
2179 uint64_t want_ext_default = se->conn.want_ext;
2180 int rc;
2181
2182 // Apply the first 32 bits of capable_ext to capable
2183 se->conn.capable =
2184 (uint32_t)(se->conn.capable_ext & 0xFFFFFFFF);
2185
2186 se->op.init(se->userdata, &se->conn);
2187
2188 /*
2189 * se->conn.want is 32-bit value and deprecated in favour of
2190 * se->conn.want_ext
2191 * Userspace might still use conn.want - we need to convert it
2192 */
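	/* Rough sketch of what the conversion does (see the tail of
	 * convert_to_conn_want_ext() earlier in this file): if only the
	 * legacy 32-bit conn.want was modified, its bits are ORed into
	 * conn.want_ext; if both fields were changed relative to their
	 * defaults, the conversion fails and we bail out with EPROTO below. */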
2193 rc = convert_to_conn_want_ext(&se->conn, want_ext_default);
2194 if (rc != 0) {
2195 fuse_reply_err(req, EPROTO);
2196			se->error = -EPROTO;
2197			fuse_session_exit(se);
2198 return;
2199 }
2200 }
2201
2202 if (!want_flags_valid(se->conn.capable_ext, se->conn.want_ext)) {
2203 fuse_reply_err(req, EPROTO);
2204		se->error = -EPROTO;
2205		fuse_session_exit(se);
2206 return;
2207 }
2208
2209 unsigned max_read_mo = get_max_read(se->mo);
2210 if (se->conn.max_read != max_read_mo) {
2211 fuse_log(FUSE_LOG_ERR, "fuse: error: init() and fuse_session_new() "
2212 "requested different maximum read size (%u vs %u)\n",
2213 se->conn.max_read, max_read_mo);
2214 fuse_reply_err(req, EPROTO);
2215		se->error = -EPROTO;
2216		fuse_session_exit(se);
2217 return;
2218 }
2219
2220 if (bufsize < FUSE_MIN_READ_BUFFER) {
2221 fuse_log(FUSE_LOG_ERR,
2222 "fuse: warning: buffer size too small: %zu\n",
2223 bufsize);
2224 bufsize = FUSE_MIN_READ_BUFFER;
2225 }
2226
2227 if (buf_reallocable)
2228 bufsize = UINT_MAX;
2229 se->conn.max_write = MIN(se->conn.max_write, bufsize - FUSE_BUFFER_HEADER_SIZE);
2230 se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
2231
2232 if (arg->flags & FUSE_MAX_PAGES) {
2233 outarg.flags |= FUSE_MAX_PAGES;
2234 outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
2235 }
2236 outargflags = outarg.flags;
2237 /* Always enable big writes, this is superseded
2238 by the max_write option */
2239 outargflags |= FUSE_BIG_WRITES;
2240
2241 if (se->conn.want_ext & FUSE_CAP_ASYNC_READ)
2242 outargflags |= FUSE_ASYNC_READ;
2243 if (se->conn.want_ext & FUSE_CAP_POSIX_LOCKS)
2244 outargflags |= FUSE_POSIX_LOCKS;
2245 if (se->conn.want_ext & FUSE_CAP_ATOMIC_O_TRUNC)
2246 outargflags |= FUSE_ATOMIC_O_TRUNC;
2247 if (se->conn.want_ext & FUSE_CAP_EXPORT_SUPPORT)
2248 outargflags |= FUSE_EXPORT_SUPPORT;
2249 if (se->conn.want_ext & FUSE_CAP_DONT_MASK)
2250 outargflags |= FUSE_DONT_MASK;
2251 if (se->conn.want_ext & FUSE_CAP_FLOCK_LOCKS)
2252 outargflags |= FUSE_FLOCK_LOCKS;
2253 if (se->conn.want_ext & FUSE_CAP_AUTO_INVAL_DATA)
2254 outargflags |= FUSE_AUTO_INVAL_DATA;
2255 if (se->conn.want_ext & FUSE_CAP_READDIRPLUS)
2256 outargflags |= FUSE_DO_READDIRPLUS;
2257 if (se->conn.want_ext & FUSE_CAP_READDIRPLUS_AUTO)
2258 outargflags |= FUSE_READDIRPLUS_AUTO;
2259 if (se->conn.want_ext & FUSE_CAP_ASYNC_DIO)
2260 outargflags |= FUSE_ASYNC_DIO;
2261 if (se->conn.want_ext & FUSE_CAP_WRITEBACK_CACHE)
2262 outargflags |= FUSE_WRITEBACK_CACHE;
2263 if (se->conn.want_ext & FUSE_CAP_PARALLEL_DIROPS)
2264 outargflags |= FUSE_PARALLEL_DIROPS;
2265 if (se->conn.want_ext & FUSE_CAP_POSIX_ACL)
2266 outargflags |= FUSE_POSIX_ACL;
2267 if (se->conn.want_ext & FUSE_CAP_HANDLE_KILLPRIV)
2268 outargflags |= FUSE_HANDLE_KILLPRIV;
2269 if (se->conn.want_ext & FUSE_CAP_HANDLE_KILLPRIV_V2)
2270 outargflags |= FUSE_HANDLE_KILLPRIV_V2;
2271 if (se->conn.want_ext & FUSE_CAP_CACHE_SYMLINKS)
2272 outargflags |= FUSE_CACHE_SYMLINKS;
2273 if (se->conn.want_ext & FUSE_CAP_EXPLICIT_INVAL_DATA)
2274 outargflags |= FUSE_EXPLICIT_INVAL_DATA;
2275 if (se->conn.want_ext & FUSE_CAP_SETXATTR_EXT)
2276 outargflags |= FUSE_SETXATTR_EXT;
2277 if (se->conn.want_ext & FUSE_CAP_DIRECT_IO_ALLOW_MMAP)
2278 outargflags |= FUSE_DIRECT_IO_ALLOW_MMAP;
2279 if (se->conn.want_ext & FUSE_CAP_PASSTHROUGH) {
2280 outargflags |= FUSE_PASSTHROUGH;
2281 /*
2282 * outarg.max_stack_depth includes the fuse stack layer,
2283 * so it is one more than max_backing_stack_depth.
2284 */
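		/* e.g. a filesystem that allows one layer of backing files
		 * (max_backing_stack_depth == 1) reports max_stack_depth == 2
		 * to the kernel. */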
2285 outarg.max_stack_depth = se->conn.max_backing_stack_depth + 1;
2286 }
2287 if (se->conn.want_ext & FUSE_CAP_NO_EXPORT_SUPPORT)
2288 outargflags |= FUSE_NO_EXPORT_SUPPORT;
2289
2290 if (inargflags & FUSE_INIT_EXT) {
2291 outargflags |= FUSE_INIT_EXT;
2292 outarg.flags2 = outargflags >> 32;
2293 }
2294
2295 outarg.flags = outargflags;
2296
2297 outarg.max_readahead = se->conn.max_readahead;
2298 outarg.max_write = se->conn.max_write;
2299 if (se->conn.proto_minor >= 13) {
2300 if (se->conn.max_background >= (1 << 16))
2301 se->conn.max_background = (1 << 16) - 1;
2302 if (se->conn.congestion_threshold > se->conn.max_background)
2303 se->conn.congestion_threshold = se->conn.max_background;
2304 if (!se->conn.congestion_threshold) {
2305 se->conn.congestion_threshold =
2306 se->conn.max_background * 3 / 4;
2307 }
2308
2309 outarg.max_background = se->conn.max_background;
2310 outarg.congestion_threshold = se->conn.congestion_threshold;
2311 }
2312 if (se->conn.proto_minor >= 23)
2313 outarg.time_gran = se->conn.time_gran;
2314
2315 if (se->debug) {
2316 fuse_log(FUSE_LOG_DEBUG, " INIT: %u.%u\n", outarg.major, outarg.minor);
2317 fuse_log(FUSE_LOG_DEBUG, " flags=0x%08x\n", outarg.flags);
2318 fuse_log(FUSE_LOG_DEBUG, " max_readahead=0x%08x\n",
2319 outarg.max_readahead);
2320 fuse_log(FUSE_LOG_DEBUG, " max_write=0x%08x\n", outarg.max_write);
2321 fuse_log(FUSE_LOG_DEBUG, " max_background=%i\n",
2322 outarg.max_background);
2323 fuse_log(FUSE_LOG_DEBUG, " congestion_threshold=%i\n",
2324 outarg.congestion_threshold);
2325 fuse_log(FUSE_LOG_DEBUG, " time_gran=%u\n",
2326 outarg.time_gran);
2327 if (se->conn.want_ext & FUSE_CAP_PASSTHROUGH)
2328 fuse_log(FUSE_LOG_DEBUG, " max_stack_depth=%u\n",
2329 outarg.max_stack_depth);
2330 }
2331 if (arg->minor < 5)
2332 outargsize = FUSE_COMPAT_INIT_OUT_SIZE;
2333 else if (arg->minor < 23)
2334 outargsize = FUSE_COMPAT_22_INIT_OUT_SIZE;
2335
2336 send_reply_ok(req, &outarg, outargsize);
2337}
2338
2339static void do_destroy(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
2340{
2341 struct fuse_session *se = req->se;
2342
2343 (void) nodeid;
2344 (void) inarg;
2345
2346 se->got_destroy = 1;
2347 se->got_init = 0;
2348 if (se->op.destroy)
2349 se->op.destroy(se->userdata);
2350
2351 send_reply_ok(req, NULL, 0);
2352}
2353
2354static void list_del_nreq(struct fuse_notify_req *nreq)
2355{
2356 struct fuse_notify_req *prev = nreq->prev;
2357 struct fuse_notify_req *next = nreq->next;
2358 prev->next = next;
2359 next->prev = prev;
2360}
2361
2362static void list_add_nreq(struct fuse_notify_req *nreq,
2363 struct fuse_notify_req *next)
2364{
2365 struct fuse_notify_req *prev = next->prev;
2366 nreq->next = next;
2367 nreq->prev = prev;
2368 prev->next = nreq;
2369 next->prev = nreq;
2370}
2371
2372static void list_init_nreq(struct fuse_notify_req *nreq)
2373{
2374 nreq->next = nreq;
2375 nreq->prev = nreq;
2376}
2377
2378static void do_notify_reply(fuse_req_t req, fuse_ino_t nodeid,
2379 const void *inarg, const struct fuse_buf *buf)
2380{
2381 struct fuse_session *se = req->se;
2382 struct fuse_notify_req *nreq;
2383 struct fuse_notify_req *head;
2384
2385 pthread_mutex_lock(&se->lock);
2386 head = &se->notify_list;
2387 for (nreq = head->next; nreq != head; nreq = nreq->next) {
2388 if (nreq->unique == req->unique) {
2389 list_del_nreq(nreq);
2390 break;
2391 }
2392 }
2393 pthread_mutex_unlock(&se->lock);
2394
2395 if (nreq != head)
2396 nreq->reply(nreq, req, nodeid, inarg, buf);
2397}
2398
2399static int send_notify_iov(struct fuse_session *se, int notify_code,
2400 struct iovec *iov, int count)
2401{
2402 struct fuse_out_header out;
2403
2404 if (!se->got_init)
2405 return -ENOTCONN;
2406
2407 out.unique = 0;
2408 out.error = notify_code;
2409 iov[0].iov_base = &out;
2410 iov[0].iov_len = sizeof(struct fuse_out_header);
2411
2412 return fuse_send_msg(se, NULL, iov, count);
2413}
2414
2415int fuse_lowlevel_notify_poll(struct fuse_pollhandle *ph)
2416{
2417 if (ph != NULL) {
2418 struct fuse_notify_poll_wakeup_out outarg;
2419 struct iovec iov[2];
2420
2421 outarg.kh = ph->kh;
2422
2423 iov[1].iov_base = &outarg;
2424 iov[1].iov_len = sizeof(outarg);
2425
2426 return send_notify_iov(ph->se, FUSE_NOTIFY_POLL, iov, 2);
2427 } else {
2428 return 0;
2429 }
2430}
2431
2432int fuse_lowlevel_notify_inval_inode(struct fuse_session *se, fuse_ino_t ino,
2433 off_t off, off_t len)
2434{
2435 struct fuse_notify_inval_inode_out outarg;
2436 struct iovec iov[2];
2437
2438 if (!se)
2439 return -EINVAL;
2440
2441 if (se->conn.proto_minor < 12)
2442 return -ENOSYS;
2443
2444 outarg.ino = ino;
2445 outarg.off = off;
2446 outarg.len = len;
2447
2448 iov[1].iov_base = &outarg;
2449 iov[1].iov_len = sizeof(outarg);
2450
2451 return send_notify_iov(se, FUSE_NOTIFY_INVAL_INODE, iov, 2);
2452}
2453
2473static int fuse_lowlevel_notify_entry(struct fuse_session *se, fuse_ino_t parent,
2474 const char *name, size_t namelen,
2475 enum fuse_notify_entry_flags flags)
2476{
2477 struct fuse_notify_inval_entry_out outarg;
2478 struct iovec iov[3];
2479
2480 if (!se)
2481 return -EINVAL;
2482
2483 if (se->conn.proto_minor < 12)
2484 return -ENOSYS;
2485
2486 outarg.parent = parent;
2487 outarg.namelen = namelen;
2488 outarg.flags = 0;
2489 if (flags & FUSE_LL_EXPIRE_ONLY)
2490 outarg.flags |= FUSE_EXPIRE_ONLY;
2491
2492 iov[1].iov_base = &outarg;
2493 iov[1].iov_len = sizeof(outarg);
2494 iov[2].iov_base = (void *)name;
2495 iov[2].iov_len = namelen + 1;
2496
2497 return send_notify_iov(se, FUSE_NOTIFY_INVAL_ENTRY, iov, 3);
2498}
2499
2500int fuse_lowlevel_notify_inval_entry(struct fuse_session *se, fuse_ino_t parent,
2501 const char *name, size_t namelen)
2502{
2503 return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_INVALIDATE);
2504}
2505
2506int fuse_lowlevel_notify_expire_entry(struct fuse_session *se, fuse_ino_t parent,
2507 const char *name, size_t namelen)
2508{
2509 if (!se)
2510 return -EINVAL;
2511
2512 if (!(se->conn.capable_ext & FUSE_CAP_EXPIRE_ONLY))
2513 return -ENOSYS;
2514
2515 return fuse_lowlevel_notify_entry(se, parent, name, namelen, FUSE_LL_EXPIRE_ONLY);
2516}
2517
2518
2519int fuse_lowlevel_notify_delete(struct fuse_session *se,
2520 fuse_ino_t parent, fuse_ino_t child,
2521 const char *name, size_t namelen)
2522{
2523 struct fuse_notify_delete_out outarg;
2524 struct iovec iov[3];
2525
2526 if (!se)
2527 return -EINVAL;
2528
2529 if (se->conn.proto_minor < 18)
2530 return -ENOSYS;
2531
2532 outarg.parent = parent;
2533 outarg.child = child;
2534 outarg.namelen = namelen;
2535 outarg.padding = 0;
2536
2537 iov[1].iov_base = &outarg;
2538 iov[1].iov_len = sizeof(outarg);
2539 iov[2].iov_base = (void *)name;
2540 iov[2].iov_len = namelen + 1;
2541
2542 return send_notify_iov(se, FUSE_NOTIFY_DELETE, iov, 3);
2543}
2544
2545int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2546 off_t offset, struct fuse_bufvec *bufv,
2547 enum fuse_buf_copy_flags flags)
2548{
2549 struct fuse_out_header out;
2550 struct fuse_notify_store_out outarg;
2551 struct iovec iov[3];
2552 size_t size = fuse_buf_size(bufv);
2553 int res;
2554
2555 if (!se)
2556 return -EINVAL;
2557
2558 if (se->conn.proto_minor < 15)
2559 return -ENOSYS;
2560
2561 out.unique = 0;
2562 out.error = FUSE_NOTIFY_STORE;
2563
2564 outarg.nodeid = ino;
2565 outarg.offset = offset;
2566 outarg.size = size;
2567 outarg.padding = 0;
2568
2569 iov[0].iov_base = &out;
2570 iov[0].iov_len = sizeof(out);
2571 iov[1].iov_base = &outarg;
2572 iov[1].iov_len = sizeof(outarg);
2573
2574 res = fuse_send_data_iov(se, NULL, iov, 2, bufv, flags);
2575 if (res > 0)
2576 res = -res;
2577
2578 return res;
2579}
2580
2581struct fuse_retrieve_req {
2582 struct fuse_notify_req nreq;
2583 void *cookie;
2584};
2585
2586static void fuse_ll_retrieve_reply(struct fuse_notify_req *nreq,
2587 fuse_req_t req, fuse_ino_t ino,
2588 const void *inarg,
2589 const struct fuse_buf *ibuf)
2590{
2591 struct fuse_session *se = req->se;
2592 struct fuse_retrieve_req *rreq =
2593 container_of(nreq, struct fuse_retrieve_req, nreq);
2594 const struct fuse_notify_retrieve_in *arg = inarg;
2595 struct fuse_bufvec bufv = {
2596 .buf[0] = *ibuf,
2597 .count = 1,
2598 };
2599
2600 if (!(bufv.buf[0].flags & FUSE_BUF_IS_FD))
2601 bufv.buf[0].mem = PARAM(arg);
2602
2603 bufv.buf[0].size -= sizeof(struct fuse_in_header) +
2604 sizeof(struct fuse_notify_retrieve_in);
2605
2606 if (bufv.buf[0].size < arg->size) {
2607 fuse_log(FUSE_LOG_ERR, "fuse: retrieve reply: buffer size too small\n");
2608 fuse_reply_none(req);
2609 goto out;
2610 }
2611 bufv.buf[0].size = arg->size;
2612
2613 if (se->op.retrieve_reply) {
2614 se->op.retrieve_reply(req, rreq->cookie, ino,
2615 arg->offset, &bufv);
2616 } else {
2617 fuse_reply_none(req);
2618 }
2619out:
2620 free(rreq);
2621 if ((ibuf->flags & FUSE_BUF_IS_FD) && bufv.idx < bufv.count)
2622 fuse_ll_clear_pipe(se);
2623}
2624
2625int fuse_lowlevel_notify_retrieve(struct fuse_session *se, fuse_ino_t ino,
2626 size_t size, off_t offset, void *cookie)
2627{
2628 struct fuse_notify_retrieve_out outarg;
2629 struct iovec iov[2];
2630 struct fuse_retrieve_req *rreq;
2631 int err;
2632
2633 if (!se)
2634 return -EINVAL;
2635
2636 if (se->conn.proto_minor < 15)
2637 return -ENOSYS;
2638
2639 rreq = malloc(sizeof(*rreq));
2640 if (rreq == NULL)
2641 return -ENOMEM;
2642
2643 pthread_mutex_lock(&se->lock);
2644 rreq->cookie = cookie;
2645 rreq->nreq.unique = se->notify_ctr++;
2646 rreq->nreq.reply = fuse_ll_retrieve_reply;
2647 list_add_nreq(&rreq->nreq, &se->notify_list);
2648 pthread_mutex_unlock(&se->lock);
2649
2650 outarg.notify_unique = rreq->nreq.unique;
2651 outarg.nodeid = ino;
2652 outarg.offset = offset;
2653 outarg.size = size;
2654 outarg.padding = 0;
2655
2656 iov[1].iov_base = &outarg;
2657 iov[1].iov_len = sizeof(outarg);
2658
2659 err = send_notify_iov(se, FUSE_NOTIFY_RETRIEVE, iov, 2);
2660 if (err) {
2661 pthread_mutex_lock(&se->lock);
2662 list_del_nreq(&rreq->nreq);
2663 pthread_mutex_unlock(&se->lock);
2664 free(rreq);
2665 }
2666
2667 return err;
2668}
2669
2670void *fuse_req_userdata(fuse_req_t req)
2671{
2672 return req->se->userdata;
2673}
2674
2675const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
2676{
2677 return &req->ctx;
2678}
2679
2680void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
2681 void *data)
2682{
2683 pthread_mutex_lock(&req->lock);
2684 pthread_mutex_lock(&req->se->lock);
2685 req->u.ni.func = func;
2686 req->u.ni.data = data;
2687 pthread_mutex_unlock(&req->se->lock);
2688 if (req->interrupted && func)
2689 func(req, data);
2690 pthread_mutex_unlock(&req->lock);
2691}
2692
2693int fuse_req_interrupted(fuse_req_t req)
2694{
2695 int interrupted;
2696
2697 pthread_mutex_lock(&req->se->lock);
2698 interrupted = req->interrupted;
2699 pthread_mutex_unlock(&req->se->lock);
2700
2701 return interrupted;
2702}
2703
2704static struct {
2705 void (*func)(fuse_req_t, fuse_ino_t, const void *);
2706 const char *name;
2707} fuse_ll_ops[] = {
2708 [FUSE_LOOKUP] = { do_lookup, "LOOKUP" },
2709 [FUSE_FORGET] = { do_forget, "FORGET" },
2710 [FUSE_GETATTR] = { do_getattr, "GETATTR" },
2711 [FUSE_SETATTR] = { do_setattr, "SETATTR" },
2712 [FUSE_READLINK] = { do_readlink, "READLINK" },
2713 [FUSE_SYMLINK] = { do_symlink, "SYMLINK" },
2714 [FUSE_MKNOD] = { do_mknod, "MKNOD" },
2715 [FUSE_MKDIR] = { do_mkdir, "MKDIR" },
2716 [FUSE_UNLINK] = { do_unlink, "UNLINK" },
2717 [FUSE_RMDIR] = { do_rmdir, "RMDIR" },
2718 [FUSE_RENAME] = { do_rename, "RENAME" },
2719 [FUSE_LINK] = { do_link, "LINK" },
2720 [FUSE_OPEN] = { do_open, "OPEN" },
2721 [FUSE_READ] = { do_read, "READ" },
2722 [FUSE_WRITE] = { do_write, "WRITE" },
2723 [FUSE_STATFS] = { do_statfs, "STATFS" },
2724 [FUSE_RELEASE] = { do_release, "RELEASE" },
2725 [FUSE_FSYNC] = { do_fsync, "FSYNC" },
2726 [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" },
2727 [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" },
2728 [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" },
2729 [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
2730 [FUSE_FLUSH] = { do_flush, "FLUSH" },
2731 [FUSE_INIT] = { do_init, "INIT" },
2732 [FUSE_OPENDIR] = { do_opendir, "OPENDIR" },
2733 [FUSE_READDIR] = { do_readdir, "READDIR" },
2734 [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" },
2735 [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" },
2736 [FUSE_GETLK] = { do_getlk, "GETLK" },
2737 [FUSE_SETLK] = { do_setlk, "SETLK" },
2738 [FUSE_SETLKW] = { do_setlkw, "SETLKW" },
2739 [FUSE_ACCESS] = { do_access, "ACCESS" },
2740 [FUSE_CREATE] = { do_create, "CREATE" },
2741 [FUSE_TMPFILE] = { do_tmpfile, "TMPFILE" },
2742 [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" },
2743 [FUSE_BMAP] = { do_bmap, "BMAP" },
2744 [FUSE_IOCTL] = { do_ioctl, "IOCTL" },
2745 [FUSE_POLL] = { do_poll, "POLL" },
2746 [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" },
2747 [FUSE_DESTROY] = { do_destroy, "DESTROY" },
2748 [FUSE_NOTIFY_REPLY] = { (void *) 1, "NOTIFY_REPLY" },
2749 [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
2750 [FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS"},
2751 [FUSE_RENAME2] = { do_rename2, "RENAME2" },
2752 [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
2753 [FUSE_LSEEK] = { do_lseek, "LSEEK" },
2754 [CUSE_INIT] = { cuse_lowlevel_init, "CUSE_INIT" },
2755};
2756
2757/*
2758 * For ABI compatibility we cannot allow higher values than CUSE_INIT.
2759 * Without ABI compatibility we could use the size of the array.
2760 * #define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2761 */
2762#define FUSE_MAXOP (CUSE_INIT + 1)
2763
2764static const char *opname(enum fuse_opcode opcode)
2765{
2766 if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name)
2767 return "???";
2768 else
2769 return fuse_ll_ops[opcode].name;
2770}
2771
2772static int fuse_ll_copy_from_pipe(struct fuse_bufvec *dst,
2773 struct fuse_bufvec *src)
2774{
2775 ssize_t res = fuse_buf_copy(dst, src, 0);
2776 if (res < 0) {
2777 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n", strerror(-res));
2778 return res;
2779 }
2780 if ((size_t)res < fuse_buf_size(dst)) {
2781 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: short read\n");
2782 return -1;
2783 }
2784 return 0;
2785}
2786
2787void fuse_session_process_buf(struct fuse_session *se,
2788 const struct fuse_buf *buf)
2789{
2790 fuse_session_process_buf_internal(se, buf, NULL);
2791}
2792
2793/* libfuse internal handler */
2794void fuse_session_process_buf_internal(struct fuse_session *se,
2795 const struct fuse_buf *buf, struct fuse_chan *ch)
2796{
2797 const size_t write_header_size = sizeof(struct fuse_in_header) +
2798 sizeof(struct fuse_write_in);
2799 struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
2800 struct fuse_bufvec tmpbuf = FUSE_BUFVEC_INIT(write_header_size);
2801 struct fuse_in_header *in;
2802 const void *inarg;
2803 struct fuse_req *req;
2804 void *mbuf = NULL;
2805 int err;
2806 int res;
2807
2808 if (buf->flags & FUSE_BUF_IS_FD) {
2809 if (buf->size < tmpbuf.buf[0].size)
2810 tmpbuf.buf[0].size = buf->size;
2811
2812 mbuf = malloc(tmpbuf.buf[0].size);
2813 if (mbuf == NULL) {
2814 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate header\n");
2815 goto clear_pipe;
2816 }
2817 tmpbuf.buf[0].mem = mbuf;
2818
2819 res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2820 if (res < 0)
2821 goto clear_pipe;
2822
2823 in = mbuf;
2824 } else {
2825 in = buf->mem;
2826 }
2827
2828 if (se->debug) {
2829 fuse_log(FUSE_LOG_DEBUG,
2830 "unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
2831 (unsigned long long) in->unique,
2832 opname((enum fuse_opcode) in->opcode), in->opcode,
2833 (unsigned long long) in->nodeid, buf->size, in->pid);
2834 }
2835
2836 req = fuse_ll_alloc_req(se);
2837 if (req == NULL) {
2838 struct fuse_out_header out = {
2839 .unique = in->unique,
2840 .error = -ENOMEM,
2841 };
2842 struct iovec iov = {
2843 .iov_base = &out,
2844 .iov_len = sizeof(struct fuse_out_header),
2845 };
2846
2847 fuse_send_msg(se, ch, &iov, 1);
2848 goto clear_pipe;
2849 }
2850
2851 req->unique = in->unique;
2852 req->ctx.uid = in->uid;
2853 req->ctx.gid = in->gid;
2854 req->ctx.pid = in->pid;
2855 req->ch = ch ? fuse_chan_get(ch) : NULL;
2856
2857 err = EIO;
2858 if (!se->got_init) {
2859 enum fuse_opcode expected;
2860
2861 expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
2862 if (in->opcode != expected)
2863 goto reply_err;
2864 } else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT)
2865 goto reply_err;
2866
2867 err = EACCES;
2868 /* Implement -o allow_root */
2869 if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
2870 in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
2871 in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
2872 in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
2873 in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
2874 in->opcode != FUSE_NOTIFY_REPLY &&
2875 in->opcode != FUSE_READDIRPLUS)
2876 goto reply_err;
2877
2878 err = ENOSYS;
2879 if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func)
2880 goto reply_err;
2881 /* Do not process interrupt request */
2882 if (se->conn.no_interrupt && in->opcode == FUSE_INTERRUPT) {
2883 if (se->debug)
2884 fuse_log(FUSE_LOG_DEBUG, "FUSE_INTERRUPT: reply to kernel to disable interrupt\n");
2885 goto reply_err;
2886 }
2887 if (!se->conn.no_interrupt && in->opcode != FUSE_INTERRUPT) {
2888 struct fuse_req *intr;
2889 pthread_mutex_lock(&se->lock);
2890 intr = check_interrupt(se, req);
2891 list_add_req(req, &se->list);
2892 pthread_mutex_unlock(&se->lock);
2893 if (intr)
2894 fuse_reply_err(intr, EAGAIN);
2895 }
2896
2897 if ((buf->flags & FUSE_BUF_IS_FD) && write_header_size < buf->size &&
2898 (in->opcode != FUSE_WRITE || !se->op.write_buf) &&
2899 in->opcode != FUSE_NOTIFY_REPLY) {
2900 void *newmbuf;
2901
2902 err = ENOMEM;
2903 newmbuf = realloc(mbuf, buf->size);
2904 if (newmbuf == NULL)
2905 goto reply_err;
2906 mbuf = newmbuf;
2907
2908 tmpbuf = FUSE_BUFVEC_INIT(buf->size - write_header_size);
2909 tmpbuf.buf[0].mem = (char *)mbuf + write_header_size;
2910
2911 res = fuse_ll_copy_from_pipe(&tmpbuf, &bufv);
2912 err = -res;
2913 if (res < 0)
2914 goto reply_err;
2915
2916 in = mbuf;
2917 }
2918
2919 inarg = (void *) &in[1];
2920 if (in->opcode == FUSE_WRITE && se->op.write_buf)
2921 do_write_buf(req, in->nodeid, inarg, buf);
2922 else if (in->opcode == FUSE_NOTIFY_REPLY)
2923 do_notify_reply(req, in->nodeid, inarg, buf);
2924 else
2925 fuse_ll_ops[in->opcode].func(req, in->nodeid, inarg);
2926
2927out_free:
2928 free(mbuf);
2929 return;
2930
2931reply_err:
2932 fuse_reply_err(req, err);
2933clear_pipe:
2934 if (buf->flags & FUSE_BUF_IS_FD)
2935 fuse_ll_clear_pipe(se);
2936 goto out_free;
2937}
2938
2939#define LL_OPTION(n,o,v) \
2940 { n, offsetof(struct fuse_session, o), v }
2941
2942static const struct fuse_opt fuse_ll_opts[] = {
2943 LL_OPTION("debug", debug, 1),
2944 LL_OPTION("-d", debug, 1),
2945 LL_OPTION("--debug", debug, 1),
2946	LL_OPTION("allow_root", deny_others, 1),
2947	FUSE_OPT_END
2948};
2949
2950void fuse_lowlevel_version(void)
2951{
2952 printf("using FUSE kernel interface version %i.%i\n",
2953 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
2954 fuse_mount_version();
2955}
2956
2957void fuse_lowlevel_help(void)
2958{
2959 /* These are not all options, but the ones that are
2960 potentially of interest to an end-user */
2961 printf(
2962" -o allow_other allow access by all users\n"
2963" -o allow_root allow access by root\n"
2964" -o auto_unmount auto unmount on process termination\n");
2965}
2966
2967void fuse_session_destroy(struct fuse_session *se)
2968{
2969 struct fuse_ll_pipe *llp;
2970
2971 if (se->got_init && !se->got_destroy) {
2972 if (se->op.destroy)
2973 se->op.destroy(se->userdata);
2974 }
2975 llp = pthread_getspecific(se->pipe_key);
2976 if (llp != NULL)
2977 fuse_ll_pipe_free(llp);
2978 pthread_key_delete(se->pipe_key);
2979 pthread_mutex_destroy(&se->lock);
2980 free(se->cuse_data);
2981 if (se->fd != -1)
2982 close(se->fd);
2983 if (se->io != NULL)
2984 free(se->io);
2985 destroy_mount_opts(se->mo);
2986 free(se);
2987}
2988
2989
2990static void fuse_ll_pipe_destructor(void *data)
2991{
2992 struct fuse_ll_pipe *llp = data;
2993 fuse_ll_pipe_free(llp);
2994}
2995
2996void fuse_buf_free(struct fuse_buf *buf)
2997{
2998 if (buf->mem == NULL)
2999 return;
3000
3001 size_t write_header_sz =
3002 sizeof(struct fuse_in_header) + sizeof(struct fuse_write_in);
3003
3004 char *ptr = (char *)buf->mem - pagesize + write_header_sz;
3005 free(ptr);
3006 buf->mem = NULL;
3007}
3008
3009/*
3010 * This is used to allocate buffers that hold fuse requests
3011 */
3012static void *buf_alloc(size_t size, bool internal)
3013{
3014 /*
3015 * For libfuse internal caller add in alignment. That cannot be done
3016 * for an external caller, as it is not guaranteed that the external
3017 * caller frees the raw pointer.
3018 */
3019 if (internal) {
3020 size_t write_header_sz = sizeof(struct fuse_in_header) +
3021 sizeof(struct fuse_write_in);
3022 size_t new_size = ROUND_UP(size + write_header_sz, pagesize);
3023
3024 char *buf = aligned_alloc(pagesize, new_size);
3025 if (buf == NULL)
3026 return NULL;
3027
3028 buf += pagesize - write_header_sz;
3029
3030 return buf;
3031 } else {
3032 return malloc(size);
3033 }
3034}
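/*
 * Layout sketch added for clarity (not in the original source), for the
 * internal allocation path above:
 *
 *   page boundary                                         page boundary
 *   |<--- pagesize - write_header_sz --->|<-- header -->|<-- payload ... -->|
 *   ^ aligned_alloc() result             ^ returned buf->mem
 *
 * The fuse_in_header + fuse_write_in of an incoming WRITE end exactly on the
 * next page boundary, so the write payload that follows is page aligned.
 * fuse_buf_free() above undoes the offset with
 *     (char *)buf->mem - pagesize + write_header_sz
 * before calling free().
 */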
3035
3036/*
3037 * @param internal true if called from libfuse internal code
3038 */
3039static int _fuse_session_receive_buf(struct fuse_session *se,
3040 struct fuse_buf *buf, struct fuse_chan *ch,
3041 bool internal)
3042{
3043 int err;
3044 ssize_t res;
3045 size_t bufsize = se->bufsize;
3046#ifdef HAVE_SPLICE
3047 struct fuse_ll_pipe *llp;
3048 struct fuse_buf tmpbuf;
3049
3050 if (se->conn.proto_minor < 14 ||
3051 !(se->conn.want_ext & FUSE_CAP_SPLICE_READ))
3052 goto fallback;
3053
3054 llp = fuse_ll_get_pipe(se);
3055 if (llp == NULL)
3056 goto fallback;
3057
3058 if (llp->size < bufsize) {
3059 if (llp->can_grow) {
3060 res = fcntl(llp->pipe[0], F_SETPIPE_SZ, bufsize);
3061 if (res == -1) {
3062 llp->can_grow = 0;
3063 res = grow_pipe_to_max(llp->pipe[0]);
3064 if (res > 0)
3065 llp->size = res;
3066 goto fallback;
3067 }
3068 llp->size = res;
3069 }
3070 if (llp->size < bufsize)
3071 goto fallback;
3072 }
3073
3074 if (se->io != NULL && se->io->splice_receive != NULL) {
3075 res = se->io->splice_receive(ch ? ch->fd : se->fd, NULL,
3076 llp->pipe[1], NULL, bufsize, 0,
3077 se->userdata);
3078 } else {
3079 res = splice(ch ? ch->fd : se->fd, NULL, llp->pipe[1], NULL,
3080 bufsize, 0);
3081 }
3082 err = errno;
3083
3084 if (fuse_session_exited(se))
3085 return 0;
3086
3087 if (res == -1) {
3088 if (err == ENODEV) {
3089 /* Filesystem was unmounted, or connection was aborted
3090			   via /sys/fs/fuse/connections */
3091			fuse_session_exit(se);
3092 return 0;
3093 }
3094 if (err != EINTR && err != EAGAIN)
3095 perror("fuse: splice from device");
3096 return -err;
3097 }
3098
3099 if (res < sizeof(struct fuse_in_header)) {
3100 fuse_log(FUSE_LOG_ERR, "short splice from fuse device\n");
3101 return -EIO;
3102 }
3103
3104 tmpbuf = (struct fuse_buf){
3105 .size = res,
3106 .flags = FUSE_BUF_IS_FD,
3107 .fd = llp->pipe[0],
3108 };
3109
3110 /*
3111 * Don't bother with zero copy for small requests.
3112	 * fuse_loop_mt() needs to check for FORGET so this is more than
3113 * just an optimization.
3114 */
3115 if (res < sizeof(struct fuse_in_header) + sizeof(struct fuse_write_in) +
3116 pagesize) {
3117 struct fuse_bufvec src = { .buf[0] = tmpbuf, .count = 1 };
3118 struct fuse_bufvec dst = { .count = 1 };
3119
3120 if (!buf->mem) {
3121 buf->mem = buf_alloc(se->bufsize, internal);
3122 if (!buf->mem) {
3123 fuse_log(
3124 FUSE_LOG_ERR,
3125 "fuse: failed to allocate read buffer\n");
3126 return -ENOMEM;
3127 }
3128 buf->mem_size = se->bufsize;
3129 if (internal)
3130 se->buf_reallocable = true;
3131 }
3132 buf->size = se->bufsize;
3133 buf->flags = 0;
3134 dst.buf[0] = *buf;
3135
3136 res = fuse_buf_copy(&dst, &src, 0);
3137 if (res < 0) {
3138 fuse_log(FUSE_LOG_ERR, "fuse: copy from pipe: %s\n",
3139 strerror(-res));
3140 fuse_ll_clear_pipe(se);
3141 return res;
3142 }
3143 if (res < tmpbuf.size) {
3144 fuse_log(FUSE_LOG_ERR,
3145 "fuse: copy from pipe: short read\n");
3146 fuse_ll_clear_pipe(se);
3147 return -EIO;
3148 }
3149 assert(res == tmpbuf.size);
3150
3151 } else {
3152 /* Don't overwrite buf->mem, as that would cause a leak */
3153 buf->fd = tmpbuf.fd;
3154 buf->flags = tmpbuf.flags;
3155 }
3156 buf->size = tmpbuf.size;
3157
3158 return res;
3159
3160fallback:
3161#endif
3162 if (!buf->mem) {
3163 buf->mem = buf_alloc(se->bufsize, internal);
3164 if (!buf->mem) {
3165 fuse_log(FUSE_LOG_ERR,
3166 "fuse: failed to allocate read buffer\n");
3167 return -ENOMEM;
3168 }
3169 buf->mem_size = se->bufsize;
3170 if (internal)
3171 se->buf_reallocable = true;
3172 }
3173
3174restart:
3175 if (se->buf_reallocable)
3176 bufsize = buf->mem_size;
3177 if (se->io != NULL) {
3178 /* se->io->read is never NULL if se->io is not NULL as
3179 specified by fuse_session_custom_io()*/
3180 res = se->io->read(ch ? ch->fd : se->fd, buf->mem, bufsize,
3181 se->userdata);
3182 } else {
3183 res = read(ch ? ch->fd : se->fd, buf->mem, bufsize);
3184 }
3185 err = errno;
3186
3187 if (fuse_session_exited(se))
3188 return 0;
3189 if (res == -1) {
3190 if (err == EINVAL && se->buf_reallocable &&
3191 se->bufsize > buf->mem_size) {
3192 void *newbuf = buf_alloc(se->bufsize, internal);
3193 if (!newbuf) {
3194 fuse_log(
3195 FUSE_LOG_ERR,
3196 "fuse: failed to (re)allocate read buffer\n");
3197 return -ENOMEM;
3198 }
3199 fuse_buf_free(buf);
3200 buf->mem = newbuf;
3201 buf->mem_size = se->bufsize;
3202 se->buf_reallocable = true;
3203 goto restart;
3204 }
3205
3206 /* ENOENT means the operation was interrupted, it's safe
3207 to restart */
3208 if (err == ENOENT)
3209 goto restart;
3210
3211 if (err == ENODEV) {
3212 /* Filesystem was unmounted, or connection was aborted
3213			   via /sys/fs/fuse/connections */
3214			fuse_session_exit(se);
3215 return 0;
3216 }
3217 /* Errors occurring during normal operation: EINTR (read
3218 interrupted), EAGAIN (nonblocking I/O), ENODEV (filesystem
3219 umounted) */
3220 if (err != EINTR && err != EAGAIN)
3221 perror("fuse: reading device");
3222 return -err;
3223 }
3224 if ((size_t)res < sizeof(struct fuse_in_header)) {
3225 fuse_log(FUSE_LOG_ERR, "short read on fuse device\n");
3226 return -EIO;
3227 }
3228
3229 buf->size = res;
3230
3231 return res;
3232}
3233
3234int fuse_session_receive_buf(struct fuse_session *se, struct fuse_buf *buf)
3235{
3236 return _fuse_session_receive_buf(se, buf, NULL, false);
3237}
3238
3239/* libfuse internal handler */
3240int fuse_session_receive_buf_internal(struct fuse_session *se,
3241 struct fuse_buf *buf,
3242 struct fuse_chan *ch)
3243{
3244 return _fuse_session_receive_buf(se, buf, ch, true);
3245}
3246
3247struct fuse_session *
3248fuse_session_new_versioned(struct fuse_args *args,
3249 const struct fuse_lowlevel_ops *op, size_t op_size,
3250 struct libfuse_version *version, void *userdata);
3251struct fuse_session *
3252fuse_session_new_versioned(struct fuse_args *args,
3253 const struct fuse_lowlevel_ops *op, size_t op_size,
3254 struct libfuse_version *version, void *userdata)
3255{
3256 int err;
3257 struct fuse_session *se;
3258 struct mount_opts *mo;
3259
3260 if (sizeof(struct fuse_lowlevel_ops) < op_size) {
3261 fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
3262 op_size = sizeof(struct fuse_lowlevel_ops);
3263 }
3264
3265 if (args->argc == 0) {
3266 fuse_log(FUSE_LOG_ERR, "fuse: empty argv passed to fuse_session_new().\n");
3267 return NULL;
3268 }
3269
3270 se = (struct fuse_session *) calloc(1, sizeof(struct fuse_session));
3271 if (se == NULL) {
3272 fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
3273 goto out1;
3274 }
3275 se->fd = -1;
3276 se->conn.max_write = FUSE_DEFAULT_MAX_PAGES_LIMIT * getpagesize();
3277 se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
3278 se->conn.max_readahead = UINT_MAX;
3279
3280 /* Parse options */
3281 if(fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1)
3282 goto out2;
3283 if(se->deny_others) {
3284 /* Allowing access only by root is done by instructing
3285 * kernel to allow access by everyone, and then restricting
3286 * access to root and mountpoint owner in libfuse.
3287 */
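		/* The userspace half of this restriction is the
		 * se->deny_others check in fuse_session_process_buf_internal()
		 * above, which rejects most requests from uids other than root
		 * and the mount owner with EACCES. */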
3288 // We may be adding the option a second time, but
3289 // that doesn't hurt.
3290 if(fuse_opt_add_arg(args, "-oallow_other") == -1)
3291 goto out2;
3292 }
3293 mo = parse_mount_opts(args);
3294 if (mo == NULL)
3295 goto out3;
3296
3297 if(args->argc == 1 &&
3298 args->argv[0][0] == '-') {
3299 fuse_log(FUSE_LOG_ERR, "fuse: warning: argv[0] looks like an option, but "
3300 "will be ignored\n");
3301 } else if (args->argc != 1) {
3302 int i;
3303 fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
3304 for(i = 1; i < args->argc-1; i++)
3305 fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
3306 fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
3307 goto out4;
3308 }
3309
3310 if (se->debug)
3311 fuse_log(FUSE_LOG_DEBUG, "FUSE library version: %s\n", PACKAGE_VERSION);
3312
3313 list_init_req(&se->list);
3314 list_init_req(&se->interrupts);
3315 list_init_nreq(&se->notify_list);
3316 se->notify_ctr = 1;
3317 pthread_mutex_init(&se->lock, NULL);
3318
3319 err = pthread_key_create(&se->pipe_key, fuse_ll_pipe_destructor);
3320 if (err) {
3321 fuse_log(FUSE_LOG_ERR, "fuse: failed to create thread specific key: %s\n",
3322 strerror(err));
3323 goto out5;
3324 }
3325
3326 memcpy(&se->op, op, op_size);
3327 se->owner = getuid();
3328 se->userdata = userdata;
3329
3330 se->mo = mo;
3331
3332	/* The FUSE server application should pass the libfuse version it was
3333	 * compiled against. If a libfuse version accidentally introduces an
3334	 * ABI incompatibility, it might be possible to 'fix' that at run time
3335	 * by checking the version numbers.
3336	 */
3337 se->version = *version;
3338
3339 return se;
3340
3341out5:
3342 pthread_mutex_destroy(&se->lock);
3343out4:
3344 fuse_opt_free_args(args);
3345out3:
3346 if (mo != NULL)
3347 destroy_mount_opts(mo);
3348out2:
3349 free(se);
3350out1:
3351 return NULL;
3352}
3353
3354struct fuse_session *fuse_session_new_30(struct fuse_args *args,
3355 const struct fuse_lowlevel_ops *op,
3356 size_t op_size, void *userdata);
3357struct fuse_session *fuse_session_new_30(struct fuse_args *args,
3358 const struct fuse_lowlevel_ops *op,
3359 size_t op_size,
3360 void *userdata)
3361{
3362 /* unknown version */
3363 struct libfuse_version version = { 0 };
3364
3365 return fuse_session_new_versioned(args, op, op_size, &version,
3366 userdata);
3367}
3368
3369FUSE_SYMVER("fuse_session_custom_io_317", "fuse_session_custom_io@@FUSE_3.17")
3370int fuse_session_custom_io_317(struct fuse_session *se,
3371 const struct fuse_custom_io *io, size_t op_size, int fd)
3372{
3373 if (sizeof(struct fuse_custom_io) < op_size) {
3374 fuse_log(FUSE_LOG_ERR, "fuse: warning: library too old, some operations may not work\n");
3375 op_size = sizeof(struct fuse_custom_io);
3376 }
3377
3378 if (fd < 0) {
3379 fuse_log(FUSE_LOG_ERR, "Invalid file descriptor value %d passed to "
3380 "fuse_session_custom_io()\n", fd);
3381 return -EBADF;
3382 }
3383 if (io == NULL) {
3384 fuse_log(FUSE_LOG_ERR, "No custom IO passed to "
3385 "fuse_session_custom_io()\n");
3386 return -EINVAL;
3387 } else if (io->read == NULL || io->writev == NULL) {
3388 /* If the user provides their own file descriptor, we can't
3389 guarantee that the default behavior of the io operations made
3390 in libfuse will function properly. Therefore, we enforce the
3391 user to implement these io operations when using custom io. */
3392 fuse_log(FUSE_LOG_ERR, "io passed to fuse_session_custom_io() must "
3393			"implement both io->read() and io->writev()\n");
3394 return -EINVAL;
3395 }
3396
3397 se->io = calloc(1, sizeof(struct fuse_custom_io));
3398 if (se->io == NULL) {
3399 fuse_log(FUSE_LOG_ERR, "Failed to allocate memory for custom io. "
3400 "Error: %s\n", strerror(errno));
3401 return -errno;
3402 }
3403
3404 se->fd = fd;
3405 memcpy(se->io, io, op_size);
3406 return 0;
3407}
3408
3409int fuse_session_custom_io_30(struct fuse_session *se,
3410 const struct fuse_custom_io *io, int fd);
3411FUSE_SYMVER("fuse_session_custom_io_30", "fuse_session_custom_io@FUSE_3.0")
3412int fuse_session_custom_io_30(struct fuse_session *se,
3413 const struct fuse_custom_io *io, int fd)
3414{
3415 return fuse_session_custom_io_317(se, io,
3416 offsetof(struct fuse_custom_io, clone_fd), fd);
3417}
3418
3419int fuse_session_mount(struct fuse_session *se, const char *mountpoint)
3420{
3421 int fd;
3422
3423 if (mountpoint == NULL) {
3424 fuse_log(FUSE_LOG_ERR, "Invalid null-ptr mountpoint!\n");
3425 return -1;
3426 }
3427
3428 /*
3429 * Make sure file descriptors 0, 1 and 2 are open, otherwise chaos
3430 * would ensue.
3431 */
3432 do {
3433 fd = open("/dev/null", O_RDWR);
3434 if (fd > 2)
3435 close(fd);
3436 } while (fd >= 0 && fd <= 2);
3437
3438 /*
3439 * To allow FUSE daemons to run without privileges, the caller may open
3440 * /dev/fuse before launching the file system and pass on the file
3441 * descriptor by specifying /dev/fd/N as the mount point. Note that the
3442 * parent process takes care of performing the mount in this case.
3443 */
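	/*
	 * Illustrative sketch (not from the original source) of how a
	 * privileged launcher might hand over a pre-opened device:
	 *
	 *     int fd = open("/dev/fuse", O_RDWR);
	 *     // perform the mount(2) itself (passing "fd=<fd>" in the mount
	 *     // options), then drop privileges and start the daemon with
	 *     char mp[32];
	 *     snprintf(mp, sizeof(mp), "/dev/fd/%d", fd);
	 *     // ... 'mp' as its mountpoint argument.
	 *
	 * fuse_mnt_parse_fuse_fd() below recovers the numeric descriptor from
	 * that string.
	 */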
3444 fd = fuse_mnt_parse_fuse_fd(mountpoint);
3445 if (fd != -1) {
3446 if (fcntl(fd, F_GETFD) == -1) {
3447 fuse_log(FUSE_LOG_ERR,
3448 "fuse: Invalid file descriptor /dev/fd/%u\n",
3449 fd);
3450 return -1;
3451 }
3452 se->fd = fd;
3453 return 0;
3454 }
3455
3456 /* Open channel */
3457 fd = fuse_kern_mount(mountpoint, se->mo);
3458 if (fd == -1)
3459 return -1;
3460 se->fd = fd;
3461
3462 /* Save mountpoint */
3463 se->mountpoint = strdup(mountpoint);
3464 if (se->mountpoint == NULL)
3465 goto error_out;
3466
3467 return 0;
3468
3469error_out:
3470 fuse_kern_unmount(mountpoint, fd);
3471 return -1;
3472}
3473
3474int fuse_session_fd(struct fuse_session *se)
3475{
3476 return se->fd;
3477}
3478
3479void fuse_session_unmount(struct fuse_session *se)
3480{
3481 if (se->mountpoint != NULL) {
3482 fuse_kern_unmount(se->mountpoint, se->fd);
3483 se->fd = -1;
3484 free(se->mountpoint);
3485 se->mountpoint = NULL;
3486 }
3487}
3488
3489#ifdef linux
3490int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3491{
3492 char *buf;
3493 size_t bufsize = 1024;
3494 char path[128];
3495 int ret;
3496 int fd;
3497 unsigned long pid = req->ctx.pid;
3498 char *s;
3499
3500 sprintf(path, "/proc/%lu/task/%lu/status", pid, pid);
3501
3502retry:
3503 buf = malloc(bufsize);
3504 if (buf == NULL)
3505 return -ENOMEM;
3506
3507 ret = -EIO;
3508 fd = open(path, O_RDONLY);
3509 if (fd == -1)
3510 goto out_free;
3511
3512 ret = read(fd, buf, bufsize);
3513 close(fd);
3514 if (ret < 0) {
3515 ret = -EIO;
3516 goto out_free;
3517 }
3518
3519 if ((size_t)ret == bufsize) {
3520 free(buf);
3521 bufsize *= 4;
3522 goto retry;
3523 }
3524
3525 buf[ret] = '\0';
3526 ret = -EIO;
3527 s = strstr(buf, "\nGroups:");
3528 if (s == NULL)
3529 goto out_free;
3530
3531 s += 8;
3532 ret = 0;
3533 while (1) {
3534 char *end;
3535 unsigned long val = strtoul(s, &end, 0);
3536 if (end == s)
3537 break;
3538
3539 s = end;
3540 if (ret < size)
3541 list[ret] = val;
3542 ret++;
3543 }
3544
3545out_free:
3546 free(buf);
3547 return ret;
3548}
3549#else /* linux */
3550/*
3551 * This is currently not implemented on systems other than Linux...
3552 */
3553int fuse_req_getgroups(fuse_req_t req, int size, gid_t list[])
3554{
3555 (void) req; (void) size; (void) list;
3556 return -ENOSYS;
3557}
3558#endif
3559
3560/* Prevent spurious data race warning - we don't care
3561 * about races for this flag */
3562__attribute__((no_sanitize_thread))
3563void fuse_session_exit(struct fuse_session *se)
3564{
3565 se->exited = 1;
3566}
3567
3568__attribute__((no_sanitize_thread))
3569void fuse_session_reset(struct fuse_session *se)
3570{
3571 se->exited = 0;
3572 se->error = 0;
3573}
3574
3575__attribute__((no_sanitize_thread))
3576int fuse_session_exited(struct fuse_session *se)
3577{
3578 return se->exited;
3579}