-rw-r--r--  src/bindfs.1          |   3
-rw-r--r--  src/bindfs.c          |  34
-rw-r--r--  src/misc.c            |  57
-rw-r--r--  src/misc.h            |  27
-rw-r--r--  src/userinfo.c        | 310
-rw-r--r--  src/userinfo.h        |   1
-rwxr-xr-x  tests/common.rb       |  10
-rwxr-xr-x  tests/test_bindfs.rb  |  28
8 files changed, 410 insertions, 60 deletions
diff --git a/src/bindfs.1 b/src/bindfs.1
index 9a03199..cd822bc 100644
--- a/src/bindfs.1
+++ b/src/bindfs.1
@@ -349,6 +349,9 @@ MacFuse caches file contents by default. This means that changes in source
 files are not always immediately visible under the mount point.
 \fB\-o nolocalcaches\fP can be used to disable the cache.
 
+When using \fB\-\-mirror[-only]\fP on a group, bindfs won't see changes to the group's member list.
+Sending bindfs a \fBSIGUSR1\fP signal will make it reread the user database.
+
 .SH BUGS
 
 Please report to the issue tracker on the project home page at
diff --git a/src/bindfs.c b/src/bindfs.c
index 1726adf..401a0ae 100644
--- a/src/bindfs.c
+++ b/src/bindfs.c
@@ -55,6 +55,7 @@
 #include <pwd.h>
 #include <grp.h>
 #include <limits.h>
+#include <signal.h>
 #ifdef HAVE_SETXATTR
 #include <sys/xattr.h>
 #endif
@@ -150,7 +151,6 @@ static int getattr_common(const char *path, struct stat *stbuf);
 
 /* Chowns a new file if necessary. */
 static void chown_new_file(const char *path, struct fuse_context *fc, int (*chown_func)(const char*, uid_t, gid_t));
-
 /* FUSE callbacks */
 static void *bindfs_init();
 static void bindfs_destroy(void *private_data);
@@ -189,12 +189,18 @@ static int bindfs_fsync(const char *path, int isdatasync,
 
 static void print_usage(const char *progname);
+
 static int process_option(void *data, const char *arg, int key, struct fuse_args *outargs);
 static int parse_mirrored_users(char* mirror);
 static int parse_user_map(UserMap *map, UserMap *reverse_map, char *spec);
-static char* get_working_dir();
+static char *get_working_dir();
 static void maybe_stdout_stderr_to_file();
+
+/* Sets up handling of SIGUSR1. */
+static void setup_signal_handling();
+static void signal_handler(int sig);
+
 static void atexit_func();
 
 static int is_mirroring_enabled()
@@ -312,6 +318,8 @@ static void chown_new_file(const char *path, struct fuse_context *fc, int (*chow
     }
 }
 
+
+
 static void *bindfs_init()
 {
     assert(settings.permchain != NULL);
@@ -1240,7 +1248,7 @@ static void maybe_stdout_stderr_to_file()
     strcat(path, "/");
     strcat(path, filename);
 
-    fd = open(path, O_CREAT | O_WRONLY);
+    fd = open(path, O_CREAT | O_WRONLY, 0666);
     free(path);
 
     fchmod(fd, 0777 & ~settings.original_umask);
@@ -1251,7 +1259,7 @@ static void maybe_stdout_stderr_to_file()
 #endif
 }
 
-static char* get_working_dir()
+static char *get_working_dir()
 {
     size_t buf_size = 4096;
     char* buf = malloc(buf_size);
@@ -1262,6 +1270,21 @@ static char* get_working_dir()
     return buf;
 }
 
+static void setup_signal_handling()
+{
+    struct sigaction sa;
+    sa.sa_handler = signal_handler;
+    sigemptyset(&sa.sa_mask);
+    sa.sa_flags = 0;
+
+    sigaction(SIGUSR1, &sa, NULL);
+}
+
+static void signal_handler(int sig)
+{
+    invalidate_user_cache();
+}
+
 static void atexit_func()
 {
     free(settings.original_working_dir);
@@ -1505,6 +1528,9 @@ int main(int argc, char *argv[])
         bindfs_oper.removexattr = NULL;
     }
 
+    /* fuse_main will daemonize by fork()'ing. The signal handler will persist. */
+    setup_signal_handling();
+
     fuse_main_return = fuse_main(args.argc, args.argv, &bindfs_oper);
 
     fuse_opt_free_args(&args);
diff --git a/src/misc.c b/src/misc.c
--- a/src/misc.c
+++ b/src/misc.c
@@ -1,5 +1,5 @@
 /*
-    Copyright 2006,2007,2008 Martin Pärtel <martin.partel@gmail.com>
+    Copyright 2006,2007,2008,2012 Martin Pärtel <martin.partel@gmail.com>
 
     This file is part of bindfs.
@@ -81,3 +81,58 @@ const char *my_basename(const char *path)
     else
         return path;
 }
+
+void grow_array_impl(void **array, int* capacity, int member_size)
+{
+    int new_cap = *capacity;
+    if (new_cap == 0) {
+        new_cap = 8;
+    } else {
+        new_cap *= 2;
+    }
+
+    *array = realloc(*array, new_cap * member_size);
+    *capacity = new_cap;
+}
+
+void init_arena(struct arena *a, int initial_capacity)
+{
+    a->size = 0;
+    a->capacity = initial_capacity;
+    if (initial_capacity > 0) {
+        a->ptr = (char *)malloc(initial_capacity);
+    } else {
+        a->ptr = NULL;
+    }
+}
+
+void grow_arena(struct arena *a, int amount)
+{
+    int new_cap;
+
+    a->size += amount;
+    while (a->size >= a->capacity) {
+        new_cap = a->capacity;
+        if (new_cap == 0) {
+            new_cap = 8;
+        } else {
+            new_cap *= 2;
+        }
+        a->ptr = (char *)realloc(a->ptr, new_cap);
+        a->capacity = new_cap;
+    }
+}
+
+int append_to_arena(struct arena *a, void *src, int src_size)
+{
+    int dest = a->size;
+    grow_arena(a, src_size);
+    memcpy(&a->ptr[dest], src, src_size);
+    return dest;
+}
+
+void free_arena(struct arena *a)
+{
+    free(a->ptr);
+    init_arena(a, 0);
+}
diff --git a/src/misc.h b/src/misc.h
--- a/src/misc.h
+++ b/src/misc.h
@@ -1,5 +1,5 @@
 /*
-    Copyright 2006,2007,2008 Martin Pärtel <martin.partel@gmail.com>
+    Copyright 2006,2007,2008,2012 Martin Pärtel <martin.partel@gmail.com>
 
     This file is part of bindfs.
@@ -20,6 +20,7 @@
 #ifndef INC_BINDFS_MISC_H
 #define INC_BINDFS_MISC_H
 
+
 /* Counts the number of times ch occurs in s. */
 int count_chars(const char *s, char ch);
 
@@ -37,4 +38,28 @@ char *strdup_until(const char *s, const char *endchars);
    Returns NULL if path is NULL. */
 const char *my_basename(const char *path);
 
+/* Reallocs `*array` (may be NULL) to be at least one larger
+   than `*capacity` (may be 0) and stores the new capacity
+   in `*capacity`. */
+#define grow_array(array, capacity, member_size) grow_array_impl((void**)(array), (capacity), (member_size))
+void grow_array_impl(void **array, int* capacity, int member_size);
+
+
+/* Simple arena allocation for when it's convenient to
+   grow multiple times and deallocate all at once. */
+struct arena {
+    char *ptr;
+    int size;
+    int capacity;
+};
+
+#define ARENA_INITIALIZER { NULL, 0, 0 }
+
+void init_arena(struct arena *a, int initial_capacity);
+void grow_arena(struct arena *a, int amount);
+int append_to_arena(struct arena *a, void *src, int src_size);
+void free_arena(struct arena *a);
+
+#define ARENA_GET(a, offset) (&(a).ptr[(offset)])
+
 #endif
diff --git a/src/userinfo.c b/src/userinfo.c
index df0b6e6..a86c171 100644
--- a/src/userinfo.c
+++ b/src/userinfo.c
@@ -1,5 +1,5 @@
 /*
-    Copyright 2006,2007,2008 Martin Pärtel <martin.partel@gmail.com>
+    Copyright 2006,2007,2008,2012 Martin Pärtel <martin.partel@gmail.com>
 
     This file is part of bindfs.
@@ -18,9 +18,228 @@
 */
 
 #include "userinfo.h"
+#include "misc.h"
+#include "debug.h"
 
 #include <stdlib.h>
 #include <string.h>
 #include <errno.h>
+#include <pthread.h>
+
+struct uid_cache_entry {
+    uid_t uid;
+    gid_t main_gid;
+    int username_offset; /* arena-allocated */
+};
+
+struct gid_cache_entry {
+    gid_t gid;
+    int uid_count;
+    int uids_offset; /* arena-allocated */
+};
+
+static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
+
+static struct uid_cache_entry *uid_cache = NULL;
+static int uid_cache_size = 0;
+static int uid_cache_capacity = 0;
+
+static struct gid_cache_entry *gid_cache = NULL;
+static int gid_cache_size = 0;
+static int gid_cache_capacity = 0;
+
+static struct arena cache_arena = ARENA_INITIALIZER;
+
+static volatile int cache_rebuild_requested = 1;
+
+static void rebuild_cache();
+static struct uid_cache_entry *uid_cache_lookup(uid_t key);
+static struct gid_cache_entry *gid_cache_lookup(gid_t key);
+static int rebuild_uid_cache();
+static int rebuild_gid_cache();
+static void clear_uid_cache();
+static void clear_gid_cache();
+static int uid_cache_name_sortcmp(const void *key, const void *entry);
+static int uid_cache_name_searchcmp(const void *key, const void *entry);
+static int uid_cache_uid_sortcmp(const void *key, const void *entry);
+static int uid_cache_uid_searchcmp(const void *key, const void *entry);
+static int gid_cache_gid_sortcmp(const void *key, const void *entry);
+static int gid_cache_gid_searchcmp(const void *key, const void *entry);
+
+static void rebuild_cache()
+{
+    free_arena(&cache_arena);
+    init_arena(&cache_arena, 1024);
+    rebuild_uid_cache();
+    rebuild_gid_cache();
+    qsort(uid_cache, uid_cache_size, sizeof(struct uid_cache_entry), uid_cache_uid_sortcmp);
+    qsort(gid_cache, gid_cache_size, sizeof(struct gid_cache_entry), gid_cache_gid_sortcmp);
+}
+
+static struct uid_cache_entry *uid_cache_lookup(uid_t key)
+{
+    return (struct uid_cache_entry *)bsearch(
+        &key,
+        uid_cache,
+        uid_cache_size,
+        sizeof(struct uid_cache_entry),
+        uid_cache_uid_searchcmp
+    );
+}
+
+static struct gid_cache_entry *gid_cache_lookup(gid_t key)
+{
+    return (struct gid_cache_entry *)bsearch(
+        &key,
+        gid_cache,
+        gid_cache_size,
+        sizeof(struct gid_cache_entry),
+        gid_cache_gid_searchcmp
+    );
+}
+
+static int rebuild_uid_cache()
+{
+    /* We're holding the lock, so we have mutual exclusion on getpwent and getgrent too. */
+    struct passwd *pw;
+    struct uid_cache_entry *ent;
+    int username_len;
+
+    uid_cache_size = 0;
+
+    while (1) {
+        errno = 0;
+        pw = getpwent();
+        if (pw == NULL) {
+            if (errno == 0) {
+                break;
+            } else {
+                goto error;
+            }
+        }
+
+        if (uid_cache_size == uid_cache_capacity) {
+            grow_array(&uid_cache, &uid_cache_capacity, sizeof(struct uid_cache_entry));
+        }
+
+        ent = &uid_cache[uid_cache_size++];
+        ent->uid = pw->pw_uid;
+        ent->main_gid = pw->pw_gid;
+
+        username_len = strlen(pw->pw_name) + 1;
+        ent->username_offset = append_to_arena(&cache_arena, pw->pw_name, username_len);
+    }
+
+    endpwent();
+    return 1;
+error:
+    endpwent();
+    clear_uid_cache();
+    DPRINTF("Failed to rebuild uid cache");
+    return 0;
+}
+
+static int rebuild_gid_cache()
+{
+    /* We're holding the lock, so we have mutual exclusion on getpwent and getgrent too. */
+    struct group *gr;
+    struct gid_cache_entry *ent;
+    int i;
+    struct uid_cache_entry *uid_ent;
+
+    gid_cache_size = 0;
+
+    qsort(uid_cache, uid_cache_size, sizeof(struct uid_cache_entry), uid_cache_name_sortcmp);
+
+    while (1) {
+        errno = 0;
+        gr = getgrent();
+        if (gr == NULL) {
+            if (errno == 0) {
+                break;
+            } else {
+                goto error;
+            }
+        }
+
+        if (gid_cache_size == gid_cache_capacity) {
+            grow_array(&gid_cache, &gid_cache_capacity, sizeof(struct gid_cache_entry));
+        }
+
+        ent = &gid_cache[gid_cache_size++];
+        ent->gid = gr->gr_gid;
+        ent->uid_count = 0;
+        ent->uids_offset = cache_arena.size;
+
+        for (i = 0; gr->gr_mem[i] != NULL; ++i) {
+            uid_ent = (struct uid_cache_entry *)bsearch(
+                gr->gr_mem[i],
+                uid_cache,
+                uid_cache_size,
+                sizeof(struct uid_cache_entry),
+                uid_cache_name_searchcmp
+            );
+            if (uid_ent != NULL) {
+                grow_arena(&cache_arena, sizeof(uid_t));
+                ((uid_t *)ARENA_GET(cache_arena, ent->uids_offset))[ent->uid_count++] = uid_ent->uid;
+            }
+        }
+    }
+
+    endgrent();
+    return 1;
+error:
+    endgrent();
+    clear_gid_cache();
+    DPRINTF("Failed to rebuild gid cache");
+    return 0;
+}
+
+static void clear_uid_cache()
+{
+    uid_cache_size = 0;
+}
+
+static void clear_gid_cache()
+{
+    gid_cache_size = 0;
+}
+
+static int uid_cache_name_sortcmp(const void *a, const void *b)
+{
+    int name_a_off = ((struct uid_cache_entry *)a)->username_offset;
+    int name_b_off = ((struct uid_cache_entry *)b)->username_offset;
+    const char *name_a = (const char *)ARENA_GET(cache_arena, name_a_off);
+    const char *name_b = (const char *)ARENA_GET(cache_arena, name_b_off);
+    return strcmp(name_a, name_b);
+}
+
+static int uid_cache_name_searchcmp(const void *key, const void *entry)
+{
+    int name_off = ((struct uid_cache_entry *)entry)->username_offset;
+    const char *name = (const char *)ARENA_GET(cache_arena, name_off);
+    return strcmp((const char *)key, name);
+}
+
+static int uid_cache_uid_sortcmp(const void *a, const void *b)
+{
+    return (long)((struct uid_cache_entry *)a)->uid - (long)((struct uid_cache_entry *)b)->uid;
+}
+
+static int uid_cache_uid_searchcmp(const void *key, const void *entry)
+{
+    return (long)*((uid_t *)key) - (long)((struct uid_cache_entry *)entry)->uid;
+}
+
+static int gid_cache_gid_sortcmp(const void *a, const void *b)
+{
+    return (long)((struct gid_cache_entry *)a)->gid - (long)((struct gid_cache_entry *)b)->gid;
+}
+
+static int gid_cache_gid_searchcmp(const void *key, const void *entry)
+{
+    return (long)*((gid_t *)key) - (long)((struct gid_cache_entry *)entry)->gid;
+}
 
 int user_uid(const char *username, uid_t *ret)
@@ -120,58 +339,51 @@ int group_gid(const char *groupname, gid_t *ret)
     return 1;
 }
 
-
 int user_belongs_to_group(uid_t uid, gid_t gid)
 {
-    struct passwd pwbuf, *pwbufp = NULL;
-    struct group grbuf, *grbufp = NULL;
-    char *buf;
-    size_t buflen;
-
-    int member;
-    uid_t member_uid;
-
-    int res;
-
-    buflen = 1024;
-    buf = malloc(buflen);
-
-    res = getpwuid_r(uid, &pwbuf, buf, buflen, &pwbufp);
-    while(res == ERANGE) {
-        buflen *= 2;
-        buf = realloc(buf, buflen);
-        res = getpwuid_r(uid, &pwbuf, buf, buflen, &pwbufp);
+    int ret = 0;
+    int i;
+    uid_t *uids;
+
+    pthread_rwlock_rdlock(&cache_lock);
+
+    if (cache_rebuild_requested) {
+        pthread_rwlock_unlock(&cache_lock);
+
+        pthread_rwlock_wrlock(&cache_lock);
+        if (cache_rebuild_requested) {
+            DPRINTF("Building user/group cache");
+            cache_rebuild_requested = 0;
+            rebuild_cache();
+        }
+        pthread_rwlock_unlock(&cache_lock);
+
+        pthread_rwlock_rdlock(&cache_lock);
     }
-
-    if (pwbufp == NULL)
-        goto no;
-
-    if (gid == pwbuf.pw_gid)
-        goto yes;
-
-    /* we reuse the same buf because we don't need the passwd info any more */
-    res = getgrgid_r(gid, &grbuf, buf, buflen, &grbufp);
-    while(res == ERANGE) {
-        buflen *= 2;
-        buf = realloc(buf, buflen);
-        res = getgrgid_r(gid, &grbuf, buf, buflen, &grbufp);
+
+    struct uid_cache_entry *uent = uid_cache_lookup(uid);
+    if (uent && uent->main_gid == gid) {
+        ret = 1;
+        goto done;
     }
-
-    if (grbufp == NULL)
-        goto no;
-
-    for (member = 0; grbuf.gr_mem[member] != NULL; ++member) {
-        if (user_uid(grbuf.gr_mem[member], &member_uid))
-            if (member_uid == uid)
-                goto yes;
+
+    struct gid_cache_entry *gent = gid_cache_lookup(gid);
+    if (gent) {
+        uids = (uid_t*)ARENA_GET(cache_arena, gent->uids_offset);
+        for (i = 0; i < gent->uid_count; ++i) {
+            if (uids[i] == uid) {
+                ret = 1;
+                goto done;
+            }
+        }
     }
+
+done:
+    pthread_rwlock_unlock(&cache_lock);
+    return ret;
+}
 
-    goto no;
-
-yes:
-    free(buf);
-    return 1;
-no:
-    free(buf);
-    return 0;
+void invalidate_user_cache()
+{
+    cache_rebuild_requested = 1;
 }
diff --git a/src/userinfo.h b/src/userinfo.h
index 26cdb00..4c0ca55 100644
--- a/src/userinfo.h
+++ b/src/userinfo.h
@@ -35,5 +35,6 @@ int user_uid(const char *username, uid_t *ret);
 int group_gid(const char *groupname, gid_t *ret);
 
 int user_belongs_to_group(uid_t uid, gid_t gid);
+void invalidate_user_cache(); /* safe to call from signal handler */
 
 #endif
diff --git a/tests/common.rb b/tests/common.rb
index 58d5d84..2563b38 100755
--- a/tests/common.rb
+++ b/tests/common.rb
@@ -126,12 +126,16 @@ def testenv(bindfs_args, options = {}, &block)
 
     testcase_ok = true
     begin
-        yield
+        block.call(bindfs_pid)
     rescue Exception => ex
        fail("ERROR: testcase `#{options[:title]}' failed", ex)
        testcase_ok = false
    end
 
+    if File.exist?("bindfs.log")
+        system("cat bindfs.log")
+    end
+
    begin
        unless system(umount_cmd + ' mnt')
            raise Exception.new(umount_cmd + " failed with status #{$?}")
@@ -141,10 +145,6 @@ def testenv(bindfs_args, options = {}, &block)
        fail("ERROR: failed to umount")
        testcase_ok = false
    end
-
-    if File.exist?("bindfs.log")
-        system("cat bindfs.log")
-    end
 
    begin
        Dir.chdir '..'
diff --git a/tests/test_bindfs.rb b/tests/test_bindfs.rb
index 821b466..670be19 100755
--- a/tests/test_bindfs.rb
+++ b/tests/test_bindfs.rb
@@ -349,3 +349,31 @@ testenv("", :title => "has readdir inode numbers") do
     assert { inodes['file'] == File.stat('src/file').ino }
     assert { inodes['dir'] == File.stat('src/dir').ino }
 end
+
+# FIXME: this stuff around testenv is a hack, and testenv may also exit(), which defeats the 'ensure' below.
+# The test setup ought to be refactored. It might well use MiniTest or something.
+if Process.uid == 0
+    begin
+        `groupdel bindfs_test_group 2>&1`
+        `groupadd -f bindfs_test_group`
+        raise "Failed to create test group" if !$?.success?
+        testenv("--mirror=@bindfs_test_group", :title => "SIGUSR1 rereads user database") do |bindfs_pid|
+            touch('src/file')
+            chown('nobody', nil, 'src/file')
+
+            assert { File.stat('mnt/file').uid == $nobody_uid }
+            `adduser root bindfs_test_group`
+            raise "Failed to add root to test group" if !$?.success?
+
+            # Cache not refreshed yet
+            assert { File.stat('mnt/file').uid == $nobody_uid }
+
+            Process.kill("SIGUSR1", bindfs_pid)
+            sleep 0.5
+
+            assert { File.stat('mnt/file').uid == 0 }
+        end
+    ensure
+        `groupdel bindfs_test_group 2>&1`
+    end
+end
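
The arena helpers added to src/misc.h store integer offsets rather than pointers because grow_arena() may realloc() and move the backing buffer; src/userinfo.c relies on this to pack usernames and per-group uid lists into a single allocation that is thrown away on each cache rebuild. Below is a minimal usage sketch, assuming it is compiled and linked together with src/misc.c from this patch; the example file and its contents are illustrative only and not part of the commit.

    /* arena_demo.c -- illustrative sketch of the src/misc.h arena API (not part of the patch). */
    #include <stdio.h>
    #include <string.h>
    #include "misc.h"

    int main(void)
    {
        struct arena a = ARENA_INITIALIZER;
        int hello_off, world_off;

        init_arena(&a, 16);

        /* append_to_arena() copies the bytes in and returns their offset. */
        hello_off = append_to_arena(&a, "hello", strlen("hello") + 1);
        world_off = append_to_arena(&a, "world", strlen("world") + 1);

        /* ARENA_GET() turns an offset back into a pointer; the pointer is only
           valid until the next append, since growing may realloc() the buffer. */
        printf("%s %s\n", (char *)ARENA_GET(a, hello_off), (char *)ARENA_GET(a, world_off));

        free_arena(&a);
        return 0;
    }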