Diffstat (limited to 'drivers/staging/lustre')
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_rmtacl.c  | 299
-rw-r--r--  drivers/staging/lustre/lustre/llite/lloop.c          | 883
-rw-r--r--  drivers/staging/lustre/lustre/llite/remote_perm.c    | 324
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/acl.c         | 415
4 files changed, 0 insertions, 1921 deletions
diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
deleted file mode 100644
index 8509b07cb..000000000
--- a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/llite_rmtacl.c
- *
- * Lustre Remote User Access Control List.
- *
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#ifdef CONFIG_FS_POSIX_ACL
-
-#include "../include/lustre_lite.h"
-#include "../include/lustre_eacl.h"
-#include "llite_internal.h"
-
-static inline __u32 rce_hashfunc(uid_t id)
-{
- return id & (RCE_HASHES - 1);
-}
-
-static inline __u32 ee_hashfunc(uid_t id)
-{
- return id & (EE_HASHES - 1);
-}
-
-u64 rce_ops2valid(int ops)
-{
- switch (ops) {
- case RMT_LSETFACL:
- return OBD_MD_FLRMTLSETFACL;
- case RMT_LGETFACL:
- return OBD_MD_FLRMTLGETFACL;
- case RMT_RSETFACL:
- return OBD_MD_FLRMTRSETFACL;
- case RMT_RGETFACL:
- return OBD_MD_FLRMTRGETFACL;
- default:
- return 0;
- }
-}
-
-static struct rmtacl_ctl_entry *rce_alloc(pid_t key, int ops)
-{
- struct rmtacl_ctl_entry *rce;
-
- rce = kzalloc(sizeof(*rce), GFP_NOFS);
- if (!rce)
- return NULL;
-
- INIT_LIST_HEAD(&rce->rce_list);
- rce->rce_key = key;
- rce->rce_ops = ops;
-
- return rce;
-}
-
-static void rce_free(struct rmtacl_ctl_entry *rce)
-{
- if (!list_empty(&rce->rce_list))
- list_del(&rce->rce_list);
-
- kfree(rce);
-}
-
-static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct,
- pid_t key)
-{
- struct rmtacl_ctl_entry *rce;
- struct list_head *head = &rct->rct_entries[rce_hashfunc(key)];
-
- list_for_each_entry(rce, head, rce_list)
- if (rce->rce_key == key)
- return rce;
-
- return NULL;
-}
-
-struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key)
-{
- struct rmtacl_ctl_entry *rce;
-
- spin_lock(&rct->rct_lock);
- rce = __rct_search(rct, key);
- spin_unlock(&rct->rct_lock);
- return rce;
-}
-
-int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
-{
- struct rmtacl_ctl_entry *rce, *e;
-
- rce = rce_alloc(key, ops);
- if (!rce)
- return -ENOMEM;
-
- spin_lock(&rct->rct_lock);
- e = __rct_search(rct, key);
- if (unlikely(e)) {
- CWARN("Unexpected stale rmtacl_entry found: [key: %d] [ops: %d]\n",
- (int)key, ops);
- rce_free(e);
- }
- list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
- spin_unlock(&rct->rct_lock);
-
- return 0;
-}
-
-int rct_del(struct rmtacl_ctl_table *rct, pid_t key)
-{
- struct rmtacl_ctl_entry *rce;
-
- spin_lock(&rct->rct_lock);
- rce = __rct_search(rct, key);
- if (rce)
- rce_free(rce);
- spin_unlock(&rct->rct_lock);
-
- return rce ? 0 : -ENOENT;
-}
-
-void rct_init(struct rmtacl_ctl_table *rct)
-{
- int i;
-
- spin_lock_init(&rct->rct_lock);
- for (i = 0; i < RCE_HASHES; i++)
- INIT_LIST_HEAD(&rct->rct_entries[i]);
-}
-
-void rct_fini(struct rmtacl_ctl_table *rct)
-{
- struct rmtacl_ctl_entry *rce;
- int i;
-
- spin_lock(&rct->rct_lock);
- for (i = 0; i < RCE_HASHES; i++)
- while (!list_empty(&rct->rct_entries[i])) {
- rce = list_entry(rct->rct_entries[i].next,
- struct rmtacl_ctl_entry, rce_list);
- rce_free(rce);
- }
- spin_unlock(&rct->rct_lock);
-}
-
-static struct eacl_entry *ee_alloc(pid_t key, struct lu_fid *fid, int type,
- ext_acl_xattr_header *header)
-{
- struct eacl_entry *ee;
-
- ee = kzalloc(sizeof(*ee), GFP_NOFS);
- if (!ee)
- return NULL;
-
- INIT_LIST_HEAD(&ee->ee_list);
- ee->ee_key = key;
- ee->ee_fid = *fid;
- ee->ee_type = type;
- ee->ee_acl = header;
-
- return ee;
-}
-
-void ee_free(struct eacl_entry *ee)
-{
- if (!list_empty(&ee->ee_list))
- list_del(&ee->ee_list);
-
- if (ee->ee_acl)
- lustre_ext_acl_xattr_free(ee->ee_acl);
-
- kfree(ee);
-}
-
-static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key,
- struct lu_fid *fid, int type)
-{
- struct eacl_entry *ee;
- struct list_head *head = &et->et_entries[ee_hashfunc(key)];
-
- LASSERT(fid);
- list_for_each_entry(ee, head, ee_list)
- if (ee->ee_key == key) {
- if (lu_fid_eq(&ee->ee_fid, fid) &&
- ee->ee_type == type) {
- list_del_init(&ee->ee_list);
- return ee;
- }
- }
-
- return NULL;
-}
-
-struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
- struct lu_fid *fid, int type)
-{
- struct eacl_entry *ee;
-
- spin_lock(&et->et_lock);
- ee = __et_search_del(et, key, fid, type);
- spin_unlock(&et->et_lock);
- return ee;
-}
-
-void et_search_free(struct eacl_table *et, pid_t key)
-{
- struct eacl_entry *ee, *next;
- struct list_head *head = &et->et_entries[ee_hashfunc(key)];
-
- spin_lock(&et->et_lock);
- list_for_each_entry_safe(ee, next, head, ee_list)
- if (ee->ee_key == key)
- ee_free(ee);
-
- spin_unlock(&et->et_lock);
-}
-
-int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
- ext_acl_xattr_header *header)
-{
- struct eacl_entry *ee, *e;
-
- ee = ee_alloc(key, fid, type, header);
- if (!ee)
- return -ENOMEM;
-
- spin_lock(&et->et_lock);
- e = __et_search_del(et, key, fid, type);
- if (unlikely(e)) {
- CWARN("Unexpected stale eacl_entry found: [key: %d] [fid: " DFID "] [type: %d]\n",
- (int)key, PFID(fid), type);
- ee_free(e);
- }
- list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
- spin_unlock(&et->et_lock);
-
- return 0;
-}
-
-void et_init(struct eacl_table *et)
-{
- int i;
-
- spin_lock_init(&et->et_lock);
- for (i = 0; i < EE_HASHES; i++)
- INIT_LIST_HEAD(&et->et_entries[i]);
-}
-
-void et_fini(struct eacl_table *et)
-{
- struct eacl_entry *ee;
- int i;
-
- spin_lock(&et->et_lock);
- for (i = 0; i < EE_HASHES; i++)
- while (!list_empty(&et->et_entries[i])) {
- ee = list_entry(et->et_entries[i].next,
- struct eacl_entry, ee_list);
- ee_free(ee);
- }
- spin_unlock(&et->et_lock);
-}
-
-#endif
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
deleted file mode 100644
index 813a9a354..000000000
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ /dev/null
@@ -1,883 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/*
- * linux/drivers/block/loop.c
- *
- * Written by Theodore Ts'o, 3/29/93
- *
- * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
- * permitted under the GNU General Public License.
- *
- * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
- * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
- *
- * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
- *
- * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
- *
- * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
- *
- * Loadable modules and other fixes by AK, 1998
- *
- * Maximum number of loop devices now dynamic via max_loop module parameter.
- * Russell Kroll <rkroll@exploits.org> 19990701
- *
- * Maximum number of loop devices when compiled-in now selectable by passing
- * max_loop=<1-255> to the kernel on boot.
- * Erik I. Bols?, <eriki@himolde.no>, Oct 31, 1999
- *
- * Completely rewrite request handling to be make_request_fn style and
- * non blocking, pushing work to a helper thread. Lots of fixes from
- * Al Viro too.
- * Jens Axboe <axboe@suse.de>, Nov 2000
- *
- * Support up to 256 loop devices
- * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
- *
- * Support for falling back on the write file operation when the address space
- * operations prepare_write and/or commit_write are not available on the
- * backing filesystem.
- * Anton Altaparmakov, 16 Feb 2005
- *
- * Still To Fix:
- * - Advisory locking is ignored here.
- * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
- *
- */
-
-#include <linux/module.h>
-
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/major.h>
-#include <linux/wait.h>
-#include <linux/blkdev.h>
-#include <linux/blkpg.h>
-#include <linux/init.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-#include <linux/writeback.h>
-#include <linux/buffer_head.h> /* for invalidate_bdev() */
-#include <linux/completion.h>
-#include <linux/highmem.h>
-#include <linux/gfp.h>
-#include <linux/pagevec.h>
-#include <linux/uaccess.h>
-
-#include "../include/lustre_lib.h"
-#include "../include/lustre_lite.h"
-#include "llite_internal.h"
-
-#define LLOOP_MAX_SEGMENTS LNET_MAX_IOV
-
-/* Possible states of device */
-enum {
- LLOOP_UNBOUND,
- LLOOP_BOUND,
- LLOOP_RUNDOWN,
-};
-
-struct lloop_device {
- int lo_number;
- int lo_refcnt;
- loff_t lo_offset;
- loff_t lo_sizelimit;
- int lo_flags;
- struct file *lo_backing_file;
- struct block_device *lo_device;
- unsigned lo_blocksize;
-
- gfp_t old_gfp_mask;
-
- spinlock_t lo_lock;
- struct bio *lo_bio;
- struct bio *lo_biotail;
- int lo_state;
- struct semaphore lo_sem;
- struct mutex lo_ctl_mutex;
- atomic_t lo_pending;
- wait_queue_head_t lo_bh_wait;
-
- struct request_queue *lo_queue;
-
- const struct lu_env *lo_env;
- struct cl_io lo_io;
- struct ll_dio_pages lo_pvec;
-
- /* data to handle bio for lustre. */
- struct lo_request_data {
- struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
- loff_t lrd_offsets[LLOOP_MAX_SEGMENTS];
- } lo_requests[1];
-};
-
-/*
- * Loop flags
- */
-enum {
- LO_FLAGS_READ_ONLY = 1,
-};
-
-static int lloop_major;
-#define MAX_LOOP_DEFAULT 16
-static int max_loop = MAX_LOOP_DEFAULT;
-static struct lloop_device *loop_dev;
-static struct gendisk **disks;
-static struct mutex lloop_mutex;
-static void *ll_iocontrol_magic;
-
-static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
-{
- loff_t size, offset, loopsize;
-
- /* Compute loopsize in bytes */
- size = i_size_read(file->f_mapping->host);
- offset = lo->lo_offset;
- loopsize = size - offset;
- if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
- loopsize = lo->lo_sizelimit;
-
- /*
- * Unfortunately, if we want to do I/O on the device,
- * the number of 512-byte sectors has to fit into a sector_t.
- */
- return loopsize >> 9;
-}
-
-static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
-{
- const struct lu_env *env = lo->lo_env;
- struct cl_io *io = &lo->lo_io;
- struct inode *inode = file_inode(lo->lo_backing_file);
- struct cl_object *obj = ll_i2info(inode)->lli_clob;
- pgoff_t offset;
- int ret;
- int rw;
- u32 page_count = 0;
- struct bio_vec bvec;
- struct bvec_iter iter;
- struct bio *bio;
- ssize_t bytes;
-
- struct ll_dio_pages *pvec = &lo->lo_pvec;
- struct page **pages = pvec->ldp_pages;
- loff_t *offsets = pvec->ldp_offsets;
-
- truncate_inode_pages(inode->i_mapping, 0);
-
- /* initialize the IO */
- memset(io, 0, sizeof(*io));
- io->ci_obj = obj;
- ret = cl_io_init(env, io, CIT_MISC, obj);
- if (ret)
- return io->ci_result;
- io->ci_lockreq = CILR_NEVER;
-
- rw = head->bi_rw;
- for (bio = head; bio ; bio = bio->bi_next) {
- LASSERT(rw == bio->bi_rw);
-
- offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
- bio_for_each_segment(bvec, bio, iter) {
- BUG_ON(bvec.bv_offset != 0);
- BUG_ON(bvec.bv_len != PAGE_SIZE);
-
- pages[page_count] = bvec.bv_page;
- offsets[page_count] = offset;
- page_count++;
- offset += bvec.bv_len;
- }
- LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
- }
-
- ll_stats_ops_tally(ll_i2sbi(inode),
- (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
- page_count);
-
- pvec->ldp_size = page_count << PAGE_SHIFT;
- pvec->ldp_nr = page_count;
-
- /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
- * write those pages into OST. Even worse case is that more pages
- * would be asked to write out to swap space, and then finally get here
- * again.
- * Unfortunately this is NOT easy to fix.
- * Thoughts on solution:
- * 0. Define a reserved pool for cl_pages, which could be a list of
- * pre-allocated cl_pages;
- * 1. Define a new operation in cl_object_operations{}, says clo_depth,
- * which measures how many layers for this lustre object. Generally
- * speaking, the depth would be 2, one for llite, and one for lovsub.
- * However, for SNS, there will be more since we need additional page
- * to store parity;
- * 2. Reserve the # of (page_count * depth) cl_pages from the reserved
- * pool. Afterwards, the clio would allocate the pages from reserved
- * pool, this guarantees we needn't allocate the cl_pages from
- * generic cl_page slab cache.
- * Of course, if there is NOT enough pages in the pool, we might
- * be asked to write less pages once, this purely depends on
- * implementation. Anyway, we should be careful to avoid deadlocking.
- */
- inode_lock(inode);
- bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
- inode_unlock(inode);
- cl_io_fini(env, io);
- return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
-}
-
-/*
- * Add bio to back of pending list
- */
-static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&lo->lo_lock, flags);
- if (lo->lo_biotail) {
- lo->lo_biotail->bi_next = bio;
- lo->lo_biotail = bio;
- } else {
- lo->lo_bio = lo->lo_biotail = bio;
- }
- spin_unlock_irqrestore(&lo->lo_lock, flags);
-
- atomic_inc(&lo->lo_pending);
- if (waitqueue_active(&lo->lo_bh_wait))
- wake_up(&lo->lo_bh_wait);
-}
-
-/*
- * Grab first pending buffer
- */
-static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
-{
- struct bio *first;
- struct bio **bio;
- unsigned int count = 0;
- unsigned int page_count = 0;
- int rw;
-
- spin_lock_irq(&lo->lo_lock);
- first = lo->lo_bio;
- if (unlikely(!first)) {
- spin_unlock_irq(&lo->lo_lock);
- return 0;
- }
-
- /* TODO: need to split the bio, too bad. */
- LASSERT(first->bi_vcnt <= LLOOP_MAX_SEGMENTS);
-
- rw = first->bi_rw;
- bio = &lo->lo_bio;
- while (*bio && (*bio)->bi_rw == rw) {
- CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u\n",
- (unsigned long long)(*bio)->bi_iter.bi_sector,
- (*bio)->bi_iter.bi_size,
- page_count, (*bio)->bi_vcnt);
- if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
- break;
-
- page_count += (*bio)->bi_vcnt;
- count++;
- bio = &(*bio)->bi_next;
- }
- if (*bio) {
- /* Some of bios can't be mergeable. */
- lo->lo_bio = *bio;
- *bio = NULL;
- } else {
- /* Hit the end of queue */
- lo->lo_biotail = NULL;
- lo->lo_bio = NULL;
- }
- *req = first;
- spin_unlock_irq(&lo->lo_lock);
- return count;
-}
-
-static blk_qc_t loop_make_request(struct request_queue *q, struct bio *old_bio)
-{
- struct lloop_device *lo = q->queuedata;
- int rw = bio_rw(old_bio);
- int inactive;
-
- blk_queue_split(q, &old_bio, q->bio_split);
-
- if (!lo)
- goto err;
-
- CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
- (unsigned long long)old_bio->bi_iter.bi_sector,
- old_bio->bi_iter.bi_size);
-
- spin_lock_irq(&lo->lo_lock);
- inactive = lo->lo_state != LLOOP_BOUND;
- spin_unlock_irq(&lo->lo_lock);
- if (inactive)
- goto err;
-
- if (rw == WRITE) {
- if (lo->lo_flags & LO_FLAGS_READ_ONLY)
- goto err;
- } else if (rw == READA) {
- rw = READ;
- } else if (rw != READ) {
- CERROR("lloop: unknown command (%x)\n", rw);
- goto err;
- }
- loop_add_bio(lo, old_bio);
- return BLK_QC_T_NONE;
-err:
- bio_io_error(old_bio);
- return BLK_QC_T_NONE;
-}
-
-static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
-{
- int ret;
-
- ret = do_bio_lustrebacked(lo, bio);
- while (bio) {
- struct bio *tmp = bio->bi_next;
-
- bio->bi_next = NULL;
- bio->bi_error = ret;
- bio_endio(bio);
- bio = tmp;
- }
-}
-
-static inline int loop_active(struct lloop_device *lo)
-{
- return atomic_read(&lo->lo_pending) ||
- (lo->lo_state == LLOOP_RUNDOWN);
-}
-
-/*
- * worker thread that handles reads/writes to file backed loop devices,
- * to avoid blocking in our make_request_fn.
- */
-static int loop_thread(void *data)
-{
- struct lloop_device *lo = data;
- struct bio *bio;
- unsigned int count;
- unsigned long times = 0;
- unsigned long total_count = 0;
-
- struct lu_env *env;
- int refcheck;
- int ret = 0;
-
- set_user_nice(current, MIN_NICE);
-
- lo->lo_state = LLOOP_BOUND;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env)) {
- ret = PTR_ERR(env);
- goto out;
- }
-
- lo->lo_env = env;
- memset(&lo->lo_pvec, 0, sizeof(lo->lo_pvec));
- lo->lo_pvec.ldp_pages = lo->lo_requests[0].lrd_pages;
- lo->lo_pvec.ldp_offsets = lo->lo_requests[0].lrd_offsets;
-
- /*
- * up sem, we are running
- */
- up(&lo->lo_sem);
-
- for (;;) {
- wait_event(lo->lo_bh_wait, loop_active(lo));
- if (!atomic_read(&lo->lo_pending)) {
- int exiting = 0;
-
- spin_lock_irq(&lo->lo_lock);
- exiting = (lo->lo_state == LLOOP_RUNDOWN);
- spin_unlock_irq(&lo->lo_lock);
- if (exiting)
- break;
- }
-
- bio = NULL;
- count = loop_get_bio(lo, &bio);
- if (!count) {
- CWARN("lloop(minor: %d): missing bio\n", lo->lo_number);
- continue;
- }
-
- total_count += count;
- if (total_count < count) { /* overflow */
- total_count = count;
- times = 1;
- } else {
- times++;
- }
- if ((times & 127) == 0) {
- CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
- total_count, times, total_count / times);
- }
-
- LASSERT(bio);
- LASSERT(count <= atomic_read(&lo->lo_pending));
- loop_handle_bio(lo, bio);
- atomic_sub(count, &lo->lo_pending);
- }
- cl_env_put(env, &refcheck);
-
-out:
- up(&lo->lo_sem);
- return ret;
-}
-
-static int loop_set_fd(struct lloop_device *lo, struct file *unused,
- struct block_device *bdev, struct file *file)
-{
- struct inode *inode;
- struct address_space *mapping;
- int lo_flags = 0;
- int error;
- loff_t size;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- error = -EBUSY;
- if (lo->lo_state != LLOOP_UNBOUND)
- goto out;
-
- mapping = file->f_mapping;
- inode = mapping->host;
-
- error = -EINVAL;
- if (!S_ISREG(inode->i_mode) || inode->i_sb->s_magic != LL_SUPER_MAGIC)
- goto out;
-
- if (!(file->f_mode & FMODE_WRITE))
- lo_flags |= LO_FLAGS_READ_ONLY;
-
- size = get_loop_size(lo, file);
-
- if ((loff_t)(sector_t)size != size) {
- error = -EFBIG;
- goto out;
- }
-
- /* remove all pages in cache so as dirty pages not to be existent. */
- truncate_inode_pages(mapping, 0);
-
- set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
-
- lo->lo_blocksize = PAGE_SIZE;
- lo->lo_device = bdev;
- lo->lo_flags = lo_flags;
- lo->lo_backing_file = file;
- lo->lo_sizelimit = 0;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
-
- lo->lo_bio = lo->lo_biotail = NULL;
-
- /*
- * set queue make_request_fn, and add limits based on lower level
- * device
- */
- blk_queue_make_request(lo->lo_queue, loop_make_request);
- lo->lo_queue->queuedata = lo;
-
- /* queue parameters */
- CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
- blk_queue_logical_block_size(lo->lo_queue,
- (unsigned short)PAGE_SIZE);
- blk_queue_max_hw_sectors(lo->lo_queue,
- LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
- blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
-
- set_capacity(disks[lo->lo_number], size);
- bd_set_size(bdev, size << 9);
-
- set_blocksize(bdev, lo->lo_blocksize);
-
- kthread_run(loop_thread, lo, "lloop%d", lo->lo_number);
- down(&lo->lo_sem);
- return 0;
-
-out:
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
- return error;
-}
-
-static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
- int count)
-{
- struct file *filp = lo->lo_backing_file;
- gfp_t gfp = lo->old_gfp_mask;
-
- if (lo->lo_state != LLOOP_BOUND)
- return -ENXIO;
-
- if (lo->lo_refcnt > count) /* we needed one fd for the ioctl */
- return -EBUSY;
-
- if (!filp)
- return -EINVAL;
-
- spin_lock_irq(&lo->lo_lock);
- lo->lo_state = LLOOP_RUNDOWN;
- spin_unlock_irq(&lo->lo_lock);
- wake_up(&lo->lo_bh_wait);
-
- down(&lo->lo_sem);
- lo->lo_backing_file = NULL;
- lo->lo_device = NULL;
- lo->lo_offset = 0;
- lo->lo_sizelimit = 0;
- lo->lo_flags = 0;
- invalidate_bdev(bdev);
- set_capacity(disks[lo->lo_number], 0);
- bd_set_size(bdev, 0);
- mapping_set_gfp_mask(filp->f_mapping, gfp);
- lo->lo_state = LLOOP_UNBOUND;
- fput(filp);
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
- return 0;
-}
-
-static int lo_open(struct block_device *bdev, fmode_t mode)
-{
- struct lloop_device *lo = bdev->bd_disk->private_data;
-
- mutex_lock(&lo->lo_ctl_mutex);
- lo->lo_refcnt++;
- mutex_unlock(&lo->lo_ctl_mutex);
-
- return 0;
-}
-
-static void lo_release(struct gendisk *disk, fmode_t mode)
-{
- struct lloop_device *lo = disk->private_data;
-
- mutex_lock(&lo->lo_ctl_mutex);
- --lo->lo_refcnt;
- mutex_unlock(&lo->lo_ctl_mutex);
-}
-
-/* lloop device node's ioctl function. */
-static int lo_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct lloop_device *lo = bdev->bd_disk->private_data;
- struct inode *inode = NULL;
- int err = 0;
-
- mutex_lock(&lloop_mutex);
- switch (cmd) {
- case LL_IOC_LLOOP_DETACH: {
- err = loop_clr_fd(lo, bdev, 2);
- if (err == 0)
- blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
- break;
- }
-
- case LL_IOC_LLOOP_INFO: {
- struct lu_fid fid;
-
- if (!lo->lo_backing_file) {
- err = -ENOENT;
- break;
- }
- if (!inode)
- inode = file_inode(lo->lo_backing_file);
- if (lo->lo_state == LLOOP_BOUND)
- fid = ll_i2info(inode)->lli_fid;
- else
- fid_zero(&fid);
-
- if (copy_to_user((void __user *)arg, &fid, sizeof(fid)))
- err = -EFAULT;
- break;
- }
-
- default:
- err = -EINVAL;
- break;
- }
- mutex_unlock(&lloop_mutex);
-
- return err;
-}
-
-static struct block_device_operations lo_fops = {
- .owner = THIS_MODULE,
- .open = lo_open,
- .release = lo_release,
- .ioctl = lo_ioctl,
-};
-
-/* dynamic iocontrol callback.
- * This callback is registered in lloop_init and will be called by
- * ll_iocontrol_call.
- *
- * This is a llite regular file ioctl function. It takes the responsibility
- * of attaching or detaching a file by a lloop's device number.
- */
-static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
- unsigned int cmd, unsigned long arg,
- void *magic, int *rcp)
-{
- struct lloop_device *lo = NULL;
- struct block_device *bdev = NULL;
- int err = 0;
- dev_t dev;
-
- if (magic != ll_iocontrol_magic)
- return LLIOC_CONT;
-
- if (!disks) {
- err = -ENODEV;
- goto out1;
- }
-
- CWARN("Enter llop_ioctl\n");
-
- mutex_lock(&lloop_mutex);
- switch (cmd) {
- case LL_IOC_LLOOP_ATTACH: {
- struct lloop_device *lo_free = NULL;
- int i;
-
- for (i = 0; i < max_loop; i++, lo = NULL) {
- lo = &loop_dev[i];
- if (lo->lo_state == LLOOP_UNBOUND) {
- if (!lo_free)
- lo_free = lo;
- continue;
- }
- if (file_inode(lo->lo_backing_file) == file_inode(file))
- break;
- }
- if (lo || !lo_free) {
- err = -EBUSY;
- goto out;
- }
-
- lo = lo_free;
- dev = MKDEV(lloop_major, lo->lo_number);
-
- /* quit if the used pointer is writable */
- if (put_user((long)old_encode_dev(dev), (long __user *)arg)) {
- err = -EFAULT;
- goto out;
- }
-
- bdev = blkdev_get_by_dev(dev, file->f_mode, NULL);
- if (IS_ERR(bdev)) {
- err = PTR_ERR(bdev);
- goto out;
- }
-
- get_file(file);
- err = loop_set_fd(lo, NULL, bdev, file);
- if (err) {
- fput(file);
- blkdev_put(bdev, 0);
- }
-
- break;
- }
-
- case LL_IOC_LLOOP_DETACH_BYDEV: {
- int minor;
-
- dev = old_decode_dev(arg);
- if (MAJOR(dev) != lloop_major) {
- err = -EINVAL;
- goto out;
- }
-
- minor = MINOR(dev);
- if (minor > max_loop - 1) {
- err = -EINVAL;
- goto out;
- }
-
- lo = &loop_dev[minor];
- if (lo->lo_state != LLOOP_BOUND) {
- err = -EINVAL;
- goto out;
- }
-
- bdev = lo->lo_device;
- err = loop_clr_fd(lo, bdev, 1);
- if (err == 0)
- blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
-
- break;
- }
-
- default:
- err = -EINVAL;
- break;
- }
-
-out:
- mutex_unlock(&lloop_mutex);
-out1:
- if (rcp)
- *rcp = err;
- return LLIOC_STOP;
-}
-
-static int __init lloop_init(void)
-{
- int i;
- unsigned int cmdlist[] = {
- LL_IOC_LLOOP_ATTACH,
- LL_IOC_LLOOP_DETACH_BYDEV,
- };
-
- if (max_loop < 1 || max_loop > 256) {
- max_loop = MAX_LOOP_DEFAULT;
- CWARN("lloop: invalid max_loop (must be between 1 and 256), using default (%u)\n",
- max_loop);
- }
-
- lloop_major = register_blkdev(0, "lloop");
- if (lloop_major < 0)
- return -EIO;
-
- CDEBUG(D_CONFIG, "registered lloop major %d with %u minors\n",
- lloop_major, max_loop);
-
- ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist);
- if (!ll_iocontrol_magic)
- goto out_mem1;
-
- loop_dev = kcalloc(max_loop, sizeof(*loop_dev), GFP_KERNEL);
- if (!loop_dev)
- goto out_mem1;
-
- disks = kcalloc(max_loop, sizeof(*disks), GFP_KERNEL);
- if (!disks)
- goto out_mem2;
-
- for (i = 0; i < max_loop; i++) {
- disks[i] = alloc_disk(1);
- if (!disks[i])
- goto out_mem3;
- }
-
- mutex_init(&lloop_mutex);
-
- for (i = 0; i < max_loop; i++) {
- struct lloop_device *lo = &loop_dev[i];
- struct gendisk *disk = disks[i];
-
- lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
- if (!lo->lo_queue)
- goto out_mem4;
-
- mutex_init(&lo->lo_ctl_mutex);
- sema_init(&lo->lo_sem, 0);
- init_waitqueue_head(&lo->lo_bh_wait);
- lo->lo_number = i;
- spin_lock_init(&lo->lo_lock);
- disk->major = lloop_major;
- disk->first_minor = i;
- disk->fops = &lo_fops;
- sprintf(disk->disk_name, "lloop%d", i);
- disk->private_data = lo;
- disk->queue = lo->lo_queue;
- }
-
- /* We cannot fail after we call this, so another loop!*/
- for (i = 0; i < max_loop; i++)
- add_disk(disks[i]);
- return 0;
-
-out_mem4:
- while (i--)
- blk_cleanup_queue(loop_dev[i].lo_queue);
- i = max_loop;
-out_mem3:
- while (i--)
- put_disk(disks[i]);
- kfree(disks);
-out_mem2:
- kfree(loop_dev);
-out_mem1:
- unregister_blkdev(lloop_major, "lloop");
- ll_iocontrol_unregister(ll_iocontrol_magic);
- CERROR("lloop: ran out of memory\n");
- return -ENOMEM;
-}
-
-static void lloop_exit(void)
-{
- int i;
-
- ll_iocontrol_unregister(ll_iocontrol_magic);
- for (i = 0; i < max_loop; i++) {
- del_gendisk(disks[i]);
- blk_cleanup_queue(loop_dev[i].lo_queue);
- put_disk(disks[i]);
- }
-
- unregister_blkdev(lloop_major, "lloop");
-
- kfree(disks);
- kfree(loop_dev);
-}
-
-module_param(max_loop, int, 0444);
-MODULE_PARM_DESC(max_loop, "maximum of lloop_device");
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre virtual block device");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(lloop_init);
-module_exit(lloop_exit);
diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c
deleted file mode 100644
index e9d25317c..000000000
--- a/drivers/staging/lustre/lustre/llite/remote_perm.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/remote_perm.c
- *
- * Lustre Permission Cache for Remote Client
- *
- * Author: Lai Siyao <lsy@clusterfs.com>
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/module.h>
-#include <linux/types.h>
-
-#include "../include/lustre_lite.h"
-#include "../include/lustre_ha.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_disk.h"
-#include "../include/lustre_param.h"
-#include "llite_internal.h"
-
-struct kmem_cache *ll_remote_perm_cachep;
-struct kmem_cache *ll_rmtperm_hash_cachep;
-
-static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
-{
- struct ll_remote_perm *lrp;
-
- lrp = kmem_cache_zalloc(ll_remote_perm_cachep, GFP_KERNEL);
- if (lrp)
- INIT_HLIST_NODE(&lrp->lrp_list);
- return lrp;
-}
-
-static inline void free_ll_remote_perm(struct ll_remote_perm *lrp)
-{
- if (!lrp)
- return;
-
- if (!hlist_unhashed(&lrp->lrp_list))
- hlist_del(&lrp->lrp_list);
- kmem_cache_free(ll_remote_perm_cachep, lrp);
-}
-
-static struct hlist_head *alloc_rmtperm_hash(void)
-{
- struct hlist_head *hash;
- int i;
-
- hash = kmem_cache_zalloc(ll_rmtperm_hash_cachep, GFP_NOFS);
- if (!hash)
- return NULL;
-
- for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
- INIT_HLIST_HEAD(hash + i);
-
- return hash;
-}
-
-void free_rmtperm_hash(struct hlist_head *hash)
-{
- int i;
- struct ll_remote_perm *lrp;
- struct hlist_node *next;
-
- if (!hash)
- return;
-
- for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
- hlist_for_each_entry_safe(lrp, next, hash + i, lrp_list)
- free_ll_remote_perm(lrp);
- kmem_cache_free(ll_rmtperm_hash_cachep, hash);
-}
-
-static inline int remote_perm_hashfunc(uid_t uid)
-{
- return uid & (REMOTE_PERM_HASHSIZE - 1);
-}
-
-/* NB: setxid permission is not checked here, instead it's done on
- * MDT when client get remote permission.
- */
-static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
-{
- struct hlist_head *head;
- struct ll_remote_perm *lrp;
- int found = 0, rc;
-
- if (!lli->lli_remote_perms)
- return -ENOENT;
-
- head = lli->lli_remote_perms +
- remote_perm_hashfunc(from_kuid(&init_user_ns, current_uid()));
-
- spin_lock(&lli->lli_lock);
- hlist_for_each_entry(lrp, head, lrp_list) {
- if (lrp->lrp_uid != from_kuid(&init_user_ns, current_uid()))
- continue;
- if (lrp->lrp_gid != from_kgid(&init_user_ns, current_gid()))
- continue;
- if (lrp->lrp_fsuid != from_kuid(&init_user_ns, current_fsuid()))
- continue;
- if (lrp->lrp_fsgid != from_kgid(&init_user_ns, current_fsgid()))
- continue;
- found = 1;
- break;
- }
-
- if (!found) {
- rc = -ENOENT;
- goto out;
- }
-
- CDEBUG(D_SEC, "found remote perm: %u/%u/%u/%u - %#x\n",
- lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
- lrp->lrp_access_perm);
- rc = ((lrp->lrp_access_perm & mask) == mask) ? 0 : -EACCES;
-
-out:
- spin_unlock(&lli->lli_lock);
- return rc;
-}
-
-int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_remote_perm *lrp = NULL, *tmp = NULL;
- struct hlist_head *head, *perm_hash = NULL;
-
- LASSERT(ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT);
-
-#if 0
- if (perm->rp_uid != current->uid ||
- perm->rp_gid != current->gid ||
- perm->rp_fsuid != current->fsuid ||
- perm->rp_fsgid != current->fsgid) {
- /* user might setxid in this small period */
- CDEBUG(D_SEC,
- "remote perm user %u/%u/%u/%u != current %u/%u/%u/%u\n",
- perm->rp_uid, perm->rp_gid, perm->rp_fsuid,
- perm->rp_fsgid, current->uid, current->gid,
- current->fsuid, current->fsgid);
- return -EAGAIN;
- }
-#endif
-
- if (!lli->lli_remote_perms) {
- perm_hash = alloc_rmtperm_hash();
- if (!perm_hash) {
- CERROR("alloc lli_remote_perms failed!\n");
- return -ENOMEM;
- }
- }
-
- spin_lock(&lli->lli_lock);
-
- if (!lli->lli_remote_perms)
- lli->lli_remote_perms = perm_hash;
- else
- free_rmtperm_hash(perm_hash);
-
- head = lli->lli_remote_perms + remote_perm_hashfunc(perm->rp_uid);
-
-again:
- hlist_for_each_entry(tmp, head, lrp_list) {
- if (tmp->lrp_uid != perm->rp_uid)
- continue;
- if (tmp->lrp_gid != perm->rp_gid)
- continue;
- if (tmp->lrp_fsuid != perm->rp_fsuid)
- continue;
- if (tmp->lrp_fsgid != perm->rp_fsgid)
- continue;
- free_ll_remote_perm(lrp);
- lrp = tmp;
- break;
- }
-
- if (!lrp) {
- spin_unlock(&lli->lli_lock);
- lrp = alloc_ll_remote_perm();
- if (!lrp) {
- CERROR("alloc memory for ll_remote_perm failed!\n");
- return -ENOMEM;
- }
- spin_lock(&lli->lli_lock);
- goto again;
- }
-
- lrp->lrp_access_perm = perm->rp_access_perm;
- if (lrp != tmp) {
- lrp->lrp_uid = perm->rp_uid;
- lrp->lrp_gid = perm->rp_gid;
- lrp->lrp_fsuid = perm->rp_fsuid;
- lrp->lrp_fsgid = perm->rp_fsgid;
- hlist_add_head(&lrp->lrp_list, head);
- }
- lli->lli_rmtperm_time = cfs_time_current();
- spin_unlock(&lli->lli_lock);
-
- CDEBUG(D_SEC, "new remote perm@%p: %u/%u/%u/%u - %#x\n",
- lrp, lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
- lrp->lrp_access_perm);
-
- return 0;
-}
-
-int lustre_check_remote_perm(struct inode *inode, int mask)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ptlrpc_request *req = NULL;
- struct mdt_remote_perm *perm;
- unsigned long save;
- int i = 0, rc;
-
- do {
- save = lli->lli_rmtperm_time;
- rc = do_check_remote_perm(lli, mask);
- if (!rc || (rc != -ENOENT && i))
- break;
-
- might_sleep();
-
- mutex_lock(&lli->lli_rmtperm_mutex);
- /* check again */
- if (save != lli->lli_rmtperm_time) {
- rc = do_check_remote_perm(lli, mask);
- if (!rc || (rc != -ENOENT && i)) {
- mutex_unlock(&lli->lli_rmtperm_mutex);
- break;
- }
- }
-
- if (i++ > 5) {
- CERROR("check remote perm falls in dead loop!\n");
- LBUG();
- }
-
- rc = md_get_remote_perm(sbi->ll_md_exp, ll_inode2fid(inode),
- ll_i2suppgid(inode), &req);
- if (rc) {
- mutex_unlock(&lli->lli_rmtperm_mutex);
- break;
- }
-
- perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (unlikely(!perm)) {
- mutex_unlock(&lli->lli_rmtperm_mutex);
- rc = -EPROTO;
- break;
- }
-
- rc = ll_update_remote_perm(inode, perm);
- mutex_unlock(&lli->lli_rmtperm_mutex);
- if (rc == -ENOMEM)
- break;
-
- ptlrpc_req_finished(req);
- req = NULL;
- } while (1);
- ptlrpc_req_finished(req);
- return rc;
-}
-
-#if 0 /* NB: remote perms can't be freed in ll_mdc_blocking_ast of UPDATE lock,
- * because it will fail sanity test 48.
- */
-void ll_free_remote_perms(struct inode *inode)
-{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct hlist_head *hash = lli->lli_remote_perms;
- struct ll_remote_perm *lrp;
- struct hlist_node *node, *next;
- int i;
-
- LASSERT(hash);
-
- spin_lock(&lli->lli_lock);
-
- for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
- hlist_for_each_entry_safe(lrp, node, next, hash + i, lrp_list)
- free_ll_remote_perm(lrp);
- }
-
- spin_unlock(&lli->lli_lock);
-}
-#endif
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
deleted file mode 100644
index 0e02ae97b..000000000
--- a/drivers/staging/lustre/lustre/obdclass/acl.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/acl.c
- *
- * Lustre Access Control List.
- *
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-#include "../include/lu_object.h"
-#include "../include/lustre_acl.h"
-#include "../include/lustre_eacl.h"
-#include "../include/obd_support.h"
-
-#ifdef CONFIG_FS_POSIX_ACL
-
-#define CFS_ACL_XATTR_VERSION POSIX_ACL_XATTR_VERSION
-
-enum {
- ES_UNK = 0, /* unknown stat */
- ES_UNC = 1, /* ACL entry is not changed */
- ES_MOD = 2, /* ACL entry is modified */
- ES_ADD = 3, /* ACL entry is added */
- ES_DEL = 4 /* ACL entry is deleted */
-};
-
-static inline void lustre_ext_acl_le_to_cpu(ext_acl_xattr_entry *d,
- ext_acl_xattr_entry *s)
-{
- d->e_tag = le16_to_cpu(s->e_tag);
- d->e_perm = le16_to_cpu(s->e_perm);
- d->e_id = le32_to_cpu(s->e_id);
- d->e_stat = le32_to_cpu(s->e_stat);
-}
-
-static inline void lustre_ext_acl_cpu_to_le(ext_acl_xattr_entry *d,
- ext_acl_xattr_entry *s)
-{
- d->e_tag = cpu_to_le16(s->e_tag);
- d->e_perm = cpu_to_le16(s->e_perm);
- d->e_id = cpu_to_le32(s->e_id);
- d->e_stat = cpu_to_le32(s->e_stat);
-}
-
-static inline void lustre_posix_acl_le_to_cpu(posix_acl_xattr_entry *d,
- posix_acl_xattr_entry *s)
-{
- d->e_tag = le16_to_cpu(s->e_tag);
- d->e_perm = le16_to_cpu(s->e_perm);
- d->e_id = le32_to_cpu(s->e_id);
-}
-
-static inline void lustre_posix_acl_cpu_to_le(posix_acl_xattr_entry *d,
- posix_acl_xattr_entry *s)
-{
- d->e_tag = cpu_to_le16(s->e_tag);
- d->e_perm = cpu_to_le16(s->e_perm);
- d->e_id = cpu_to_le32(s->e_id);
-}
-
-/* if "new_count == 0", then "new = {a_version, NULL}", NOT NULL. */
-static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header,
- int old_count, int new_count)
-{
- int old_size = CFS_ACL_XATTR_SIZE(old_count, posix_acl_xattr);
- int new_size = CFS_ACL_XATTR_SIZE(new_count, posix_acl_xattr);
- posix_acl_xattr_header *new;
-
- if (unlikely(old_count <= new_count))
- return old_size;
-
- new = kmemdup(*header, new_size, GFP_NOFS);
- if (unlikely(!new))
- return -ENOMEM;
-
- kfree(*header);
- *header = new;
- return new_size;
-}
-
-/* if "new_count == 0", then "new = {0, NULL}", NOT NULL. */
-static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header,
- int old_count)
-{
- int ext_count = le32_to_cpu((*header)->a_count);
- int ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
- ext_acl_xattr_header *new;
-
- if (unlikely(old_count <= ext_count))
- return 0;
-
- new = kmemdup(*header, ext_size, GFP_NOFS);
- if (unlikely(!new))
- return -ENOMEM;
-
- kfree(*header);
- *header = new;
- return 0;
-}
-
-/*
- * Generate new extended ACL based on the posix ACL.
- */
-ext_acl_xattr_header *
-lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
-{
- int count, i, esize;
- ext_acl_xattr_header *new;
-
- if (unlikely(size < 0))
- return ERR_PTR(-EINVAL);
- else if (!size)
- count = 0;
- else
- count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
- esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
- new = kzalloc(esize, GFP_NOFS);
- if (unlikely(!new))
- return ERR_PTR(-ENOMEM);
-
- new->a_count = cpu_to_le32(count);
- for (i = 0; i < count; i++) {
- new->a_entries[i].e_tag = header->a_entries[i].e_tag;
- new->a_entries[i].e_perm = header->a_entries[i].e_perm;
- new->a_entries[i].e_id = header->a_entries[i].e_id;
- new->a_entries[i].e_stat = cpu_to_le32(ES_UNK);
- }
-
- return new;
-}
-EXPORT_SYMBOL(lustre_posix_acl_xattr_2ext);
-
-/*
- * Filter out the "nobody" entries in the posix ACL.
- */
-int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
- posix_acl_xattr_header **out)
-{
- int count, i, j, rc = 0;
- __u32 id;
- posix_acl_xattr_header *new;
-
- if (!size)
- return 0;
- if (size < sizeof(*new))
- return -EINVAL;
-
- new = kzalloc(size, GFP_NOFS);
- if (unlikely(!new))
- return -ENOMEM;
-
- new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
- count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
- for (i = 0, j = 0; i < count; i++) {
- id = le32_to_cpu(header->a_entries[i].e_id);
- switch (le16_to_cpu(header->a_entries[i].e_tag)) {
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- if (id != ACL_UNDEFINED_ID) {
- rc = -EIO;
- goto _out;
- }
-
- memcpy(&new->a_entries[j++], &header->a_entries[i],
- sizeof(posix_acl_xattr_entry));
- break;
- case ACL_USER:
- if (id != NOBODY_UID)
- memcpy(&new->a_entries[j++],
- &header->a_entries[i],
- sizeof(posix_acl_xattr_entry));
- break;
- case ACL_GROUP:
- if (id != NOBODY_GID)
- memcpy(&new->a_entries[j++],
- &header->a_entries[i],
- sizeof(posix_acl_xattr_entry));
- break;
- default:
- rc = -EIO;
- goto _out;
- }
- }
-
- /* free unused space. */
- rc = lustre_posix_acl_xattr_reduce_space(&new, count, j);
- if (rc >= 0) {
- size = rc;
- *out = new;
- rc = 0;
- }
-
-_out:
- if (rc) {
- kfree(new);
- size = rc;
- }
- return size;
-}
-EXPORT_SYMBOL(lustre_posix_acl_xattr_filter);
-
-/*
- * Release the extended ACL space.
- */
-void lustre_ext_acl_xattr_free(ext_acl_xattr_header *header)
-{
- kfree(header);
-}
-EXPORT_SYMBOL(lustre_ext_acl_xattr_free);
-
-static ext_acl_xattr_entry *
-lustre_ext_acl_xattr_search(ext_acl_xattr_header *header,
- posix_acl_xattr_entry *entry, int *pos)
-{
- int once, start, end, i, j, count = le32_to_cpu(header->a_count);
-
- once = 0;
- start = *pos;
- end = count;
-
-again:
- for (i = start; i < end; i++) {
- if (header->a_entries[i].e_tag == entry->e_tag &&
- header->a_entries[i].e_id == entry->e_id) {
- j = i;
- if (++i >= count)
- i = 0;
- *pos = i;
- return &header->a_entries[j];
- }
- }
-
- if (!once) {
- once = 1;
- start = 0;
- end = *pos;
- goto again;
- }
-
- return NULL;
-}
-
-/*
- * Merge the posix ACL and the extended ACL into new extended ACL.
- */
-ext_acl_xattr_header *
-lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
- ext_acl_xattr_header *ext_header)
-{
- int ori_ext_count, posix_count, ext_count, ext_size;
- int i, j, pos = 0, rc = 0;
- posix_acl_xattr_entry pae;
- ext_acl_xattr_header *new;
- ext_acl_xattr_entry *ee, eae;
-
- if (unlikely(size < 0))
- return ERR_PTR(-EINVAL);
- else if (!size)
- posix_count = 0;
- else
- posix_count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
- ori_ext_count = le32_to_cpu(ext_header->a_count);
- ext_count = posix_count + ori_ext_count;
- ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
-
- new = kzalloc(ext_size, GFP_NOFS);
- if (unlikely(!new))
- return ERR_PTR(-ENOMEM);
-
- for (i = 0, j = 0; i < posix_count; i++) {
- lustre_posix_acl_le_to_cpu(&pae, &posix_header->a_entries[i]);
- switch (pae.e_tag) {
- case ACL_USER_OBJ:
- case ACL_GROUP_OBJ:
- case ACL_MASK:
- case ACL_OTHER:
- if (pae.e_id != ACL_UNDEFINED_ID) {
- rc = -EIO;
- goto out;
- }
- case ACL_USER:
- /* ignore "nobody" entry. */
- if (pae.e_id == NOBODY_UID)
- break;
-
- new->a_entries[j].e_tag =
- posix_header->a_entries[i].e_tag;
- new->a_entries[j].e_perm =
- posix_header->a_entries[i].e_perm;
- new->a_entries[j].e_id =
- posix_header->a_entries[i].e_id;
- ee = lustre_ext_acl_xattr_search(ext_header,
- &posix_header->a_entries[i], &pos);
- if (ee) {
- if (posix_header->a_entries[i].e_perm !=
- ee->e_perm)
- /* entry modified. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_MOD);
- else
- /* entry unchanged. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_UNC);
- } else {
- /* new entry. */
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_ADD);
- }
- break;
- case ACL_GROUP:
- /* ignore "nobody" entry. */
- if (pae.e_id == NOBODY_GID)
- break;
- new->a_entries[j].e_tag =
- posix_header->a_entries[i].e_tag;
- new->a_entries[j].e_perm =
- posix_header->a_entries[i].e_perm;
- new->a_entries[j].e_id =
- posix_header->a_entries[i].e_id;
- ee = lustre_ext_acl_xattr_search(ext_header,
- &posix_header->a_entries[i], &pos);
- if (ee) {
- if (posix_header->a_entries[i].e_perm !=
- ee->e_perm)
- /* entry modified. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_MOD);
- else
- /* entry unchanged. */
- ee->e_stat =
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_UNC);
- } else {
- /* new entry. */
- new->a_entries[j++].e_stat =
- cpu_to_le32(ES_ADD);
- }
- break;
- default:
- rc = -EIO;
- goto out;
- }
- }
-
- /* process deleted entries. */
- for (i = 0; i < ori_ext_count; i++) {
- lustre_ext_acl_le_to_cpu(&eae, &ext_header->a_entries[i]);
- if (eae.e_stat == ES_UNK) {
- /* ignore "nobody" entry. */
- if ((eae.e_tag == ACL_USER && eae.e_id == NOBODY_UID) ||
- (eae.e_tag == ACL_GROUP && eae.e_id == NOBODY_GID))
- continue;
-
- new->a_entries[j].e_tag =
- ext_header->a_entries[i].e_tag;
- new->a_entries[j].e_perm =
- ext_header->a_entries[i].e_perm;
- new->a_entries[j].e_id = ext_header->a_entries[i].e_id;
- new->a_entries[j++].e_stat = cpu_to_le32(ES_DEL);
- }
- }
-
- new->a_count = cpu_to_le32(j);
- /* free unused space. */
- rc = lustre_ext_acl_xattr_reduce_space(&new, ext_count);
-
-out:
- if (rc) {
- kfree(new);
- new = ERR_PTR(rc);
- }
- return new;
-}
-EXPORT_SYMBOL(lustre_acl_xattr_merge2ext);
-
-#endif