diff options
Diffstat (limited to 'drivers/staging/lustre/lustre')
196 files changed, 6073 insertions, 16342 deletions
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig index 62c7bba75..8ac7cd4d6 100644 --- a/drivers/staging/lustre/lustre/Kconfig +++ b/drivers/staging/lustre/lustre/Kconfig @@ -1,7 +1,7 @@ config LUSTRE_FS tristate "Lustre file system client support" - depends on INET && m && !MIPS && !XTENSA && !SUPERH - select LNET + depends on m && !MIPS && !XTENSA && !SUPERH + depends on LNET select CRYPTO select CRYPTO_CRC32 select CRYPTO_CRC32_PCLMUL if X86 diff --git a/drivers/staging/lustre/lustre/Makefile b/drivers/staging/lustre/lustre/Makefile index 35d8b0b2d..331e4fcdd 100644 --- a/drivers/staging/lustre/lustre/Makefile +++ b/drivers/staging/lustre/lustre/Makefile @@ -1,2 +1,2 @@ -obj-$(CONFIG_LUSTRE_FS) += libcfs/ obdclass/ ptlrpc/ fld/ osc/ mgc/ \ +obj-$(CONFIG_LUSTRE_FS) += obdclass/ ptlrpc/ fld/ osc/ mgc/ \ fid/ lov/ mdc/ lmv/ llite/ obdecho/ diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c index ff8f38dc1..39269c3c5 100644 --- a/drivers/staging/lustre/lustre/fid/fid_request.c +++ b/drivers/staging/lustre/lustre/fid/fid_request.c @@ -68,7 +68,7 @@ static int seq_client_rpc(struct lu_client_seq *seq, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY, LUSTRE_MDS_VERSION, SEQ_QUERY); - if (req == NULL) + if (!req) return -ENOMEM; /* Init operation code */ @@ -95,7 +95,8 @@ static int seq_client_rpc(struct lu_client_seq *seq, * precreating objects on this OST), and it will send the * request to MDT0 here, so we can not keep resending the * request here, otherwise if MDT0 is failed(umounted), - * it can not release the export of MDT0 */ + * it can not release the export of MDT0 + */ if (seq->lcs_type == LUSTRE_SEQ_DATA) req->rq_no_delay = req->rq_no_resend = 1; debug_mask = D_CONSOLE; @@ -152,7 +153,8 @@ static int seq_client_alloc_meta(const struct lu_env *env, /* If meta server return -EINPROGRESS or EAGAIN, * it means meta server might not be ready 
to * allocate super sequence from sequence controller - * (MDT0)yet */ + * (MDT0)yet + */ rc = seq_client_rpc(seq, &seq->lcs_space, SEQ_ALLOC_META, "meta"); } while (rc == -EINPROGRESS || rc == -EAGAIN); @@ -226,8 +228,8 @@ int seq_client_alloc_fid(const struct lu_env *env, wait_queue_t link; int rc; - LASSERT(seq != NULL); - LASSERT(fid != NULL); + LASSERT(seq); + LASSERT(fid); init_waitqueue_entry(&link, current); mutex_lock(&seq->lcs_mutex); @@ -292,7 +294,7 @@ void seq_client_flush(struct lu_client_seq *seq) { wait_queue_t link; - LASSERT(seq != NULL); + LASSERT(seq); init_waitqueue_entry(&link, current); mutex_lock(&seq->lcs_mutex); @@ -375,8 +377,8 @@ static int seq_client_init(struct lu_client_seq *seq, { int rc; - LASSERT(seq != NULL); - LASSERT(prefix != NULL); + LASSERT(seq); + LASSERT(prefix); seq->lcs_type = type; @@ -438,7 +440,7 @@ int client_fid_fini(struct obd_device *obd) { struct client_obd *cli = &obd->u.cli; - if (cli->cl_seq != NULL) { + if (cli->cl_seq) { seq_client_fini(cli->cl_seq); kfree(cli->cl_seq); cli->cl_seq = NULL; @@ -448,7 +450,7 @@ int client_fid_fini(struct obd_device *obd) } EXPORT_SYMBOL(client_fid_fini); -static int __init fid_mod_init(void) +static int __init fid_init(void) { seq_debugfs_dir = ldebugfs_register(LUSTRE_SEQ_NAME, debugfs_lustre_root, @@ -456,16 +458,16 @@ static int __init fid_mod_init(void) return PTR_ERR_OR_ZERO(seq_debugfs_dir); } -static void __exit fid_mod_exit(void) +static void __exit fid_exit(void) { if (!IS_ERR_OR_NULL(seq_debugfs_dir)) ldebugfs_remove(&seq_debugfs_dir); } MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre FID Module"); +MODULE_DESCRIPTION("Lustre File IDentifier"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); -MODULE_VERSION("0.1.0"); -module_init(fid_mod_init); -module_exit(fid_mod_exit); +module_init(fid_init); +module_exit(fid_exit); diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c index 39f2aa32e..1f0e78686 100644 --- a/drivers/staging/lustre/lustre/fid/lproc_fid.c +++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c @@ -66,7 +66,7 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count, int rc; char kernbuf[MAX_FID_RANGE_STRLEN]; - LASSERT(range != NULL); + LASSERT(range); if (count >= sizeof(kernbuf)) return -EINVAL; @@ -85,6 +85,8 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count, rc = sscanf(kernbuf, "[%llx - %llx]\n", (unsigned long long *)&tmp.lsr_start, (unsigned long long *)&tmp.lsr_end); + if (rc != 2) + return -EINVAL; if (!range_is_sane(&tmp) || range_is_zero(&tmp) || tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end) return -EINVAL; @@ -102,7 +104,6 @@ ldebugfs_fid_space_seq_write(struct file *file, int rc; seq = ((struct seq_file *)file->private_data)->private; - LASSERT(seq != NULL); mutex_lock(&seq->lcs_mutex); rc = ldebugfs_fid_write_common(buffer, count, &seq->lcs_space); @@ -122,8 +123,6 @@ ldebugfs_fid_space_seq_show(struct seq_file *m, void *unused) { struct lu_client_seq *seq = (struct lu_client_seq *)m->private; - LASSERT(seq != NULL); - mutex_lock(&seq->lcs_mutex); seq_printf(m, "[%#llx - %#llx]:%x:%s\n", PRANGE(&seq->lcs_space)); mutex_unlock(&seq->lcs_mutex); @@ -141,7 +140,6 @@ ldebugfs_fid_width_seq_write(struct file *file, int rc, val; seq = ((struct seq_file *)file->private_data)->private; - LASSERT(seq != NULL); rc = lprocfs_write_helper(buffer, count, &val); if (rc) @@ -170,8 +168,6 @@ ldebugfs_fid_width_seq_show(struct seq_file *m, void *unused) { struct 
lu_client_seq *seq = (struct lu_client_seq *)m->private; - LASSERT(seq != NULL); - mutex_lock(&seq->lcs_mutex); seq_printf(m, "%llu\n", seq->lcs_width); mutex_unlock(&seq->lcs_mutex); @@ -184,8 +180,6 @@ ldebugfs_fid_fid_seq_show(struct seq_file *m, void *unused) { struct lu_client_seq *seq = (struct lu_client_seq *)m->private; - LASSERT(seq != NULL); - mutex_lock(&seq->lcs_mutex); seq_printf(m, DFID "\n", PFID(&seq->lcs_fid)); mutex_unlock(&seq->lcs_mutex); @@ -199,9 +193,7 @@ ldebugfs_fid_server_seq_show(struct seq_file *m, void *unused) struct lu_client_seq *seq = (struct lu_client_seq *)m->private; struct client_obd *cli; - LASSERT(seq != NULL); - - if (seq->lcs_exp != NULL) { + if (seq->lcs_exp) { cli = &seq->lcs_exp->exp_obd->u.cli; seq_printf(m, "%s\n", cli->cl_target_uuid.uuid); } diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c index d9459e58e..062f388cf 100644 --- a/drivers/staging/lustre/lustre/fld/fld_cache.c +++ b/drivers/staging/lustre/lustre/fld/fld_cache.c @@ -65,7 +65,7 @@ struct fld_cache *fld_cache_init(const char *name, { struct fld_cache *cache; - LASSERT(name != NULL); + LASSERT(name); LASSERT(cache_threshold < cache_size); cache = kzalloc(sizeof(*cache), GFP_NOFS); @@ -100,7 +100,7 @@ void fld_cache_fini(struct fld_cache *cache) { __u64 pct; - LASSERT(cache != NULL); + LASSERT(cache); fld_cache_flush(cache); if (cache->fci_stat.fst_count > 0) { @@ -183,7 +183,8 @@ restart_fixup: } /* we could have overlap over next - * range too. better restart. */ + * range too. better restart. 
+ */ goto restart_fixup; } @@ -218,8 +219,6 @@ static int fld_cache_shrink(struct fld_cache *cache) struct list_head *curr; int num = 0; - LASSERT(cache != NULL); - if (cache->fci_cache_count < cache->fci_cache_size) return 0; @@ -234,7 +233,7 @@ static int fld_cache_shrink(struct fld_cache *cache) } CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n", - cache->fci_name, num); + cache->fci_name, num); return 0; } @@ -295,8 +294,8 @@ static void fld_cache_punch_hole(struct fld_cache *cache, * handle range overlap in fld cache. */ static void fld_cache_overlap_handle(struct fld_cache *cache, - struct fld_cache_entry *f_curr, - struct fld_cache_entry *f_new) + struct fld_cache_entry *f_curr, + struct fld_cache_entry *f_new) { const struct lu_seq_range *range = &f_new->fce_range; const u64 new_start = range->lsr_start; @@ -304,7 +303,8 @@ static void fld_cache_overlap_handle(struct fld_cache *cache, const u32 mdt = range->lsr_index; /* this is overlap case, these case are checking overlapping with - * prev range only. fixup will handle overlapping with next range. */ + * prev range only. fixup will handle overlapping with next range. + */ if (f_curr->fce_range.lsr_index == mdt) { f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start, @@ -319,7 +319,8 @@ static void fld_cache_overlap_handle(struct fld_cache *cache, } else if (new_start <= f_curr->fce_range.lsr_start && f_curr->fce_range.lsr_end <= new_end) { /* case 1: new range completely overshadowed existing range. - * e.g. whole range migrated. update fld cache entry */ + * e.g. whole range migrated. 
update fld cache entry + */ f_curr->fce_range = *range; kfree(f_new); @@ -401,8 +402,8 @@ static int fld_cache_insert_nolock(struct fld_cache *cache, list_for_each_entry_safe(f_curr, n, head, fce_list) { /* add list if next is end of list */ if (new_end < f_curr->fce_range.lsr_start || - (new_end == f_curr->fce_range.lsr_start && - new_flags != f_curr->fce_range.lsr_flags)) + (new_end == f_curr->fce_range.lsr_start && + new_flags != f_curr->fce_range.lsr_flags)) break; prev = &f_curr->fce_list; @@ -414,7 +415,7 @@ static int fld_cache_insert_nolock(struct fld_cache *cache, } } - if (prev == NULL) + if (!prev) prev = head; CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range)); @@ -459,8 +460,8 @@ struct fld_cache_entry head = &cache->fci_entries_head; list_for_each_entry(flde, head, fce_list) { if (range->lsr_start == flde->fce_range.lsr_start || - (range->lsr_end == flde->fce_range.lsr_end && - range->lsr_flags == flde->fce_range.lsr_flags)) { + (range->lsr_end == flde->fce_range.lsr_end && + range->lsr_flags == flde->fce_range.lsr_flags)) { got = flde; break; } @@ -499,7 +500,7 @@ int fld_cache_lookup(struct fld_cache *cache, cache->fci_stat.fst_count++; list_for_each_entry(flde, head, fce_list) { if (flde->fce_range.lsr_start > seq) { - if (prev != NULL) + if (prev) *range = prev->fce_range; break; } diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h index 12eb1647b..e8a3caf20 100644 --- a/drivers/staging/lustre/lustre/fld/fld_internal.h +++ b/drivers/staging/lustre/lustre/fld/fld_internal.h @@ -58,22 +58,16 @@ struct fld_stats { __u64 fst_inflight; }; -typedef int (*fld_hash_func_t) (struct lu_client_fld *, __u64); - -typedef struct lu_fld_target * -(*fld_scan_func_t) (struct lu_client_fld *, __u64); - struct lu_fld_hash { const char *fh_name; - fld_hash_func_t fh_hash_func; - fld_scan_func_t fh_scan_func; + int (*fh_hash_func)(struct lu_client_fld *, __u64); + struct lu_fld_target 
*(*fh_scan_func)(struct lu_client_fld *, __u64); }; struct fld_cache_entry { struct list_head fce_lru; struct list_head fce_list; - /** - * fld cache entries are sorted on range->lsr_start field. */ + /** fld cache entries are sorted on range->lsr_start field. */ struct lu_seq_range fce_range; }; @@ -84,32 +78,25 @@ struct fld_cache { */ rwlock_t fci_lock; - /** - * Cache shrink threshold */ + /** Cache shrink threshold */ int fci_threshold; - /** - * Preferred number of cached entries */ + /** Preferred number of cached entries */ int fci_cache_size; - /** - * Current number of cached entries. Protected by \a fci_lock */ + /** Current number of cached entries. Protected by \a fci_lock */ int fci_cache_count; - /** - * LRU list fld entries. */ + /** LRU list fld entries. */ struct list_head fci_lru; - /** - * sorted fld entries. */ + /** sorted fld entries. */ struct list_head fci_entries_head; - /** - * Cache statistics. */ + /** Cache statistics. */ struct fld_stats fci_stat; - /** - * Cache name used for debug and messages. */ + /** Cache name used for debug and messages. */ char fci_name[LUSTRE_MDT_MAXNAMELEN]; unsigned int fci_no_shrink:1; }; @@ -169,7 +156,7 @@ struct fld_cache_entry static inline const char * fld_target_name(struct lu_fld_target *tar) { - if (tar->ft_srv != NULL) + if (tar->ft_srv) return tar->ft_srv->lsf_name; return (const char *)tar->ft_exp->exp_obd->obd_name; diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c index d92c01b74..a3d122d85 100644 --- a/drivers/staging/lustre/lustre/fld/fld_request.c +++ b/drivers/staging/lustre/lustre/fld/fld_request.c @@ -58,7 +58,8 @@ #include "fld_internal.h" /* TODO: these 3 functions are copies of flow-control code from mdc_lib.c - * It should be common thing. The same about mdc RPC lock */ + * It should be common thing. 
The same about mdc RPC lock + */ static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw) { int rc; @@ -124,7 +125,8 @@ fld_rrb_scan(struct lu_client_fld *fld, u64 seq) * it should go to index 0 directly, instead of calculating * hash again, and also if other MDTs is not being connected, * the fld lookup requests(for seq on MDT0) should not be - * blocked because of other MDTs */ + * blocked because of other MDTs + */ if (fid_seq_is_norm(seq)) hash = fld_rrb_hash(fld, seq); else @@ -139,18 +141,19 @@ again: if (hash != 0) { /* It is possible the remote target(MDT) are not connected to * with client yet, so we will refer this to MDT0, which should - * be connected during mount */ + * be connected during mount + */ hash = 0; goto again; } CERROR("%s: Can't find target by hash %d (seq %#llx). Targets (%d):\n", - fld->lcf_name, hash, seq, fld->lcf_count); + fld->lcf_name, hash, seq, fld->lcf_count); list_for_each_entry(target, &fld->lcf_targets, ft_chain) { - const char *srv_name = target->ft_srv != NULL ? + const char *srv_name = target->ft_srv ? target->ft_srv->lsf_name : "<null>"; - const char *exp_name = target->ft_exp != NULL ? + const char *exp_name = target->ft_exp ? 
(char *)target->ft_exp->exp_obd->obd_uuid.uuid : "<null>"; @@ -183,13 +186,13 @@ fld_client_get_target(struct lu_client_fld *fld, u64 seq) { struct lu_fld_target *target; - LASSERT(fld->lcf_hash != NULL); + LASSERT(fld->lcf_hash); spin_lock(&fld->lcf_lock); target = fld->lcf_hash->fh_scan_func(fld, seq); spin_unlock(&fld->lcf_lock); - if (target != NULL) { + if (target) { CDEBUG(D_INFO, "%s: Found target (idx %llu) by seq %#llx\n", fld->lcf_name, target->ft_idx, seq); } @@ -207,18 +210,18 @@ int fld_client_add_target(struct lu_client_fld *fld, const char *name; struct lu_fld_target *target, *tmp; - LASSERT(tar != NULL); + LASSERT(tar); name = fld_target_name(tar); - LASSERT(name != NULL); - LASSERT(tar->ft_srv != NULL || tar->ft_exp != NULL); + LASSERT(name); + LASSERT(tar->ft_srv || tar->ft_exp); if (fld->lcf_flags != LUSTRE_FLD_INIT) { CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n", - fld->lcf_name, name, tar->ft_idx); + fld->lcf_name, name, tar->ft_idx); return 0; } CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n", - fld->lcf_name, name, tar->ft_idx); + fld->lcf_name, name, tar->ft_idx); target = kzalloc(sizeof(*target), GFP_NOFS); if (!target) @@ -236,13 +239,12 @@ int fld_client_add_target(struct lu_client_fld *fld, } target->ft_exp = tar->ft_exp; - if (target->ft_exp != NULL) + if (target->ft_exp) class_export_get(target->ft_exp); target->ft_srv = tar->ft_srv; target->ft_idx = tar->ft_idx; - list_add_tail(&target->ft_chain, - &fld->lcf_targets); + list_add_tail(&target->ft_chain, &fld->lcf_targets); fld->lcf_count++; spin_unlock(&fld->lcf_lock); @@ -257,14 +259,13 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx) struct lu_fld_target *target, *tmp; spin_lock(&fld->lcf_lock); - list_for_each_entry_safe(target, tmp, - &fld->lcf_targets, ft_chain) { + list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) { if (target->ft_idx == idx) { fld->lcf_count--; list_del(&target->ft_chain); spin_unlock(&fld->lcf_lock); 
- if (target->ft_exp != NULL) + if (target->ft_exp) class_export_put(target->ft_exp); kfree(target); @@ -326,8 +327,6 @@ int fld_client_init(struct lu_client_fld *fld, int cache_size, cache_threshold; int rc; - LASSERT(fld != NULL); - snprintf(fld->lcf_name, sizeof(fld->lcf_name), "cli-%s", prefix); @@ -375,17 +374,16 @@ void fld_client_fini(struct lu_client_fld *fld) struct lu_fld_target *target, *tmp; spin_lock(&fld->lcf_lock); - list_for_each_entry_safe(target, tmp, - &fld->lcf_targets, ft_chain) { + list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) { fld->lcf_count--; list_del(&target->ft_chain); - if (target->ft_exp != NULL) + if (target->ft_exp) class_export_put(target->ft_exp); kfree(target); } spin_unlock(&fld->lcf_lock); - if (fld->lcf_cache != NULL) { + if (fld->lcf_cache) { if (!IS_ERR(fld->lcf_cache)) fld_cache_fini(fld->lcf_cache); fld->lcf_cache = NULL; @@ -402,12 +400,12 @@ int fld_client_rpc(struct obd_export *exp, int rc; struct obd_import *imp; - LASSERT(exp != NULL); + LASSERT(exp); imp = class_exp2cliimp(exp); req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION, FLD_QUERY); - if (req == NULL) + if (!req) return -ENOMEM; op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC); @@ -436,7 +434,7 @@ int fld_client_rpc(struct obd_export *exp, goto out_req; prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD); - if (prange == NULL) { + if (!prange) { rc = -EFAULT; goto out_req; } @@ -463,10 +461,10 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds, /* Can not find it in the cache */ target = fld_client_get_target(fld, seq); - LASSERT(target != NULL); + LASSERT(target); CDEBUG(D_INFO, "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n", - fld->lcf_name, seq, fld_target_name(target), target->ft_idx); + fld->lcf_name, seq, fld_target_name(target), target->ft_idx); res.lsr_start = seq; fld_range_set_type(&res, flags); @@ -487,7 +485,7 @@ void fld_client_flush(struct 
lu_client_fld *fld) } EXPORT_SYMBOL(fld_client_flush); -static int __init fld_mod_init(void) +static int __init fld_init(void) { fld_debugfs_dir = ldebugfs_register(LUSTRE_FLD_NAME, debugfs_lustre_root, @@ -495,15 +493,16 @@ static int __init fld_mod_init(void) return PTR_ERR_OR_ZERO(fld_debugfs_dir); } -static void __exit fld_mod_exit(void) +static void __exit fld_exit(void) { if (!IS_ERR_OR_NULL(fld_debugfs_dir)) ldebugfs_remove(&fld_debugfs_dir); } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre FLD"); +MODULE_DESCRIPTION("Lustre FID Location Database"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); -module_init(fld_mod_init) -module_exit(fld_mod_exit) +module_init(fld_init) +module_exit(fld_exit) diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c index 41ceaa819..ca898befe 100644 --- a/drivers/staging/lustre/lustre/fld/lproc_fld.c +++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c @@ -60,11 +60,8 @@ fld_debugfs_targets_seq_show(struct seq_file *m, void *unused) struct lu_client_fld *fld = (struct lu_client_fld *)m->private; struct lu_fld_target *target; - LASSERT(fld != NULL); - spin_lock(&fld->lcf_lock); - list_for_each_entry(target, - &fld->lcf_targets, ft_chain) + list_for_each_entry(target, &fld->lcf_targets, ft_chain) seq_printf(m, "%s\n", fld_target_name(target)); spin_unlock(&fld->lcf_lock); @@ -76,8 +73,6 @@ fld_debugfs_hash_seq_show(struct seq_file *m, void *unused) { struct lu_client_fld *fld = (struct lu_client_fld *)m->private; - LASSERT(fld != NULL); - spin_lock(&fld->lcf_lock); seq_printf(m, "%s\n", fld->lcf_hash->fh_name); spin_unlock(&fld->lcf_lock); @@ -102,9 +97,8 @@ fld_debugfs_hash_seq_write(struct file *file, return -EFAULT; fld = ((struct seq_file *)file->private_data)->private; - LASSERT(fld != NULL); - for (i = 0; fld_hash[i].fh_name != NULL; i++) { + for (i = 0; fld_hash[i].fh_name; i++) { if (count != 
strlen(fld_hash[i].fh_name)) continue; @@ -114,7 +108,7 @@ fld_debugfs_hash_seq_write(struct file *file, } } - if (hash != NULL) { + if (hash) { spin_lock(&fld->lcf_lock); fld->lcf_hash = hash; spin_unlock(&fld->lcf_lock); @@ -132,8 +126,6 @@ fld_debugfs_cache_flush_write(struct file *file, const char __user *buffer, { struct lu_client_fld *fld = file->private_data; - LASSERT(fld != NULL); - fld_cache_flush(fld->lcf_cache); CDEBUG(D_INFO, "%s: Lookup cache is flushed\n", fld->lcf_name); diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h index bd7acc2a1..fb971ded5 100644 --- a/drivers/staging/lustre/lustre/include/cl_object.h +++ b/drivers/staging/lustre/lustre/include/cl_object.h @@ -157,7 +157,8 @@ struct cl_device { }; /** \addtogroup cl_object cl_object - * @{ */ + * @{ + */ /** * "Data attributes" of cl_object. Data attributes can be updated * independently for a sub-object, and top-object's attributes are calculated @@ -288,13 +289,14 @@ struct cl_object_conf { enum { /** configure layout, set up a new stripe, must be called while - * holding layout lock. */ + * holding layout lock. + */ OBJECT_CONF_SET = 0, /** invalidate the current stripe configuration due to losing - * layout lock. */ + * layout lock. + */ OBJECT_CONF_INVALIDATE = 1, - /** wait for old layout to go away so that new layout can be - * set up. */ + /** wait for old layout to go away so that new layout can be set up. */ OBJECT_CONF_WAIT = 2 }; @@ -320,7 +322,7 @@ struct cl_object_operations { * to be used instead of newly created. */ int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage); + struct cl_page *page, struct page *vmpage); /** * Initialize lock slice for this layer. Called top-to-bottom through * every object layer when a new cl_lock is instantiated. 
Layer @@ -393,7 +395,8 @@ struct cl_object_operations { */ struct cl_object_header { /** Standard lu_object_header. cl_object::co_lu::lo_header points - * here. */ + * here. + */ struct lu_object_header coh_lu; /** \name locks * \todo XXX move locks below to the separate cache-lines, they are @@ -464,7 +467,8 @@ struct cl_object_header { #define CL_PAGE_EOF ((pgoff_t)~0ull) /** \addtogroup cl_page cl_page - * @{ */ + * @{ + */ /** \struct cl_page * Layered client page. @@ -687,12 +691,14 @@ enum cl_page_state { enum cl_page_type { /** Host page, the page is from the host inode which the cl_page - * belongs to. */ + * belongs to. + */ CPT_CACHEABLE = 1, /** Transient page, the transient cl_page is used to bind a cl_page * to vmpage which is not belonging to the same object of cl_page. - * it is used in DirectIO, lockless IO and liblustre. */ + * it is used in DirectIO and lockless IO. + */ CPT_TRANSIENT, }; @@ -728,7 +734,8 @@ struct cl_page { /** Parent page, NULL for top-level page. Immutable after creation. */ struct cl_page *cp_parent; /** Lower-layer page. NULL for bottommost page. Immutable after - * creation. */ + * creation. + */ struct cl_page *cp_child; /** * Page state. This field is const to avoid accidental update, it is @@ -842,7 +849,7 @@ struct cl_page_operations { * \return the underlying VM page. Optional. */ struct page *(*cpo_vmpage)(const struct lu_env *env, - const struct cl_page_slice *slice); + const struct cl_page_slice *slice); /** * Called when \a io acquires this page into the exclusive * ownership. When this method returns, it is guaranteed that the is @@ -1126,7 +1133,8 @@ static inline int __page_in_use(const struct cl_page *page, int refc) /** @} cl_page */ /** \addtogroup cl_lock cl_lock - * @{ */ + * @{ + */ /** \struct cl_lock * * Extent locking on the client. @@ -1641,7 +1649,8 @@ struct cl_lock { struct cl_lock_slice { struct cl_lock *cls_lock; /** Object slice corresponding to this lock slice. Immutable after - * creation. 
*/ + * creation. + */ struct cl_object *cls_obj; const struct cl_lock_operations *cls_ops; /** Linkage into cl_lock::cll_layers. Immutable after creation. */ @@ -1885,7 +1894,8 @@ struct cl_2queue { /** @} cl_page_list */ /** \addtogroup cl_io cl_io - * @{ */ + * @{ + */ /** \struct cl_io * I/O * @@ -2041,8 +2051,8 @@ struct cl_io_operations { * * \see cl_io_operations::cio_iter_fini() */ - int (*cio_iter_init) (const struct lu_env *env, - const struct cl_io_slice *slice); + int (*cio_iter_init)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Finalize io iteration. * @@ -2052,8 +2062,8 @@ struct cl_io_operations { * * \see cl_io_operations::cio_iter_init() */ - void (*cio_iter_fini) (const struct lu_env *env, - const struct cl_io_slice *slice); + void (*cio_iter_fini)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Collect locks for the current iteration of io. * @@ -2063,8 +2073,8 @@ struct cl_io_operations { * cl_io_lock_add(). Once all locks are collected, they are * sorted and enqueued in the proper order. */ - int (*cio_lock) (const struct lu_env *env, - const struct cl_io_slice *slice); + int (*cio_lock)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Finalize unlocking. * @@ -2089,8 +2099,8 @@ struct cl_io_operations { * Called top-to-bottom at the end of io loop. Here layer * might wait for an unfinished asynchronous io. */ - void (*cio_end) (const struct lu_env *env, - const struct cl_io_slice *slice); + void (*cio_end)(const struct lu_env *env, + const struct cl_io_slice *slice); /** * Called bottom-to-top to notify layers that read/write IO * iteration finished, with \a nob bytes transferred. @@ -2101,8 +2111,8 @@ struct cl_io_operations { /** * Called once per io, bottom-to-top to release io resources. 
*/ - void (*cio_fini) (const struct lu_env *env, - const struct cl_io_slice *slice); + void (*cio_fini)(const struct lu_env *env, + const struct cl_io_slice *slice); } op[CIT_OP_NR]; struct { /** @@ -2222,7 +2232,7 @@ struct cl_io_lock_link { struct cl_lock *cill_lock; /** optional destructor */ void (*cill_fini)(const struct lu_env *env, - struct cl_io_lock_link *link); + struct cl_io_lock_link *link); }; /** @@ -2272,7 +2282,7 @@ enum cl_io_lock_dmd { CILR_MANDATORY = 0, /** Layers are free to decide between local and global locking. */ CILR_MAYBE, - /** Never lock: there is no cache (e.g., liblustre). */ + /** Never lock: there is no cache (e.g., lockless IO). */ CILR_NEVER }; @@ -2284,7 +2294,8 @@ enum cl_fsync_mode { /** discard all of dirty pages in a specific file range */ CL_FSYNC_DISCARD = 2, /** start writeback and make sure they have reached storage before - * return. OST_SYNC RPC must be issued and finished */ + * return. OST_SYNC RPC must be issued and finished + */ CL_FSYNC_ALL = 3 }; @@ -2403,7 +2414,8 @@ struct cl_io { /** @} cl_io */ /** \addtogroup cl_req cl_req - * @{ */ + * @{ + */ /** \struct cl_req * Transfer. * @@ -2582,7 +2594,8 @@ enum cache_stats_item { /** how many entities are in the cache right now */ CS_total, /** how many entities in the cache are actively used (and cannot be - * evicted) right now */ + * evicted) right now + */ CS_busy, /** how many entities were created at all */ CS_create, @@ -2600,7 +2613,7 @@ struct cache_stats { }; /** These are not exported so far */ -void cache_stats_init (struct cache_stats *cs, const char *name); +void cache_stats_init(struct cache_stats *cs, const char *name); /** * Client-side site. This represents particular client stack. "Global" @@ -2613,7 +2626,7 @@ struct cl_site { * Statistical counters. Atomics do not scale, something better like * per-cpu counters is needed. 
* - * These are exported as /proc/fs/lustre/llite/.../site + * These are exported as /sys/kernel/debug/lustre/llite/.../site * * When interpreting keep in mind that both sub-locks (and sub-pages) * and top-locks (and top-pages) are accounted here. @@ -2624,8 +2637,8 @@ struct cl_site { atomic_t cs_locks_state[CLS_NR]; }; -int cl_site_init (struct cl_site *s, struct cl_device *top); -void cl_site_fini (struct cl_site *s); +int cl_site_init(struct cl_site *s, struct cl_device *top); +void cl_site_fini(struct cl_site *s); void cl_stack_fini(const struct lu_env *env, struct cl_device *cl); /** @@ -2653,7 +2666,7 @@ static inline int lu_device_is_cl(const struct lu_device *d) static inline struct cl_device *lu2cl_dev(const struct lu_device *d) { - LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d)); + LASSERT(!d || IS_ERR(d) || lu_device_is_cl(d)); return container_of0(d, struct cl_device, cd_lu_dev); } @@ -2664,7 +2677,7 @@ static inline struct lu_device *cl2lu_dev(struct cl_device *d) static inline struct cl_object *lu2cl(const struct lu_object *o) { - LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev)); + LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->lo_dev)); return container_of0(o, struct cl_object, co_lu); } @@ -2681,7 +2694,7 @@ static inline struct cl_object *cl_object_next(const struct cl_object *obj) static inline struct cl_device *cl_object_device(const struct cl_object *o) { - LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev)); + LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev)); return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev); } @@ -2725,27 +2738,28 @@ void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice, /** @} helpers */ /** \defgroup cl_object cl_object - * @{ */ -struct cl_object *cl_object_top (struct cl_object *o); + * @{ + */ +struct cl_object *cl_object_top(struct cl_object *o); struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd, const struct 
lu_fid *fid, const struct cl_object_conf *c); int cl_object_header_init(struct cl_object_header *h); -void cl_object_put (const struct lu_env *env, struct cl_object *o); -void cl_object_get (struct cl_object *o); -void cl_object_attr_lock (struct cl_object *o); +void cl_object_put(const struct lu_env *env, struct cl_object *o); +void cl_object_get(struct cl_object *o); +void cl_object_attr_lock(struct cl_object *o); void cl_object_attr_unlock(struct cl_object *o); -int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj, - struct cl_attr *attr); -int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj, - const struct cl_attr *attr, unsigned valid); -int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj, - struct ost_lvb *lvb); -int cl_conf_set (const struct lu_env *env, struct cl_object *obj, - const struct cl_object_conf *conf); -void cl_object_prune (const struct lu_env *env, struct cl_object *obj); -void cl_object_kill (const struct lu_env *env, struct cl_object *obj); +int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, + struct cl_attr *attr); +int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj, + const struct cl_attr *attr, unsigned valid); +int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj, + struct ost_lvb *lvb); +int cl_conf_set(const struct lu_env *env, struct cl_object *obj, + const struct cl_object_conf *conf); +void cl_object_prune(const struct lu_env *env, struct cl_object *obj); +void cl_object_kill(const struct lu_env *env, struct cl_object *obj); /** * Returns true, iff \a o0 and \a o1 are slices of the same object. 
@@ -2770,7 +2784,8 @@ static inline void *cl_object_page_slice(struct cl_object *clob, /** @} cl_object */ /** \defgroup cl_page cl_page - * @{ */ + * @{ + */ enum { CLP_GANG_OKAY = 0, CLP_GANG_RESCHED, @@ -2781,34 +2796,26 @@ enum { /* callback of cl_page_gang_lookup() */ typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *, struct cl_page *, void *); -int cl_page_gang_lookup (const struct lu_env *env, - struct cl_object *obj, - struct cl_io *io, - pgoff_t start, pgoff_t end, - cl_page_gang_cb_t cb, void *cbdata); -struct cl_page *cl_page_lookup (struct cl_object_header *hdr, - pgoff_t index); -struct cl_page *cl_page_find (const struct lu_env *env, - struct cl_object *obj, - pgoff_t idx, struct page *vmpage, - enum cl_page_type type); -struct cl_page *cl_page_find_sub (const struct lu_env *env, - struct cl_object *obj, - pgoff_t idx, struct page *vmpage, +int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io, pgoff_t start, pgoff_t end, + cl_page_gang_cb_t cb, void *cbdata); +struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index); +struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj, + pgoff_t idx, struct page *vmpage, + enum cl_page_type type); +struct cl_page *cl_page_find_sub(const struct lu_env *env, + struct cl_object *obj, + pgoff_t idx, struct page *vmpage, struct cl_page *parent); -void cl_page_get (struct cl_page *page); -void cl_page_put (const struct lu_env *env, - struct cl_page *page); -void cl_page_print (const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_page *pg); -void cl_page_header_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_page *pg); -struct page *cl_page_vmpage (const struct lu_env *env, - struct cl_page *page); -struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj); -struct cl_page *cl_page_top (struct cl_page *page); +void cl_page_get(struct 
cl_page *page); +void cl_page_put(const struct lu_env *env, struct cl_page *page); +void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer, + const struct cl_page *pg); +void cl_page_header_print(const struct lu_env *env, void *cookie, + lu_printer_t printer, const struct cl_page *pg); +struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page); +struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj); +struct cl_page *cl_page_top(struct cl_page *page); const struct cl_page_slice *cl_page_at(const struct cl_page *page, const struct lu_device_type *dtype); @@ -2820,17 +2827,17 @@ const struct cl_page_slice *cl_page_at(const struct cl_page *page, */ /** @{ */ -int cl_page_own (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -int cl_page_own_try (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -void cl_page_assume (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -void cl_page_unassume (const struct lu_env *env, - struct cl_io *io, struct cl_page *pg); -void cl_page_disown (const struct lu_env *env, - struct cl_io *io, struct cl_page *page); -int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io); +int cl_page_own(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +int cl_page_own_try(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +void cl_page_assume(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +void cl_page_unassume(const struct lu_env *env, + struct cl_io *io, struct cl_page *pg); +void cl_page_disown(const struct lu_env *env, + struct cl_io *io, struct cl_page *page); +int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io); /** @} ownership */ @@ -2841,19 +2848,19 @@ int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io); * tracking transfer state. 
*/ /** @{ */ -int cl_page_prep (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg, enum cl_req_type crt); -void cl_page_completion (const struct lu_env *env, - struct cl_page *pg, enum cl_req_type crt, int ioret); -int cl_page_make_ready (const struct lu_env *env, struct cl_page *pg, - enum cl_req_type crt); -int cl_page_cache_add (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg, enum cl_req_type crt); -void cl_page_clip (const struct lu_env *env, struct cl_page *pg, - int from, int to); -int cl_page_cancel (const struct lu_env *env, struct cl_page *page); -int cl_page_flush (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); +int cl_page_prep(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg, enum cl_req_type crt); +void cl_page_completion(const struct lu_env *env, + struct cl_page *pg, enum cl_req_type crt, int ioret); +int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg, + enum cl_req_type crt); +int cl_page_cache_add(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg, enum cl_req_type crt); +void cl_page_clip(const struct lu_env *env, struct cl_page *pg, + int from, int to); +int cl_page_cancel(const struct lu_env *env, struct cl_page *page); +int cl_page_flush(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); /** @} transfer */ @@ -2862,24 +2869,22 @@ int cl_page_flush (const struct lu_env *env, struct cl_io *io, * Functions to discard, delete and export a cl_page. 
*/ /** @{ */ -void cl_page_discard (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); -void cl_page_delete (const struct lu_env *env, struct cl_page *pg); -int cl_page_unmap (const struct lu_env *env, struct cl_io *io, - struct cl_page *pg); -int cl_page_is_vmlocked (const struct lu_env *env, - const struct cl_page *pg); -void cl_page_export (const struct lu_env *env, - struct cl_page *pg, int uptodate); -int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, - struct cl_page *page); -loff_t cl_offset (const struct cl_object *obj, pgoff_t idx); -pgoff_t cl_index (const struct cl_object *obj, loff_t offset); -int cl_page_size (const struct cl_object *obj); -int cl_pages_prune (const struct lu_env *env, struct cl_object *obj); - -void cl_lock_print (const struct lu_env *env, void *cookie, - lu_printer_t printer, const struct cl_lock *lock); +void cl_page_discard(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); +void cl_page_delete(const struct lu_env *env, struct cl_page *pg); +int cl_page_unmap(const struct lu_env *env, struct cl_io *io, + struct cl_page *pg); +int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg); +void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate); +int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io, + struct cl_page *page); +loff_t cl_offset(const struct cl_object *obj, pgoff_t idx); +pgoff_t cl_index(const struct cl_object *obj, loff_t offset); +int cl_page_size(const struct cl_object *obj); +int cl_pages_prune(const struct lu_env *env, struct cl_object *obj); + +void cl_lock_print(const struct lu_env *env, void *cookie, + lu_printer_t printer, const struct cl_lock *lock); void cl_lock_descr_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_lock_descr *descr); @@ -2888,7 +2893,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie, /** @} cl_page */ /** \defgroup cl_lock cl_lock - 
* @{ */ + * @{ + */ struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io, const struct cl_lock_descr *need, @@ -2917,19 +2923,19 @@ static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env, const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock, const struct lu_device_type *dtype); -void cl_lock_get (struct cl_lock *lock); -void cl_lock_get_trust (struct cl_lock *lock); -void cl_lock_put (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); +void cl_lock_get(struct cl_lock *lock); +void cl_lock_get_trust(struct cl_lock *lock); +void cl_lock_put(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock, + const char *scope, const void *source); void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock, const char *scope, const void *source); -void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_release (const struct lu_env *env, struct cl_lock *lock, - const char *scope, const void *source); -void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock); +void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock, + const char *scope, const void *source); +void cl_lock_release(const struct lu_env *env, struct cl_lock *lock, + const char *scope, const void *source); +void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock); int cl_lock_is_intransit(struct cl_lock *lock); @@ -2966,52 +2972,53 @@ int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock, * * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD * - * @{ */ + * @{ + */ -int cl_wait (const struct lu_env *env, 
struct cl_lock *lock); -void cl_unuse (const struct lu_env *env, struct cl_lock *lock); -int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, - struct cl_io *io, __u32 flags); -int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock); -int cl_wait_try (const struct lu_env *env, struct cl_lock *lock); -int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic); +int cl_wait(const struct lu_env *env, struct cl_lock *lock); +void cl_unuse(const struct lu_env *env, struct cl_lock *lock); +int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, + struct cl_io *io, __u32 flags); +int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock); +int cl_wait_try(const struct lu_env *env, struct cl_lock *lock); +int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic); /** @} statemachine */ -void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock, - enum cl_lock_state state); -int cl_queue_match (const struct list_head *queue, - const struct cl_lock_descr *need); - -void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock); -void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock); -int cl_lock_is_mutexed (struct cl_lock *lock); -int cl_lock_nr_mutexed (const struct lu_env *env); -int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock); -int cl_lock_ext_match (const struct cl_lock_descr *has, - const struct cl_lock_descr *need); -int cl_lock_descr_match(const struct cl_lock_descr *has, - const struct cl_lock_descr *need); -int cl_lock_mode_match (enum cl_lock_mode has, enum cl_lock_mode need); -int cl_lock_modify (const struct lu_env *env, struct cl_lock *lock, - const struct cl_lock_descr *desc); - -void cl_lock_closure_init (const struct lu_env *env, - struct cl_lock_closure *closure, - struct 
cl_lock *origin, int wait); -void cl_lock_closure_fini (struct cl_lock_closure *closure); -int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, - struct cl_lock_closure *closure); -void cl_lock_disclosure (const struct lu_env *env, - struct cl_lock_closure *closure); -int cl_lock_enclosure (const struct lu_env *env, struct cl_lock *lock, - struct cl_lock_closure *closure); +void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock, + enum cl_lock_state state); +int cl_queue_match(const struct list_head *queue, + const struct cl_lock_descr *need); + +void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock); +void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_is_mutexed(struct cl_lock *lock); +int cl_lock_nr_mutexed(const struct lu_env *env); +int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock); +int cl_lock_ext_match(const struct cl_lock_descr *has, + const struct cl_lock_descr *need); +int cl_lock_descr_match(const struct cl_lock_descr *has, + const struct cl_lock_descr *need); +int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need); +int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock, + const struct cl_lock_descr *desc); + +void cl_lock_closure_init(const struct lu_env *env, + struct cl_lock_closure *closure, + struct cl_lock *origin, int wait); +void cl_lock_closure_fini(struct cl_lock_closure *closure); +int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, + struct cl_lock_closure *closure); +void cl_lock_disclosure(const struct lu_env *env, + struct cl_lock_closure *closure); +int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock, + struct cl_lock_closure *closure); void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock); void 
cl_lock_delete(const struct lu_env *env, struct cl_lock *lock); -void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error); +void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error); void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait); unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); @@ -3019,39 +3026,40 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock); /** @} cl_lock */ /** \defgroup cl_io cl_io - * @{ */ - -int cl_io_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj); -int cl_io_sub_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, struct cl_object *obj); -int cl_io_rw_init (const struct lu_env *env, struct cl_io *io, - enum cl_io_type iot, loff_t pos, size_t count); -int cl_io_loop (const struct lu_env *env, struct cl_io *io); - -void cl_io_fini (const struct lu_env *env, struct cl_io *io); -int cl_io_iter_init (const struct lu_env *env, struct cl_io *io); -void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io); -int cl_io_lock (const struct lu_env *env, struct cl_io *io); -void cl_io_unlock (const struct lu_env *env, struct cl_io *io); -int cl_io_start (const struct lu_env *env, struct cl_io *io); -void cl_io_end (const struct lu_env *env, struct cl_io *io); -int cl_io_lock_add (const struct lu_env *env, struct cl_io *io, - struct cl_io_lock_link *link); -int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, - struct cl_lock_descr *descr); -int cl_io_read_page (const struct lu_env *env, struct cl_io *io, - struct cl_page *page); -int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, - struct cl_page *page, unsigned from, unsigned to); -int cl_io_commit_write (const struct lu_env *env, struct cl_io *io, - struct cl_page *page, unsigned from, unsigned to); -int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io, - enum 
cl_req_type iot, struct cl_2queue *queue); -int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io, - enum cl_req_type iot, struct cl_2queue *queue, - long timeout); -int cl_io_is_going (const struct lu_env *env); + * @{ + */ + +int cl_io_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, struct cl_object *obj); +int cl_io_sub_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, struct cl_object *obj); +int cl_io_rw_init(const struct lu_env *env, struct cl_io *io, + enum cl_io_type iot, loff_t pos, size_t count); +int cl_io_loop(const struct lu_env *env, struct cl_io *io); + +void cl_io_fini(const struct lu_env *env, struct cl_io *io); +int cl_io_iter_init(const struct lu_env *env, struct cl_io *io); +void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io); +int cl_io_lock(const struct lu_env *env, struct cl_io *io); +void cl_io_unlock(const struct lu_env *env, struct cl_io *io); +int cl_io_start(const struct lu_env *env, struct cl_io *io); +void cl_io_end(const struct lu_env *env, struct cl_io *io); +int cl_io_lock_add(const struct lu_env *env, struct cl_io *io, + struct cl_io_lock_link *link); +int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, + struct cl_lock_descr *descr); +int cl_io_read_page(const struct lu_env *env, struct cl_io *io, + struct cl_page *page); +int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, + struct cl_page *page, unsigned from, unsigned to); +int cl_io_commit_write(const struct lu_env *env, struct cl_io *io, + struct cl_page *page, unsigned from, unsigned to); +int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, + enum cl_req_type iot, struct cl_2queue *queue); +int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, + enum cl_req_type iot, struct cl_2queue *queue, + long timeout); +int cl_io_is_going(const struct lu_env *env); /** * True, iff \a io is an O_APPEND write(2). 
@@ -3094,7 +3102,8 @@ do { \ /** @} cl_io */ /** \defgroup cl_page_list cl_page_list - * @{ */ + * @{ + */ /** * Last page in the page list. @@ -3117,40 +3126,41 @@ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist) #define cl_page_list_for_each_safe(page, temp, list) \ list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch) -void cl_page_list_init (struct cl_page_list *plist); -void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page); -void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src, - struct cl_page *page); -void cl_page_list_splice (struct cl_page_list *list, - struct cl_page_list *head); -void cl_page_list_disown (const struct lu_env *env, - struct cl_io *io, struct cl_page_list *plist); - -void cl_2queue_init (struct cl_2queue *queue); -void cl_2queue_disown (const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue); -void cl_2queue_discard (const struct lu_env *env, - struct cl_io *io, struct cl_2queue *queue); -void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue); +void cl_page_list_init(struct cl_page_list *plist); +void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page); +void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src, + struct cl_page *page); +void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head); +void cl_page_list_disown(const struct lu_env *env, + struct cl_io *io, struct cl_page_list *plist); + +void cl_2queue_init(struct cl_2queue *queue); +void cl_2queue_disown(const struct lu_env *env, + struct cl_io *io, struct cl_2queue *queue); +void cl_2queue_discard(const struct lu_env *env, + struct cl_io *io, struct cl_2queue *queue); +void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue); void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page); /** @} cl_page_list */ /** \defgroup cl_req cl_req - * @{ */ + * @{ + */ struct cl_req 
*cl_req_alloc(const struct lu_env *env, struct cl_page *page, enum cl_req_type crt, int nr_objects); -void cl_req_page_add (const struct lu_env *env, struct cl_req *req, - struct cl_page *page); -void cl_req_page_done (const struct lu_env *env, struct cl_page *page); -int cl_req_prep (const struct lu_env *env, struct cl_req *req); -void cl_req_attr_set (const struct lu_env *env, struct cl_req *req, - struct cl_req_attr *attr, u64 flags); +void cl_req_page_add(const struct lu_env *env, struct cl_req *req, + struct cl_page *page); +void cl_req_page_done(const struct lu_env *env, struct cl_page *page); +int cl_req_prep(const struct lu_env *env, struct cl_req *req); +void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, + struct cl_req_attr *attr, u64 flags); void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret); /** \defgroup cl_sync_io cl_sync_io - * @{ */ + * @{ + */ /** * Anchor for synchronous transfer. This is allocated on a stack by thread @@ -3214,22 +3224,23 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret); * - cl_env_reexit(cl_env_reenter had to be called priorly) * * \see lu_env, lu_context, lu_context_key - * @{ */ + * @{ + */ struct cl_env_nest { int cen_refcheck; void *cen_cookie; }; -struct lu_env *cl_env_get (int *refcheck); -struct lu_env *cl_env_alloc (int *refcheck, __u32 tags); -struct lu_env *cl_env_nested_get (struct cl_env_nest *nest); -void cl_env_put (struct lu_env *env, int *refcheck); -void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env); -void *cl_env_reenter (void); -void cl_env_reexit (void *cookie); -void cl_env_implant (struct lu_env *env, int *refcheck); -void cl_env_unplant (struct lu_env *env, int *refcheck); +struct lu_env *cl_env_get(int *refcheck); +struct lu_env *cl_env_alloc(int *refcheck, __u32 tags); +struct lu_env *cl_env_nested_get(struct cl_env_nest *nest); +void cl_env_put(struct lu_env *env, int *refcheck); +void cl_env_nested_put(struct cl_env_nest 
*nest, struct lu_env *env); +void *cl_env_reenter(void); +void cl_env_reexit(void *cookie); +void cl_env_implant(struct lu_env *env, int *refcheck); +void cl_env_unplant(struct lu_env *env, int *refcheck); /** @} cl_env */ diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h index 36e7a6767..5d839a9f7 100644 --- a/drivers/staging/lustre/lustre/include/lclient.h +++ b/drivers/staging/lustre/lustre/include/lclient.h @@ -127,7 +127,7 @@ static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env) struct ccc_thread_info *info; info = lu_context_key_get(&env->le_ctx, &ccc_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -156,7 +156,7 @@ static inline struct ccc_session *ccc_env_session(const struct lu_env *env) struct ccc_session *ses; ses = lu_context_key_get(env->le_ses, &ccc_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -383,7 +383,8 @@ void cl_put_grouplock(struct ccc_grouplock *cg); * * NB: If you find you have to use these interfaces for your new code, please * think about it again. These interfaces may be removed in the future for - * better layering. */ + * better layering. 
+ */ struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj); void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm); int lov_read_and_clear_async_rc(struct cl_object *clob); diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h index 33e0b99e1..c6c7f5463 100644 --- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h +++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h @@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page) return; if (PagePrivate(page)) - page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); cancel_dirty_page(page); ClearPageMappedToDisk(page); diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h index 468bc28be..3907bf4ce 100644 --- a/drivers/staging/lustre/lustre/include/linux/obd.h +++ b/drivers/staging/lustre/lustre/include/linux/obd.h @@ -57,23 +57,23 @@ struct ll_iattr { #define CLIENT_OBD_LIST_LOCK_DEBUG 1 -typedef struct { +struct client_obd_lock { spinlock_t lock; unsigned long time; struct task_struct *task; const char *func; int line; -} client_obd_lock_t; +}; -static inline void __client_obd_list_lock(client_obd_lock_t *lock, +static inline void __client_obd_list_lock(struct client_obd_lock *lock, const char *func, int line) { unsigned long cur = jiffies; while (1) { if (spin_trylock(&lock->lock)) { - LASSERT(lock->task == NULL); + LASSERT(!lock->task); lock->task = current; lock->func = func; lock->line = line; @@ -85,7 +85,7 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock, time_before(lock->time + 5 * HZ, jiffies)) { struct task_struct *task = lock->task; - if (task == NULL) + if (!task) continue; LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n", @@ -106,20 
+106,20 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock, #define client_obd_list_lock(lock) \ __client_obd_list_lock(lock, __func__, __LINE__) -static inline void client_obd_list_unlock(client_obd_lock_t *lock) +static inline void client_obd_list_unlock(struct client_obd_lock *lock) { - LASSERT(lock->task != NULL); + LASSERT(lock->task); lock->task = NULL; lock->time = jiffies; spin_unlock(&lock->lock); } -static inline void client_obd_list_lock_init(client_obd_lock_t *lock) +static inline void client_obd_list_lock_init(struct client_obd_lock *lock) { spin_lock_init(&lock->lock); } -static inline void client_obd_list_lock_done(client_obd_lock_t *lock) +static inline void client_obd_list_lock_done(struct client_obd_lock *lock) {} #endif /* __LINUX_OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h index 0ac8e0edc..4146c9c39 100644 --- a/drivers/staging/lustre/lustre/include/lprocfs_status.h +++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h @@ -54,7 +54,7 @@ struct lprocfs_vars { struct file_operations *fops; void *data; /** - * /proc file mode. + * sysfs file mode. 
*/ umode_t proc_mode; }; @@ -175,7 +175,8 @@ struct lprocfs_percpu { enum lprocfs_stats_flags { LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */ LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu - * area and need locking */ + * area and need locking + */ LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */ }; @@ -196,7 +197,8 @@ struct lprocfs_stats { unsigned short ls_biggest_alloc_num; enum lprocfs_stats_flags ls_flags; /* Lock used when there are no percpu stats areas; For percpu stats, - * it is used to protect ls_biggest_alloc_num change */ + * it is used to protect ls_biggest_alloc_num change + */ spinlock_t ls_lock; /* has ls_num of counter headers */ @@ -274,20 +276,7 @@ static inline int opcode_offset(__u32 opc) OPC_RANGE(OST)); } else if (opc < FLD_LAST_OPC) { /* FLD opcode */ - return (opc - FLD_FIRST_OPC + - OPC_RANGE(SEC) + - OPC_RANGE(SEQ) + - OPC_RANGE(QUOTA) + - OPC_RANGE(LLOG) + - OPC_RANGE(OBD) + - OPC_RANGE(MGS) + - OPC_RANGE(LDLM) + - OPC_RANGE(MDS) + - OPC_RANGE(OST)); - } else if (opc < UPDATE_LAST_OPC) { - /* update opcode */ - return (opc - UPDATE_FIRST_OPC + - OPC_RANGE(FLD) + + return (opc - FLD_FIRST_OPC + OPC_RANGE(SEC) + OPC_RANGE(SEQ) + OPC_RANGE(QUOTA) + @@ -312,8 +301,7 @@ static inline int opcode_offset(__u32 opc) OPC_RANGE(SEC) + \ OPC_RANGE(SEQ) + \ OPC_RANGE(SEC) + \ - OPC_RANGE(FLD) + \ - OPC_RANGE(UPDATE)) + OPC_RANGE(FLD)) #define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \ OPC_RANGE(EXTRA)) @@ -407,7 +395,7 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc, } else { unsigned int cpuid = get_cpu(); - if (unlikely(stats->ls_percpu[cpuid] == NULL)) { + if (unlikely(!stats->ls_percpu[cpuid])) { rc = lprocfs_stats_alloc_one(stats, cpuid); if (rc < 0) { put_cpu(); @@ -438,12 +426,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc, case LPROCFS_GET_SMP_ID: if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { - if 
(stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) { - spin_unlock_irqrestore(&stats->ls_lock, - *flags); - } else { + if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) + spin_unlock_irqrestore(&stats->ls_lock, *flags); + else spin_unlock(&stats->ls_lock); - } } else { put_cpu(); } @@ -451,12 +437,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc, case LPROCFS_GET_NUM_CPU: if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) { - if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) { - spin_unlock_irqrestore(&stats->ls_lock, - *flags); - } else { + if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) + spin_unlock_irqrestore(&stats->ls_lock, *flags); + else spin_unlock(&stats->ls_lock); - } } return; } @@ -521,11 +505,11 @@ static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats, unsigned long flags = 0; __u64 ret = 0; - LASSERT(stats != NULL); + LASSERT(stats); num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_cpu; i++) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; ret += lprocfs_read_helper( lprocfs_stats_counter_get(stats, i, idx), @@ -608,7 +592,7 @@ int lprocfs_write_helper(const char __user *buffer, unsigned long count, int *val); int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count, __u64 *val); -int lprocfs_write_frac_u64_helper(const char *buffer, +int lprocfs_write_frac_u64_helper(const char __user *buffer, unsigned long count, __u64 *val, int mult); char *lprocfs_find_named_value(const char *buffer, const char *name, @@ -625,9 +609,10 @@ int lprocfs_single_release(struct inode *, struct file *); int lprocfs_seq_release(struct inode *, struct file *); /* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only - proc entries; otherwise, you will define name##_seq_write function also for - a read-write proc entry, and then call LPROC_SEQ_SEQ instead. 
Finally, - call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); */ + * proc entries; otherwise, you will define name##_seq_write function also for + * a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally, + * call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); + */ #define __LPROC_SEQ_FOPS(name, custom_seq_write) \ static int name##_single_open(struct inode *inode, struct file *file) \ { \ diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h index 1d79341a4..242bb1ef6 100644 --- a/drivers/staging/lustre/lustre/include/lu_object.h +++ b/drivers/staging/lustre/lustre/include/lu_object.h @@ -164,11 +164,12 @@ struct lu_device_operations { /** * For lu_object_conf flags */ -typedef enum { +enum loc_flags { /* This is a new object to be allocated, or the file - * corresponding to the object does not exists. */ + * corresponding to the object does not exists. + */ LOC_F_NEW = 0x00000001, -} loc_flags_t; +}; /** * Object configuration, describing particulars of object being created. On @@ -179,7 +180,7 @@ struct lu_object_conf { /** * Some hints for obj find and alloc. */ - loc_flags_t loc_flags; + enum loc_flags loc_flags; }; /** @@ -392,7 +393,7 @@ struct lu_device_type_operations { static inline int lu_device_is_md(const struct lu_device *d) { - return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_MD); + return ergo(d, d->ld_type->ldt_tags & LU_DEVICE_MD); } /** @@ -488,7 +489,7 @@ enum lu_object_header_flags { /** * Mark this object has already been taken out of cache. 
*/ - LU_OBJECT_UNHASHED = 1 + LU_OBJECT_UNHASHED = 1, }; enum lu_object_header_attr { @@ -756,7 +757,7 @@ static inline const struct lu_fid *lu_object_fid(const struct lu_object *o) /** * return device operations vector for this object */ -static const inline struct lu_device_operations * +static inline const struct lu_device_operations * lu_object_ops(const struct lu_object *o) { return o->lo_dev->ld_ops; @@ -895,7 +896,8 @@ enum lu_xattr_flags { /** @} helpers */ /** \name lu_context - * @{ */ + * @{ + */ /** For lu_context health-checks */ enum lu_context_state { @@ -1116,10 +1118,10 @@ struct lu_context_key { { \ type *value; \ \ - CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \ + CLASSERT(PAGE_SIZE >= sizeof (*value)); \ \ value = kzalloc(sizeof(*value), GFP_NOFS); \ - if (value == NULL) \ + if (!value) \ value = ERR_PTR(-ENOMEM); \ \ return value; \ @@ -1174,7 +1176,7 @@ void lu_context_key_revive (struct lu_context_key *key); do { \ LU_CONTEXT_KEY_INIT(key); \ key = va_arg(args, struct lu_context_key *); \ - } while (key != NULL); \ + } while (key); \ va_end(args); \ } diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h index 97cd157dd..f7dfd8395 100644 --- a/drivers/staging/lustre/lustre/include/lu_ref.h +++ b/drivers/staging/lustre/lustre/include/lu_ref.h @@ -17,10 +17,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with Lustre; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * */ #ifndef __LUSTRE_LU_REF_H diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h index 09088f40b..07d45de69 100644 --- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h +++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h @@ -47,9 +47,11 @@ struct ll_fiemap_extent { __u64 fe_logical; /* logical offset in bytes for the start of - * the extent from the beginning of the file */ + * the extent from the beginning of the file + */ __u64 fe_physical; /* physical offset in bytes for the start - * of the extent from the beginning of the disk */ + * of the extent from the beginning of the disk + */ __u64 fe_length; /* length in bytes for this extent */ __u64 fe_reserved64[2]; __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */ @@ -59,9 +61,11 @@ struct ll_fiemap_extent { struct ll_user_fiemap { __u64 fm_start; /* logical offset (inclusive) at - * which to start mapping (in) */ + * which to start mapping (in) + */ __u64 fm_length; /* logical length of mapping which - * userspace wants (in) */ + * userspace wants (in) + */ __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */ __u32 fm_mapped_extents;/* number of extents that were mapped (out) */ __u32 fm_extent_count; /* size of fm_extents array (in) */ @@ -71,28 +75,38 @@ struct ll_user_fiemap { #define FIEMAP_MAX_OFFSET (~0ULL) -#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */ -#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */ - -#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */ -#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ -#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. - * Sets EXTENT_UNKNOWN. */ -#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read - * while fs is unmounted */ -#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs. - * Sets EXTENT_NO_DIRECT. 
*/ +#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before + * map + */ +#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute + * tree + */ +#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */ +#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */ +#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending. + * Sets EXTENT_UNKNOWN. + */ +#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read + * while fs is unmounted + */ +#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs. + * Sets EXTENT_NO_DIRECT. + */ #define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be - * block aligned. */ + * block aligned. + */ #define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata. * Sets EXTENT_NOT_ALIGNED.*/ -#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block. - * Sets EXTENT_NOT_ALIGNED.*/ -#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but - * no data (i.e. zero). */ -#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively +#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block. + * Sets EXTENT_NOT_ALIGNED. + */ +#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but + * no data (i.e. zero). + */ +#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively * support extents. Result - * merged for efficiency. */ + * merged for efficiency. + */ static inline size_t fiemap_count_to_size(size_t extent_count) { @@ -114,7 +128,8 @@ static inline unsigned fiemap_size_to_count(size_t array_size) /* Lustre specific flags - use a high bit, don't conflict with upstream flag */ #define FIEMAP_EXTENT_NO_DIRECT 0x40000000 /* Data mapping undefined */ -#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely. - * Sets NO_DIRECT flag */ +#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely. 
+ * Sets NO_DIRECT flag + */ #endif /* _LUSTRE_FIEMAP_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h b/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h deleted file mode 100644 index 93a3d7db3..000000000 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h +++ /dev/null @@ -1,2 +0,0 @@ -#define BUILD_VERSION "v2_3_64_0-g6e62c21-CHANGED-3.9.0" -#define LUSTRE_RELEASE 3.9.0_g6e62c21 diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h index b064b5821..5aae1d06a 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h @@ -113,25 +113,25 @@ #define CONNMGR_REQUEST_PORTAL 1 #define CONNMGR_REPLY_PORTAL 2 -//#define OSC_REQUEST_PORTAL 3 +/*#define OSC_REQUEST_PORTAL 3 */ #define OSC_REPLY_PORTAL 4 -//#define OSC_BULK_PORTAL 5 +/*#define OSC_BULK_PORTAL 5 */ #define OST_IO_PORTAL 6 #define OST_CREATE_PORTAL 7 #define OST_BULK_PORTAL 8 -//#define MDC_REQUEST_PORTAL 9 +/*#define MDC_REQUEST_PORTAL 9 */ #define MDC_REPLY_PORTAL 10 -//#define MDC_BULK_PORTAL 11 +/*#define MDC_BULK_PORTAL 11 */ #define MDS_REQUEST_PORTAL 12 -//#define MDS_REPLY_PORTAL 13 +/*#define MDS_REPLY_PORTAL 13 */ #define MDS_BULK_PORTAL 14 #define LDLM_CB_REQUEST_PORTAL 15 #define LDLM_CB_REPLY_PORTAL 16 #define LDLM_CANCEL_REQUEST_PORTAL 17 #define LDLM_CANCEL_REPLY_PORTAL 18 -//#define PTLBD_REQUEST_PORTAL 19 -//#define PTLBD_REPLY_PORTAL 20 -//#define PTLBD_BULK_PORTAL 21 +/*#define PTLBD_REQUEST_PORTAL 19 */ +/*#define PTLBD_REPLY_PORTAL 20 */ +/*#define PTLBD_BULK_PORTAL 21 */ #define MDS_SETATTR_PORTAL 22 #define MDS_READPAGE_PORTAL 23 #define OUT_PORTAL 24 @@ -146,7 +146,9 @@ #define SEQ_CONTROLLER_PORTAL 32 #define MGS_BULK_PORTAL 33 -/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */ +/* Portal 63 is reserved for the 
Cray Inc DVS - nic@cray.com, roe@cray.com, + * n8851@cray.com + */ /* packet types */ #define PTL_RPC_MSG_REQUEST 4711 @@ -295,7 +297,8 @@ static inline int range_compare_loc(const struct lu_seq_range *r1, fld_range_is_mdt(range) ? "mdt" : "ost" /** \defgroup lu_fid lu_fid - * @{ */ + * @{ + */ /** * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat. @@ -307,7 +310,8 @@ enum lma_compat { LMAC_SOM = 0x00000002, LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */ LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is - * under /O/<seq>/d<x>. */ + * under /O/<seq>/d<x>. + */ }; /** @@ -319,7 +323,8 @@ enum lma_incompat { LMAI_RELEASED = 0x00000001, /* file is released */ LMAI_AGENT = 0x00000002, /* agent inode */ LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object - is on the remote MDT */ + * is on the remote MDT + */ }; #define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT) @@ -395,12 +400,14 @@ enum fid_seq { FID_SEQ_LOCAL_FILE = 0x200000001ULL, FID_SEQ_DOT_LUSTRE = 0x200000002ULL, /* sequence is used for local named objects FIDs generated - * by local_object_storage library */ + * by local_object_storage library + */ FID_SEQ_LOCAL_NAME = 0x200000003ULL, /* Because current FLD will only cache the fid sequence, instead * of oid on the client side, if the FID needs to be exposed to * clients sides, it needs to make sure all of fids under one - * sequence will be located in one MDT. */ + * sequence will be located in one MDT. 
+ */ FID_SEQ_SPECIAL = 0x200000004ULL, FID_SEQ_QUOTA = 0x200000005ULL, FID_SEQ_QUOTA_GLB = 0x200000006ULL, @@ -601,7 +608,8 @@ static inline void ostid_set_seq(struct ost_id *oi, __u64 seq) oi->oi_fid.f_seq = seq; /* Note: if f_oid + f_ver is zero, we need init it * to be 1, otherwise, ostid_seq will treat this - * as old ostid (oi_seq == 0) */ + * as old ostid (oi_seq == 0) + */ if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0) oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID; } @@ -630,15 +638,13 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid) { if (fid_seq_is_mdt0(ostid_seq(oi))) { if (oid >= IDIF_MAX_OID) { - CERROR("Bad %llu to set "DOSTID"\n", - oid, POSTID(oi)); + CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); return; } oi->oi.oi_id = oid; } else { if (oid > OBIF_MAX_OID) { - CERROR("Bad %llu to set "DOSTID"\n", - oid, POSTID(oi)); + CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi)); return; } oi->oi_fid.f_oid = oid; @@ -689,11 +695,12 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, * that we map into the IDIF namespace. It allows up to 2^48 * objects per OST, as this is the object namespace that has * been in production for years. This can handle create rates - * of 1M objects/s/OST for 9 years, or combinations thereof. */ + * of 1M objects/s/OST for 9 years, or combinations thereof. + */ if (ostid_id(ostid) >= IDIF_MAX_OID) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", - POSTID(ostid), ost_idx); - return -EBADF; + CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n", + POSTID(ostid), ost_idx); + return -EBADF; } fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx); /* truncate to 32 bits by assignment */ @@ -704,10 +711,11 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, /* This is either an IDIF object, which identifies objects across * all OSTs, or a regular FID. The IDIF namespace maps legacy * OST objects into the FID namespace. 
In both cases, we just - * pass the FID through, no conversion needed. */ + * pass the FID through, no conversion needed. + */ if (ostid->oi_fid.f_ver != 0) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", - POSTID(ostid), ost_idx); + CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n", + POSTID(ostid), ost_idx); return -EBADF; } *fid = ostid->oi_fid; @@ -807,7 +815,7 @@ static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src) static inline int fid_is_sane(const struct lu_fid *fid) { - return fid != NULL && + return fid && ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) || fid_is_igif(fid) || fid_is_idif(fid) || fid_seq_is_rsvd(fid_seq(fid))); @@ -868,7 +876,8 @@ static inline void ostid_le_to_cpu(const struct ost_id *src_oi, /** @} lu_fid */ /** \defgroup lu_dir lu_dir - * @{ */ + * @{ + */ /** * Enumeration of possible directory entry attributes. @@ -880,24 +889,8 @@ enum lu_dirent_attrs { LUDA_FID = 0x0001, LUDA_TYPE = 0x0002, LUDA_64BITHASH = 0x0004, - - /* The following attrs are used for MDT internal only, - * not visible to client */ - - /* Verify the dirent consistency */ - LUDA_VERIFY = 0x8000, - /* Only check but not repair the dirent inconsistency */ - LUDA_VERIFY_DRYRUN = 0x4000, - /* The dirent has been repaired, or to be repaired (dryrun). */ - LUDA_REPAIR = 0x2000, - /* The system is upgraded, has beed or to be repaired (dryrun). */ - LUDA_UPGRADE = 0x1000, - /* Ignore this record, go to next directly. */ - LUDA_IGNORE = 0x0800, }; -#define LU_DIRENT_ATTRS_MASK 0xf800 - /** * Layout of readdir pages, as transmitted on wire. */ @@ -1029,16 +1022,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent) * MDS_READPAGE page size * * This is the directory page size packed in MDS_READPAGE RPC. 
- * It's different than PAGE_CACHE_SIZE because the client needs to + * It's different than PAGE_SIZE because the client needs to * access the struct lu_dirpage header packed at the beginning of * the "page" and without this there isn't any way to know find the - * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ. + * lu_dirpage header is if client and server PAGE_SIZE differ. */ #define LU_PAGE_SHIFT 12 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT) #define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1)) -#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT)) +#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT)) /** @} lu_dir */ @@ -1128,7 +1121,8 @@ struct ptlrpc_body_v2 { __u32 pb_conn_cnt; __u32 pb_timeout; /* for req, the deadline, for rep, the service est */ __u32 pb_service_time; /* for rep, actual service time, also used for - net_latency of req */ + * net_latency of req + */ __u32 pb_limit; __u64 pb_slv; /* VBR: pre-versions */ @@ -1174,7 +1168,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); /* #define MSG_AT_SUPPORT 0x0008 * This was used in early prototypes of adaptive timeouts, and while there * shouldn't be any users of that code there also isn't a need for using this - * bits. Defer usage until at least 1.10 to avoid potential conflict. */ + * bits. Defer usage until at least 1.10 to avoid potential conflict. 
+ */ #define MSG_DELAY_REPLAY 0x0010 #define MSG_VERSION_REPLAY 0x0020 #define MSG_REQ_REPLAY_DONE 0x0040 @@ -1187,7 +1182,7 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define MSG_CONNECT_RECOVERING 0x00000001 #define MSG_CONNECT_RECONNECT 0x00000002 #define MSG_CONNECT_REPLAYABLE 0x00000004 -//#define MSG_CONNECT_PEER 0x8 +/*#define MSG_CONNECT_PEER 0x8 */ #define MSG_CONNECT_LIBCLIENT 0x00000010 #define MSG_CONNECT_INITIAL 0x00000020 #define MSG_CONNECT_ASYNC 0x00000040 @@ -1195,60 +1190,65 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */ /* Connect flags */ -#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/ -#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */ -#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */ -#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */ -#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */ -#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */ -#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */ -#define OBD_CONNECT_ACL 0x80ULL /*access control lists */ -#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */ +#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/ +#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */ +#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */ +#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */ +#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */ +#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */ +#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */ +#define OBD_CONNECT_ACL 0x80ULL /*access control lists */ +#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */ #define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/ -#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */ -#define OBD_CONNECT_TRANSNO 0x800ULL 
/*replay sends init transno */ -#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/ +#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */ +#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */ +#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/ #define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated. *We do not support JOIN FILE *anymore, reserve this flags *just for preventing such bit - *to be reused.*/ -#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/ -#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/ -#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */ + *to be reused. + */ +#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/ +#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/ +#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */ #define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */ -#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ -#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */ -#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ -#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ -#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */ -#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */ -#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ +#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ +#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */ +#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ +#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ +#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */ +#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */ +#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ #define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. 
*/ -#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ +#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ #define OBD_CONNECT_REAL 0x8000000ULL /*real connection */ #define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */ -#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ -#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ -#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ -#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */ +#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ +#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ +#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ +#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */ #define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */ #define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */ #define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */ #define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */ #define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */ #define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits - * directory hash */ + * directory hash + */ #define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */ #define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */ #define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */ #define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */ #define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS - * RPC error properly */ + * RPC error properly + */ #define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for - * finer space reservation */ + * finer space reservation + */ #define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8 - * policy and 2.x server */ + * policy 
and 2.x server + */ #define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */ #define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */ #define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */ @@ -1264,61 +1264,19 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * submit a small patch against EVERY branch that ONLY adds the new flag, * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the * flag to check_obd_connect_data(), and updates wiretests accordingly, so it - * can be approved and landed easily to reserve the flag for future use. */ + * can be approved and landed easily to reserve the flag for future use. + */ /* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS * connection. It is a temporary bug fix for Imperative Recovery interop * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for - * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. */ + * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. 
+ */ #define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS #define OCD_HAS_FLAG(ocd, flg) \ (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg)) -#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE - -#define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \ - OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \ - OBD_CONNECT_IBITS | \ - OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \ - OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ - OBD_CONNECT_RMT_CLIENT | \ - OBD_CONNECT_RMT_CLIENT_FORCE | \ - OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \ - OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \ - OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \ - OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \ - OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \ - OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \ - OBD_CONNECT_EINPROGRESS | \ - OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \ - OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\ - OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\ - OBD_CONNECT_FLOCK_DEAD | \ - OBD_CONNECT_DISP_STRIPE) - -#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \ - OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \ - OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \ - OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \ - OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ - LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \ - OBD_CONNECT_RMT_CLIENT | \ - OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \ - OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \ - OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \ - OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \ - OBD_CONNECT_MAX_EASIZE | \ - OBD_CONNECT_EINPROGRESS | \ - OBD_CONNECT_JOBSTATS | \ - OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\ - OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \ - OBD_CONNECT_PINGLESS) -#define ECHO_CONNECT_SUPPORTED (0) -#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \ - OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \ - OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS) - /* Features 
required for this version of the client to work with server */ #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \ OBD_CONNECT_FULL20) @@ -1334,7 +1292,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); /* This structure is used for both request and reply. * * If we eventually have separate connect data for different types, which we - * almost certainly will, then perhaps we stick a union in here. */ + * almost certainly will, then perhaps we stick a union in here. + */ struct obd_connect_data_v1 { __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ __u32 ocd_version; /* lustre release version number */ @@ -1364,7 +1323,7 @@ struct obd_connect_data { __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */ __u8 ocd_inodespace; /* log2 of the per-inode space consumption */ __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */ - __u32 ocd_unused; /* also fix lustre_swab_connect */ + __u32 ocd_unused; /* also fix lustre_swab_connect */ __u64 ocd_transno; /* first transno from client to be replayed */ __u32 ocd_group; /* MDS group on OST */ __u32 ocd_cksum_types; /* supported checksum algorithms */ @@ -1374,7 +1333,8 @@ struct obd_connect_data { /* Fields after ocd_maxbytes are only accessible by the receiver * if the corresponding flag in ocd_connect_flags is set. Accessing * any field after ocd_maxbytes on the receiver without a valid flag - * may result in out-of-bound memory access and kernel oops. */ + * may result in out-of-bound memory access and kernel oops. + */ __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */ @@ -1398,7 +1358,8 @@ struct obd_connect_data { * with senior engineers before starting to use a new field. 
Then, submit * a small patch against EVERY branch that ONLY adds the new field along with * the matching OBD_CONNECT flag, so that can be approved and landed easily to - * reserve the flag for future use. */ + * reserve the flag for future use. + */ void lustre_swab_connect(struct obd_connect_data *ocd); @@ -1408,18 +1369,18 @@ void lustre_swab_connect(struct obd_connect_data *ocd); * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new * algorithm and also the OBD_FL_CKSUM* flags. */ -typedef enum { +enum cksum_type { OBD_CKSUM_CRC32 = 0x00000001, OBD_CKSUM_ADLER = 0x00000002, OBD_CKSUM_CRC32C = 0x00000004, -} cksum_type_t; +}; /* * OST requests: OBDO & OBD request records */ /* opcodes */ -typedef enum { +enum ost_cmd { OST_REPLY = 0, /* reply ? */ OST_GETATTR = 1, OST_SETATTR = 2, @@ -1440,14 +1401,14 @@ typedef enum { OST_QUOTACTL = 19, OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */ OST_LAST_OPC -} ost_cmd_t; +}; #define OST_FIRST_OPC OST_REPLY enum obdo_flags { OBD_FL_INLINEDATA = 0x00000001, OBD_FL_OBDMDEXISTS = 0x00000002, OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */ - OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */ + OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */ OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/ OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */ OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */ @@ -1461,14 +1422,16 @@ enum obdo_flags { OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */ OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */ OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */ - OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client. + OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client. 
* XXX: obsoleted - reserved for old - * clients prior than 2.2 */ + * clients prior than 2.2 + */ OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */ OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */ /* Note that while these checksum values are currently separate bits, - * in 2.x we can actually allow all values from 1-31 if we wanted. */ + * in 2.x we can actually allow all values from 1-31 if we wanted. + */ OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER | OBD_FL_CKSUM_CRC32C, @@ -1657,7 +1620,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) } } -#define OBD_MD_FLID (0x00000001ULL) /* object ID */ +#define OBD_MD_FLID (0x00000001ULL) /* object ID */ #define OBD_MD_FLATIME (0x00000002ULL) /* access time */ #define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */ #define OBD_MD_FLCTIME (0x00000008ULL) /* change time */ @@ -1683,22 +1646,23 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLGROUP (0x01000000ULL) /* group */ #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */ #define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */ - /* ->mds if epoch opens or closes */ + /* ->mds if epoch opens or closes + */ #define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */ #define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */ #define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */ #define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */ #define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */ -#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ +#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ #define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */ -#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */ +#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */ #define OBD_MD_TSTATE 
(0x0000000800000000ULL) /* transient state field */ #define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */ #define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */ #define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */ -#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */ +#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */ #define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */ #define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */ #define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */ @@ -1707,7 +1671,8 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes * under lock; for xattr * requests means the - * client holds the lock */ + * client holds the lock + */ #define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */ #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */ @@ -1727,7 +1692,8 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS) /* don't forget obdo_fid which is way down at the bottom so it can - * come after the definition of llog_cookie */ + * come after the definition of llog_cookie + */ enum hss_valid { HSS_SETMASK = 0x01, @@ -1749,19 +1715,20 @@ void lustre_swab_obd_statfs(struct obd_statfs *os); /* ost_body.data values for OST_BRW */ -#define OBD_BRW_READ 0x01 -#define OBD_BRW_WRITE 0x02 -#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE) -#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous +#define OBD_BRW_READ 0x01 +#define OBD_BRW_WRITE 0x02 +#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE) +#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous * transfer and is not accounted in - * the grant. */ -#define OBD_BRW_CHECK 0x10 + * the grant. 
+ */ +#define OBD_BRW_CHECK 0x10 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */ -#define OBD_BRW_GRANTED 0x40 /* the ost manages this */ -#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */ -#define OBD_BRW_NOQUOTA 0x100 -#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */ -#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */ +#define OBD_BRW_GRANTED 0x40 /* the ost manages this */ +#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */ +#define OBD_BRW_NOQUOTA 0x100 +#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */ +#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */ #define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */ #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */ #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */ @@ -1775,7 +1742,8 @@ struct obd_ioobj { struct ost_id ioo_oid; /* object ID, if multi-obj BRW */ __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4, * now (PTLRPC_BULK_OPS_COUNT - 1) in - * high 16 bits in 2.4 and later */ + * high 16 bits in 2.4 and later + */ __u32 ioo_bufcnt; /* number of niobufs for this object */ }; @@ -1799,7 +1767,8 @@ void lustre_swab_niobuf_remote(struct niobuf_remote *nbr); /* lock value block communicated between the filter and llite */ /* OST_LVB_ERR_INIT is needed because the return code in rc is - * negative, i.e. because ((MASK + rc) & MASK) != MASK. */ + * negative, i.e. because ((MASK + rc) & MASK) != MASK. 
+ */ #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL #define OST_LVB_IS_ERR(blocks) \ @@ -1836,23 +1805,12 @@ void lustre_swab_ost_lvb(struct ost_lvb *lvb); * lquota data structures */ -#ifndef QUOTABLOCK_BITS -#define QUOTABLOCK_BITS 10 -#endif - -#ifndef QUOTABLOCK_SIZE -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) -#endif - -#ifndef toqb -#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) -#endif - /* The lquota_id structure is an union of all the possible identifier types that * can be used with quota, this includes: * - 64-bit user ID * - 64-bit group ID - * - a FID which can be used for per-directory quota in the future */ + * - a FID which can be used for per-directory quota in the future + */ union lquota_id { struct lu_fid qid_fid; /* FID for per-directory quota */ __u64 qid_uid; /* user identifier */ @@ -1889,89 +1847,6 @@ do { \ Q_COPY(out, in, qc_dqblk); \ } while (0) -/* Body of quota request used for quota acquire/release RPCs between quota - * master (aka QMT) and slaves (ak QSD). */ -struct quota_body { - struct lu_fid qb_fid; /* FID of global index packing the pool ID - * and type (data or metadata) as well as - * the quota type (user or group). */ - union lquota_id qb_id; /* uid or gid or directory FID */ - __u32 qb_flags; /* see below */ - __u32 qb_padding; - __u64 qb_count; /* acquire/release count (kbytes/inodes) */ - __u64 qb_usage; /* current slave usage (kbytes/inodes) */ - __u64 qb_slv_ver; /* slave index file version */ - struct lustre_handle qb_lockh; /* per-ID lock handle */ - struct lustre_handle qb_glb_lockh; /* global lock handle */ - __u64 qb_padding1[4]; -}; - -/* When the quota_body is used in the reply of quota global intent - * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. 
*/ -#define qb_slv_fid qb_fid -/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in - * quota reply */ -#define qb_qunit qb_usage - -#define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */ -#define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */ -#define QUOTA_DQACQ_FL_REL 0x4 /* release quota */ -#define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */ - -void lustre_swab_quota_body(struct quota_body *b); - -/* Quota types currently supported */ -enum { - LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */ - LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */ - LQUOTA_TYPE_MAX -}; - -/* There are 2 different resource types on which a quota limit can be enforced: - * - inodes on the MDTs - * - blocks on the OSTs */ -enum { - LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */ - LQUOTA_RES_DT = 0x02, - LQUOTA_LAST_RES, - LQUOTA_FIRST_RES = LQUOTA_RES_MD -}; - -#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1) - -/* - * Space accounting support - * Format of an accounting record, providing disk usage information for a given - * user or group - */ -struct lquota_acct_rec { /* 16 bytes */ - __u64 bspace; /* current space in use */ - __u64 ispace; /* current # inodes in use */ -}; - -/* - * Global quota index support - * Format of a global record, providing global quota settings for a given quota - * identifier - */ -struct lquota_glb_rec { /* 32 bytes */ - __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */ - __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */ - __u64 qbr_time; /* grace time, in seconds */ - __u64 qbr_granted; /* how much is granted to slaves, in #inodes or - * kbytes */ -}; - -/* - * Slave index support - * Format of a slave record, recording how much space is granted to a given - * slave - */ -struct lquota_slv_rec { /* 8 bytes */ - __u64 qsr_granted; /* space granted to the slave for the key=ID, - * in #inodes or kbytes */ -}; - /* Data structures associated with the quota locks */ /* Glimpse 
descriptor used for the index & per-ID quota locks */ @@ -1985,9 +1860,6 @@ struct ldlm_gl_lquota_desc { __u64 gl_pad2; }; -#define gl_qunit gl_hardlimit /* current qunit value used when - * glimpsing per-ID quota locks */ - /* quota glimpse flags */ #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */ @@ -2002,15 +1874,12 @@ struct lquota_lvb { void lustre_swab_lquota_lvb(struct lquota_lvb *lvb); -/* LVB used with global quota lock */ -#define lvb_glb_ver lvb_id_may_rel /* current version of the global index */ - /* op codes */ -typedef enum { +enum quota_cmd { QUOTA_DQACQ = 601, QUOTA_DQREL = 602, QUOTA_LAST_OPC -} quota_cmd_t; +}; #define QUOTA_FIRST_OPC QUOTA_DQACQ /* @@ -2018,7 +1887,7 @@ typedef enum { */ /* opcodes */ -typedef enum { +enum mds_cmd { MDS_GETATTR = 33, MDS_GETATTR_NAME = 34, MDS_CLOSE = 35, @@ -2049,23 +1918,15 @@ typedef enum { MDS_HSM_CT_UNREGISTER = 60, MDS_SWAP_LAYOUTS = 61, MDS_LAST_OPC -} mds_cmd_t; +}; #define MDS_FIRST_OPC MDS_GETATTR -/* opcodes for object update */ -typedef enum { - UPDATE_OBJ = 1000, - UPDATE_LAST_OPC -} update_cmd_t; - -#define UPDATE_FIRST_OPC UPDATE_OBJ - /* * Do not exceed 63 */ -typedef enum { +enum mdt_reint_cmd { REINT_SETATTR = 1, REINT_CREATE = 2, REINT_LINK = 3, @@ -2074,9 +1935,9 @@ typedef enum { REINT_OPEN = 6, REINT_SETXATTR = 7, REINT_RMENTRY = 8, -// REINT_WRITE = 9, +/* REINT_WRITE = 9, */ REINT_MAX -} mds_reint_t, mdt_reint_t; +}; void lustre_swab_generic_32s(__u32 *val); @@ -2097,7 +1958,8 @@ void lustre_swab_generic_32s(__u32 *val); /* INODE LOCK PARTS */ #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also * was used to protect permission (mode, - * owner, group etc) before 2.4. */ + * owner, group etc) before 2.4. 
+ */ #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */ #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */ #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */ @@ -2110,7 +1972,8 @@ void lustre_swab_generic_32s(__u32 *val); * For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together. * For Remote directory, the master MDT, where the remote directory is, will * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is, - * will grant LOOKUP_LOCK. */ + * will grant LOOKUP_LOCK. + */ #define MDS_INODELOCK_PERM 0x000010 #define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */ @@ -2120,7 +1983,8 @@ void lustre_swab_generic_32s(__u32 *val); /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2], * but was moved into name[1] along with the OID to avoid consuming the - * name[2,3] fields that need to be used for the quota id (also a FID). */ + * name[2,3] fields that need to be used for the quota id (also a FID). + */ enum { LUSTRE_RES_ID_SEQ_OFF = 0, LUSTRE_RES_ID_VER_OID_OFF = 1, @@ -2156,7 +2020,8 @@ enum md_op_flags { #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1 /* these should be identical to their EXT4_*_FL counterparts, they are - * redefined here only to avoid dragging in fs/ext4/ext4.h */ + * redefined here only to avoid dragging in fs/ext4/ext4.h + */ #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */ #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */ #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */ @@ -2168,15 +2033,14 @@ enum md_op_flags { * protocol equivalents of LDISKFS_*_FL values stored on disk, while * the S_* flags are kernel-internal values that change between kernel * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS. - * See b=16526 for a full history. */ + * See b=16526 for a full history. + */ static inline int ll_ext_to_inode_flags(int flags) { return (((flags & LUSTRE_SYNC_FL) ? 
S_SYNC : 0) | ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) | ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) | -#if defined(S_DIRSYNC) ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) | -#endif ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0)); } @@ -2185,9 +2049,7 @@ static inline int ll_inode_to_ext_flags(int iflags) return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) | ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) | ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) | -#if defined(S_DIRSYNC) ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) | -#endif ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0)); } @@ -2207,9 +2069,10 @@ struct mdt_body { __s64 ctime; __u64 blocks; /* XID, in the case of MDS_READPAGE */ __u64 ioepoch; - __u64 t_state; /* transient file state defined in - * enum md_transient_state - * was "ino" until 2.4.0 */ + __u64 t_state; /* transient file state defined in + * enum md_transient_state + * was "ino" until 2.4.0 + */ __u32 fsuid; __u32 fsgid; __u32 capability; @@ -2219,7 +2082,7 @@ struct mdt_body { __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */ __u32 rdev; __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 unused2; /* was "generation" until 2.4.0 */ + __u32 unused2; /* was "generation" until 2.4.0 */ __u32 suppgid; __u32 eadatasize; __u32 aclsize; @@ -2256,7 +2119,8 @@ enum { }; /* inode access permission for remote user, the inode info are omitted, - * for client knows them. */ + * for client knows them. + */ struct mdt_remote_perm { __u32 rp_uid; __u32 rp_gid; @@ -2306,13 +2170,13 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); * since the client and MDS may run different kernels (see bug 13828) * Therefore, we should only use MDS_ATTR_* attributes for sa_valid. 
*/ -#define MDS_ATTR_MODE 0x1ULL /* = 1 */ -#define MDS_ATTR_UID 0x2ULL /* = 2 */ -#define MDS_ATTR_GID 0x4ULL /* = 4 */ -#define MDS_ATTR_SIZE 0x8ULL /* = 8 */ -#define MDS_ATTR_ATIME 0x10ULL /* = 16 */ -#define MDS_ATTR_MTIME 0x20ULL /* = 32 */ -#define MDS_ATTR_CTIME 0x40ULL /* = 64 */ +#define MDS_ATTR_MODE 0x1ULL /* = 1 */ +#define MDS_ATTR_UID 0x2ULL /* = 2 */ +#define MDS_ATTR_GID 0x4ULL /* = 4 */ +#define MDS_ATTR_SIZE 0x8ULL /* = 8 */ +#define MDS_ATTR_ATIME 0x10ULL /* = 16 */ +#define MDS_ATTR_MTIME 0x20ULL /* = 32 */ +#define MDS_ATTR_CTIME 0x40ULL /* = 64 */ #define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */ #define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */ #define MDS_ATTR_FORCE 0x200ULL /* = 512, Not a change, but a change it */ @@ -2320,14 +2184,11 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); #define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */ #define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */ #define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */ -#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, ie O_TRUNC */ +#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, + * ie O_TRUNC + */ #define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */ -#ifndef FMODE_READ -#define FMODE_READ 00000001 -#define FMODE_WRITE 00000002 -#endif - #define MDS_FMODE_CLOSED 00000000 #define MDS_FMODE_EXEC 00000004 /* IO Epoch is opened on a closed file. */ @@ -2354,9 +2215,10 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa); * We do not support JOIN FILE * anymore, reserve this flags * just for preventing such bit - * to be reused. */ + * to be reused. 
+ */ -#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */ +#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */ #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */ #define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */ #define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */ @@ -2409,7 +2271,8 @@ struct mdt_rec_create { __u32 cr_bias; /* use of helpers set/get_mrc_cr_flags() is needed to access * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to - * extend cr_flags size without breaking 1.8 compat */ + * extend cr_flags size without breaking 1.8 compat + */ __u32 cr_flags_l; /* for use with open, low 32 bits */ __u32 cr_flags_h; /* for use with open, high 32 bits */ __u32 cr_umask; /* umask for create */ @@ -2630,7 +2493,8 @@ enum seq_op { #define LOV_MAX_UUID_BUFFER_SIZE 8192 /* The size of the buffer the lov/mdc reserves for the * array of UUIDs returned by the MDS. With the current - * protocol, this will limit the max number of OSTs per LOV */ + * protocol, this will limit the max number of OSTs per LOV + */ #define LOV_DESC_MAGIC 0xB0CCDE5C #define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */ @@ -2639,13 +2503,13 @@ enum seq_op { /* LOV settings descriptor (should only contain static info) */ struct lov_desc { __u32 ld_tgt_count; /* how many OBD's */ - __u32 ld_active_tgt_count; /* how many active */ - __u32 ld_default_stripe_count; /* how many objects are used */ - __u32 ld_pattern; /* default PATTERN_RAID0 */ - __u64 ld_default_stripe_size; /* in bytes */ - __u64 ld_default_stripe_offset; /* in bytes */ + __u32 ld_active_tgt_count; /* how many active */ + __u32 ld_default_stripe_count; /* how many objects are used */ + __u32 ld_pattern; /* default PATTERN_RAID0 */ + __u64 ld_default_stripe_size; /* in bytes */ + __u64 ld_default_stripe_offset; /* in bytes */ __u32 ld_padding_0; /* unused */ - __u32 ld_qos_maxage; /* in second */ + __u32 ld_qos_maxage; /* in 
second */ __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */ __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */ struct obd_uuid ld_uuid; @@ -2659,7 +2523,7 @@ void lustre_swab_lov_desc(struct lov_desc *ld); * LDLM requests: */ /* opcodes -- MUST be distinct from OST/MDS opcodes */ -typedef enum { +enum ldlm_cmd { LDLM_ENQUEUE = 101, LDLM_CONVERT = 102, LDLM_CANCEL = 103, @@ -2668,7 +2532,7 @@ typedef enum { LDLM_GL_CALLBACK = 106, LDLM_SET_INFO = 107, LDLM_LAST_OPC -} ldlm_cmd_t; +}; #define LDLM_FIRST_OPC LDLM_ENQUEUE #define RES_NAME_SIZE 4 @@ -2687,7 +2551,7 @@ static inline int ldlm_res_eq(const struct ldlm_res_id *res0, } /* lock types */ -typedef enum { +enum ldlm_mode { LCK_MINMODE = 0, LCK_EX = 1, LCK_PW = 2, @@ -2698,17 +2562,17 @@ typedef enum { LCK_GROUP = 64, LCK_COS = 128, LCK_MAXMODE -} ldlm_mode_t; +}; #define LCK_MODE_NUM 8 -typedef enum { +enum ldlm_type { LDLM_PLAIN = 10, LDLM_EXTENT = 11, LDLM_FLOCK = 12, LDLM_IBITS = 13, LDLM_MAX_TYPE -} ldlm_type_t; +}; #define LDLM_MIN_TYPE LDLM_PLAIN @@ -2747,7 +2611,8 @@ struct ldlm_flock_wire { * the first fields of the ldlm_flock structure because there is only * one ldlm_swab routine to process the ldlm_policy_data_t union. if * this ever changes we will need to swab the union differently based - * on the resource type. */ + * on the resource type. 
+ */ typedef union { struct ldlm_extent l_extent; @@ -2768,15 +2633,15 @@ struct ldlm_intent { void lustre_swab_ldlm_intent(struct ldlm_intent *i); struct ldlm_resource_desc { - ldlm_type_t lr_type; + enum ldlm_type lr_type; __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */ struct ldlm_res_id lr_name; }; struct ldlm_lock_desc { struct ldlm_resource_desc l_resource; - ldlm_mode_t l_req_mode; - ldlm_mode_t l_granted_mode; + enum ldlm_mode l_req_mode; + enum ldlm_mode l_granted_mode; ldlm_wire_policy_data_t l_policy_data; }; @@ -2793,7 +2658,8 @@ struct ldlm_request { void lustre_swab_ldlm_request(struct ldlm_request *rq); /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available. - * Otherwise, 2 are available. */ + * Otherwise, 2 are available. + */ #define ldlm_request_bufsize(count, type) \ ({ \ int _avail = LDLM_LOCKREQ_HANDLES; \ @@ -2820,7 +2686,7 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r); /* * Opcodes for mountconf (mgs and mgc) */ -typedef enum { +enum mgs_cmd { MGS_CONNECT = 250, MGS_DISCONNECT, MGS_EXCEPTION, /* node died, etc. */ @@ -2829,7 +2695,7 @@ typedef enum { MGS_SET_INFO, MGS_CONFIG_READ, MGS_LAST_OPC -} mgs_cmd_t; +}; #define MGS_FIRST_OPC MGS_CONNECT #define MGS_PARAM_MAXLEN 1024 @@ -2918,13 +2784,13 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size); * Opcodes for multiple servers. 
*/ -typedef enum { +enum obd_cmd { OBD_PING = 400, OBD_LOG_CANCEL, OBD_QC_CALLBACK, OBD_IDX_READ, OBD_LAST_OPC -} obd_cmd_t; +}; #define OBD_FIRST_OPC OBD_PING /* catalog of log objects */ @@ -2933,7 +2799,7 @@ typedef enum { struct llog_logid { struct ost_id lgl_oi; __u32 lgl_ogen; -} __attribute__((packed)); +} __packed; /** Records written to the CATALOGS list */ #define CATLIST "CATALOGS" @@ -2942,7 +2808,7 @@ struct llog_catid { __u32 lci_padding1; __u32 lci_padding2; __u32 lci_padding3; -} __attribute__((packed)); +} __packed; /* Log data record types - there is no specific reason that these need to * be related to the RPC opcodes, but no reason not to (may be handy later?) @@ -2950,7 +2816,7 @@ struct llog_catid { #define LLOG_OP_MAGIC 0x10600000 #define LLOG_OP_MASK 0xfff00000 -typedef enum { +enum llog_op_type { LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000, OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00, /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */ @@ -2970,7 +2836,7 @@ typedef enum { HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000, LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539, LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b, -} llog_op_type; +}; #define LLOG_REC_HDR_NEEDS_SWABBING(r) \ (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC)) @@ -3006,7 +2872,7 @@ struct llog_logid_rec { __u64 lid_padding2; __u64 lid_padding3; struct llog_rec_tail lid_tail; -} __attribute__((packed)); +} __packed; struct llog_unlink_rec { struct llog_rec_hdr lur_hdr; @@ -3014,7 +2880,7 @@ struct llog_unlink_rec { __u32 lur_oseq; __u32 lur_count; struct llog_rec_tail lur_tail; -} __attribute__((packed)); +} __packed; struct llog_unlink64_rec { struct llog_rec_hdr lur_hdr; @@ -3024,7 +2890,7 @@ struct llog_unlink64_rec { __u64 lur_padding2; __u64 lur_padding3; struct llog_rec_tail lur_tail; -} __attribute__((packed)); +} __packed; struct llog_setattr64_rec { struct llog_rec_hdr lsr_hdr; @@ -3035,7 +2901,7 @@ struct llog_setattr64_rec { __u32 lsr_gid_h; __u64 lsr_padding; struct 
llog_rec_tail lsr_tail; -} __attribute__((packed)); +} __packed; struct llog_size_change_rec { struct llog_rec_hdr lsc_hdr; @@ -3045,16 +2911,7 @@ struct llog_size_change_rec { __u64 lsc_padding2; __u64 lsc_padding3; struct llog_rec_tail lsc_tail; -} __attribute__((packed)); - -#define CHANGELOG_MAGIC 0xca103000 - -/** \a changelog_rec_type's that can't be masked */ -#define CHANGELOG_MINMASK (1 << CL_MARK) -/** bits covering all \a changelog_rec_type's */ -#define CHANGELOG_ALLMASK 0XFFFFFFFF -/** default \a changelog_rec_type mask */ -#define CHANGELOG_DEFMASK CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE) +} __packed; /* changelog llog name, needed by client replicators */ #define CHANGELOG_CATALOG "changelog_catalog" @@ -3062,22 +2919,20 @@ struct llog_size_change_rec { struct changelog_setinfo { __u64 cs_recno; __u32 cs_id; -} __attribute__((packed)); +} __packed; /** changelog record */ struct llog_changelog_rec { struct llog_rec_hdr cr_hdr; struct changelog_rec cr; struct llog_rec_tail cr_tail; /**< for_sizezof_only */ -} __attribute__((packed)); +} __packed; struct llog_changelog_ext_rec { struct llog_rec_hdr cr_hdr; struct changelog_ext_rec cr; struct llog_rec_tail cr_tail; /**< for_sizezof_only */ -} __attribute__((packed)); - -#define CHANGELOG_USER_PREFIX "cl" +} __packed; struct llog_changelog_user_rec { struct llog_rec_hdr cur_hdr; @@ -3085,7 +2940,7 @@ struct llog_changelog_user_rec { __u32 cur_padding; __u64 cur_endrec; struct llog_rec_tail cur_tail; -} __attribute__((packed)); +} __packed; enum agent_req_status { ARS_WAITING, @@ -3123,21 +2978,22 @@ struct llog_agent_req_rec { struct llog_rec_hdr arr_hdr; /**< record header */ __u32 arr_status; /**< status of the request */ /* must match enum - * agent_req_status */ + * agent_req_status + */ __u32 arr_archive_id; /**< backend archive number */ __u64 arr_flags; /**< req flags */ - __u64 arr_compound_id; /**< compound cookie */ + __u64 arr_compound_id;/**< compound cookie */ __u64 
arr_req_create; /**< req. creation time */ __u64 arr_req_change; /**< req. status change time */ struct hsm_action_item arr_hai; /**< req. to the agent */ - struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */ -} __attribute__((packed)); + struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */ +} __packed; /* Old llog gen for compatibility */ struct llog_gen { __u64 mnt_cnt; __u64 conn_cnt; -} __attribute__((packed)); +} __packed; struct llog_gen_rec { struct llog_rec_hdr lgr_hdr; @@ -3175,19 +3031,21 @@ struct llog_log_hdr { __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23]; __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)]; struct llog_rec_tail llh_tail; -} __attribute__((packed)); +} __packed; #define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \ llh->llh_bitmap_offset - \ sizeof(llh->llh_tail)) * 8) -/** log cookies are used to reference a specific log file and a record therein */ +/** log cookies are used to reference a specific log file and a record + * therein + */ struct llog_cookie { struct llog_logid lgc_lgl; __u32 lgc_subsys; __u32 lgc_index; __u32 lgc_padding; -} __attribute__((packed)); +} __packed; /** llog protocol */ enum llogd_rpc_ops { @@ -3196,7 +3054,7 @@ enum llogd_rpc_ops { LLOG_ORIGIN_HANDLE_READ_HEADER = 503, LLOG_ORIGIN_HANDLE_WRITE_REC = 504, LLOG_ORIGIN_HANDLE_CLOSE = 505, - LLOG_ORIGIN_CONNECT = 506, + LLOG_ORIGIN_CONNECT = 506, LLOG_CATINFO = 507, /* deprecated */ LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508, LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/ @@ -3212,13 +3070,13 @@ struct llogd_body { __u32 lgd_saved_index; __u32 lgd_len; __u64 lgd_cur_offset; -} __attribute__((packed)); +} __packed; struct llogd_conn_body { struct llog_gen lgdc_gen; struct llog_logid lgdc_logid; __u32 lgdc_ctxt_idx; -} __attribute__((packed)); +} __packed; /* Note: 64-bit types are 64-bit aligned in structure */ struct obdo { @@ -3245,17 +3103,18 @@ struct obdo { __u64 o_ioepoch; /* epoch in ost writes */ 
__u32 o_stripe_idx; /* holds stripe idx */ __u32 o_parent_ver; - struct lustre_handle o_handle; /* brw: lock handle to prolong - * locks */ - struct llog_cookie o_lcookie; /* destroy: unlink cookie from - * MDS */ + struct lustre_handle o_handle; /* brw: lock handle to prolong locks + */ + struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS + */ __u32 o_uid_h; __u32 o_gid_h; __u64 o_data_version; /* getattr: sum of iversion for * each stripe. * brw: grant space consumed on - * the client for the write */ + * the client for the write + */ __u64 o_padding_4; __u64 o_padding_5; __u64 o_padding_6; @@ -3273,13 +3132,14 @@ static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd, { *wobdo = *lobdo; wobdo->o_flags &= ~OBD_FL_LOCAL_MASK; - if (ocd == NULL) + if (!ocd) return; if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) { /* Currently OBD_FL_OSTID will only be used when 2.4 echo - * client communicate with pre-2.4 server */ + * client communicate with pre-2.4 server + */ wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid); wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid); } @@ -3292,7 +3152,7 @@ static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd, __u32 local_flags = 0; if (lobdo->o_valid & OBD_MD_FLFLAGS) - local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; + local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; *lobdo = *wobdo; if (local_flags != 0) { @@ -3300,7 +3160,7 @@ static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd, lobdo->o_flags &= ~OBD_FL_LOCAL_MASK; lobdo->o_flags |= local_flags; } - if (ocd == NULL) + if (!ocd) return; if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && @@ -3349,100 +3209,14 @@ void dump_ioo(struct obd_ioobj *nb); void dump_ost_body(struct ost_body *ob); void dump_rcs(__u32 *rc); -#define IDX_INFO_MAGIC 0x3D37CC37 - -/* Index file transfer through the network. 
The server serializes the index into - * a byte stream which is sent to the client via a bulk transfer */ -struct idx_info { - __u32 ii_magic; - - /* reply: see idx_info_flags below */ - __u32 ii_flags; - - /* request & reply: number of lu_idxpage (to be) transferred */ - __u16 ii_count; - __u16 ii_pad0; - - /* request: requested attributes passed down to the iterator API */ - __u32 ii_attrs; - - /* request & reply: index file identifier (FID) */ - struct lu_fid ii_fid; - - /* reply: version of the index file before starting to walk the index. - * Please note that the version can be modified at any time during the - * transfer */ - __u64 ii_version; - - /* request: hash to start with: - * reply: hash of the first entry of the first lu_idxpage and hash - * of the entry to read next if any */ - __u64 ii_hash_start; - __u64 ii_hash_end; - - /* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is - * set */ - __u16 ii_keysize; - - /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC - * is set */ - __u16 ii_recsize; - - __u32 ii_pad1; - __u64 ii_pad2; - __u64 ii_pad3; -}; - -void lustre_swab_idx_info(struct idx_info *ii); - -#define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */ - -/* List of flags used in idx_info::ii_flags */ -enum idx_info_flags { - II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */ - II_FL_VARKEY = 1 << 1, /* keys can be of variable size */ - II_FL_VARREC = 1 << 2, /* records can be of variable size */ - II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */ -}; - -#define LIP_MAGIC 0x8A6D6B6C - -/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */ -struct lu_idxpage { - /* 16-byte header */ - __u32 lip_magic; - __u16 lip_flags; - __u16 lip_nr; /* number of entries in the container */ - __u64 lip_pad0; /* additional padding for future use */ - - /* key/record pairs are stored in the remaining 4080 bytes. 
- * depending upon the flags in idx_info::ii_flags, each key/record - * pair might be preceded by: - * - a hash value - * - the key size (II_FL_VARKEY is set) - * - the record size (II_FL_VARREC is set) - * - * For the time being, we only support fixed-size key & record. */ - char lip_entries[0]; -}; - -#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries)) - -/* Gather all possible type associated with a 4KB container */ -union lu_page { - struct lu_dirpage lp_dir; /* for MDS_READPAGE */ - struct lu_idxpage lp_idx; /* for OBD_IDX_READ */ - char lp_array[LU_PAGE_SIZE]; -}; - /* security opcodes */ -typedef enum { +enum sec_cmd { SEC_CTX_INIT = 801, SEC_CTX_INIT_CONT = 802, SEC_CTX_FINI = 803, SEC_LAST_OPC, SEC_FIRST_OPC = SEC_CTX_INIT -} sec_cmd_t; +}; /* * capa related definitions @@ -3451,7 +3225,8 @@ typedef enum { #define CAPA_HMAC_KEY_MAX_LEN 56 /* NB take care when changing the sequence of elements this struct, - * because the offset info is used in find_capa() */ + * because the offset info is used in find_capa() + */ struct lustre_capa { struct lu_fid lc_fid; /** fid */ __u64 lc_opc; /** operations allowed */ @@ -3463,7 +3238,7 @@ struct lustre_capa { /* FIXME: y2038 time_t overflow: */ __u32 lc_expiry; /** expiry time (sec) */ __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */ -} __attribute__((packed)); +} __packed; void lustre_swab_lustre_capa(struct lustre_capa *c); @@ -3497,7 +3272,7 @@ struct lustre_capa_key { __u32 lk_keyid; /**< key# */ __u32 lk_padding; __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */ -} __attribute__((packed)); +} __packed; /** The link ea holds 1 \a link_ea_entry for each hardlink */ #define LINK_EA_MAGIC 0x11EAF1DFUL @@ -3518,7 +3293,7 @@ struct link_ea_entry { unsigned char lee_reclen[2]; unsigned char lee_parent_fid[sizeof(struct lu_fid)]; char lee_name[0]; -} __attribute__((packed)); +} __packed; /** fid2path request/reply structure */ struct getinfo_fid2path { @@ -3527,7 +3302,7 @@ struct getinfo_fid2path { __u32 
gf_linkno; __u32 gf_pathlen; char gf_path[0]; -} __attribute__((packed)); +} __packed; void lustre_swab_fid2path (struct getinfo_fid2path *gf); @@ -3558,7 +3333,7 @@ void lustre_swab_layout_intent(struct layout_intent *li); */ struct hsm_progress_kernel { /* Field taken from struct hsm_progress */ - lustre_fid hpk_fid; + struct lu_fid hpk_fid; __u64 hpk_cookie; struct hsm_extent hpk_extent; __u16 hpk_flags; @@ -3567,7 +3342,7 @@ struct hsm_progress_kernel { /* Additional fields */ __u64 hpk_data_version; __u64 hpk_padding2; -} __attribute__((packed)); +} __packed; void lustre_swab_hsm_user_state(struct hsm_user_state *hus); void lustre_swab_hsm_current_action(struct hsm_current_action *action); @@ -3576,92 +3351,6 @@ void lustre_swab_hsm_user_state(struct hsm_user_state *hus); void lustre_swab_hsm_user_item(struct hsm_user_item *hui); void lustre_swab_hsm_request(struct hsm_request *hr); -/** - * These are object update opcode under UPDATE_OBJ, which is currently - * being used by cross-ref operations between MDT. - * - * During the cross-ref operation, the Master MDT, which the client send the - * request to, will disassembly the operation into object updates, then OSP - * will send these updates to the remote MDT to be executed. - * - * Update request format - * magic: UPDATE_BUFFER_MAGIC_V1 - * Count: How many updates in the req. - * bufs[0] : following are packets of object. - * update[0]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * update[1]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * .......... - * update[7]: type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * Current 8 maxim updates per object update request. 
- * - ******************************************************************* - * update reply format: - * - * ur_version: UPDATE_REPLY_V1 - * ur_count: The count of the reply, which is usually equal - * to the number of updates in the request. - * ur_lens: The reply lengths of each object update. - * - * replies: 1st update reply [4bytes_ret: other body] - * 2nd update reply [4bytes_ret: other body] - * ..... - * nth update reply [4bytes_ret: other body] - * - * For each reply of the update, the format would be - * result(4 bytes):Other stuff - */ - -#define UPDATE_MAX_OPS 10 -#define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001 -#define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1 -#define UPDATE_BUF_COUNT 8 -enum object_update_op { - OBJ_CREATE = 1, - OBJ_DESTROY = 2, - OBJ_REF_ADD = 3, - OBJ_REF_DEL = 4, - OBJ_ATTR_SET = 5, - OBJ_ATTR_GET = 6, - OBJ_XATTR_SET = 7, - OBJ_XATTR_GET = 8, - OBJ_INDEX_LOOKUP = 9, - OBJ_INDEX_INSERT = 10, - OBJ_INDEX_DELETE = 11, - OBJ_LAST -}; - -struct update { - __u32 u_type; - __u32 u_batchid; - struct lu_fid u_fid; - __u32 u_lens[UPDATE_BUF_COUNT]; - __u32 u_bufs[0]; -}; - -struct update_buf { - __u32 ub_magic; - __u32 ub_count; - __u32 ub_bufs[0]; -}; - -#define UPDATE_REPLY_V1 0x00BD0001 -struct update_reply { - __u32 ur_version; - __u32 ur_count; - __u32 ur_lens[0]; -}; - -void lustre_swab_update_buf(struct update_buf *ub); -void lustre_swab_update_reply_buf(struct update_reply *ur); - /** layout swap request structure * fid1 and fid2 are in mdt_body */ diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 2b4dd656d..276906e64 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h @@ -85,9 +85,8 @@ struct obd_statfs { __u32 os_namelen; __u64 os_maxbytes; __u32 os_state; /**< obd_statfs_state OS_STATE_* flag */ - __u32 os_fprecreated; /* objs available now to the caller */ - /* 
used in QoS code to find preferred - * OSTs */ + __u32 os_fprecreated; /* objs available now to the caller */ + /* used in QoS code to find preferred OSTs */ __u32 os_spare2; __u32 os_spare3; __u32 os_spare4; @@ -135,8 +134,9 @@ struct filter_fid_old { /* Userspace should treat lu_fid as opaque, and only use the following methods * to print or parse them. Other functions (e.g. compare, swab) could be moved - * here from lustre_idl.h if needed. */ -typedef struct lu_fid lustre_fid; + * here from lustre_idl.h if needed. + */ +struct lu_fid; /** * Following struct for object attributes, that will be kept inode's EA. @@ -266,7 +266,8 @@ struct ost_id { /* Define O_LOV_DELAY_CREATE to be a mask that is not useful for regular * files, but are unlikely to be used in practice and are not harmful if * used incorrectly. O_NOCTTY and FASYNC are only meaningful for character - * devices and are safe for use on new files (See LU-812, LU-4209). */ + * devices and are safe for use on new files (See LU-812, LU-4209). + */ #define O_LOV_DELAY_CREATE (O_NOCTTY | FASYNC) #define LL_FILE_IGNORE_LOCK 0x00000001 @@ -302,7 +303,8 @@ struct ost_id { * The limit of 12 pages is somewhat arbitrary, but is a reasonably large * allocation that is sufficient for the current generation of systems. 
* - * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) */ + * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) + */ #define LOV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */ #define LOV_ALL_STRIPES 0xffff /* only valid for directories */ #define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */ @@ -323,9 +325,11 @@ struct lov_user_md_v1 { /* LOV EA user data (host-endian) */ __u16 lmm_stripe_count; /* num stripes in use for this object */ union { __u16 lmm_stripe_offset; /* starting stripe offset in - * lmm_objects, use when writing */ + * lmm_objects, use when writing + */ __u16 lmm_layout_gen; /* layout generation number - * used when reading */ + * used when reading + */ }; struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */ } __attribute__((packed, __may_alias__)); @@ -338,9 +342,11 @@ struct lov_user_md_v3 { /* LOV EA user data (host-endian) */ __u16 lmm_stripe_count; /* num stripes in use for this object */ union { __u16 lmm_stripe_offset; /* starting stripe offset in - * lmm_objects, use when writing */ + * lmm_objects, use when writing + */ __u16 lmm_layout_gen; /* layout generation number - * used when reading */ + * used when reading + */ }; char lmm_pool_name[LOV_MAXPOOLNAME]; /* pool name */ struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */ @@ -442,9 +448,13 @@ static inline void obd_str2uuid(struct obd_uuid *uuid, const char *tmp) /* For printf's only, make sure uuid is terminated */ static inline char *obd_uuid2str(const struct obd_uuid *uuid) { + if (!uuid) + return NULL; + if (uuid->uuid[sizeof(*uuid) - 1] != '\0') { /* Obviously not safe, but for printfs, no real harm done... - we're always null-terminated, even in a race. */ + * we're always null-terminated, even in a race. 
+ */ static char temp[sizeof(*uuid)]; memcpy(temp, uuid->uuid, sizeof(*uuid) - 1); @@ -455,8 +465,9 @@ static inline char *obd_uuid2str(const struct obd_uuid *uuid) } /* Extract fsname from uuid (or target name) of a target - e.g. (myfs-OST0007_UUID -> myfs) - see also deuuidify. */ + * e.g. (myfs-OST0007_UUID -> myfs) + * see also deuuidify. + */ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) { char *p; @@ -465,11 +476,12 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) buf[buflen - 1] = '\0'; p = strrchr(buf, '-'); if (p) - *p = '\0'; + *p = '\0'; } /* printf display format - e.g. printf("file FID is "DFID"\n", PFID(fid)); */ + * e.g. printf("file FID is "DFID"\n", PFID(fid)); + */ #define FID_NOBRACE_LEN 40 #define FID_LEN (FID_NOBRACE_LEN + 2) #define DFID_NOBRACE "%#llx:0x%x:0x%x" @@ -480,7 +492,8 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen) (fid)->f_ver /* scanf input parse format -- strip '[' first. - e.g. sscanf(fidstr, SFID, RFID(&fid)); */ + * e.g. 
sscanf(fidstr, SFID, RFID(&fid)); + */ #define SFID "0x%llx:0x%x:0x%x" #define RFID(fid) \ &((fid)->f_seq), \ @@ -542,22 +555,6 @@ enum { RMT_RGETFACL = 4 }; -#ifdef NEED_QUOTA_DEFS -#ifndef QIF_BLIMITS -#define QIF_BLIMITS 1 -#define QIF_SPACE 2 -#define QIF_ILIMITS 4 -#define QIF_INODES 8 -#define QIF_BTIME 16 -#define QIF_ITIME 32 -#define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS) -#define QIF_USAGE (QIF_SPACE | QIF_INODES) -#define QIF_TIMES (QIF_BTIME | QIF_ITIME) -#define QIF_ALL (QIF_LIMITS | QIF_USAGE | QIF_TIMES) -#endif - -#endif /* !__KERNEL__ */ - /* lustre volatile file support * file name header: .^L^S^T^R:volatile" */ @@ -566,9 +563,9 @@ enum { /* hdr + MDT index */ #define LUSTRE_VOLATILE_IDX LUSTRE_VOLATILE_HDR":%.4X:" -typedef enum lustre_quota_version { +enum lustre_quota_version { LUSTRE_QUOTA_V2 = 1 -} lustre_quota_version_t; +}; /* XXX: same as if_dqinfo struct in kernel */ struct obd_dqinfo { @@ -698,7 +695,8 @@ static inline const char *changelog_type2str(int type) #define CLF_HSM_LAST 15 /* Remove bits higher than _h, then extract the value - * between _h and _l by shifting lower weigth to bit 0. */ + * between _h and _l by shifting lower weigth to bit 0. 
+ */ #define CLF_GET_BITS(_b, _h, _l) (((_b << (CLF_HSM_LAST - _h)) & 0xFFFF) \ >> (CLF_HSM_LAST - _h + _l)) @@ -761,10 +759,10 @@ struct changelog_rec { __u64 cr_prev; /**< last index for this target fid */ __u64 cr_time; union { - lustre_fid cr_tfid; /**< target fid */ + struct lu_fid cr_tfid; /**< target fid */ __u32 cr_markerflags; /**< CL_MARK flags */ }; - lustre_fid cr_pfid; /**< parent fid */ + struct lu_fid cr_pfid; /**< parent fid */ char cr_name[0]; /**< last element */ } __packed; @@ -775,18 +773,19 @@ struct changelog_rec { struct changelog_ext_rec { __u16 cr_namelen; __u16 cr_flags; /**< (flags & CLF_FLAGMASK) | - CLF_EXT_VERSION */ + * CLF_EXT_VERSION + */ __u32 cr_type; /**< \a changelog_rec_type */ __u64 cr_index; /**< changelog record number */ __u64 cr_prev; /**< last index for this target fid */ __u64 cr_time; union { - lustre_fid cr_tfid; /**< target fid */ + struct lu_fid cr_tfid; /**< target fid */ __u32 cr_markerflags; /**< CL_MARK flags */ }; - lustre_fid cr_pfid; /**< target parent fid */ - lustre_fid cr_sfid; /**< source fid, or zero */ - lustre_fid cr_spfid; /**< source parent fid, or zero */ + struct lu_fid cr_pfid; /**< target parent fid */ + struct lu_fid cr_sfid; /**< source fid, or zero */ + struct lu_fid cr_spfid; /**< source parent fid, or zero */ char cr_name[0]; /**< last element */ } __packed; @@ -835,7 +834,8 @@ struct ioc_data_version { }; #define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling - version. Dirty caches are left unchanged. */ + * version. Dirty caches are left unchanged. 
+ */ #ifndef offsetof # define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb))) @@ -976,8 +976,8 @@ struct hsm_request { }; struct hsm_user_item { - lustre_fid hui_fid; - struct hsm_extent hui_extent; + struct lu_fid hui_fid; + struct hsm_extent hui_extent; } __packed; struct hsm_user_request { @@ -1046,8 +1046,8 @@ static inline char *hsm_copytool_action2name(enum hsm_copytool_action a) struct hsm_action_item { __u32 hai_len; /* valid size of this struct */ __u32 hai_action; /* hsm_copytool_action, but use known size */ - lustre_fid hai_fid; /* Lustre FID to operated on */ - lustre_fid hai_dfid; /* fid used for data access */ + struct lu_fid hai_fid; /* Lustre FID to operated on */ + struct lu_fid hai_dfid; /* fid used for data access */ struct hsm_extent hai_extent; /* byte range to operate on */ __u64 hai_cookie; /* action cookie from coordinator */ __u64 hai_gid; /* grouplock id */ @@ -1095,7 +1095,8 @@ struct hsm_action_list { __u32 padding1; char hal_fsname[0]; /* null-terminated */ /* struct hsm_action_item[hal_count] follows, aligned on 8-byte - boundaries. See hai_zero */ + * boundaries. 
See hai_zero + */ } __packed; #ifndef HAVE_CFS_SIZE_ROUND @@ -1157,7 +1158,7 @@ struct hsm_user_import { #define HP_FLAG_RETRY 0x02 struct hsm_progress { - lustre_fid hp_fid; + struct lu_fid hp_fid; __u64 hp_cookie; struct hsm_extent hp_extent; __u16 hp_flags; diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h index eb6b292b7..bb16ae980 100644 --- a/drivers/staging/lustre/lustre/include/lustre_cfg.h +++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h @@ -50,12 +50,13 @@ #define LUSTRE_CFG_MAX_BUFCOUNT 8 #define LCFG_HDR_SIZE(count) \ - cfs_size_round(offsetof (struct lustre_cfg, lcfg_buflens[(count)])) + cfs_size_round(offsetof(struct lustre_cfg, lcfg_buflens[(count)])) /** If the LCFG_REQUIRED bit is set in a configuration command, * then the client is required to understand this parameter * in order to mount the filesystem. If it does not understand - * a REQUIRED command the client mount will fail. */ + * a REQUIRED command the client mount will fail. 
+ */ #define LCFG_REQUIRED 0x0001000 enum lcfg_command_type { @@ -87,9 +88,11 @@ enum lcfg_command_type { LCFG_POOL_DEL = 0x00ce023, /**< destroy an ost pool name */ LCFG_SET_LDLM_TIMEOUT = 0x00ce030, /**< set ldlm_timeout */ LCFG_PRE_CLEANUP = 0x00cf031, /**< call type-specific pre - * cleanup cleanup */ + * cleanup cleanup + */ LCFG_SET_PARAM = 0x00ce032, /**< use set_param syntax to set - *a proc parameters */ + * a proc parameters + */ }; struct lustre_cfg_bufs { @@ -128,7 +131,7 @@ static inline void lustre_cfg_bufs_set(struct lustre_cfg_bufs *bufs, { if (index >= LUSTRE_CFG_MAX_BUFCOUNT) return; - if (bufs == NULL) + if (!bufs) return; if (bufs->lcfg_bufcount <= index) @@ -158,7 +161,6 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index) int offset; int bufcount; - LASSERT (lcfg != NULL); LASSERT (index >= 0); bufcount = lcfg->lcfg_bufcount; @@ -191,7 +193,7 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index) return NULL; s = lustre_cfg_buf(lcfg, index); - if (s == NULL) + if (!s) return NULL; /* @@ -252,10 +254,6 @@ static inline struct lustre_cfg *lustre_cfg_new(int cmd, static inline void lustre_cfg_free(struct lustre_cfg *lcfg) { - int len; - - len = lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens); - kfree(lcfg); return; } diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h index 7c6933ffc..95fd36063 100644 --- a/drivers/staging/lustre/lustre/include/lustre_disk.h +++ b/drivers/staging/lustre/lustre/include/lustre_disk.h @@ -65,7 +65,8 @@ /****************** mount command *********************/ /* The lmd is only used internally by Lustre; mount simply passes - everything as string options */ + * everything as string options + */ #define LMD_MAGIC 0xbdacbd03 #define LMD_PARAMS_MAXLEN 4096 @@ -79,23 +80,26 @@ struct lustre_mount_data { int lmd_recovery_time_soft; int lmd_recovery_time_hard; char *lmd_dev; /* device name */ - char 
*lmd_profile; /* client only */ + char *lmd_profile; /* client only */ char *lmd_mgssec; /* sptlrpc flavor to mgs */ - char *lmd_opts; /* lustre mount options (as opposed to - _device_ mount options) */ + char *lmd_opts; /* lustre mount options (as opposed to + * _device_ mount options) + */ char *lmd_params; /* lustre params */ - __u32 *lmd_exclude; /* array of OSTs to ignore */ - char *lmd_mgs; /* MGS nid */ - char *lmd_osd_type; /* OSD type */ + __u32 *lmd_exclude; /* array of OSTs to ignore */ + char *lmd_mgs; /* MGS nid */ + char *lmd_osd_type; /* OSD type */ }; #define LMD_FLG_SERVER 0x0001 /* Mounting a server */ #define LMD_FLG_CLIENT 0x0002 /* Mounting a client */ #define LMD_FLG_ABORT_RECOV 0x0008 /* Abort recovery */ #define LMD_FLG_NOSVC 0x0010 /* Only start MGS/MGC for servers, - no other services */ -#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, reusing - existing MGS services */ + * no other services + */ +#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, + * reusing existing MGS services + */ #define LMD_FLG_WRITECONF 0x0040 /* Rewrite config log */ #define LMD_FLG_NOIR 0x0080 /* NO imperative recovery */ #define LMD_FLG_NOSCRUB 0x0100 /* Do not trigger scrub automatically */ @@ -116,231 +120,6 @@ struct lustre_mount_data { #define LR_EXPIRE_INTERVALS 16 /**< number of intervals to track transno */ #define ENOENT_VERSION 1 /** 'virtual' version of non-existent object */ -#define LR_SERVER_SIZE 512 -#define LR_CLIENT_START 8192 -#define LR_CLIENT_SIZE 128 -#if LR_CLIENT_START < LR_SERVER_SIZE -#error "Can't have LR_CLIENT_START < LR_SERVER_SIZE" -#endif - -/* - * This limit is arbitrary (131072 clients on x86), but it is convenient to use - * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation. - * If we need more than 131072 clients (order-2 allocation on x86) then this - * should become an array of single-page pointers that are allocated on demand. 
- */ -#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8) -#define LR_MAX_CLIENTS (128 * 1024UL) -#else -#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8) -#endif - -/** COMPAT_146: this is an OST (temporary) */ -#define OBD_COMPAT_OST 0x00000002 -/** COMPAT_146: this is an MDT (temporary) */ -#define OBD_COMPAT_MDT 0x00000004 -/** 2.0 server, interop flag to show server version is changed */ -#define OBD_COMPAT_20 0x00000008 - -/** MDS handles LOV_OBJID file */ -#define OBD_ROCOMPAT_LOVOBJID 0x00000001 - -/** OST handles group subdirs */ -#define OBD_INCOMPAT_GROUPS 0x00000001 -/** this is an OST */ -#define OBD_INCOMPAT_OST 0x00000002 -/** this is an MDT */ -#define OBD_INCOMPAT_MDT 0x00000004 -/** common last_rvcd format */ -#define OBD_INCOMPAT_COMMON_LR 0x00000008 -/** FID is enabled */ -#define OBD_INCOMPAT_FID 0x00000010 -/** Size-on-MDS is enabled */ -#define OBD_INCOMPAT_SOM 0x00000020 -/** filesystem using iam format to store directory entries */ -#define OBD_INCOMPAT_IAM_DIR 0x00000040 -/** LMA attribute contains per-inode incompatible flags */ -#define OBD_INCOMPAT_LMA 0x00000080 -/** lmm_stripe_count has been shrunk from __u32 to __u16 and the remaining 16 - * bits are now used to store a generation. Once we start changing the layout - * and bumping the generation, old versions expecting a 32-bit lmm_stripe_count - * will be confused by interpreting stripe_count | gen << 16 as the actual - * stripe count */ -#define OBD_INCOMPAT_LMM_VER 0x00000100 -/** multiple OI files for MDT */ -#define OBD_INCOMPAT_MULTI_OI 0x00000200 - -/* Data stored per server at the head of the last_rcvd file. In le32 order. 
- This should be common to filter_internal.h, lustre_mds.h */ -struct lr_server_data { - __u8 lsd_uuid[40]; /* server UUID */ - __u64 lsd_last_transno; /* last completed transaction ID */ - __u64 lsd_compat14; /* reserved - compat with old last_rcvd */ - __u64 lsd_mount_count; /* incarnation number */ - __u32 lsd_feature_compat; /* compatible feature flags */ - __u32 lsd_feature_rocompat;/* read-only compatible feature flags */ - __u32 lsd_feature_incompat;/* incompatible feature flags */ - __u32 lsd_server_size; /* size of server data area */ - __u32 lsd_client_start; /* start of per-client data area */ - __u16 lsd_client_size; /* size of per-client data area */ - __u16 lsd_subdir_count; /* number of subdirectories for objects */ - __u64 lsd_catalog_oid; /* recovery catalog object id */ - __u32 lsd_catalog_ogen; /* recovery catalog inode generation */ - __u8 lsd_peeruuid[40]; /* UUID of MDS associated with this OST */ - __u32 lsd_osd_index; /* index number of OST in LOV */ - __u32 lsd_padding1; /* was lsd_mdt_index, unused in 2.4.0 */ - __u32 lsd_start_epoch; /* VBR: start epoch from last boot */ - /** transaction values since lsd_trans_table_time */ - __u64 lsd_trans_table[LR_EXPIRE_INTERVALS]; - /** start point of transno table below */ - __u32 lsd_trans_table_time; /* time of first slot in table above */ - __u32 lsd_expire_intervals; /* LR_EXPIRE_INTERVALS */ - __u8 lsd_padding[LR_SERVER_SIZE - 288]; -}; - -/* Data stored per client in the last_rcvd file. In le32 order. */ -struct lsd_client_data { - __u8 lcd_uuid[40]; /* client UUID */ - __u64 lcd_last_transno; /* last completed transaction ID */ - __u64 lcd_last_xid; /* xid for the last transaction */ - __u32 lcd_last_result; /* result from last RPC */ - __u32 lcd_last_data; /* per-op data (disposition for open &c.) 
*/ - /* for MDS_CLOSE requests */ - __u64 lcd_last_close_transno; /* last completed transaction ID */ - __u64 lcd_last_close_xid; /* xid for the last transaction */ - __u32 lcd_last_close_result; /* result from last RPC */ - __u32 lcd_last_close_data; /* per-op data */ - /* VBR: last versions */ - __u64 lcd_pre_versions[4]; - __u32 lcd_last_epoch; - /** orphans handling for delayed export rely on that */ - __u32 lcd_first_epoch; - __u8 lcd_padding[LR_CLIENT_SIZE - 128]; -}; - -/* bug20354: the lcd_uuid for export of clients may be wrong */ -static inline void check_lcd(char *obd_name, int index, - struct lsd_client_data *lcd) -{ - int length = sizeof(lcd->lcd_uuid); - - if (strnlen((char *)lcd->lcd_uuid, length) == length) { - lcd->lcd_uuid[length - 1] = '\0'; - - LCONSOLE_ERROR("the client UUID (%s) on %s for exports stored in last_rcvd(index = %d) is bad!\n", - lcd->lcd_uuid, obd_name, index); - } -} - -/* last_rcvd handling */ -static inline void lsd_le_to_cpu(struct lr_server_data *buf, - struct lr_server_data *lsd) -{ - int i; - - memcpy(lsd->lsd_uuid, buf->lsd_uuid, sizeof(lsd->lsd_uuid)); - lsd->lsd_last_transno = le64_to_cpu(buf->lsd_last_transno); - lsd->lsd_compat14 = le64_to_cpu(buf->lsd_compat14); - lsd->lsd_mount_count = le64_to_cpu(buf->lsd_mount_count); - lsd->lsd_feature_compat = le32_to_cpu(buf->lsd_feature_compat); - lsd->lsd_feature_rocompat = le32_to_cpu(buf->lsd_feature_rocompat); - lsd->lsd_feature_incompat = le32_to_cpu(buf->lsd_feature_incompat); - lsd->lsd_server_size = le32_to_cpu(buf->lsd_server_size); - lsd->lsd_client_start = le32_to_cpu(buf->lsd_client_start); - lsd->lsd_client_size = le16_to_cpu(buf->lsd_client_size); - lsd->lsd_subdir_count = le16_to_cpu(buf->lsd_subdir_count); - lsd->lsd_catalog_oid = le64_to_cpu(buf->lsd_catalog_oid); - lsd->lsd_catalog_ogen = le32_to_cpu(buf->lsd_catalog_ogen); - memcpy(lsd->lsd_peeruuid, buf->lsd_peeruuid, sizeof(lsd->lsd_peeruuid)); - lsd->lsd_osd_index = le32_to_cpu(buf->lsd_osd_index); - 
lsd->lsd_padding1 = le32_to_cpu(buf->lsd_padding1); - lsd->lsd_start_epoch = le32_to_cpu(buf->lsd_start_epoch); - for (i = 0; i < LR_EXPIRE_INTERVALS; i++) - lsd->lsd_trans_table[i] = le64_to_cpu(buf->lsd_trans_table[i]); - lsd->lsd_trans_table_time = le32_to_cpu(buf->lsd_trans_table_time); - lsd->lsd_expire_intervals = le32_to_cpu(buf->lsd_expire_intervals); -} - -static inline void lsd_cpu_to_le(struct lr_server_data *lsd, - struct lr_server_data *buf) -{ - int i; - - memcpy(buf->lsd_uuid, lsd->lsd_uuid, sizeof(buf->lsd_uuid)); - buf->lsd_last_transno = cpu_to_le64(lsd->lsd_last_transno); - buf->lsd_compat14 = cpu_to_le64(lsd->lsd_compat14); - buf->lsd_mount_count = cpu_to_le64(lsd->lsd_mount_count); - buf->lsd_feature_compat = cpu_to_le32(lsd->lsd_feature_compat); - buf->lsd_feature_rocompat = cpu_to_le32(lsd->lsd_feature_rocompat); - buf->lsd_feature_incompat = cpu_to_le32(lsd->lsd_feature_incompat); - buf->lsd_server_size = cpu_to_le32(lsd->lsd_server_size); - buf->lsd_client_start = cpu_to_le32(lsd->lsd_client_start); - buf->lsd_client_size = cpu_to_le16(lsd->lsd_client_size); - buf->lsd_subdir_count = cpu_to_le16(lsd->lsd_subdir_count); - buf->lsd_catalog_oid = cpu_to_le64(lsd->lsd_catalog_oid); - buf->lsd_catalog_ogen = cpu_to_le32(lsd->lsd_catalog_ogen); - memcpy(buf->lsd_peeruuid, lsd->lsd_peeruuid, sizeof(buf->lsd_peeruuid)); - buf->lsd_osd_index = cpu_to_le32(lsd->lsd_osd_index); - buf->lsd_padding1 = cpu_to_le32(lsd->lsd_padding1); - buf->lsd_start_epoch = cpu_to_le32(lsd->lsd_start_epoch); - for (i = 0; i < LR_EXPIRE_INTERVALS; i++) - buf->lsd_trans_table[i] = cpu_to_le64(lsd->lsd_trans_table[i]); - buf->lsd_trans_table_time = cpu_to_le32(lsd->lsd_trans_table_time); - buf->lsd_expire_intervals = cpu_to_le32(lsd->lsd_expire_intervals); -} - -static inline void lcd_le_to_cpu(struct lsd_client_data *buf, - struct lsd_client_data *lcd) -{ - memcpy(lcd->lcd_uuid, buf->lcd_uuid, sizeof (lcd->lcd_uuid)); - lcd->lcd_last_transno = 
le64_to_cpu(buf->lcd_last_transno); - lcd->lcd_last_xid = le64_to_cpu(buf->lcd_last_xid); - lcd->lcd_last_result = le32_to_cpu(buf->lcd_last_result); - lcd->lcd_last_data = le32_to_cpu(buf->lcd_last_data); - lcd->lcd_last_close_transno = le64_to_cpu(buf->lcd_last_close_transno); - lcd->lcd_last_close_xid = le64_to_cpu(buf->lcd_last_close_xid); - lcd->lcd_last_close_result = le32_to_cpu(buf->lcd_last_close_result); - lcd->lcd_last_close_data = le32_to_cpu(buf->lcd_last_close_data); - lcd->lcd_pre_versions[0] = le64_to_cpu(buf->lcd_pre_versions[0]); - lcd->lcd_pre_versions[1] = le64_to_cpu(buf->lcd_pre_versions[1]); - lcd->lcd_pre_versions[2] = le64_to_cpu(buf->lcd_pre_versions[2]); - lcd->lcd_pre_versions[3] = le64_to_cpu(buf->lcd_pre_versions[3]); - lcd->lcd_last_epoch = le32_to_cpu(buf->lcd_last_epoch); - lcd->lcd_first_epoch = le32_to_cpu(buf->lcd_first_epoch); -} - -static inline void lcd_cpu_to_le(struct lsd_client_data *lcd, - struct lsd_client_data *buf) -{ - memcpy(buf->lcd_uuid, lcd->lcd_uuid, sizeof (lcd->lcd_uuid)); - buf->lcd_last_transno = cpu_to_le64(lcd->lcd_last_transno); - buf->lcd_last_xid = cpu_to_le64(lcd->lcd_last_xid); - buf->lcd_last_result = cpu_to_le32(lcd->lcd_last_result); - buf->lcd_last_data = cpu_to_le32(lcd->lcd_last_data); - buf->lcd_last_close_transno = cpu_to_le64(lcd->lcd_last_close_transno); - buf->lcd_last_close_xid = cpu_to_le64(lcd->lcd_last_close_xid); - buf->lcd_last_close_result = cpu_to_le32(lcd->lcd_last_close_result); - buf->lcd_last_close_data = cpu_to_le32(lcd->lcd_last_close_data); - buf->lcd_pre_versions[0] = cpu_to_le64(lcd->lcd_pre_versions[0]); - buf->lcd_pre_versions[1] = cpu_to_le64(lcd->lcd_pre_versions[1]); - buf->lcd_pre_versions[2] = cpu_to_le64(lcd->lcd_pre_versions[2]); - buf->lcd_pre_versions[3] = cpu_to_le64(lcd->lcd_pre_versions[3]); - buf->lcd_last_epoch = cpu_to_le32(lcd->lcd_last_epoch); - buf->lcd_first_epoch = cpu_to_le32(lcd->lcd_first_epoch); -} - -static inline __u64 lcd_last_transno(struct 
lsd_client_data *lcd) -{ - return (lcd->lcd_last_transno > lcd->lcd_last_close_transno ? - lcd->lcd_last_transno : lcd->lcd_last_close_transno); -} - -static inline __u64 lcd_last_xid(struct lsd_client_data *lcd) -{ - return (lcd->lcd_last_xid > lcd->lcd_last_close_xid ? - lcd->lcd_last_xid : lcd->lcd_last_close_xid); -} - /****************** superblock additional info *********************/ struct ll_sb_info; @@ -360,7 +139,8 @@ struct lustre_sb_info { char lsi_osd_type[16]; char lsi_fstype[16]; struct backing_dev_info lsi_bdi; /* each client mountpoint needs - own backing_dev_info */ + * own backing_dev_info + */ }; #define LSI_UMOUNT_FAILOVER 0x00200000 diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h index 9b319f1df..8b0364f71 100644 --- a/drivers/staging/lustre/lustre/include/lustre_dlm.h +++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h @@ -69,7 +69,7 @@ struct obd_device; /** * LDLM non-error return states */ -typedef enum { +enum ldlm_error { ELDLM_OK = 0, ELDLM_LOCK_CHANGED = 300, @@ -80,7 +80,7 @@ typedef enum { ELDLM_NAMESPACE_EXISTS = 400, ELDLM_BAD_NAMESPACE = 401 -} ldlm_error_t; +}; /** * LDLM namespace type. @@ -145,16 +145,17 @@ typedef enum { #define LCK_COMPAT_COS (LCK_COS) /** @} Lock Compatibility Matrix */ -extern ldlm_mode_t lck_compat_array[]; +extern enum ldlm_mode lck_compat_array[]; -static inline void lockmode_verify(ldlm_mode_t mode) +static inline void lockmode_verify(enum ldlm_mode mode) { - LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE); + LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE); } -static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode) +static inline int lockmode_compat(enum ldlm_mode exist_mode, + enum ldlm_mode new_mode) { - return (lck_compat_array[exist_mode] & new_mode); + return (lck_compat_array[exist_mode] & new_mode); } /* @@ -249,7 +250,8 @@ struct ldlm_pool { /** Current biggest client lock volume. 
Protected by pl_lock. */ __u64 pl_client_lock_volume; /** Lock volume factor. SLV on client is calculated as following: - * server_slv * lock_volume_factor. */ + * server_slv * lock_volume_factor. + */ atomic_t pl_lock_volume_factor; /** Time when last SLV from server was obtained. */ time64_t pl_recalc_time; @@ -295,10 +297,10 @@ struct ldlm_valblock_ops { * LDLM pools related, type of lock pool in the namespace. * Greedy means release cached locks aggressively */ -typedef enum { +enum ldlm_appetite { LDLM_NAMESPACE_GREEDY = 1 << 0, LDLM_NAMESPACE_MODEST = 1 << 1 -} ldlm_appetite_t; +}; struct ldlm_ns_bucket { /** back pointer to namespace */ @@ -317,7 +319,7 @@ enum { LDLM_NSS_LAST }; -typedef enum { +enum ldlm_ns_type { /** invalid type */ LDLM_NS_TYPE_UNKNOWN = 0, /** mdc namespace */ @@ -332,7 +334,7 @@ typedef enum { LDLM_NS_TYPE_MGC, /** mgs namespace */ LDLM_NS_TYPE_MGT, -} ldlm_ns_type_t; +}; /** * LDLM Namespace. @@ -373,7 +375,7 @@ struct ldlm_namespace { /** * Namespace connect flags supported by server (may be changed via - * /proc, LRU resize may be disabled/enabled). + * sysfs, LRU resize may be disabled/enabled). */ __u64 ns_connect_flags; @@ -439,7 +441,7 @@ struct ldlm_namespace { /** LDLM pool structure for this namespace */ struct ldlm_pool ns_pool; /** Definition of how eagerly unused locks will be released from LRU */ - ldlm_appetite_t ns_appetite; + enum ldlm_appetite ns_appetite; /** Limit of parallel AST RPC count. 
*/ unsigned ns_max_parallel_ast; @@ -465,7 +467,6 @@ struct ldlm_namespace { */ static inline int ns_connect_cancelset(struct ldlm_namespace *ns) { - LASSERT(ns != NULL); return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET); } @@ -474,14 +475,12 @@ static inline int ns_connect_cancelset(struct ldlm_namespace *ns) */ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns) { - LASSERT(ns != NULL); return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE); } static inline void ns_register_cancel(struct ldlm_namespace *ns, ldlm_cancel_for_recovery arg) { - LASSERT(ns != NULL); ns->ns_cancel_for_recovery = arg; } @@ -503,7 +502,8 @@ struct ldlm_glimpse_work { struct list_head gl_list; /* linkage to other gl work structs */ __u32 gl_flags;/* see LDLM_GL_WORK_* below */ union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in - * glimpse callback request */ + * glimpse callback request + */ }; /** The ldlm_glimpse_work is allocated on the stack and should not be freed. */ @@ -512,8 +512,9 @@ struct ldlm_glimpse_work { /** Interval node data for each LDLM_EXTENT lock. */ struct ldlm_interval { struct interval_node li_node; /* node for tree management */ - struct list_head li_group; /* the locks which have the same - * policy - group of the policy */ + struct list_head li_group; /* the locks which have the same + * policy - group of the policy + */ }; #define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node) @@ -527,7 +528,7 @@ struct ldlm_interval { struct ldlm_interval_tree { /** Tree size. */ int lit_size; - ldlm_mode_t lit_mode; /* lock mode */ + enum ldlm_mode lit_mode; /* lock mode */ struct interval_node *lit_root; /* actual ldlm_interval */ }; @@ -535,12 +536,13 @@ struct ldlm_interval_tree { #define LUSTRE_TRACKS_LOCK_EXP_REFS (0) /** Cancel flags. */ -typedef enum { +enum ldlm_cancel_flags { LCF_ASYNC = 0x1, /* Cancel locks asynchronously. 
*/ LCF_LOCAL = 0x2, /* Cancel locks locally, not notifing server */ LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST - * in the same RPC */ -} ldlm_cancel_flags_t; + * in the same RPC + */ +}; struct ldlm_flock { __u64 start; @@ -559,7 +561,7 @@ typedef union { struct ldlm_inodebits l_inodebits; } ldlm_policy_data_t; -void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type, +void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, const ldlm_wire_policy_data_t *wpolicy, ldlm_policy_data_t *lpolicy); @@ -637,11 +639,11 @@ struct ldlm_lock { * Requested mode. * Protected by lr_lock. */ - ldlm_mode_t l_req_mode; + enum ldlm_mode l_req_mode; /** * Granted mode, also protected by lr_lock. */ - ldlm_mode_t l_granted_mode; + enum ldlm_mode l_granted_mode; /** Lock completion handler pointer. Called when lock is granted. */ ldlm_completion_callback l_completion_ast; /** @@ -841,20 +843,19 @@ struct ldlm_resource { /** * protected by lr_lock - * @{ */ + * @{ + */ /** List of locks in granted state */ struct list_head lr_granted; /** * List of locks that could not be granted due to conflicts and - * that are waiting for conflicts to go away */ + * that are waiting for conflicts to go away + */ struct list_head lr_waiting; /** @} */ - /* XXX No longer needed? Remove ASAP */ - ldlm_mode_t lr_most_restr; - /** Type of locks this resource can hold. Only one type per resource. 
*/ - ldlm_type_t lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */ + enum ldlm_type lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */ /** Resource name */ struct ldlm_res_id lr_name; @@ -921,7 +922,7 @@ static inline int ldlm_lvbo_init(struct ldlm_resource *res) { struct ldlm_namespace *ns = ldlm_res_to_ns(res); - if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_init != NULL) + if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) return ns->ns_lvbo->lvbo_init(res); return 0; @@ -931,7 +932,7 @@ static inline int ldlm_lvbo_size(struct ldlm_lock *lock) { struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_size != NULL) + if (ns->ns_lvbo && ns->ns_lvbo->lvbo_size) return ns->ns_lvbo->lvbo_size(lock); return 0; @@ -941,10 +942,9 @@ static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len) { struct ldlm_namespace *ns = ldlm_lock_to_ns(lock); - if (ns->ns_lvbo != NULL) { - LASSERT(ns->ns_lvbo->lvbo_fill != NULL); + if (ns->ns_lvbo) return ns->ns_lvbo->lvbo_fill(lock, buf, len); - } + return 0; } @@ -1015,7 +1015,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, /** Non-rate-limited lock printing function for debugging purposes. */ #define LDLM_DEBUG(lock, fmt, a...) do { \ - if (likely(lock != NULL)) { \ + if (likely(lock)) { \ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL); \ ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, \ "### " fmt, ##a); \ @@ -1025,7 +1025,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, } while (0) typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags, - int first_enq, ldlm_error_t *err, + int first_enq, enum ldlm_error *err, struct list_head *work_list); /** @@ -1042,7 +1042,8 @@ typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *); * * LDLM provides for a way to iterate through every lock on a resource or * namespace or every resource in a namespace. 
- * @{ */ + * @{ + */ int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *, ldlm_iterator_t iter, void *data); /** @} ldlm_iterator */ @@ -1091,7 +1092,7 @@ ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags) struct ldlm_lock *lock; lock = __ldlm_handle2lock(h, flags); - if (lock != NULL) + if (lock) LDLM_LOCK_REF_DEL(lock); return lock; } @@ -1111,7 +1112,7 @@ static inline int ldlm_res_lvbo_update(struct ldlm_resource *res, return 0; } -int ldlm_error2errno(ldlm_error_t error); +int ldlm_error2errno(enum ldlm_error error); #if LUSTRE_TRACKS_LOCK_EXP_REFS void ldlm_dump_export_locks(struct obd_export *exp); @@ -1168,12 +1169,13 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode); void ldlm_lock_fail_match_locked(struct ldlm_lock *lock); void ldlm_lock_allow_match(struct ldlm_lock *lock); void ldlm_lock_allow_match_locked(struct ldlm_lock *lock); -ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, - const struct ldlm_res_id *, ldlm_type_t type, - ldlm_policy_data_t *, ldlm_mode_t mode, - struct lustre_handle *, int unref); -ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, - __u64 *bits); +enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, + const struct ldlm_res_id *, + enum ldlm_type type, ldlm_policy_data_t *, + enum ldlm_mode mode, struct lustre_handle *, + int unref); +enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh, + __u64 *bits); void ldlm_lock_cancel(struct ldlm_lock *lock); void ldlm_lock_dump_handle(int level, struct lustre_handle *); void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); @@ -1181,8 +1183,8 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); /* resource.c */ struct ldlm_namespace * ldlm_namespace_new(struct obd_device *obd, char *name, - ldlm_side_t client, ldlm_appetite_t apt, - ldlm_ns_type_t ns_type); + ldlm_side_t client, enum ldlm_appetite apt, + enum ldlm_ns_type ns_type); int 
ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags); void ldlm_namespace_get(struct ldlm_namespace *ns); void ldlm_namespace_put(struct ldlm_namespace *ns); @@ -1193,7 +1195,7 @@ void ldlm_debugfs_cleanup(void); struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, const struct ldlm_res_id *, - ldlm_type_t type, int create); + enum ldlm_type type, int create); int ldlm_resource_putref(struct ldlm_resource *res); void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head, @@ -1219,7 +1221,8 @@ int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *, * These AST handlers are typically used for server-side local locks and are * also used by client-side lock handlers to perform minimum level base * processing. - * @{ */ + * @{ + */ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data); int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); /** @} ldlm_local_ast */ @@ -1227,7 +1230,8 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data); /** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users. * These are typically used by client and server (*_local versions) * to obtain and release locks. 
- * @{ */ + * @{ + */ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, struct ldlm_enqueue_info *einfo, const struct ldlm_res_id *res_id, @@ -1244,29 +1248,32 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct list_head *cancels, int count); int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, - ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode, + enum ldlm_type type, __u8 with_policy, + enum ldlm_mode mode, __u64 *flags, void *lvb, __u32 lvb_len, struct lustre_handle *lockh, int rc); int ldlm_cli_update_pool(struct ptlrpc_request *req); int ldlm_cli_cancel(struct lustre_handle *lockh, - ldlm_cancel_flags_t cancel_flags); + enum ldlm_cancel_flags cancel_flags); int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *, - ldlm_cancel_flags_t flags, void *opaque); + enum ldlm_cancel_flags flags, void *opaque); int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque); int ldlm_cancel_resource_local(struct ldlm_resource *res, struct list_head *cancels, ldlm_policy_data_t *policy, - ldlm_mode_t mode, __u64 lock_flags, - ldlm_cancel_flags_t cancel_flags, void *opaque); + enum ldlm_mode mode, __u64 lock_flags, + enum ldlm_cancel_flags cancel_flags, + void *opaque); int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, - ldlm_cancel_flags_t flags); + enum ldlm_cancel_flags flags); int ldlm_cli_cancel_list(struct list_head *head, int count, - struct ptlrpc_request *req, ldlm_cancel_flags_t flags); + struct ptlrpc_request *req, + enum ldlm_cancel_flags flags); /** @} ldlm_cli_api */ /* mds/handler.c */ diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h index 0d3ed87d3..7f2ba2ffe 100644 --- 
a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h +++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h @@ -57,7 +57,8 @@ /** * Server placed lock on granted list, or a recovering client wants the - * lock added to the granted list, no questions asked. */ + * lock added to the granted list, no questions asked. + */ #define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL /* bit 1 */ #define ldlm_is_block_granted(_l) LDLM_TEST_FLAG((_l), 1ULL << 1) #define ldlm_set_block_granted(_l) LDLM_SET_FLAG((_l), 1ULL << 1) @@ -65,7 +66,8 @@ /** * Server placed lock on conv list, or a recovering client wants the lock - * added to the conv list, no questions asked. */ + * added to the conv list, no questions asked. + */ #define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL /* bit 2 */ #define ldlm_is_block_conv(_l) LDLM_TEST_FLAG((_l), 1ULL << 2) #define ldlm_set_block_conv(_l) LDLM_SET_FLAG((_l), 1ULL << 2) @@ -73,7 +75,8 @@ /** * Server placed lock on wait list, or a recovering client wants the lock - * added to the wait list, no questions asked. */ + * added to the wait list, no questions asked. + */ #define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL /* bit 3 */ #define ldlm_is_block_wait(_l) LDLM_TEST_FLAG((_l), 1ULL << 3) #define ldlm_set_block_wait(_l) LDLM_SET_FLAG((_l), 1ULL << 3) @@ -87,7 +90,8 @@ /** * Lock is being replayed. This could probably be implied by the fact that - * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */ + * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. + */ #define LDLM_FL_REPLAY 0x0000000000000100ULL /* bit 8 */ #define ldlm_is_replay(_l) LDLM_TEST_FLAG((_l), 1ULL << 8) #define ldlm_set_replay(_l) LDLM_SET_FLAG((_l), 1ULL << 8) @@ -125,7 +129,8 @@ /** * Server told not to wait if blocked. For AGL, OST will not send glimpse - * callback. */ + * callback. 
+ */ #define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL /* bit 18 */ #define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG((_l), 1ULL << 18) #define ldlm_set_block_nowait(_l) LDLM_SET_FLAG((_l), 1ULL << 18) @@ -141,7 +146,8 @@ * Immediately cancel such locks when they block some other locks. Send * cancel notification to original lock holder, but expect no reply. This * is for clients (like liblustre) that cannot be expected to reliably - * response to blocking AST. */ + * response to blocking AST. + */ #define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL /* bit 23 */ #define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG((_l), 1ULL << 23) #define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG((_l), 1ULL << 23) @@ -164,7 +170,8 @@ /** * Used for marking lock as a target for -EINTR while cp_ast sleep emulation - * + race with upcoming bl_ast. */ + * + race with upcoming bl_ast. + */ #define LDLM_FL_FAIL_LOC 0x0000000100000000ULL /* bit 32 */ #define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG((_l), 1ULL << 32) #define ldlm_set_fail_loc(_l) LDLM_SET_FLAG((_l), 1ULL << 32) @@ -172,7 +179,8 @@ /** * Used while processing the unused list to know that we have already - * handled this lock and decided to skip it. */ + * handled this lock and decided to skip it. + */ #define LDLM_FL_SKIPPED 0x0000000200000000ULL /* bit 33 */ #define ldlm_is_skipped(_l) LDLM_TEST_FLAG((_l), 1ULL << 33) #define ldlm_set_skipped(_l) LDLM_SET_FLAG((_l), 1ULL << 33) @@ -231,7 +239,8 @@ * The proper fix is to do the granting inside of the completion AST, * which can be replaced with a LVB-aware wrapping function for OSC locks. * That change is pretty high-risk, though, and would need a lot more - * testing. */ + * testing. + */ #define LDLM_FL_LVB_READY 0x0000020000000000ULL /* bit 41 */ #define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG((_l), 1ULL << 41) #define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG((_l), 1ULL << 41) @@ -243,7 +252,8 @@ * dirty pages. It can remain on the granted list during this whole time. 
* Threads racing to update the KMS after performing their writeback need * to know to exclude each other's locks from the calculation as they walk - * the granted list. */ + * the granted list. + */ #define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL /* bit 42 */ #define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG((_l), 1ULL << 42) #define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG((_l), 1ULL << 42) @@ -263,7 +273,8 @@ /** * optimization hint: LDLM can run blocking callback from current context - * w/o involving separate thread. in order to decrease cs rate */ + * w/o involving separate thread. in order to decrease cs rate + */ #define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL /* bit 45 */ #define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG((_l), 1ULL << 45) #define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG((_l), 1ULL << 45) @@ -280,7 +291,8 @@ * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is * dropped to let ldlm_callback_handler() return EINVAL to the server. It * is used when ELC RPC is already prepared and is waiting for rpc_lock, - * too late to send a separate CANCEL RPC. */ + * too late to send a separate CANCEL RPC. + */ #define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */ #define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46) #define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46) @@ -295,7 +307,8 @@ /** * Don't put lock into the LRU list, so that it is not canceled due * to aging. Used by MGC locks, they are cancelled only at unmount or - * by callback. */ + * by callback. + */ #define LDLM_FL_NO_LRU 0x0001000000000000ULL /* bit 48 */ #define ldlm_is_no_lru(_l) LDLM_TEST_FLAG((_l), 1ULL << 48) #define ldlm_set_no_lru(_l) LDLM_SET_FLAG((_l), 1ULL << 48) @@ -304,7 +317,8 @@ /** * Set for locks that failed and where the server has been notified. * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. 
+ */ #define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL /* bit 49 */ #define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG((_l), 1ULL << 49) #define ldlm_set_fail_notified(_l) LDLM_SET_FLAG((_l), 1ULL << 49) @@ -315,7 +329,8 @@ * be destroyed when last reference to them is released. Set by * ldlm_lock_destroy_internal(). * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. + */ #define LDLM_FL_DESTROYED 0x0004000000000000ULL /* bit 50 */ #define ldlm_is_destroyed(_l) LDLM_TEST_FLAG((_l), 1ULL << 50) #define ldlm_set_destroyed(_l) LDLM_SET_FLAG((_l), 1ULL << 50) @@ -333,7 +348,8 @@ * NB: compared with check_res_locked(), checking this bit is cheaper. * Also, spin_is_locked() is deprecated for kernel code; one reason is * because it works only for SMP so user needs to add extra macros like - * LASSERT_SPIN_LOCKED for uniprocessor kernels. */ + * LASSERT_SPIN_LOCKED for uniprocessor kernels. + */ #define LDLM_FL_RES_LOCKED 0x0010000000000000ULL /* bit 52 */ #define ldlm_is_res_locked(_l) LDLM_TEST_FLAG((_l), 1ULL << 52) #define ldlm_set_res_locked(_l) LDLM_SET_FLAG((_l), 1ULL << 52) @@ -343,7 +359,8 @@ * It's set once we call ldlm_add_waiting_lock_res_locked() to start the * lock-timeout timer and it will never be reset. * - * Protected by lock and resource locks. */ + * Protected by lock and resource locks. + */ #define LDLM_FL_WAITED 0x0020000000000000ULL /* bit 53 */ #define ldlm_is_waited(_l) LDLM_TEST_FLAG((_l), 1ULL << 53) #define ldlm_set_waited(_l) LDLM_SET_FLAG((_l), 1ULL << 53) @@ -365,10 +382,10 @@ #define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0) /** set a ldlm_lock flag bit */ -#define LDLM_SET_FLAG(_l, _b) (((_l)->l_flags |= (_b)) +#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b)) /** clear a ldlm_lock flag bit */ -#define LDLM_CLEAR_FLAG(_l, _b) (((_l)->l_flags &= ~(_b)) +#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b)) /** Mask of flags inherited from parent lock when doing intents. 
*/ #define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h index 311e5aa9b..3014d27e6 100644 --- a/drivers/staging/lustre/lustre/include/lustre_export.h +++ b/drivers/staging/lustre/lustre/include/lustre_export.h @@ -50,62 +50,6 @@ #include "lustre/lustre_idl.h" #include "lustre_dlm.h" -struct mds_client_data; -struct mdt_client_data; -struct mds_idmap_table; -struct mdt_idmap_table; - -/** - * Target-specific export data - */ -struct tg_export_data { - /** Protects led_lcd below */ - struct mutex ted_lcd_lock; - /** Per-client data for each export */ - struct lsd_client_data *ted_lcd; - /** Offset of record in last_rcvd file */ - loff_t ted_lr_off; - /** Client index in last_rcvd file */ - int ted_lr_idx; -}; - -/** - * MDT-specific export data - */ -struct mdt_export_data { - struct tg_export_data med_ted; - /** List of all files opened by client on this MDT */ - struct list_head med_open_head; - spinlock_t med_open_lock; /* med_open_head, mfd_list */ - /** Bitmask of all ibit locks this MDT understands */ - __u64 med_ibits_known; - struct mutex med_idmap_mutex; - struct lustre_idmap_table *med_idmap; -}; - -struct ec_export_data { /* echo client */ - struct list_head eced_locks; -}; - -/* In-memory access to client data from OST struct */ -/** Filter (oss-side) specific import data */ -struct filter_export_data { - struct tg_export_data fed_ted; - spinlock_t fed_lock; /**< protects fed_mod_list */ - long fed_dirty; /* in bytes */ - long fed_grant; /* in bytes */ - struct list_head fed_mod_list; /* files being modified */ - int fed_mod_count;/* items in fed_writing list */ - long fed_pending; /* bytes just being written */ - __u32 fed_group; - __u8 fed_pagesize; /* log2 of client page size */ -}; - -struct mgs_export_data { - struct list_head med_clients; /* mgc fs client via this exp */ - spinlock_t med_lock; /* protect med_clients */ -}; - enum 
obd_option { OBD_OPT_FORCE = 0x0001, OBD_OPT_FAILOVER = 0x0002, @@ -179,7 +123,8 @@ struct obd_export { */ spinlock_t exp_lock; /** Compatibility flags for this export are embedded into - * exp_connect_data */ + * exp_connect_data + */ struct obd_connect_data exp_connect_data; enum obd_option exp_flags; unsigned long exp_failed:1, @@ -200,22 +145,8 @@ struct obd_export { /** blocking dlm lock list, protected by exp_bl_list_lock */ struct list_head exp_bl_list; spinlock_t exp_bl_list_lock; - - /** Target specific data */ - union { - struct tg_export_data eu_target_data; - struct mdt_export_data eu_mdt_data; - struct filter_export_data eu_filter_data; - struct ec_export_data eu_ec_data; - struct mgs_export_data eu_mgs_data; - } u; }; -#define exp_target_data u.eu_target_data -#define exp_mdt_data u.eu_mdt_data -#define exp_filter_data u.eu_filter_data -#define exp_ec_data u.eu_ec_data - static inline __u64 *exp_connect_flags_ptr(struct obd_export *exp) { return &exp->exp_connect_data.ocd_connect_flags; @@ -228,7 +159,6 @@ static inline __u64 exp_connect_flags(struct obd_export *exp) static inline int exp_max_brw_size(struct obd_export *exp) { - LASSERT(exp != NULL); if (exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE) return exp->exp_connect_data.ocd_brw_size; @@ -242,19 +172,16 @@ static inline int exp_connect_multibulk(struct obd_export *exp) static inline int exp_connect_cancelset(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_CANCELSET); } static inline int exp_connect_lru_resize(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE); } static inline int exp_connect_rmtclient(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_RMT_CLIENT); } @@ -268,14 +195,11 @@ static inline int client_is_remote(struct obd_export *exp) static inline int exp_connect_vbr(struct obd_export *exp) { - LASSERT(exp != NULL); - 
LASSERT(exp->exp_connection); return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR); } static inline int exp_connect_som(struct obd_export *exp) { - LASSERT(exp != NULL); return !!(exp_connect_flags(exp) & OBD_CONNECT_SOM); } @@ -288,7 +212,6 @@ static inline int imp_connect_lru_resize(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; return !!(ocd->ocd_connect_flags & OBD_CONNECT_LRU_RESIZE); } @@ -300,7 +223,6 @@ static inline int exp_connect_layout(struct obd_export *exp) static inline bool exp_connect_lvb_type(struct obd_export *exp) { - LASSERT(exp != NULL); if (exp_connect_flags(exp) & OBD_CONNECT_LVB_TYPE) return true; else @@ -311,7 +233,6 @@ static inline bool imp_connect_lvb_type(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; if (ocd->ocd_connect_flags & OBD_CONNECT_LVB_TYPE) return true; @@ -331,13 +252,19 @@ static inline bool imp_connect_disp_stripe(struct obd_import *imp) { struct obd_connect_data *ocd; - LASSERT(imp != NULL); ocd = &imp->imp_connect_data; return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE; } struct obd_export *class_conn2export(struct lustre_handle *conn); +#define KKUC_CT_DATA_MAGIC 0x092013cea +struct kkuc_ct_data { + __u32 kcd_magic; + struct obd_uuid kcd_uuid; + __u32 kcd_archive; +}; + /** @} export */ #endif /* __EXPORT_H */ diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h index 9b1a9c695..ab4a92390 100644 --- a/drivers/staging/lustre/lustre/include/lustre_fid.h +++ b/drivers/staging/lustre/lustre/include/lustre_fid.h @@ -251,7 +251,8 @@ static inline void lu_local_name_obj_fid(struct lu_fid *fid, __u32 oid) /* For new FS (>= 2.4), the root FID will be changed to * [FID_SEQ_ROOT:1:0], for existing FS, (upgraded to 2.4), - * the root FID will still be IGIF */ + * the root FID will still be IGIF + */ static inline int fid_is_root(const 
struct lu_fid *fid) { return unlikely((fid_seq(fid) == FID_SEQ_ROOT && @@ -294,7 +295,8 @@ static inline int fid_is_namespace_visible(const struct lu_fid *fid) const __u64 seq = fid_seq(fid); /* Here, we cannot distinguish whether the normal FID is for OST - * object or not. It is caller's duty to check more if needed. */ + * object or not. It is caller's duty to check more if needed. + */ return (!fid_is_last_id(fid) && (fid_seq_is_norm(seq) || fid_seq_is_igif(seq))) || fid_is_root(fid) || fid_is_dot_lustre(fid); @@ -433,7 +435,7 @@ fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res) */ static inline struct ldlm_res_id * fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid, - struct ldlm_res_id *res) + struct ldlm_res_id *res) { fid_build_reg_res_name(glb_fid, res); res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid); @@ -516,7 +518,8 @@ static inline int ostid_res_name_eq(struct ost_id *oi, struct ldlm_res_id *name) { /* Note: it is just a trick here to save some effort, probably the - * correct way would be turn them into the FID and compare */ + * correct way would be turn them into the FID and compare + */ if (fid_seq_is_mdt0(ostid_seq(oi))) { return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) && name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi); @@ -589,12 +592,14 @@ static inline __u64 fid_flatten(const struct lu_fid *fid) static inline __u32 fid_hash(const struct lu_fid *f, int bits) { /* all objects with same id and different versions will belong to same - * collisions list. */ + * collisions list. + */ return hash_long(fid_flatten(f), bits); } /** - * map fid to 32 bit value for ino on 32bit systems. */ + * map fid to 32 bit value for ino on 32bit systems. 
+ */ static inline __u32 fid_flatten32(const struct lu_fid *fid) { __u32 ino; @@ -611,7 +616,8 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid) * that inodes generated at about the same time have a reduced chance * of collisions. This will give a period of 2^12 = 1024 unique clients * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects - * (from OID), or up to 128M inodes without collisions for new files. */ + * (from OID), or up to 128M inodes without collisions for new files. + */ ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) + (seq >> (64 - (40-8)) & 0xffffff00) + (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8); diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h index 551162624..4cf2b0e61 100644 --- a/drivers/staging/lustre/lustre/include/lustre_fld.h +++ b/drivers/staging/lustre/lustre/include/lustre_fld.h @@ -71,50 +71,41 @@ struct lu_fld_target { struct lu_server_fld { /** * super sequence controller export, needed to forward fld - * lookup request. */ + * lookup request. + */ struct obd_export *lsf_control_exp; - /** - * Client FLD cache. */ + /** Client FLD cache. */ struct fld_cache *lsf_cache; - /** - * Protect index modifications */ + /** Protect index modifications */ struct mutex lsf_lock; - /** - * Fld service name in form "fld-srv-lustre-MDTXXX" */ + /** Fld service name in form "fld-srv-lustre-MDTXXX" */ char lsf_name[LUSTRE_MDT_MAXNAMELEN]; }; struct lu_client_fld { - /** - * Client side debugfs entry. */ + /** Client side debugfs entry. */ struct dentry *lcf_debugfs_entry; - /** - * List of exports client FLD knows about. */ + /** List of exports client FLD knows about. */ struct list_head lcf_targets; - /** - * Current hash to be used to chose an export. */ + /** Current hash to be used to chose an export. */ struct lu_fld_hash *lcf_hash; - /** - * Exports count. */ + /** Exports count. 
*/ int lcf_count; - /** - * Lock protecting exports list and fld_hash. */ + /** Lock protecting exports list and fld_hash. */ spinlock_t lcf_lock; - /** - * Client FLD cache. */ + /** Client FLD cache. */ struct fld_cache *lcf_cache; - /** - * Client fld debugfs entry name. */ + /** Client fld debugfs entry name. */ char lcf_name[LUSTRE_MDT_MAXNAMELEN]; int lcf_flags; diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h index f39780ae4..27f169d2e 100644 --- a/drivers/staging/lustre/lustre/include/lustre_handles.h +++ b/drivers/staging/lustre/lustre/include/lustre_handles.h @@ -65,7 +65,8 @@ struct portals_handle_ops { * * Now you're able to assign the results of cookie2handle directly to an * ldlm_lock. If it's not at the top, you'll want to use container_of() - * to compute the start of the structure based on the handle field. */ + * to compute the start of the structure based on the handle field. + */ struct portals_handle { struct list_head h_link; __u64 h_cookie; diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h index 4e4230e94..dac2d84d8 100644 --- a/drivers/staging/lustre/lustre/include/lustre_import.h +++ b/drivers/staging/lustre/lustre/include/lustre_import.h @@ -292,7 +292,8 @@ struct obd_import { /* need IR MNE swab */ imp_need_mne_swab:1, /* import must be reconnected instead of - * chose new connection */ + * chosing new connection + */ imp_force_reconnect:1, /* import has tried to connect with server */ imp_connect_tried:1; diff --git a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h new file mode 100644 index 000000000..970610b6d --- /dev/null +++ b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h @@ -0,0 +1,55 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2013 Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * + * Author: Nathan Rutman <nathan.rutman@sun.com> + * + * Kernel <-> userspace communication routines. + * The definitions below are used in the kernel and userspace. 
+ */ + +#ifndef __LUSTRE_KERNELCOMM_H__ +#define __LUSTRE_KERNELCOMM_H__ + +/* For declarations shared with userspace */ +#include "uapi_kernelcomm.h" + +/* prototype for callback function on kuc groups */ +typedef int (*libcfs_kkuc_cb_t)(void *data, void *cb_arg); + +/* Kernel methods */ +int libcfs_kkuc_msg_put(struct file *fp, void *payload); +int libcfs_kkuc_group_put(unsigned int group, void *payload); +int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group, + void *data, size_t data_len); +int libcfs_kkuc_group_rem(int uid, unsigned int group); +int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func, + void *cb_arg); + +#endif /* __LUSTRE_KERNELCOMM_H__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h index 428469fec..f2223d558 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lib.h +++ b/drivers/staging/lustre/lustre/include/lustre_lib.h @@ -153,9 +153,9 @@ struct obd_ioctl_data { /* buffers the kernel will treat as user pointers */ __u32 ioc_plen1; - char *ioc_pbuf1; + void __user *ioc_pbuf1; __u32 ioc_plen2; - char *ioc_pbuf2; + void __user *ioc_pbuf2; /* inline buffers for various arguments */ __u32 ioc_inllen1; @@ -252,8 +252,8 @@ static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data) #include "obd_support.h" /* function defined in lustre/obdclass/<platform>/<platform>-module.c */ -int obd_ioctl_getdata(char **buf, int *len, void *arg); -int obd_ioctl_popdata(void *arg, void *data, int len); +int obd_ioctl_getdata(char **buf, int *len, void __user *arg); +int obd_ioctl_popdata(void __user *arg, void *data, int len); static inline void obd_ioctl_freedata(char *buf, int len) { @@ -365,10 +365,10 @@ static inline void obd_ioctl_freedata(char *buf, int len) /* OBD_IOC_LLOG_CATINFO is deprecated */ #define OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) -#define 
ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) -#define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) +/* #define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) */ +/* #define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) */ #define OBD_IOC_GET_OBJ_VERSION _IOR('f', 210, OBD_IOC_DATA_TYPE) @@ -387,7 +387,8 @@ static inline void obd_ioctl_freedata(char *buf, int len) */ /* Until such time as we get_info the per-stripe maximum from the OST, - * we define this to be 2T - 4k, which is the ext3 maxbytes. */ + * we define this to be 2T - 4k, which is the ext3 maxbytes. + */ #define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL /* Special values for remove LOV EA from disk */ @@ -540,7 +541,7 @@ do { \ l_add_wait(&wq, &__wait); \ \ /* Block all signals (just the non-fatal ones if no timeout). */ \ - if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \ + if (info->lwi_on_signal && (__timeout == 0 || __allow_intr)) \ __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \ else \ __blocked = cfs_block_sigsinv(0); \ @@ -562,13 +563,13 @@ do { \ __timeout = cfs_time_sub(__timeout, \ cfs_time_sub(interval, remaining));\ if (__timeout == 0) { \ - if (info->lwi_on_timeout == NULL || \ + if (!info->lwi_on_timeout || \ info->lwi_on_timeout(info->lwi_cb_data)) { \ ret = -ETIMEDOUT; \ break; \ } \ /* Take signals after the timeout expires. 
*/ \ - if (info->lwi_on_signal != NULL) \ + if (info->lwi_on_signal) \ (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\ } \ } \ @@ -578,7 +579,7 @@ do { \ if (condition) \ break; \ if (cfs_signal_pending()) { \ - if (info->lwi_on_signal != NULL && \ + if (info->lwi_on_signal && \ (__timeout == 0 || __allow_intr)) { \ if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \ info->lwi_on_signal(info->lwi_cb_data);\ diff --git a/drivers/staging/lustre/lustre/include/lustre_lite.h b/drivers/staging/lustre/lustre/include/lustre_lite.h index f6d7aae3a..fcc5ebbce 100644 --- a/drivers/staging/lustre/lustre/include/lustre_lite.h +++ b/drivers/staging/lustre/lustre/include/lustre_lite.h @@ -53,56 +53,8 @@ #define LL_MAX_BLKSIZE_BITS (22) #define LL_MAX_BLKSIZE (1UL<<LL_MAX_BLKSIZE_BITS) -#include "lustre/lustre_user.h" - -struct lustre_rw_params { - int lrp_lock_mode; - ldlm_policy_data_t lrp_policy; - u32 lrp_brw_flags; - int lrp_ast_flags; -}; - -/* - * XXX nikita: this function lives in the header because it is used by both - * llite kernel module and liblustre library, and there is no (?) better place - * to put it in. - */ -static inline void lustre_build_lock_params(int cmd, unsigned long open_flags, - __u64 connect_flags, - loff_t pos, ssize_t len, - struct lustre_rw_params *params) -{ - params->lrp_lock_mode = (cmd == OBD_BRW_READ) ? LCK_PR : LCK_PW; - params->lrp_brw_flags = 0; - - params->lrp_policy.l_extent.start = pos; - params->lrp_policy.l_extent.end = pos + len - 1; - /* - * for now O_APPEND always takes local locks. - */ - if (cmd == OBD_BRW_WRITE && (open_flags & O_APPEND)) { - params->lrp_policy.l_extent.start = 0; - params->lrp_policy.l_extent.end = OBD_OBJECT_EOF; - } else if (LIBLUSTRE_CLIENT && (connect_flags & OBD_CONNECT_SRVLOCK)) { - /* - * liblustre: OST-side locking for all non-O_APPEND - * reads/writes. - */ - params->lrp_lock_mode = LCK_NL; - params->lrp_brw_flags = OBD_BRW_SRVLOCK; - } else { - /* - * nothing special for the kernel. 
In the future llite may use - * OST-side locks for small writes into highly contended - * files. - */ - } - params->lrp_ast_flags = (open_flags & O_NONBLOCK) ? - LDLM_FL_BLOCK_NOWAIT : 0; -} - /* - * This is embedded into liblustre and llite super-blocks to keep track of + * This is embedded into llite super-blocks to keep track of * connect flags (capabilities) supported by all imports given mount is * connected to. */ diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h index e4fc8b5e1..49618e186 100644 --- a/drivers/staging/lustre/lustre/include/lustre_log.h +++ b/drivers/staging/lustre/lustre/include/lustre_log.h @@ -241,7 +241,8 @@ struct llog_ctxt { struct obd_llog_group *loc_olg; /* group containing that ctxt */ struct obd_export *loc_exp; /* parent "disk" export (e.g. MDS) */ struct obd_import *loc_imp; /* to use in RPC's: can be backward - pointing import */ + * pointing import + */ struct llog_operations *loc_logops; struct llog_handle *loc_handle; struct mutex loc_mutex; /* protect loc_imp */ @@ -255,7 +256,7 @@ struct llog_ctxt { static inline int llog_handle2ops(struct llog_handle *loghandle, struct llog_operations **lop) { - if (loghandle == NULL || loghandle->lgh_logops == NULL) + if (!loghandle || !loghandle->lgh_logops) return -EINVAL; *lop = loghandle->lgh_logops; @@ -272,7 +273,7 @@ static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt) static inline void llog_ctxt_put(struct llog_ctxt *ctxt) { - if (ctxt == NULL) + if (!ctxt) return; LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON); CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt, @@ -294,7 +295,7 @@ static inline int llog_group_set_ctxt(struct obd_llog_group *olg, LASSERT(index >= 0 && index < LLOG_MAX_CTXTS); spin_lock(&olg->olg_lock); - if (olg->olg_ctxts[index] != NULL) { + if (olg->olg_ctxts[index]) { spin_unlock(&olg->olg_lock); return -EEXIST; } @@ -311,7 +312,7 @@ static inline struct 
llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg, LASSERT(index >= 0 && index < LLOG_MAX_CTXTS); spin_lock(&olg->olg_lock); - if (olg->olg_ctxts[index] == NULL) + if (!olg->olg_ctxts[index]) ctxt = NULL; else ctxt = llog_ctxt_get(olg->olg_ctxts[index]); @@ -335,7 +336,7 @@ static inline struct llog_ctxt *llog_get_context(struct obd_device *obd, static inline int llog_group_ctxt_null(struct obd_llog_group *olg, int index) { - return (olg->olg_ctxts[index] == NULL); + return (!olg->olg_ctxts[index]); } static inline int llog_ctxt_null(struct obd_device *obd, int index) @@ -354,7 +355,7 @@ static inline int llog_next_block(const struct lu_env *env, rc = llog_handle2ops(loghandle, &lop); if (rc) return rc; - if (lop->lop_next_block == NULL) + if (!lop->lop_next_block) return -EOPNOTSUPP; rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx, diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h index 3da373315..af77eb359 100644 --- a/drivers/staging/lustre/lustre/include/lustre_mdc.h +++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h @@ -81,8 +81,8 @@ static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck) static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, struct lookup_intent *it) { - if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT)) + if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || + it->it_op == IT_LAYOUT)) return; /* This would normally block until the existing request finishes. @@ -90,7 +90,8 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, * done, then set rpcl_it to MDC_FAKE_RPCL_IT. Once that is set * it will only be cleared when all fake requests are finished. * Only when all fake requests are finished can normal requests - * be sent, to ensure they are recoverable again. */ + * be sent, to ensure they are recoverable again. 
+ */ again: mutex_lock(&lck->rpcl_mutex); @@ -105,22 +106,23 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck, * just turned off but there are still requests in progress. * Wait until they finish. It doesn't need to be efficient * in this extremely rare case, just have low overhead in - * the common case when it isn't true. */ + * the common case when it isn't true. + */ while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) { mutex_unlock(&lck->rpcl_mutex); schedule_timeout(cfs_time_seconds(1) / 4); goto again; } - LASSERT(lck->rpcl_it == NULL); + LASSERT(!lck->rpcl_it); lck->rpcl_it = it; } static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck, struct lookup_intent *it) { - if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || - it->it_op == IT_LAYOUT)) + if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP || + it->it_op == IT_LAYOUT)) return; if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */ @@ -153,12 +155,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp, if (cli->cl_max_mds_easize < body->max_mdsize) { cli->cl_max_mds_easize = body->max_mdsize; cli->cl_default_mds_easize = - min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE); + min_t(__u32, body->max_mdsize, PAGE_SIZE); } if (cli->cl_max_mds_cookiesize < body->max_cookiesize) { cli->cl_max_mds_cookiesize = body->max_cookiesize; cli->cl_default_mds_cookiesize = - min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE); + min_t(__u32, body->max_cookiesize, PAGE_SIZE); } } } diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h index d834ddd81..69586a522 100644 --- a/drivers/staging/lustre/lustre/include/lustre_net.h +++ b/drivers/staging/lustre/lustre/include/lustre_net.h @@ -76,7 +76,8 @@ * In order for the client and server to properly negotiate the maximum * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two * value. 
The client is free to limit the actual RPC size for any bulk - * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */ + * transfer via cl_max_pages_per_rpc to some non-power-of-two value. + */ #define PTLRPC_BULK_OPS_BITS 2 #define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS) /** @@ -85,7 +86,8 @@ * protocol limitation on the maximum RPC size that can be used by any * RPC sent to that server in the future. Instead, the server should * use the negotiated per-client ocd_brw_size to determine the bulk - * RPC count. */ + * RPC count. + */ #define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1)) /** @@ -97,21 +99,21 @@ */ #define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS) #define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS) -#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT) #define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS) #define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) -#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT) #define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE -#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT) #define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! 
*/ # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) # error "PTLRPC_MAX_BRW_PAGES isn't a power of two" # endif -# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE)) -# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE" +# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE)) +# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE" # endif # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) # error "PTLRPC_MAX_BRW_SIZE too big" @@ -419,16 +421,18 @@ struct ptlrpc_reply_state { /** A spinlock to protect the reply state flags */ spinlock_t rs_lock; /** Reply state flags */ - unsigned long rs_difficult:1; /* ACK/commit stuff */ + unsigned long rs_difficult:1; /* ACK/commit stuff */ unsigned long rs_no_ack:1; /* no ACK, even for - difficult requests */ + * difficult requests + */ unsigned long rs_scheduled:1; /* being handled? */ unsigned long rs_scheduled_ever:1;/* any schedule attempts? */ unsigned long rs_handled:1; /* been handled yet? */ unsigned long rs_on_net:1; /* reply_out_callback pending? */ unsigned long rs_prealloc:1; /* rs from prealloc list */ unsigned long rs_committed:1;/* the transaction was committed - * and the rs was dispatched */ + * and the rs was dispatched + */ /** Size of the state */ int rs_size; /** opcode */ @@ -463,7 +467,7 @@ struct ptlrpc_reply_state { /** Handles of locks awaiting client reply ACK */ struct lustre_handle rs_locks[RS_MAX_LOCKS]; /** Lock modes of locks in \a rs_locks */ - ldlm_mode_t rs_modes[RS_MAX_LOCKS]; + enum ldlm_mode rs_modes[RS_MAX_LOCKS]; }; struct ptlrpc_thread; @@ -1181,7 +1185,7 @@ struct nrs_fifo_req { * purpose of this object is to hold references to the request's resources * for the lifetime of the request, and to hold properties that policies use * use for determining the request's scheduling priority. - * */ + */ struct ptlrpc_nrs_request { /** * The request's resource hierarchy. 
@@ -1321,15 +1325,17 @@ struct ptlrpc_request { /* do not resend request on -EINPROGRESS */ rq_no_retry_einprogress:1, /* allow the req to be sent if the import is in recovery - * status */ + * status + */ rq_allow_replay:1; unsigned int rq_nr_resend; enum rq_phase rq_phase; /* one of RQ_PHASE_* */ enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */ - atomic_t rq_refcount;/* client-side refcount for SENT race, - server-side refcount for multiple replies */ + atomic_t rq_refcount; /* client-side refcount for SENT race, + * server-side refcount for multiple replies + */ /** Portal to which this request would be sent */ short rq_request_portal; /* XXX FIXME bug 249 */ @@ -1363,7 +1369,8 @@ struct ptlrpc_request { /** * security and encryption data - * @{ */ + * @{ + */ struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */ struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */ struct list_head rq_ctx_chain; /**< link to waited ctx */ @@ -1477,7 +1484,8 @@ struct ptlrpc_request { /** when request must finish. volatile * so that servers' early reply updates to the deadline aren't - * kept in per-cpu cache */ + * kept in per-cpu cache + */ volatile time64_t rq_deadline; /** when req reply unlink must finish. */ time64_t rq_reply_deadline; @@ -1518,7 +1526,7 @@ struct ptlrpc_request { static inline int ptlrpc_req_interpret(const struct lu_env *env, struct ptlrpc_request *req, int rc) { - if (req->rq_interpret_reply != NULL) { + if (req->rq_interpret_reply) { req->rq_status = req->rq_interpret_reply(env, req, &req->rq_async_args, rc); @@ -1678,7 +1686,8 @@ do { \ /** * This is the debug print function you need to use to print request structure * content into lustre debug log. - * for most callers (level is a constant) this is resolved at compile time */ + * for most callers (level is a constant) this is resolved at compile time + */ #define DEBUG_REQ(level, req, fmt, args...) 
\ do { \ if ((level) & (D_ERROR | D_WARNING)) { \ @@ -1947,7 +1956,7 @@ struct ptlrpc_service_ops { * or general metadata service for MDS. */ struct ptlrpc_service { - /** serialize /proc operations */ + /** serialize sysfs operations */ spinlock_t srv_lock; /** most often accessed fields */ /** chain thru all services */ @@ -2101,7 +2110,8 @@ struct ptlrpc_service_part { /** NRS head for regular requests */ struct ptlrpc_nrs scp_nrs_reg; /** NRS head for HP requests; this is only valid for services that can - * handle HP requests */ + * handle HP requests + */ struct ptlrpc_nrs *scp_nrs_hp; /** AT stuff */ @@ -2141,8 +2151,8 @@ struct ptlrpc_service_part { #define ptlrpc_service_for_each_part(part, i, svc) \ for (i = 0; \ i < (svc)->srv_ncpts && \ - (svc)->srv_parts != NULL && \ - ((part) = (svc)->srv_parts[i]) != NULL; i++) + (svc)->srv_parts && \ + ((part) = (svc)->srv_parts[i]); i++) /** * Declaration of ptlrpcd control structure @@ -2259,7 +2269,6 @@ static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc, static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc, const struct ptlrpc_nrs_pol_desc *desc) { - LASSERT(desc->pd_compat_svc_name != NULL); return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0; } @@ -2303,7 +2312,6 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req) struct ptlrpc_bulk_desc *desc; int rc; - LASSERT(req != NULL); desc = req->rq_bulk; if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) && @@ -2374,14 +2382,14 @@ void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req); struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp, const struct req_format *format); struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp, - struct ptlrpc_request_pool *, - const struct req_format *format); + struct ptlrpc_request_pool *, + const struct req_format *); void ptlrpc_request_free(struct ptlrpc_request *request); int ptlrpc_request_pack(struct ptlrpc_request 
*request, __u32 version, int opcode); -struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp, - const struct req_format *format, - __u32 version, int opcode); +struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *, + const struct req_format *, + __u32, int); int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, __u32 version, int opcode, char **bufs, struct ptlrpc_cli_ctx *ctx); @@ -2462,7 +2470,8 @@ struct ptlrpc_service_thr_conf { /* "soft" limit for total threads number */ unsigned int tc_nthrs_max; /* user specified threads number, it will be validated due to - * other members of this structure. */ + * other members of this structure. + */ unsigned int tc_nthrs_user; /* set NUMA node affinity for service threads */ unsigned int tc_cpu_affinity; @@ -2500,14 +2509,12 @@ struct ptlrpc_service_conf { */ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs); void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs); -struct ptlrpc_service *ptlrpc_register_service( - struct ptlrpc_service_conf *conf, - struct kset *parent, - struct dentry *debugfs_entry); +struct ptlrpc_service *ptlrpc_register_service(struct ptlrpc_service_conf *conf, + struct kset *parent, + struct dentry *debugfs_entry); int ptlrpc_start_threads(struct ptlrpc_service *svc); int ptlrpc_unregister_service(struct ptlrpc_service *service); -int liblustre_check_services(void *arg); int ptlrpc_hr_init(void); void ptlrpc_hr_fini(void); @@ -2536,7 +2543,7 @@ int ptlrpc_reconnect_import(struct obd_import *imp); int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout, int index); void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout, - int index); + int index); int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len); int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len); @@ -2726,7 +2733,7 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req) static inline void ptlrpc_client_wake_req(struct 
ptlrpc_request *req) { - if (req->rq_set == NULL) + if (!req->rq_set) wake_up(&req->rq_reply_waitq); else wake_up(&req->rq_set->set_waitq); @@ -2750,7 +2757,7 @@ ptlrpc_rs_decref(struct ptlrpc_reply_state *rs) /* Should only be called once per req */ static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req) { - if (req->rq_reply_state == NULL) + if (!req->rq_reply_state) return; /* shouldn't occur */ ptlrpc_rs_decref(req->rq_reply_state); req->rq_reply_state = NULL; @@ -2807,7 +2814,6 @@ ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt) static inline struct ptlrpc_service * ptlrpc_req2svc(struct ptlrpc_request *req) { - LASSERT(req->rq_rqbd != NULL); return req->rq_rqbd->rqbd_svcpt->scp_service; } diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h index 46a662f89..b2e67fcf9 100644 --- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h +++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h @@ -107,8 +107,8 @@ void req_capsule_set_size(struct req_capsule *pill, const struct req_msg_field *field, enum req_location loc, int size); int req_capsule_get_size(const struct req_capsule *pill, - const struct req_msg_field *field, - enum req_location loc); + const struct req_msg_field *field, + enum req_location loc); int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc); int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt, enum req_location loc); @@ -130,7 +130,6 @@ void req_layout_fini(void); extern struct req_format RQF_OBD_PING; extern struct req_format RQF_OBD_SET_INFO; extern struct req_format RQF_SEC_CTX; -extern struct req_format RQF_OBD_IDX_READ; /* MGS req_format */ extern struct req_format RQF_MGS_TARGET_REG; extern struct req_format RQF_MGS_SET_INFO; @@ -146,7 +145,6 @@ extern struct req_format RQF_MDS_GETSTATUS; extern struct req_format RQF_MDS_SYNC; extern struct req_format RQF_MDS_GETXATTR; extern struct 
req_format RQF_MDS_GETATTR; -extern struct req_format RQF_UPDATE_OBJ; /* * This is format of direct (non-intent) MDS_GETATTR_NAME request. @@ -177,7 +175,6 @@ extern struct req_format RQF_MDS_REINT_SETXATTR; extern struct req_format RQF_MDS_QUOTACHECK; extern struct req_format RQF_MDS_QUOTACTL; extern struct req_format RQF_QC_CALLBACK; -extern struct req_format RQF_QUOTA_DQACQ; extern struct req_format RQF_MDS_SWAP_LAYOUTS; /* MDS hsm formats */ extern struct req_format RQF_MDS_HSM_STATE_GET; @@ -220,7 +217,6 @@ extern struct req_format RQF_LDLM_INTENT_OPEN; extern struct req_format RQF_LDLM_INTENT_CREATE; extern struct req_format RQF_LDLM_INTENT_UNLINK; extern struct req_format RQF_LDLM_INTENT_GETXATTR; -extern struct req_format RQF_LDLM_INTENT_QUOTA; extern struct req_format RQF_LDLM_CANCEL; extern struct req_format RQF_LDLM_CALLBACK; extern struct req_format RQF_LDLM_CP_CALLBACK; @@ -252,7 +248,6 @@ extern struct req_msg_field RMF_SETINFO_KEY; extern struct req_msg_field RMF_GETINFO_VAL; extern struct req_msg_field RMF_GETINFO_VALLEN; extern struct req_msg_field RMF_GETINFO_KEY; -extern struct req_msg_field RMF_IDX_INFO; extern struct req_msg_field RMF_CLOSE_DATA; /* @@ -277,7 +272,6 @@ extern struct req_msg_field RMF_CAPA1; extern struct req_msg_field RMF_CAPA2; extern struct req_msg_field RMF_OBD_QUOTACHECK; extern struct req_msg_field RMF_OBD_QUOTACTL; -extern struct req_msg_field RMF_QUOTA_BODY; extern struct req_msg_field RMF_STRING; extern struct req_msg_field RMF_SWAP_LAYOUTS; extern struct req_msg_field RMF_MDS_HSM_PROGRESS; @@ -322,9 +316,6 @@ extern struct req_msg_field RMF_MGS_CONFIG_RES; /* generic uint32 */ extern struct req_msg_field RMF_U32; -/* OBJ update format */ -extern struct req_msg_field RMF_UPDATE; -extern struct req_msg_field RMF_UPDATE_REPLY; /** @} req_layout */ #endif /* _LUSTRE_REQ_LAYOUT_H__ */ diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h index 
dd1033be6..01b4e6726 100644 --- a/drivers/staging/lustre/lustre/include/lustre_sec.h +++ b/drivers/staging/lustre/lustre/include/lustre_sec.h @@ -351,26 +351,23 @@ struct ptlrpc_ctx_ops { /** * To determine whether it's suitable to use the \a ctx for \a vcred. */ - int (*match) (struct ptlrpc_cli_ctx *ctx, - struct vfs_cred *vcred); + int (*match)(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred); /** * To bring the \a ctx uptodate. */ - int (*refresh) (struct ptlrpc_cli_ctx *ctx); + int (*refresh)(struct ptlrpc_cli_ctx *ctx); /** * Validate the \a ctx. */ - int (*validate) (struct ptlrpc_cli_ctx *ctx); + int (*validate)(struct ptlrpc_cli_ctx *ctx); /** * Force the \a ctx to die. */ - void (*force_die) (struct ptlrpc_cli_ctx *ctx, - int grace); - int (*display) (struct ptlrpc_cli_ctx *ctx, - char *buf, int bufsize); + void (*force_die)(struct ptlrpc_cli_ctx *ctx, int grace); + int (*display)(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize); /** * Sign the request message using \a ctx. @@ -382,8 +379,7 @@ struct ptlrpc_ctx_ops { * * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign(). */ - int (*sign) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*sign)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Verify the reply message using \a ctx. @@ -395,8 +391,7 @@ struct ptlrpc_ctx_ops { * * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify(). */ - int (*verify) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*verify)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Encrypt the request message using \a ctx. @@ -408,8 +403,7 @@ struct ptlrpc_ctx_ops { * * \see gss_cli_ctx_seal(). */ - int (*seal) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*seal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Decrypt the reply message using \a ctx. @@ -421,8 +415,7 @@ struct ptlrpc_ctx_ops { * * \see gss_cli_ctx_unseal(). 
*/ - int (*unseal) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req); + int (*unseal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req); /** * Wrap bulk request data. This is called before wrapping RPC @@ -444,9 +437,9 @@ struct ptlrpc_ctx_ops { * * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk(). */ - int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*wrap_bulk)(struct ptlrpc_cli_ctx *ctx, + struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Unwrap bulk reply data. This is called after wrapping RPC @@ -461,9 +454,9 @@ struct ptlrpc_ctx_ops { * * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk(). */ - int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx, - struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*unwrap_bulk)(struct ptlrpc_cli_ctx *ctx, + struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); }; #define PTLRPC_CTX_NEW_BIT (0) /* newly created */ @@ -515,9 +508,9 @@ struct ptlrpc_sec_cops { * * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr(). */ - struct ptlrpc_sec * (*create_sec) (struct obd_import *imp, - struct ptlrpc_svc_ctx *ctx, - struct sptlrpc_flavor *flavor); + struct ptlrpc_sec *(*create_sec)(struct obd_import *imp, + struct ptlrpc_svc_ctx *ctx, + struct sptlrpc_flavor *flavor); /** * Destructor of ptlrpc_sec. When called, refcount has been dropped @@ -525,7 +518,7 @@ struct ptlrpc_sec_cops { * * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr(). */ - void (*destroy_sec) (struct ptlrpc_sec *sec); + void (*destroy_sec)(struct ptlrpc_sec *sec); /** * Notify that this ptlrpc_sec is going to die. Optionally, policy @@ -534,7 +527,7 @@ struct ptlrpc_sec_cops { * * \see plain_kill_sec(), gss_sec_kill(). */ - void (*kill_sec) (struct ptlrpc_sec *sec); + void (*kill_sec)(struct ptlrpc_sec *sec); /** * Given \a vcred, lookup and/or create its context. 
The policy module @@ -544,10 +537,9 @@ struct ptlrpc_sec_cops { * * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr(). */ - struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec, - struct vfs_cred *vcred, - int create, - int remove_dead); + struct ptlrpc_cli_ctx *(*lookup_ctx)(struct ptlrpc_sec *sec, + struct vfs_cred *vcred, + int create, int remove_dead); /** * Called then the reference of \a ctx dropped to 0. The policy module @@ -559,9 +551,8 @@ struct ptlrpc_sec_cops { * * \see plain_release_ctx(), gss_sec_release_ctx_kr(). */ - void (*release_ctx) (struct ptlrpc_sec *sec, - struct ptlrpc_cli_ctx *ctx, - int sync); + void (*release_ctx)(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx, + int sync); /** * Flush the context cache. @@ -573,11 +564,8 @@ struct ptlrpc_sec_cops { * * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr(). */ - int (*flush_ctx_cache) - (struct ptlrpc_sec *sec, - uid_t uid, - int grace, - int force); + int (*flush_ctx_cache)(struct ptlrpc_sec *sec, uid_t uid, + int grace, int force); /** * Called periodically by garbage collector to remove dead contexts @@ -585,7 +573,7 @@ struct ptlrpc_sec_cops { * * \see gss_sec_gc_ctx_kr(). */ - void (*gc_ctx) (struct ptlrpc_sec *sec); + void (*gc_ctx)(struct ptlrpc_sec *sec); /** * Given an context \a ctx, install a corresponding reverse service @@ -593,9 +581,8 @@ struct ptlrpc_sec_cops { * XXX currently it's only used by GSS module, maybe we should remove * this from general API. */ - int (*install_rctx)(struct obd_import *imp, - struct ptlrpc_sec *sec, - struct ptlrpc_cli_ctx *ctx); + int (*install_rctx)(struct obd_import *imp, struct ptlrpc_sec *sec, + struct ptlrpc_cli_ctx *ctx); /** * To allocate request buffer for \a req. @@ -608,9 +595,8 @@ struct ptlrpc_sec_cops { * * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf(). 
*/ - int (*alloc_reqbuf)(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int lustre_msg_size); + int (*alloc_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, + int lustre_msg_size); /** * To free request buffer for \a req. @@ -619,8 +605,7 @@ struct ptlrpc_sec_cops { * * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf(). */ - void (*free_reqbuf) (struct ptlrpc_sec *sec, - struct ptlrpc_request *req); + void (*free_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req); /** * To allocate reply buffer for \a req. @@ -632,9 +617,8 @@ struct ptlrpc_sec_cops { * * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf(). */ - int (*alloc_repbuf)(struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int lustre_msg_size); + int (*alloc_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req, + int lustre_msg_size); /** * To free reply buffer for \a req. @@ -645,8 +629,7 @@ struct ptlrpc_sec_cops { * * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf(). */ - void (*free_repbuf) (struct ptlrpc_sec *sec, - struct ptlrpc_request *req); + void (*free_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req); /** * To expand the request buffer of \a req, thus the \a segment in @@ -658,15 +641,13 @@ struct ptlrpc_sec_cops { * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(), * gss_enlarge_reqbuf(). */ - int (*enlarge_reqbuf) - (struct ptlrpc_sec *sec, - struct ptlrpc_request *req, - int segment, int newsize); + int (*enlarge_reqbuf)(struct ptlrpc_sec *sec, + struct ptlrpc_request *req, + int segment, int newsize); /* * misc */ - int (*display) (struct ptlrpc_sec *sec, - struct seq_file *seq); + int (*display)(struct ptlrpc_sec *sec, struct seq_file *seq); }; /** @@ -690,7 +671,7 @@ struct ptlrpc_sec_sops { * * \see null_accept(), plain_accept(), gss_svc_accept_kr(). 
*/ - int (*accept) (struct ptlrpc_request *req); + int (*accept)(struct ptlrpc_request *req); /** * Perform security transformation upon reply message. @@ -702,15 +683,14 @@ struct ptlrpc_sec_sops { * * \see null_authorize(), plain_authorize(), gss_svc_authorize(). */ - int (*authorize) (struct ptlrpc_request *req); + int (*authorize)(struct ptlrpc_request *req); /** * Invalidate server context \a ctx. * * \see gss_svc_invalidate_ctx(). */ - void (*invalidate_ctx) - (struct ptlrpc_svc_ctx *ctx); + void (*invalidate_ctx)(struct ptlrpc_svc_ctx *ctx); /** * Allocate a ptlrpc_reply_state. @@ -724,28 +704,26 @@ struct ptlrpc_sec_sops { * * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs(). */ - int (*alloc_rs) (struct ptlrpc_request *req, - int msgsize); + int (*alloc_rs)(struct ptlrpc_request *req, int msgsize); /** * Free a ptlrpc_reply_state. */ - void (*free_rs) (struct ptlrpc_reply_state *rs); + void (*free_rs)(struct ptlrpc_reply_state *rs); /** * Release the server context \a ctx. * * \see gss_svc_free_ctx(). */ - void (*free_ctx) (struct ptlrpc_svc_ctx *ctx); + void (*free_ctx)(struct ptlrpc_svc_ctx *ctx); /** * Install a reverse context based on the server context \a ctx. * * \see gss_svc_install_rctx_kr(). */ - int (*install_rctx)(struct obd_import *imp, - struct ptlrpc_svc_ctx *ctx); + int (*install_rctx)(struct obd_import *imp, struct ptlrpc_svc_ctx *ctx); /** * Prepare buffer for incoming bulk write. @@ -755,24 +733,24 @@ struct ptlrpc_sec_sops { * * \see gss_svc_prep_bulk(). */ - int (*prep_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*prep_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Unwrap the bulk write data. * * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk(). */ - int (*unwrap_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*unwrap_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); /** * Wrap the bulk read data. 
* * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk(). */ - int (*wrap_bulk) (struct ptlrpc_request *req, - struct ptlrpc_bulk_desc *desc); + int (*wrap_bulk)(struct ptlrpc_request *req, + struct ptlrpc_bulk_desc *desc); }; struct ptlrpc_sec_policy { diff --git a/drivers/staging/lustre/lustre/include/lustre_ver.h b/drivers/staging/lustre/lustre/include/lustre_ver.h index caa4da12f..64559a16f 100644 --- a/drivers/staging/lustre/lustre/include/lustre_ver.h +++ b/drivers/staging/lustre/lustre/include/lustre_ver.h @@ -1,26 +1,20 @@ #ifndef _LUSTRE_VER_H_ #define _LUSTRE_VER_H_ -/* This file automatically generated from lustre/include/lustre_ver.h.in, - * based on parameters in lustre/autoconf/lustre-version.ac. - * Changes made directly to this file will be lost. */ #define LUSTRE_MAJOR 2 -#define LUSTRE_MINOR 3 -#define LUSTRE_PATCH 64 +#define LUSTRE_MINOR 4 +#define LUSTRE_PATCH 60 #define LUSTRE_FIX 0 -#define LUSTRE_VERSION_STRING "2.3.64" +#define LUSTRE_VERSION_STRING "2.4.60" #define LUSTRE_VERSION_CODE OBD_OCD_VERSION(LUSTRE_MAJOR, \ LUSTRE_MINOR, LUSTRE_PATCH, \ LUSTRE_FIX) -/* liblustre clients are only allowed to connect if their LUSTRE_FIX mismatches - * by this amount (set in lustre/autoconf/lustre-version.ac). */ -#define LUSTRE_VERSION_ALLOWED_OFFSET OBD_OCD_VERSION(0, 0, 1, 32) - -/* If lustre version of client and servers it connects to differs by more +/* + * If lustre version of client and servers it connects to differs by more * than this amount, client would issue a warning. 
- * (set in lustre/autoconf/lustre-version.ac) */ + */ #define LUSTRE_VERSION_OFFSET_WARN OBD_OCD_VERSION(0, 4, 0, 0) #endif diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h index bcbe61301..4264d9765 100644 --- a/drivers/staging/lustre/lustre/include/obd.h +++ b/drivers/staging/lustre/lustre/include/obd.h @@ -90,7 +90,8 @@ struct lov_stripe_md { pid_t lsm_lock_owner; /* debugging */ /* maximum possible file size, might change as OSTs status changes, - * e.g. disconnected, deactivated */ + * e.g. disconnected, deactivated + */ __u64 lsm_maxbytes; struct { /* Public members. */ @@ -123,7 +124,7 @@ static inline bool lsm_is_released(struct lov_stripe_md *lsm) static inline bool lsm_has_objects(struct lov_stripe_md *lsm) { - if (lsm == NULL) + if (!lsm) return false; if (lsm_is_released(lsm)) return false; @@ -159,7 +160,8 @@ struct obd_info { /* An update callback which is called to update some data on upper * level. E.g. it is used for update lsm->lsm_oinfo at every received * request in osc level for enqueue requests. It is also possible to - * update some caller data from LOV layer if needed. */ + * update some caller data from LOV layer if needed. + */ obd_enqueue_update_f oi_cb_up; }; @@ -216,7 +218,6 @@ struct timeout_item { }; #define OSC_MAX_RIF_DEFAULT 8 -#define MDS_OSC_MAX_RIF_DEFAULT 50 #define OSC_MAX_RIF_MAX 256 #define OSC_MAX_DIRTY_DEFAULT (OSC_MAX_RIF_DEFAULT * 4) #define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */ @@ -241,7 +242,8 @@ struct client_obd { struct obd_import *cl_import; /* ptlrpc connection state */ int cl_conn_count; /* max_mds_easize is purely a performance thing so we don't have to - * call obd_size_diskmd() all the time. */ + * call obd_size_diskmd() all the time. 
+ */ int cl_default_mds_easize; int cl_max_mds_easize; int cl_default_mds_cookiesize; @@ -261,7 +263,8 @@ struct client_obd { /* since we allocate grant by blocks, we don't know how many grant will * be used to add a page into cache. As a solution, we reserve maximum * grant before trying to dirty a page and unreserve the rest. - * See osc_{reserve|unreserve}_grant for details. */ + * See osc_{reserve|unreserve}_grant for details. + */ long cl_reserved_grant; struct list_head cl_cache_waiters; /* waiting for cache/grant */ unsigned long cl_next_shrink_grant; /* jiffies */ @@ -269,14 +272,16 @@ struct client_obd { int cl_grant_shrink_interval; /* seconds */ /* A chunk is an optimal size used by osc_extent to determine - * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */ + * the extent size. A chunk is max(PAGE_SIZE, OST block size) + */ int cl_chunkbits; int cl_chunk; int cl_extent_tax; /* extent overhead, by bytes */ /* keep track of objects that have lois that contain pages which * have been queued for async brw. this lock also protects the - * lists of osc_client_pages that hang off of the loi */ + * lists of osc_client_pages that hang off of the loi + */ /* * ->cl_loi_list_lock protects consistency of * ->cl_loi_{ready,read,write}_list. ->ap_make_ready() and @@ -295,14 +300,14 @@ struct client_obd { * NB by Jinshan: though field names are still _loi_, but actually * osc_object{}s are in the list. 
*/ - client_obd_lock_t cl_loi_list_lock; + struct client_obd_lock cl_loi_list_lock; struct list_head cl_loi_ready_list; struct list_head cl_loi_hp_ready_list; struct list_head cl_loi_write_list; struct list_head cl_loi_read_list; int cl_r_in_flight; int cl_w_in_flight; - /* just a sum of the loi/lop pending numbers to be exported by /proc */ + /* just a sum of the loi/lop pending numbers to be exported by sysfs */ atomic_t cl_pending_w_pages; atomic_t cl_pending_r_pages; __u32 cl_max_pages_per_rpc; @@ -322,7 +327,7 @@ struct client_obd { atomic_t cl_lru_shrinkers; atomic_t cl_lru_in_list; struct list_head cl_lru_list; /* lru page list */ - client_obd_lock_t cl_lru_list_lock; /* page list protector */ + struct client_obd_lock cl_lru_list_lock; /* page list protector */ /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */ atomic_t cl_destroy_in_flight; @@ -340,7 +345,7 @@ struct client_obd { /* supported checksum types that are worked out at connect time */ __u32 cl_supp_cksum_types; /* checksum algorithm to be used */ - cksum_type_t cl_cksum_type; + enum cksum_type cl_cksum_type; /* also protected by the poorly named _loi_list_lock lock above */ struct osc_async_rc cl_ar; @@ -375,14 +380,12 @@ struct echo_client_obd { spinlock_t ec_lock; struct list_head ec_objects; struct list_head ec_locks; - int ec_nstripes; __u64 ec_unique; }; /* Generic subset of OSTs */ struct ost_pool { - __u32 *op_array; /* array of index of - lov_obd->lov_tgts */ + __u32 *op_array; /* array of index of lov_obd->lov_tgts */ unsigned int op_count; /* number of OSTs in the array */ unsigned int op_size; /* allocated size of lp_array */ struct rw_semaphore op_rw_sem; /* to protect ost_pool use */ @@ -415,14 +418,16 @@ struct lov_qos { struct lov_qos_rr lq_rr; /* round robin qos data */ unsigned long lq_dirty:1, /* recalc qos data */ lq_same_space:1,/* the ost's all have approx. 
- the same space avail */ + * the same space avail + */ lq_reset:1, /* zero current penalties */ lq_statfs_in_progress:1; /* statfs op in progress */ /* qos statfs data */ struct lov_statfs_data *lq_statfs_data; - wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs - * requests completion */ + wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs + * requests completion + */ }; struct lov_tgt_desc { @@ -450,16 +455,16 @@ struct pool_desc { struct lov_qos_rr pool_rr; /* round robin qos */ struct hlist_node pool_hash; /* access by poolname */ struct list_head pool_list; /* serial access */ - struct dentry *pool_debugfs_entry; /* file in /proc */ + struct dentry *pool_debugfs_entry; /* file in debugfs */ struct obd_device *pool_lobd; /* obd of the lov/lod to which - * this pool belongs */ + * this pool belongs + */ }; struct lov_obd { struct lov_desc desc; struct lov_tgt_desc **lov_tgts; /* sparse array */ - struct ost_pool lov_packed; /* all OSTs in a packed - array */ + struct ost_pool lov_packed; /* all OSTs in a packed array */ struct mutex lov_lock; struct obd_connect_data lov_ocd; atomic_t lov_refcount; @@ -596,34 +601,6 @@ struct obd_trans_info { struct obd_uuid *oti_ost_uuid; }; -static inline void oti_init(struct obd_trans_info *oti, - struct ptlrpc_request *req) -{ - if (oti == NULL) - return; - memset(oti, 0, sizeof(*oti)); - - if (req == NULL) - return; - - oti->oti_xid = req->rq_xid; - /** VBR: take versions from request */ - if (req->rq_reqmsg != NULL && - lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) { - __u64 *pre_version = lustre_msg_get_versions(req->rq_reqmsg); - - oti->oti_pre_version = pre_version ? 
pre_version[0] : 0; - oti->oti_transno = lustre_msg_get_transno(req->rq_reqmsg); - } - - /** called from mds_create_objects */ - if (req->rq_repmsg != NULL) - oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg); - oti->oti_thread = req->rq_svc_thread; - if (req->rq_reqmsg != NULL) - oti->oti_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg); -} - static inline void oti_alloc_cookies(struct obd_trans_info *oti, int num_cookies) { @@ -681,7 +658,7 @@ enum obd_notify_event { /* * Data structure used to pass obd_notify()-event to non-obd listeners (llite - * and liblustre being main examples). + * being main example). */ struct obd_notify_upcall { int (*onu_upcall)(struct obd_device *host, struct obd_device *watched, @@ -728,21 +705,23 @@ struct obd_device { unsigned long obd_attached:1, /* finished attach */ obd_set_up:1, /* finished setup */ obd_version_recov:1, /* obd uses version checking */ - obd_replayable:1, /* recovery is enabled; inform clients */ - obd_no_transno:1, /* no committed-transno notification */ + obd_replayable:1,/* recovery is enabled; inform clients */ + obd_no_transno:1, /* no committed-transno notification */ obd_no_recov:1, /* fail instead of retry messages */ obd_stopping:1, /* started cleanup */ obd_starting:1, /* started setup */ obd_force:1, /* cleanup with > 0 obd refcount */ - obd_fail:1, /* cleanup with failover */ - obd_async_recov:1, /* allow asynchronous orphan cleanup */ + obd_fail:1, /* cleanup with failover */ + obd_async_recov:1, /* allow asynchronous orphan cleanup */ obd_no_conn:1, /* deny new connections */ obd_inactive:1, /* device active/inactive - * (for /proc/status only!!) */ + * (for sysfs status only!!) + */ obd_no_ir:1, /* no imperative recovery. 
*/ obd_process_conf:1; /* device is processing mgs config */ /* use separate field as it is set in interrupt to don't mess with - * protection of other bits using _bh lock */ + * protection of other bits using _bh lock + */ unsigned long obd_recovery_expired:1; /* uuid-export hash body */ struct cfs_hash *obd_uuid_hash; @@ -935,7 +914,8 @@ struct md_op_data { __u32 op_npages; /* used to transfer info between the stacks of MD client - * see enum op_cli_flags */ + * see enum op_cli_flags + */ __u32 op_cli_flags; /* File object data version for HSM release, on client */ @@ -957,7 +937,7 @@ struct md_enqueue_info { struct lustre_handle mi_lockh; struct inode *mi_dir; int (*mi_cb)(struct ptlrpc_request *req, - struct md_enqueue_info *minfo, int rc); + struct md_enqueue_info *minfo, int rc); __u64 mi_cbdata; unsigned int mi_generation; }; @@ -965,7 +945,7 @@ struct md_enqueue_info { struct obd_ops { struct module *owner; int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg); + void *karg, void __user *uarg); int (*get_info)(const struct lu_env *env, struct obd_export *, __u32 keylen, void *key, __u32 *vallen, void *val, struct lov_stripe_md *lsm); @@ -987,7 +967,8 @@ struct obd_ops { /* connect to the target device with given connection * data. @ocd->ocd_connect_flags is modified to reflect flags actually * granted by the target, which are guaranteed to be a subset of flags - * asked for. If @ocd == NULL, use default parameters. */ + * asked for. If @ocd == NULL, use default parameters. + */ int (*connect)(const struct lu_env *env, struct obd_export **exp, struct obd_device *src, struct obd_uuid *cluuid, struct obd_connect_data *ocd, @@ -1083,7 +1064,8 @@ struct obd_ops { /* * NOTE: If adding ops, add another LPROCFS_OBD_OP_INIT() line * to lprocfs_alloc_obd_stats() in obdclass/lprocfs_status.c. - * Also, add a wrapper function in include/linux/obd_class.h. */ + * Also, add a wrapper function in include/linux/obd_class.h. 
+ */ }; enum { @@ -1189,14 +1171,14 @@ struct md_ops { struct obd_client_handle *); int (*set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *); - ldlm_mode_t (*lock_match)(struct obd_export *, __u64, - const struct lu_fid *, ldlm_type_t, - ldlm_policy_data_t *, ldlm_mode_t, - struct lustre_handle *); + enum ldlm_mode (*lock_match)(struct obd_export *, __u64, + const struct lu_fid *, enum ldlm_type, + ldlm_policy_data_t *, enum ldlm_mode, + struct lustre_handle *); int (*cancel_unused)(struct obd_export *, const struct lu_fid *, - ldlm_policy_data_t *, ldlm_mode_t, - ldlm_cancel_flags_t flags, void *opaque); + ldlm_policy_data_t *, enum ldlm_mode, + enum ldlm_cancel_flags flags, void *opaque); int (*get_remote_perm)(struct obd_export *, const struct lu_fid *, __u32, struct ptlrpc_request **); @@ -1224,9 +1206,9 @@ struct lsm_operations { void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *, u64 *); int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes, - __u16 *stripe_count); + __u16 *stripe_count); int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm, - struct lov_mds_md *lmm); + struct lov_mds_md *lmm); }; extern const struct lsm_operations lsm_v1_ops; @@ -1253,7 +1235,7 @@ static inline struct md_open_data *obd_mod_alloc(void) struct md_open_data *mod; mod = kzalloc(sizeof(*mod), GFP_NOFS); - if (mod == NULL) + if (!mod) return NULL; atomic_set(&mod->mod_refcount, 1); return mod; @@ -1300,7 +1282,7 @@ static inline bool filename_is_volatile(const char *name, int namelen, int *idx) return false; /* caller does not care of idx */ - if (idx == NULL) + if (!idx) return true; /* volatile file, the MDT can be set from name */ @@ -1327,7 +1309,8 @@ static inline bool filename_is_volatile(const char *name, int namelen, int *idx) return true; bad_format: /* bad format of mdt idx, we cannot return an error - * to caller so we use hash algo */ + * to caller so we use hash algo + */ CERROR("Bad volatile file name format: %s\n", name 
+ LUSTRE_VOLATILE_HDR_LEN); return false; @@ -1335,8 +1318,7 @@ bad_format: static inline int cli_brw_size(struct obd_device *obd) { - LASSERT(obd != NULL); - return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT; } #endif /* __OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h index 01db60405..637fa2211 100644 --- a/drivers/staging/lustre/lustre/include/obd_cksum.h +++ b/drivers/staging/lustre/lustre/include/obd_cksum.h @@ -37,7 +37,7 @@ #include "../../include/linux/libcfs/libcfs.h" #include "lustre/lustre_idl.h" -static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type) +static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type) { switch (cksum_type) { case OBD_CKSUM_CRC32: @@ -63,8 +63,9 @@ static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type) * In case of an unsupported types/flags we fall back to ADLER * because that is supported by all clients since 1.8 * - * In case multiple algorithms are supported the best one is used. */ -static inline u32 cksum_type_pack(cksum_type_t cksum_type) + * In case multiple algorithms are supported the best one is used. 
+ */ +static inline u32 cksum_type_pack(enum cksum_type cksum_type) { unsigned int performance = 0, tmp; u32 flag = OBD_FL_CKSUM_ADLER; @@ -98,7 +99,7 @@ static inline u32 cksum_type_pack(cksum_type_t cksum_type) return flag; } -static inline cksum_type_t cksum_type_unpack(u32 o_flags) +static inline enum cksum_type cksum_type_unpack(u32 o_flags) { switch (o_flags & OBD_FL_CKSUM_ALL) { case OBD_FL_CKSUM_CRC32C: @@ -116,9 +117,9 @@ static inline cksum_type_t cksum_type_unpack(u32 o_flags) * 1.8 supported ADLER it is base and not depend on hw * Client uses all available local algos */ -static inline cksum_type_t cksum_types_supported_client(void) +static inline enum cksum_type cksum_types_supported_client(void) { - cksum_type_t ret = OBD_CKSUM_ADLER; + enum cksum_type ret = OBD_CKSUM_ADLER; CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n", cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)), @@ -139,14 +140,16 @@ static inline cksum_type_t cksum_types_supported_client(void) * Currently, calling cksum_type_pack() with a mask will return the fastest * checksum type due to its benchmarking at libcfs module load. * Caution is advised, however, since what is fastest on a single client may - * not be the fastest or most efficient algorithm on the server. */ -static inline cksum_type_t cksum_type_select(cksum_type_t cksum_types) + * not be the fastest or most efficient algorithm on the server. + */ +static inline enum cksum_type cksum_type_select(enum cksum_type cksum_types) { return cksum_type_unpack(cksum_type_pack(cksum_types)); } /* Checksum algorithm names. Must be defined in the same order as the - * OBD_CKSUM_* flags. */ + * OBD_CKSUM_* flags. 
+ */ #define DECLARE_CKSUM_NAME char *cksum_name[] = {"crc32", "adler", "crc32c"} #endif /* __OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h index 97d803975..706869f8c 100644 --- a/drivers/staging/lustre/lustre/include/obd_class.h +++ b/drivers/staging/lustre/lustre/include/obd_class.h @@ -45,18 +45,22 @@ #include "lprocfs_status.h" #define OBD_STATFS_NODELAY 0x0001 /* requests should be send without delay - * and resends for avoid deadlocks */ + * and resends for avoid deadlocks + */ #define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update - * obd_osfs_age */ + * obd_osfs_age + */ #define OBD_STATFS_PTLRPCD 0x0004 /* requests will be sent via ptlrpcd * instead of a specific set. This * means that we cannot rely on the set * interpret routine to be called. * lov_statfs_fini() must thus be called - * by the request interpret routine */ + * by the request interpret routine + */ #define OBD_STATFS_FOR_MDT0 0x0008 /* The statfs is only for retrieving - * information from MDT0. */ -#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */ + * information from MDT0. 
+ */ +#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */ /* OBD Device Declarations */ extern struct obd_device *obd_devs[MAX_OBD_DEVICES]; @@ -83,10 +87,10 @@ int class_name2dev(const char *name); struct obd_device *class_name2obd(const char *name); int class_uuid2dev(struct obd_uuid *uuid); struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, - const char *typ_name, - struct obd_uuid *grp_uuid); + const char *typ_name, + struct obd_uuid *grp_uuid); struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, - int *next); + int *next); struct obd_device *class_num2obd(int num); int class_notify_sptlrpc_conf(const char *fsname, int namelen); @@ -160,8 +164,9 @@ struct config_llog_data { struct mutex cld_lock; int cld_type; unsigned int cld_stopping:1, /* we were told to stop - * watching */ - cld_lostlock:1; /* lock not requeued */ + * watching + */ + cld_lostlock:1; /* lock not requeued */ char cld_logname[0]; }; @@ -193,7 +198,7 @@ extern void (*class_export_dump_hook)(struct obd_export *); struct obd_export *class_export_get(struct obd_export *exp); void class_export_put(struct obd_export *exp); struct obd_export *class_new_export(struct obd_device *obddev, - struct obd_uuid *cluuid); + struct obd_uuid *cluuid); void class_unlink_export(struct obd_export *exp); struct obd_import *class_import_get(struct obd_import *); @@ -203,7 +208,7 @@ void class_destroy_import(struct obd_import *exp); void class_put_type(struct obd_type *type); int class_connect(struct lustre_handle *conn, struct obd_device *obd, - struct obd_uuid *cluuid); + struct obd_uuid *cluuid); int class_disconnect(struct obd_export *exp); void class_fail_export(struct obd_export *exp); int class_manual_cleanup(struct obd_device *obd); @@ -275,7 +280,8 @@ void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid); #define CTXTP(ctxt, op) (ctxt)->loc_logops->lop_##op /* Ensure obd_setup: used for cleanup which must be called - while obd is 
stopping */ + * while obd is stopping + */ static inline int obd_check_dev(struct obd_device *obd) { if (!obd) { @@ -306,7 +312,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) / sizeof(((struct obd_ops *)(0))->iocontrol)) #define OBD_COUNTER_INCREMENT(obdx, op) \ - if ((obdx)->obd_stats != NULL) { \ + if ((obdx)->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((obdx)->obd_cntr_base) + \ OBD_COUNTER_OFFSET(op); \ @@ -315,7 +321,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) } #define EXP_COUNTER_INCREMENT(export, op) \ - if ((export)->exp_obd->obd_stats != NULL) { \ + if ((export)->exp_obd->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((export)->exp_obd->obd_cntr_base) + \ OBD_COUNTER_OFFSET(op); \ @@ -329,7 +335,7 @@ static inline int obd_check_dev_active(struct obd_device *obd) / sizeof(((struct md_ops *)(0))->getstatus)) #define MD_COUNTER_INCREMENT(obdx, op) \ - if ((obd)->md_stats != NULL) { \ + if ((obd)->md_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((obdx)->md_cntr_base) + \ MD_COUNTER_OFFSET(op); \ @@ -338,24 +344,24 @@ static inline int obd_check_dev_active(struct obd_device *obd) } #define EXP_MD_COUNTER_INCREMENT(export, op) \ - if ((export)->exp_obd->obd_stats != NULL) { \ + if ((export)->exp_obd->obd_stats) { \ unsigned int coffset; \ coffset = (unsigned int)((export)->exp_obd->md_cntr_base) + \ MD_COUNTER_OFFSET(op); \ LASSERT(coffset < (export)->exp_obd->md_stats->ls_num); \ lprocfs_counter_incr((export)->exp_obd->md_stats, coffset); \ - if ((export)->exp_md_stats != NULL) \ + if ((export)->exp_md_stats) \ lprocfs_counter_incr( \ (export)->exp_md_stats, coffset); \ } #define EXP_CHECK_MD_OP(exp, op) \ do { \ - if ((exp) == NULL) { \ + if (!(exp)) { \ CERROR("obd_" #op ": NULL export\n"); \ return -ENODEV; \ } \ - if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \ + if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \ CERROR("obd_" #op ": cleaned up obd\n"); \ 
return -EOPNOTSUPP; \ } \ @@ -379,11 +385,11 @@ do { \ #define EXP_CHECK_DT_OP(exp, op) \ do { \ - if ((exp) == NULL) { \ + if (!(exp)) { \ CERROR("obd_" #op ": NULL export\n"); \ return -ENODEV; \ } \ - if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \ + if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \ CERROR("obd_" #op ": cleaned up obd\n"); \ return -EOPNOTSUPP; \ } \ @@ -467,7 +473,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg) DECLARE_LU_VARS(ldt, d); ldt = obd->obd_type->typ_lu; - if (ldt != NULL) { + if (ldt) { struct lu_context session_ctx; struct lu_env env; @@ -509,7 +515,7 @@ static inline int obd_precleanup(struct obd_device *obd, return rc; ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { if (cleanup_stage == OBD_CLEANUP_EXPORTS) { struct lu_env env; @@ -538,7 +544,7 @@ static inline int obd_cleanup(struct obd_device *obd) ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { struct lu_env env; rc = lu_env_init(&env, ldt->ldt_ctx_tags); @@ -558,7 +564,8 @@ static inline int obd_cleanup(struct obd_device *obd) static inline void obd_cleanup_client_import(struct obd_device *obd) { /* If we set up but never connected, the - client import will not have been cleaned. */ + * client import will not have been cleaned. 
+ */ down_write(&obd->u.cli.cl_sem); if (obd->u.cli.cl_import) { struct obd_import *imp; @@ -586,7 +593,7 @@ obd_process_config(struct obd_device *obd, int datalen, void *data) obd->obd_process_conf = 1; ldt = obd->obd_type->typ_lu; d = obd->obd_lu_dev; - if (ldt != NULL && d != NULL) { + if (ldt && d) { struct lu_env env; rc = lu_env_init(&env, ldt->ldt_ctx_tags); @@ -674,7 +681,7 @@ static inline int obd_alloc_memmd(struct obd_export *exp, struct lov_stripe_md **mem_tgt) { LASSERT(mem_tgt); - LASSERT(*mem_tgt == NULL); + LASSERT(!*mem_tgt); return obd_unpackmd(exp, mem_tgt, NULL, 0); } @@ -767,7 +774,7 @@ static inline int obd_setattr_rqset(struct obd_export *exp, EXP_COUNTER_INCREMENT(exp, setattr_async); set = ptlrpc_prep_set(); - if (set == NULL) + if (!set) return -ENOMEM; rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set); @@ -778,7 +785,8 @@ static inline int obd_setattr_rqset(struct obd_export *exp, } /* This adds all the requests into @set if @set != NULL, otherwise - all requests are sent asynchronously without waiting for response. */ + * all requests are sent asynchronously without waiting for response. + */ static inline int obd_setattr_async(struct obd_export *exp, struct obd_info *oinfo, struct obd_trans_info *oti, @@ -848,7 +856,8 @@ static inline int obd_connect(const struct lu_env *env, { int rc; __u64 ocf = data ? data->ocd_connect_flags : 0; /* for post-condition - * check */ + * check + */ rc = obd_check_dev_active(obd); if (rc) @@ -858,7 +867,7 @@ static inline int obd_connect(const struct lu_env *env, rc = OBP(obd, connect)(env, exp, obd, cluuid, data, localdata); /* check that only subset is granted */ - LASSERT(ergo(data != NULL, (data->ocd_connect_flags & ocf) == + LASSERT(ergo(data, (data->ocd_connect_flags & ocf) == data->ocd_connect_flags)); return rc; } @@ -871,8 +880,7 @@ static inline int obd_reconnect(const struct lu_env *env, void *localdata) { int rc; - __u64 ocf = d ? 
d->ocd_connect_flags : 0; /* for post-condition - * check */ + __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition check */ rc = obd_check_dev_active(obd); if (rc) @@ -882,8 +890,7 @@ static inline int obd_reconnect(const struct lu_env *env, rc = OBP(obd, reconnect)(env, exp, obd, cluuid, d, localdata); /* check that only subset is granted */ - LASSERT(ergo(d != NULL, - (d->ocd_connect_flags & ocf) == d->ocd_connect_flags)); + LASSERT(ergo(d, (d->ocd_connect_flags & ocf) == d->ocd_connect_flags)); return rc; } @@ -998,7 +1005,7 @@ static inline int obd_init_export(struct obd_export *exp) { int rc = 0; - if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) && + if ((exp)->exp_obd && OBT((exp)->exp_obd) && OBP((exp)->exp_obd, init_export)) rc = OBP(exp->exp_obd, init_export)(exp); return rc; @@ -1006,7 +1013,7 @@ static inline int obd_init_export(struct obd_export *exp) static inline int obd_destroy_export(struct obd_export *exp) { - if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) && + if ((exp)->exp_obd && OBT((exp)->exp_obd) && OBP((exp)->exp_obd, destroy_export)) OBP(exp->exp_obd, destroy_export)(exp); return 0; @@ -1014,7 +1021,8 @@ static inline int obd_destroy_export(struct obd_export *exp) /* @max_age is the oldest time in jiffies that we accept using a cached data. * If the cache is older than @max_age we will get a new value from the - * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */ + * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. 
+ */ static inline int obd_statfs_async(struct obd_export *exp, struct obd_info *oinfo, __u64 max_age, @@ -1023,7 +1031,7 @@ static inline int obd_statfs_async(struct obd_export *exp, int rc = 0; struct obd_device *obd; - if (exp == NULL || exp->exp_obd == NULL) + if (!exp || !exp->exp_obd) return -EINVAL; obd = exp->exp_obd; @@ -1059,7 +1067,7 @@ static inline int obd_statfs_rqset(struct obd_export *exp, int rc = 0; set = ptlrpc_prep_set(); - if (set == NULL) + if (!set) return -ENOMEM; oinfo.oi_osfs = osfs; @@ -1073,7 +1081,8 @@ static inline int obd_statfs_rqset(struct obd_export *exp, /* @max_age is the oldest time in jiffies that we accept using a cached data. * If the cache is older than @max_age we will get a new value from the - * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */ + * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. + */ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp, struct obd_statfs *osfs, __u64 max_age, __u32 flags) @@ -1081,7 +1090,7 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp, int rc = 0; struct obd_device *obd = exp->exp_obd; - if (obd == NULL) + if (!obd) return -EINVAL; OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP); @@ -1155,7 +1164,7 @@ static inline int obd_adjust_kms(struct obd_export *exp, } static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp, - int len, void *karg, void *uarg) + int len, void *karg, void __user *uarg) { int rc; @@ -1205,9 +1214,10 @@ static inline int obd_notify(struct obd_device *obd, return rc; /* the check for async_recov is a complete hack - I'm hereby - overloading the meaning to also mean "this was called from - mds_postsetup". I know that my mds is able to handle notifies - by this point, and it needs to get them to execute mds_postrecov. */ + * overloading the meaning to also mean "this was called from + * mds_postsetup". 
I know that my mds is able to handle notifies + * by this point, and it needs to get them to execute mds_postrecov. + */ if (!obd->obd_set_up && !obd->obd_async_recov) { CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name); return -EINVAL; @@ -1241,7 +1251,7 @@ static inline int obd_notify_observer(struct obd_device *observer, * Also, call non-obd listener, if any */ onu = &observer->obd_upcall; - if (onu->onu_upcall != NULL) + if (onu->onu_upcall) rc2 = onu->onu_upcall(observer, observed, ev, onu->onu_owner, NULL); else @@ -1287,7 +1297,7 @@ static inline int obd_health_check(const struct lu_env *env, int rc; /* don't use EXP_CHECK_DT_OP, because NULL method is normal here */ - if (obd == NULL || !OBT(obd)) { + if (!obd || !OBT(obd)) { CERROR("cleaned up obd\n"); return -EOPNOTSUPP; } @@ -1318,57 +1328,6 @@ static inline int obd_register_observer(struct obd_device *obd, return 0; } -#if 0 -static inline int obd_register_page_removal_cb(struct obd_export *exp, - obd_page_removal_cb_t cb, - obd_pin_extent_cb pin_cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, register_page_removal_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, register_page_removal_cb); - - rc = OBP(exp->exp_obd, register_page_removal_cb)(exp, cb, pin_cb); - return rc; -} - -static inline int obd_unregister_page_removal_cb(struct obd_export *exp, - obd_page_removal_cb_t cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, unregister_page_removal_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_page_removal_cb); - - rc = OBP(exp->exp_obd, unregister_page_removal_cb)(exp, cb); - return rc; -} - -static inline int obd_register_lock_cancel_cb(struct obd_export *exp, - obd_lock_cancel_cb cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, register_lock_cancel_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, register_lock_cancel_cb); - - rc = OBP(exp->exp_obd, register_lock_cancel_cb)(exp, cb); - return rc; -} - -static inline int obd_unregister_lock_cancel_cb(struct obd_export *exp, - 
obd_lock_cancel_cb cb) -{ - int rc; - - OBD_CHECK_DT_OP(exp->exp_obd, unregister_lock_cancel_cb, 0); - OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_lock_cancel_cb); - - rc = OBP(exp->exp_obd, unregister_lock_cancel_cb)(exp, cb); - return rc; -} -#endif - /* metadata helpers */ static inline int md_getstatus(struct obd_export *exp, struct lu_fid *fid) { @@ -1392,7 +1351,7 @@ static inline int md_getattr(struct obd_export *exp, struct md_op_data *op_data, } static inline int md_null_inode(struct obd_export *exp, - const struct lu_fid *fid) + const struct lu_fid *fid) { int rc; @@ -1657,8 +1616,8 @@ static inline int md_set_lock_data(struct obd_export *exp, static inline int md_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { int rc; @@ -1671,12 +1630,12 @@ static inline int md_cancel_unused(struct obd_export *exp, return rc; } -static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, - ldlm_type_t type, - ldlm_policy_data_t *policy, - ldlm_mode_t mode, - struct lustre_handle *lockh) +static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, + enum ldlm_type type, + ldlm_policy_data_t *policy, + enum ldlm_mode mode, + struct lustre_handle *lockh) { EXP_CHECK_MD_OP(exp, lock_match); EXP_MD_COUNTER_INCREMENT(exp, lock_match); @@ -1759,7 +1718,8 @@ struct lwp_register_item { /* I'm as embarrassed about this as you are. 
* * <shaver> // XXX do not look into _superhack with remaining eye - * <shaver> // XXX if this were any uglier, I'd get my own show on MTV */ + * <shaver> // XXX if this were any uglier, I'd get my own show on MTV + */ extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c); /* obd_mount.c */ @@ -1774,7 +1734,7 @@ void class_uuid_unparse(class_uuid_t in, struct obd_uuid *out); /* lustre_peer.c */ int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index); int class_add_uuid(const char *uuid, __u64 nid); -int class_del_uuid (const char *uuid); +int class_del_uuid(const char *uuid); int class_check_uuid(struct obd_uuid *uuid, __u64 nid); void class_init_uuidlist(void); void class_exit_uuidlist(void); diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h index d031437c0..f8ee3a325 100644 --- a/drivers/staging/lustre/lustre/include/obd_support.h +++ b/drivers/staging/lustre/lustre/include/obd_support.h @@ -47,7 +47,8 @@ extern unsigned int obd_debug_peer_on_timeout; extern unsigned int obd_dump_on_timeout; extern unsigned int obd_dump_on_eviction; /* obd_timeout should only be used for recovery, not for - networking / disk / timings affected by load (use Adaptive Timeouts) */ + * networking / disk / timings affected by load (use Adaptive Timeouts) + */ extern unsigned int obd_timeout; /* seconds */ extern unsigned int obd_timeout_set; extern unsigned int at_min; @@ -104,18 +105,21 @@ extern char obd_jobid_var[]; * failover targets the client only pings one server at a time, and pings * can be lost on a loaded network. Since eviction has serious consequences, * and there's no urgent need to evict a client just because it's idle, we - * should be very conservative here. */ + * should be very conservative here. 
+ */ #define PING_EVICT_TIMEOUT (PING_INTERVAL * 6) #define DISK_TIMEOUT 50 /* Beyond this we warn about disk speed */ #define CONNECTION_SWITCH_MIN 5U /* Connection switching rate limiter */ - /* Max connect interval for nonresponsive servers; ~50s to avoid building up - connect requests in the LND queues, but within obd_timeout so we don't - miss the recovery window */ +/* Max connect interval for nonresponsive servers; ~50s to avoid building up + * connect requests in the LND queues, but within obd_timeout so we don't + * miss the recovery window + */ #define CONNECTION_SWITCH_MAX min(50U, max(CONNECTION_SWITCH_MIN, obd_timeout)) #define CONNECTION_SWITCH_INC 5 /* Connection timeout backoff */ /* In general this should be low to have quick detection of a system - running on a backup server. (If it's too low, import_select_connection - will increase the timeout anyhow.) */ + * running on a backup server. (If it's too low, import_select_connection + * will increase the timeout anyhow.) + */ #define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout/20) /* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */ #define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \ @@ -496,7 +500,7 @@ extern char obd_jobid_var[]; #ifdef POISON_BULK #define POISON_PAGE(page, val) do { \ - memset(kmap(page), val, PAGE_CACHE_SIZE); \ + memset(kmap(page), val, PAGE_SIZE); \ kunmap(page); \ } while (0) #else @@ -507,7 +511,6 @@ extern char obd_jobid_var[]; do { \ struct portals_handle *__h = (handle); \ \ - LASSERT(handle != NULL); \ __h->h_cookie = (unsigned long)(ptr); \ __h->h_size = (size); \ call_rcu(&__h->h_rcu, class_handle_free_cb); \ diff --git a/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h new file mode 100644 index 000000000..5e998362e --- /dev/null +++ b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h @@ -0,0 +1,94 @@ +/* + * GPL HEADER START + * + 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2013, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * + * Author: Nathan Rutman <nathan.rutman@sun.com> + * + * Kernel <-> userspace communication routines. + * The definitions below are used in the kernel and userspace. + */ + +#ifndef __UAPI_KERNELCOMM_H__ +#define __UAPI_KERNELCOMM_H__ + +#include <linux/types.h> + +/* KUC message header. + * All current and future KUC messages should use this header. + * To avoid having to include Lustre headers from libcfs, define this here. 
+ */ +struct kuc_hdr { + __u16 kuc_magic; + /* Each new Lustre feature should use a different transport */ + __u8 kuc_transport; + __u8 kuc_flags; + /* Message type or opcode, transport-specific */ + __u16 kuc_msgtype; + /* Including header */ + __u16 kuc_msglen; +} __aligned(sizeof(__u64)); + +#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr) + CR_MAXSIZE) + +#define KUC_MAGIC 0x191C /*Lustre9etLinC */ + +/* kuc_msgtype values are defined in each transport */ +enum kuc_transport_type { + KUC_TRANSPORT_GENERIC = 1, + KUC_TRANSPORT_HSM = 2, + KUC_TRANSPORT_CHANGELOG = 3, +}; + +enum kuc_generic_message_type { + KUC_MSG_SHUTDOWN = 1, +}; + +/* KUC Broadcast Groups. This determines which userspace process hears which + * messages. Mutliple transports may be used within a group, or multiple + * groups may use the same transport. Broadcast + * groups need not be used if e.g. a UID is specified instead; + * use group 0 to signify unicast. + */ +#define KUC_GRP_HSM 0x02 +#define KUC_GRP_MAX KUC_GRP_HSM + +#define LK_FLG_STOP 0x01 +#define LK_NOFD -1U + +/* kernelcomm control structure, passed from userspace to kernel */ +struct lustre_kernelcomm { + __u32 lk_wfd; + __u32 lk_rfd; + __u32 lk_uid; + __u32 lk_group; + __u32 lk_data; + __u32 lk_flags; +} __packed; + +#endif /* __UAPI_KERNELCOMM_H__ */ diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/lclient/glimpse.c index 8533a1e53..c4e8a0878 100644 --- a/drivers/staging/lustre/lustre/lclient/glimpse.c +++ b/drivers/staging/lustre/lustre/lclient/glimpse.c @@ -109,7 +109,8 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io, * if there were no conflicting locks. If there * were conflicting locks, enqueuing or waiting * fails with -ENAVAIL, but valid inode - * attributes are returned anyway. */ + * attributes are returned anyway. 
+ */ *descr = whole_file; descr->cld_obj = clob; descr->cld_mode = CLM_PHANTOM; diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c index 34dde7ded..96141d17d 100644 --- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c +++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c @@ -116,8 +116,8 @@ void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key) { struct ccc_thread_info *info; - info = kmem_cache_alloc(ccc_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(ccc_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -135,8 +135,8 @@ void *ccc_session_key_init(const struct lu_context *ctx, { struct ccc_session *session; - session = kmem_cache_alloc(ccc_session_kmem, GFP_NOFS | __GFP_ZERO); - if (session == NULL) + session = kmem_cache_zalloc(ccc_session_kmem, GFP_NOFS); + if (!session) session = ERR_PTR(-ENOMEM); return session; } @@ -173,7 +173,7 @@ int ccc_device_init(const struct lu_env *env, struct lu_device *d, vdv = lu2ccc_dev(d); vdv->cdv_next = lu2cl_dev(next); - LASSERT(d->ld_site != NULL && next->ld_type != NULL); + LASSERT(d->ld_site && next->ld_type); next->ld_site = d->ld_site; rc = next->ld_type->ldt_ops->ldto_device_init( env, next, next->ld_type->ldt_name, NULL); @@ -211,12 +211,12 @@ struct lu_device *ccc_device_alloc(const struct lu_env *env, vdv->cdv_cl.cd_ops = clops; site = kzalloc(sizeof(*site), GFP_NOFS); - if (site != NULL) { + if (site) { rc = cl_site_init(site, &vdv->cdv_cl); if (rc == 0) rc = lu_site_init_finish(&site->cs_lu); else { - LASSERT(lud->ld_site == NULL); + LASSERT(!lud->ld_site); CERROR("Cannot init lu_site, rc %d.\n", rc); kfree(site); } @@ -236,7 +236,7 @@ struct lu_device *ccc_device_free(const struct lu_env *env, struct cl_site *site = lu2cl_site(d->ld_site); struct lu_device *next = cl2lu_dev(vdv->cdv_next); - if (d->ld_site != NULL) { + if (d->ld_site) { cl_site_fini(site); 
kfree(site); } @@ -251,8 +251,8 @@ int ccc_req_init(const struct lu_env *env, struct cl_device *dev, struct ccc_req *vrq; int result; - vrq = kmem_cache_alloc(ccc_req_kmem, GFP_NOFS | __GFP_ZERO); - if (vrq != NULL) { + vrq = kmem_cache_zalloc(ccc_req_kmem, GFP_NOFS); + if (vrq) { cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops); result = 0; } else @@ -304,7 +304,7 @@ out_kmem: void ccc_global_fini(struct lu_device_type *device_type) { - if (ccc_inode_fini_env != NULL) { + if (ccc_inode_fini_env) { cl_env_put(ccc_inode_fini_env, &dummy_refcheck); ccc_inode_fini_env = NULL; } @@ -327,8 +327,8 @@ struct lu_object *ccc_object_alloc(const struct lu_env *env, struct ccc_object *vob; struct lu_object *obj; - vob = kmem_cache_alloc(ccc_object_kmem, GFP_NOFS | __GFP_ZERO); - if (vob != NULL) { + vob = kmem_cache_zalloc(ccc_object_kmem, GFP_NOFS); + if (vob) { struct cl_object_header *hdr; obj = ccc2lu(vob); @@ -365,7 +365,7 @@ int ccc_object_init(const struct lu_env *env, struct lu_object *obj, under = &dev->cdv_next->cd_lu_dev; below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); - if (below != NULL) { + if (below) { const struct cl_object_conf *cconf; cconf = lu2cl_conf(conf); @@ -396,8 +396,8 @@ int ccc_lock_init(const struct lu_env *env, CLOBINVRNT(env, obj, ccc_object_invariant(obj)); - clk = kmem_cache_alloc(ccc_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (clk != NULL) { + clk = kmem_cache_zalloc(ccc_lock_kmem, GFP_NOFS); + if (clk) { cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops); result = 0; } else @@ -613,7 +613,8 @@ void ccc_lock_state(const struct lu_env *env, * stale i_size when doing appending writes and effectively * cancel the result of the truncate. Getting the * ll_inode_size_lock() after the enqueue maintains the DLM - * -> ll_inode_size_lock() acquiring order. */ + * -> ll_inode_size_lock() acquiring order. 
+ */ if (lock->cll_descr.cld_start == 0 && lock->cll_descr.cld_end == CL_PAGE_EOF) cl_merge_lvb(env, inode); @@ -660,7 +661,7 @@ void ccc_io_update_iov(const struct lu_env *env, { size_t size = io->u.ci_rw.crw_count; - if (!cl_is_normalio(env, io) || cio->cui_iter == NULL) + if (!cl_is_normalio(env, io) || !cio->cui_iter) return; iov_iter_truncate(cio->cui_iter, size); @@ -749,16 +750,17 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj, */ ccc_object_size_unlock(obj); result = cl_glimpse_lock(env, io, inode, obj, 0); - if (result == 0 && exceed != NULL) { + if (result == 0 && exceed) { /* If objective page index exceed end-of-file * page index, return directly. Do not expect * kernel will check such case correctly. * linux-2.6.18-128.1.1 miss to do that. - * --bug 17336 */ + * --bug 17336 + */ loff_t size = cl_isize_read(inode); - loff_t cur_index = start >> PAGE_CACHE_SHIFT; + loff_t cur_index = start >> PAGE_SHIFT; loff_t size_index = (size - 1) >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; if ((size == 0 && cur_index != 0) || size_index < cur_index) @@ -884,7 +886,8 @@ again: if (attr->ia_valid & ATTR_FILE) /* populate the file descriptor for ftruncate to honor - * group lock - see LU-787 */ + * group lock - see LU-787 + */ cio->cui_fd = cl_iattr2fd(inode, attr); result = cl_io_loop(env, io); @@ -896,7 +899,8 @@ again: goto again; /* HSM import case: file is released, cannot be restored * no need to fail except if restore registration failed - * with -ENODATA */ + * with -ENODATA + */ if (result == -ENODATA && io->ci_restore_needed && io->ci_result != -ENODATA) result = 0; @@ -986,17 +990,6 @@ struct inode *ccc_object_inode(const struct cl_object *obj) } /** - * Returns a pointer to cl_page associated with \a vmpage, without acquiring - * additional reference to the resulting page. This is an unsafe version of - * cl_vmpage_page() that can only be used under vmpage lock. 
- */ -struct cl_page *ccc_vmpage_page_transient(struct page *vmpage) -{ - KLASSERT(PageLocked(vmpage)); - return (struct cl_page *)vmpage->private; -} - -/** * Initialize or update CLIO structures for regular files when new * meta-data arrives from the server. * @@ -1033,11 +1026,12 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md) fid = &lli->lli_fid; LASSERT(fid_is_sane(fid)); - if (lli->lli_clob == NULL) { + if (!lli->lli_clob) { /* clob is slave of inode, empty lli_clob means for new inode, * there is no clob in cache with the given fid, so it is * unnecessary to perform lookup-alloc-lookup-insert, just - * alloc and insert directly. */ + * alloc and insert directly. + */ LASSERT(inode->i_state & I_NEW); conf.coc_lu.loc_flags = LOC_F_NEW; clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev), @@ -1109,7 +1103,7 @@ void cl_inode_fini(struct inode *inode) int refcheck; int emergency; - if (clob != NULL) { + if (clob) { void *cookie; cookie = cl_env_reenter(); @@ -1117,7 +1111,7 @@ void cl_inode_fini(struct inode *inode) emergency = IS_ERR(env); if (emergency) { mutex_lock(&ccc_inode_fini_guard); - LASSERT(ccc_inode_fini_env != NULL); + LASSERT(ccc_inode_fini_env); cl_env_implant(ccc_inode_fini_env, &refcheck); env = ccc_inode_fini_env; } @@ -1162,7 +1156,8 @@ __u16 ll_dirent_type_get(struct lu_dirent *ent) } /** - * build inode number from passed @fid */ + * build inode number from passed @fid + */ __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32) { if (BITS_PER_LONG == 32 || api32) @@ -1173,7 +1168,8 @@ __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32) /** * build inode generation from passed @fid. If our FID overflows the 32-bit - * inode number then return a non-zero generation to distinguish them. */ + * inode number then return a non-zero generation to distinguish them. 
+ */ __u32 cl_fid_build_gen(const struct lu_fid *fid) { __u32 gen; @@ -1194,7 +1190,8 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid) * have to wait for the refcount to become zero to destroy the older layout. * * Notice that the lsm returned by this function may not be valid unless called - * inside layout lock - MDS_INODELOCK_LAYOUT. */ + * inside layout lock - MDS_INODELOCK_LAYOUT. + */ struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode) { return lov_lsm_get(cl_i2info(inode)->lli_clob); diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c index 8389a0eda..d80bcedd7 100644 --- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c +++ b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c @@ -48,7 +48,8 @@ /* Initialize the default and maximum LOV EA and cookie sizes. This allows * us to make MDS RPCs with large enough reply buffers to hold the * maximum-sized (= maximum striped) EA and cookie without having to - * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */ + * calculate this (via a call into the LOV + OSCs) each time we make an RPC. + */ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp) { struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 }; @@ -74,7 +75,8 @@ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp) cookiesize = stripes * sizeof(struct llog_cookie); /* default cookiesize is 0 because from 2.4 server doesn't send - * llog cookies to client. */ + * llog cookies to client. 
+ */ CDEBUG(D_HA, "updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n", def_easize, easize, cookiesize); diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c index a2ea8e5b9..323060626 100644 --- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c +++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c @@ -49,13 +49,11 @@ enum { static inline int node_is_left_child(struct interval_node *node) { - LASSERT(node->in_parent != NULL); return node == node->in_parent->in_left; } static inline int node_is_right_child(struct interval_node *node) { - LASSERT(node->in_parent != NULL); return node == node->in_parent->in_right; } @@ -135,7 +133,8 @@ static void __rotate_change_maxhigh(struct interval_node *node, /* The left rotation "pivots" around the link from node to node->right, and * - node will be linked to node->right's left child, and - * - node->right's left child will be linked to node's right child. */ + * - node->right's left child will be linked to node's right child. + */ static void __rotate_left(struct interval_node *node, struct interval_node **root) { @@ -164,7 +163,8 @@ static void __rotate_left(struct interval_node *node, /* The right rotation "pivots" around the link from node to node->left, and * - node will be linked to node->left's right child, and - * - node->left's right child will be linked to node's left child. */ + * - node->left's right child will be linked to node's left child. + */ static void __rotate_right(struct interval_node *node, struct interval_node **root) { diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c index 9c70f31ea..a803e200f 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c @@ -62,7 +62,8 @@ * is the "highest lock". This function returns the new KMS value. * Caller must hold lr_lock already. 
* - * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */ + * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! + */ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) { struct ldlm_resource *res = lock->l_resource; @@ -72,7 +73,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) /* don't let another thread in ldlm_extent_shift_kms race in * just after we finish and take our lock into account in its - * calculation of the kms */ + * calculation of the kms + */ lock->l_flags |= LDLM_FL_KMS_IGNORE; list_for_each(tmp, &res->lr_granted) { @@ -85,7 +87,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) return old_kms; /* This extent _has_ to be smaller than old_kms (checked above) - * so kms can only ever be smaller or the same as old_kms. */ + * so kms can only ever be smaller or the same as old_kms. + */ if (lck->l_policy_data.l_extent.end + 1 > kms) kms = lck->l_policy_data.l_extent.end + 1; } @@ -112,8 +115,8 @@ struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock) struct ldlm_interval *node; LASSERT(lock->l_resource->lr_type == LDLM_EXTENT); - node = kmem_cache_alloc(ldlm_interval_slab, GFP_NOFS | __GFP_ZERO); - if (node == NULL) + node = kmem_cache_zalloc(ldlm_interval_slab, GFP_NOFS); + if (!node) return NULL; INIT_LIST_HEAD(&node->li_group); @@ -134,7 +137,7 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l) { struct ldlm_interval *n = l->l_tree_node; - if (n == NULL) + if (!n) return NULL; LASSERT(!list_empty(&n->li_group)); @@ -144,7 +147,7 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l) return list_empty(&n->li_group) ? 
n : NULL; } -static inline int lock_mode_to_index(ldlm_mode_t mode) +static inline int lock_mode_to_index(enum ldlm_mode mode) { int index; @@ -168,7 +171,7 @@ void ldlm_extent_add_lock(struct ldlm_resource *res, LASSERT(lock->l_granted_mode == lock->l_req_mode); node = lock->l_tree_node; - LASSERT(node != NULL); + LASSERT(node); LASSERT(!interval_is_intree(&node->li_node)); idx = lock_mode_to_index(lock->l_granted_mode); @@ -185,14 +188,14 @@ void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_interval *tmp; tmp = ldlm_interval_detach(lock); - LASSERT(tmp != NULL); ldlm_interval_free(tmp); ldlm_interval_attach(to_ldlm_interval(found), lock); } res->lr_itree[idx].lit_size++; /* even though we use interval tree to manage the extent lock, we also - * add the locks into grant list, for debug purpose, .. */ + * add the locks into grant list, for debug purpose, .. + */ ldlm_resource_add_lock(res, &res->lr_granted, lock); } @@ -211,7 +214,7 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock) LASSERT(lock->l_granted_mode == 1 << idx); tree = &res->lr_itree[idx]; - LASSERT(tree->lit_root != NULL); /* assure the tree is not null */ + LASSERT(tree->lit_root); /* assure the tree is not null */ tree->lit_size--; node = ldlm_interval_detach(lock); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c index 4310154e1..b88b78606 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c @@ -92,7 +92,7 @@ ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new) } static inline void -ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags) +ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags) { LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)", mode, flags); @@ -107,7 +107,8 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags) lock->l_flags |= LDLM_FL_LOCAL_ONLY | 
LDLM_FL_CBPENDING; /* when reaching here, it is under lock_res_and_lock(). Thus, - need call the nolock version of ldlm_lock_decref_internal*/ + * need call the nolock version of ldlm_lock_decref_internal + */ ldlm_lock_decref_internal_nolock(lock, mode); } @@ -133,7 +134,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags) * would be collected and ASTs sent. */ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, - int first_enq, ldlm_error_t *err, + int first_enq, enum ldlm_error *err, struct list_head *work_list) { struct ldlm_resource *res = req->l_resource; @@ -143,7 +144,7 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, struct ldlm_lock *lock = NULL; struct ldlm_lock *new = req; struct ldlm_lock *new2 = NULL; - ldlm_mode_t mode = req->l_req_mode; + enum ldlm_mode mode = req->l_req_mode; int added = (mode == LCK_NL); int overlaps = 0; int splitted = 0; @@ -159,13 +160,15 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, *err = ELDLM_OK; /* No blocking ASTs are sent to the clients for - * Posix file & record locks */ + * Posix file & record locks + */ req->l_blocking_ast = NULL; reprocess: if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) { /* This loop determines where this processes locks start - * in the resource lr_granted list. */ + * in the resource lr_granted list. + */ list_for_each(tmp, &res->lr_granted) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); @@ -180,7 +183,8 @@ reprocess: lockmode_verify(mode); /* This loop determines if there are existing locks - * that conflict with the new lock request. */ + * that conflict with the new lock request. + */ list_for_each(tmp, &res->lr_granted) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); @@ -238,8 +242,8 @@ reprocess: } /* Scan the locks owned by this process that overlap this request. - * We may have to merge or split existing locks. 
*/ - + * We may have to merge or split existing locks. + */ if (!ownlocks) ownlocks = &res->lr_granted; @@ -253,7 +257,8 @@ reprocess: /* If the modes are the same then we need to process * locks that overlap OR adjoin the new lock. The extra * logic condition is necessary to deal with arithmetic - * overflow and underflow. */ + * overflow and underflow. + */ if ((new->l_policy_data.l_flock.start > (lock->l_policy_data.l_flock.end + 1)) && (lock->l_policy_data.l_flock.end != @@ -327,11 +332,13 @@ reprocess: * with the request but this would complicate the reply * processing since updates to req get reflected in the * reply. The client side replays the lock request so - * it must see the original lock data in the reply. */ + * it must see the original lock data in the reply. + */ /* XXX - if ldlm_lock_new() can sleep we should * release the lr_lock, allocate the new lock, - * and restart processing this lock. */ + * and restart processing this lock. + */ if (!new2) { unlock_res_and_lock(req); new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK, @@ -361,7 +368,7 @@ reprocess: lock->l_policy_data.l_flock.start = new->l_policy_data.l_flock.end + 1; new2->l_conn_export = lock->l_conn_export; - if (lock->l_export != NULL) { + if (lock->l_export) { new2->l_export = class_export_lock_get(lock->l_export, new2); if (new2->l_export->exp_lock_hash && @@ -381,7 +388,7 @@ reprocess: } /* if new2 is created but never used, destroy it*/ - if (splitted == 0 && new2 != NULL) + if (splitted == 0 && new2) ldlm_lock_destroy_nolock(new2); /* At this point we're granting the lock request. */ @@ -396,7 +403,8 @@ reprocess: if (*flags != LDLM_FL_WAIT_NOREPROC) { /* The only one possible case for client-side calls flock * policy function is ldlm_flock_completion_ast inside which - * carries LDLM_FL_WAIT_NOREPROC flag. */ + * carries LDLM_FL_WAIT_NOREPROC flag. 
+ */ CERROR("Illegal parameter for client-side-only module.\n"); LBUG(); } @@ -404,7 +412,8 @@ reprocess: /* In case we're reprocessing the requested lock we can't destroy * it until after calling ldlm_add_ast_work_item() above so that laawi() * can bump the reference count on \a req. Otherwise \a req - * could be freed before the completion AST can be sent. */ + * could be freed before the completion AST can be sent. + */ if (added) ldlm_flock_destroy(req, mode, *flags); @@ -449,7 +458,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) struct obd_import *imp = NULL; struct ldlm_flock_wait_data fwd; struct l_wait_info lwi; - ldlm_error_t err; + enum ldlm_error err; int rc = 0; CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n", @@ -458,12 +467,12 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) /* Import invalidation. We need to actually release the lock * references being held, so that it can go away. No point in * holding the lock even if app still believes it has it, since - * server already dropped it anyway. Only for granted locks too. */ + * server already dropped it anyway. Only for granted locks too. 
+ */ if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) == (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) { if (lock->l_req_mode == lock->l_granted_mode && - lock->l_granted_mode != LCK_NL && - data == NULL) + lock->l_granted_mode != LCK_NL && !data) ldlm_lock_decref_internal(lock, lock->l_req_mode); /* Need to wake up the waiter if we were evicted */ @@ -475,7 +484,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV))) { - if (data == NULL) + if (!data) /* mds granted the lock in the reply */ goto granted; /* CP AST RPC: lock get granted, wake it up */ @@ -488,10 +497,10 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data) obd = class_exp2obd(lock->l_conn_export); /* if this is a local lock, there is no import */ - if (obd != NULL) + if (obd) imp = obd->u.cli.cl_import; - if (imp != NULL) { + if (imp) { spin_lock(&imp->imp_lock); fwd.fwd_generation = imp->imp_generation; spin_unlock(&imp->imp_lock); @@ -540,7 +549,8 @@ granted: } else if (flags & LDLM_FL_TEST_LOCK) { /* fcntl(F_GETLK) request */ /* The old mode was saved in getlk->fl_type so that if the mode - * in the lock changes we can decref the appropriate refcount.*/ + * in the lock changes we can decref the appropriate refcount. + */ ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC); switch (lock->l_granted_mode) { case LCK_PR: @@ -559,7 +569,8 @@ granted: __u64 noreproc = LDLM_FL_WAIT_NOREPROC; /* We need to reprocess the lock to do merges or splits - * with existing locks owned by this process. */ + * with existing locks owned by this process. 
+ */ ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL); } unlock_res_and_lock(lock); @@ -576,7 +587,8 @@ void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy, lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid; /* Compat code, old clients had no idea about owner field and * relied solely on pid for ownership. Introduced in LU-104, 2.1, - * April 2011 */ + * April 2011 + */ lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid; } diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h index 849cc98df..e21373e73 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h @@ -96,14 +96,15 @@ enum { LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */ LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */ LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither - * sending nor waiting for any rpcs) */ + * sending nor waiting for any rpcs) + */ }; int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, - ldlm_cancel_flags_t sync, int flags); + enum ldlm_cancel_flags sync, int flags); int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels, int count, int max, - ldlm_cancel_flags_t cancel_flags, int flags); + enum ldlm_cancel_flags cancel_flags, int flags); extern int ldlm_enqueue_min; /* ldlm_resource.c */ @@ -133,11 +134,11 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, enum req_location loc, void *data, int size); struct ldlm_lock * ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *, - ldlm_type_t type, ldlm_mode_t, + enum ldlm_type type, enum ldlm_mode mode, const struct ldlm_callback_suite *cbs, void *data, __u32 lvb_len, enum lvb_type lvb_type); -ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **, - void *cookie, __u64 *flags); +enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock 
**, + void *cookie, __u64 *flags); void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode); void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode); void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode); @@ -154,7 +155,7 @@ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct list_head *cancels, int count, - ldlm_cancel_flags_t cancel_flags); + enum ldlm_cancel_flags cancel_flags); void ldlm_handle_bl_callback(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c index 3c8d4413d..7dd7df59a 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c @@ -219,7 +219,8 @@ EXPORT_SYMBOL(client_import_find_conn); void client_destroy_import(struct obd_import *imp) { /* Drop security policy instance after all RPCs have finished/aborted - * to let all busy contexts be released. */ + * to let all busy contexts be released. + */ class_import_get(imp); class_destroy_import(imp); sptlrpc_import_sec_put(imp); @@ -227,29 +228,6 @@ void client_destroy_import(struct obd_import *imp) } EXPORT_SYMBOL(client_destroy_import); -/** - * Check whether or not the OSC is on MDT. - * In the config log, - * osc on MDT - * setup 0:{fsname}-OSTxxxx-osc[-MDTxxxx] 1:lustre-OST0000_UUID 2:NID - * osc on client - * setup 0:{fsname}-OSTxxxx-osc 1:lustre-OST0000_UUID 2:NID - * - **/ -static int osc_on_mdt(char *obdname) -{ - char *ptr; - - ptr = strrchr(obdname, '-'); - if (ptr == NULL) - return 0; - - if (strncmp(ptr + 1, "MDT", 3) == 0) - return 1; - - return 0; -} - /* Configure an RPC client OBD device. 
* * lcfg parameters: @@ -264,11 +242,12 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) struct obd_uuid server_uuid; int rq_portal, rp_portal, connect_op; char *name = obddev->obd_type->typ_name; - ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN; + enum ldlm_ns_type ns_type = LDLM_NS_TYPE_UNKNOWN; int rc; /* In a more perfect world, we would hang a ptlrpc_client off of - * obd_type and just use the values from there. */ + * obd_type and just use the values from there. + */ if (!strcmp(name, LUSTRE_OSC_NAME)) { rq_portal = OST_REQUEST_PORTAL; rp_portal = OSC_REPLY_PORTAL; @@ -284,22 +263,6 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) cli->cl_sp_me = LUSTRE_SP_CLI; cli->cl_sp_to = LUSTRE_SP_MDT; ns_type = LDLM_NS_TYPE_MDC; - } else if (!strcmp(name, LUSTRE_OSP_NAME)) { - if (strstr(lustre_cfg_buf(lcfg, 1), "OST") == NULL) { - /* OSP_on_MDT for other MDTs */ - connect_op = MDS_CONNECT; - cli->cl_sp_to = LUSTRE_SP_MDT; - ns_type = LDLM_NS_TYPE_MDC; - rq_portal = OUT_PORTAL; - } else { - /* OSP on MDT for OST */ - connect_op = OST_CONNECT; - cli->cl_sp_to = LUSTRE_SP_OST; - ns_type = LDLM_NS_TYPE_OSC; - rq_portal = OST_REQUEST_PORTAL; - } - rp_portal = OSC_REPLY_PORTAL; - cli->cl_sp_me = LUSTRE_SP_CLI; } else if (!strcmp(name, LUSTRE_MGC_NAME)) { rq_portal = MGS_REQUEST_PORTAL; rp_portal = MGC_REPLY_PORTAL; @@ -344,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) cli->cl_avail_grant = 0; /* FIXME: Should limit this for the sum of all cl_dirty_max. 
*/ cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024; - if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8) - cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3); + if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8) + cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3); INIT_LIST_HEAD(&cli->cl_cache_waiters); INIT_LIST_HEAD(&cli->cl_loi_ready_list); INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list); @@ -387,23 +350,21 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) /* This value may be reduced at connect time in * ptlrpc_connect_interpret() . We initialize it to only * 1MB until we know what the performance looks like. - * In the future this should likely be increased. LU-1431 */ + * In the future this should likely be increased. LU-1431 + */ cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES, - LNET_MTU >> PAGE_CACHE_SHIFT); + LNET_MTU >> PAGE_SHIFT); if (!strcmp(name, LUSTRE_MDC_NAME)) { cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) { cli->cl_max_rpcs_in_flight = 2; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) { cli->cl_max_rpcs_in_flight = 3; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) { cli->cl_max_rpcs_in_flight = 4; } else { - if (osc_on_mdt(obddev->obd_name)) - cli->cl_max_rpcs_in_flight = MDS_OSC_MAX_RIF_DEFAULT; - else - cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT; + cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT; } rc = ldlm_get_ref(); if (rc) { @@ -415,7 +376,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) &obddev->obd_ldlm_client); imp = class_new_import(obddev); - if (imp == NULL) { + if (!imp) { rc = -ENOENT; goto 
err_ldlm; } @@ -451,7 +412,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) LDLM_NAMESPACE_CLIENT, LDLM_NAMESPACE_GREEDY, ns_type); - if (obddev->obd_namespace == NULL) { + if (!obddev->obd_namespace) { CERROR("Unable to create client namespace - %s\n", obddev->obd_name); rc = -ENOMEM; @@ -477,7 +438,7 @@ int client_obd_cleanup(struct obd_device *obddev) ldlm_namespace_free_post(obddev->obd_namespace); obddev->obd_namespace = NULL; - LASSERT(obddev->u.cli.cl_import == NULL); + LASSERT(!obddev->u.cli.cl_import); ldlm_put_ref(); return 0; @@ -528,7 +489,7 @@ int client_connect_import(const struct lu_env *env, LASSERT(imp->imp_state == LUSTRE_IMP_DISCON); goto out_ldlm; } - LASSERT(*exp != NULL && (*exp)->exp_connection); + LASSERT(*exp && (*exp)->exp_connection); if (data) { LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) == @@ -587,17 +548,19 @@ int client_disconnect_export(struct obd_export *exp) /* Mark import deactivated now, so we don't try to reconnect if any * of the cleanup RPCs fails (e.g. LDLM cancel, etc). We don't - * fully deactivate the import, or that would drop all requests. */ + * fully deactivate the import, or that would drop all requests. + */ spin_lock(&imp->imp_lock); imp->imp_deactive = 1; spin_unlock(&imp->imp_lock); /* Some non-replayable imports (MDS's OSCs) are pinged, so just * delete it regardless. (It's safe to delete an import that was - * never added.) */ + * never added.) + */ (void)ptlrpc_pinger_del_import(imp); - if (obd->obd_namespace != NULL) { + if (obd->obd_namespace) { /* obd_force == local only */ ldlm_cli_cancel_unused(obd->obd_namespace, NULL, obd->obd_force ? LCF_LOCAL : 0, NULL); @@ -606,7 +569,8 @@ int client_disconnect_export(struct obd_export *exp) } /* There's no need to hold sem while disconnecting an import, - * and it may actually cause deadlock in GSS. */ + * and it may actually cause deadlock in GSS. 
+ */ up_write(&cli->cl_sem); rc = ptlrpc_disconnect_import(imp, 0); down_write(&cli->cl_sem); @@ -615,7 +579,8 @@ int client_disconnect_export(struct obd_export *exp) out_disconnect: /* Use server style - class_disconnect should be always called for - * o_disconnect. */ + * o_disconnect. + */ err = class_disconnect(exp); if (!rc && err) rc = err; @@ -634,7 +599,8 @@ int target_pack_pool_reply(struct ptlrpc_request *req) struct obd_device *obd; /* Check that we still have all structures alive as this may - * be some late RPC at shutdown time. */ + * be some late RPC at shutdown time. + */ if (unlikely(!req->rq_export || !req->rq_export->exp_obd || !exp_connect_lru_resize(req->rq_export))) { lustre_msg_set_slv(req->rq_repmsg, 0); @@ -684,14 +650,14 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) svcpt = req->rq_rqbd->rqbd_svcpt; rs = req->rq_reply_state; - if (rs == NULL || !rs->rs_difficult) { + if (!rs || !rs->rs_difficult) { /* no notifiers */ target_send_reply_msg(req, rc, fail_id); return; } /* must be an export if locks saved */ - LASSERT(req->rq_export != NULL); + LASSERT(req->rq_export); /* req/reply consistent */ LASSERT(rs->rs_svcpt == svcpt); @@ -700,7 +666,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) LASSERT(!rs->rs_scheduled_ever); LASSERT(!rs->rs_handled); LASSERT(!rs->rs_on_net); - LASSERT(rs->rs_export == NULL); + LASSERT(!rs->rs_export); LASSERT(list_empty(&rs->rs_obd_list)); LASSERT(list_empty(&rs->rs_exp_list)); @@ -739,7 +705,8 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) * reply ref until ptlrpc_handle_rs() is done * with the reply state (if the send was successful, there * would have been +1 ref for the net, which - * reply_out_callback leaves alone) */ + * reply_out_callback leaves alone) + */ rs->rs_on_net = 0; ptlrpc_rs_addref(rs); } @@ -760,7 +727,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id) } 
EXPORT_SYMBOL(target_send_reply); -ldlm_mode_t lck_compat_array[] = { +enum ldlm_mode lck_compat_array[] = { [LCK_EX] = LCK_COMPAT_EX, [LCK_PW] = LCK_COMPAT_PW, [LCK_PR] = LCK_COMPAT_PR, @@ -775,7 +742,7 @@ ldlm_mode_t lck_compat_array[] = { * Rather arbitrary mapping from LDLM error codes to errno values. This should * not escape to the user level. */ -int ldlm_error2errno(ldlm_error_t error) +int ldlm_error2errno(enum ldlm_error error) { int result; @@ -803,7 +770,7 @@ int ldlm_error2errno(ldlm_error_t error) break; default: if (((int)error) < 0) /* cast to signed type */ - result = error; /* as ldlm_error_t can be unsigned */ + result = error; /* as enum ldlm_error can be unsigned */ else { CERROR("Invalid DLM result code: %d\n", error); result = -EPROTO; diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c index cf9ec0cfe..ecd65a7a3 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c @@ -91,7 +91,7 @@ static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = { /** * Converts lock policy from local format to on the wire lock_desc format */ -static void ldlm_convert_policy_to_wire(ldlm_type_t type, +static void ldlm_convert_policy_to_wire(enum ldlm_type type, const ldlm_policy_data_t *lpolicy, ldlm_wire_policy_data_t *wpolicy) { @@ -105,7 +105,7 @@ static void ldlm_convert_policy_to_wire(ldlm_type_t type, /** * Converts lock policy from on the wire lock_desc format to local format */ -void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type, +void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type, const ldlm_wire_policy_data_t *wpolicy, ldlm_policy_data_t *lpolicy) { @@ -326,9 +326,11 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock) if (lock->l_export && lock->l_export->exp_lock_hash) { /* NB: it's safe to call cfs_hash_del() even lock isn't - * in exp_lock_hash. 
*/ + * in exp_lock_hash. + */ /* In the function below, .hs_keycmp resolves to - * ldlm_export_lock_keycmp() */ + * ldlm_export_lock_keycmp() + */ /* coverity[overrun-buffer-val] */ cfs_hash_del(lock->l_export->exp_lock_hash, &lock->l_remote_handle, &lock->l_exp_hash); @@ -337,16 +339,6 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock) ldlm_lock_remove_from_lru(lock); class_handle_unhash(&lock->l_handle); -#if 0 - /* Wake anyone waiting for this lock */ - /* FIXME: I should probably add yet another flag, instead of using - * l_export to only call this on clients */ - if (lock->l_export) - class_export_put(lock->l_export); - lock->l_export = NULL; - if (lock->l_export && lock->l_completion_ast) - lock->l_completion_ast(lock, 0); -#endif return 1; } @@ -412,11 +404,10 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource) { struct ldlm_lock *lock; - if (resource == NULL) - LBUG(); + LASSERT(resource); - lock = kmem_cache_alloc(ldlm_lock_slab, GFP_NOFS | __GFP_ZERO); - if (lock == NULL) + lock = kmem_cache_zalloc(ldlm_lock_slab, GFP_NOFS); + if (!lock) return NULL; spin_lock_init(&lock->l_lock); @@ -485,7 +476,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock, unlock_res_and_lock(lock); newres = ldlm_resource_get(ns, NULL, new_resid, type, 1); - if (newres == NULL) + if (!newres) return -ENOMEM; lu_ref_add(&newres->lr_reference, "lock", lock); @@ -547,11 +538,12 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle, LASSERT(handle); lock = class_handle2object(handle->cookie); - if (lock == NULL) + if (!lock) return NULL; /* It's unlikely but possible that someone marked the lock as - * destroyed after we did handle2object on it */ + * destroyed after we did handle2object on it + */ if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) { lu_ref_add(&lock->l_reference, "handle", current); return lock; @@ -559,7 +551,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct 
lustre_handle *handle, lock_res_and_lock(lock); - LASSERT(lock->l_resource != NULL); + LASSERT(lock->l_resource); lu_ref_add_atomic(&lock->l_reference, "handle", current); if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) { @@ -611,13 +603,14 @@ static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new, LDLM_DEBUG(lock, "lock incompatible; sending blocking AST."); lock->l_flags |= LDLM_FL_AST_SENT; /* If the enqueuing client said so, tell the AST recipient to - * discard dirty data, rather than writing back. */ + * discard dirty data, rather than writing back. + */ if (new->l_flags & LDLM_FL_AST_DISCARD_DATA) lock->l_flags |= LDLM_FL_DISCARD_DATA; LASSERT(list_empty(&lock->l_bl_ast)); list_add(&lock->l_bl_ast, work_list); LDLM_LOCK_GET(lock); - LASSERT(lock->l_blocking_lock == NULL); + LASSERT(!lock->l_blocking_lock); lock->l_blocking_lock = LDLM_LOCK_GET(new); } } @@ -664,7 +657,7 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode) struct ldlm_lock *lock; lock = ldlm_handle2lock(lockh); - LASSERT(lock != NULL); + LASSERT(lock); ldlm_lock_addref_internal(lock, mode); LDLM_LOCK_PUT(lock); } @@ -708,7 +701,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode) result = -EAGAIN; lock = ldlm_handle2lock(lockh); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); if (lock->l_readers != 0 || lock->l_writers != 0 || !(lock->l_flags & LDLM_FL_CBPENDING)) { @@ -780,7 +773,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) if (lock->l_flags & LDLM_FL_LOCAL && !lock->l_readers && !lock->l_writers) { /* If this is a local lock on a server namespace and this was - * the last reference, cancel the lock. */ + * the last reference, cancel the lock. 
+ */ CDEBUG(D_INFO, "forcing cancel of local lock\n"); lock->l_flags |= LDLM_FL_CBPENDING; } @@ -788,7 +782,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) if (!lock->l_readers && !lock->l_writers && (lock->l_flags & LDLM_FL_CBPENDING)) { /* If we received a blocked AST and this was the last reference, - * run the callback. */ + * run the callback. + */ LDLM_DEBUG(lock, "final decref done on cbpending lock"); @@ -809,7 +804,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) LDLM_DEBUG(lock, "add lock into lru list"); /* If this is a client-side namespace and this was the last - * reference, put it on the LRU. */ + * reference, put it on the LRU. + */ ldlm_lock_add_to_lru(lock); unlock_res_and_lock(lock); @@ -818,7 +814,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode) /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE * are not supported by the server, otherwise, it is done on - * enqueue. */ + * enqueue. + */ if (!exp_connect_cancelset(lock->l_conn_export) && !ns_connect_lru_resize(ns)) ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0); @@ -835,7 +832,7 @@ void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode) { struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); - LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie); + LASSERTF(lock, "Non-existing lock: %#llx\n", lockh->cookie); ldlm_lock_decref_internal(lock, mode); LDLM_LOCK_PUT(lock); } @@ -852,7 +849,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode) { struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0); - LASSERT(lock != NULL); + LASSERT(lock); LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]); lock_res_and_lock(lock); @@ -893,8 +890,7 @@ static void search_granted_lock(struct list_head *queue, list_for_each(tmp, queue) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); - mode_end = list_entry(lock->l_sl_mode.prev, - struct ldlm_lock, l_sl_mode); + mode_end = 
list_prev_entry(lock, l_sl_mode); if (lock->l_req_mode != req->l_req_mode) { /* jump to last lock of mode group */ @@ -914,14 +910,13 @@ static void search_granted_lock(struct list_head *queue, if (lock->l_resource->lr_type == LDLM_IBITS) { for (;;) { policy_end = - list_entry(lock->l_sl_policy.prev, - struct ldlm_lock, - l_sl_policy); + list_prev_entry(lock, l_sl_policy); if (lock->l_policy_data.l_inodebits.bits == req->l_policy_data.l_inodebits.bits) { /* insert point is last lock of - * the policy group */ + * the policy group + */ prev->res_link = &policy_end->l_res_link; prev->mode_link = @@ -942,7 +937,8 @@ static void search_granted_lock(struct list_head *queue, } /* loop over policy groups within the mode group */ /* insert point is last lock of the mode group, - * new policy group is started */ + * new policy group is started + */ prev->res_link = &mode_end->l_res_link; prev->mode_link = &mode_end->l_sl_mode; prev->policy_link = &req->l_sl_policy; @@ -954,7 +950,8 @@ static void search_granted_lock(struct list_head *queue, } /* insert point is last lock on the queue, - * new mode group and new policy group are started */ + * new mode group and new policy group are started + */ prev->res_link = queue->prev; prev->mode_link = &req->l_sl_mode; prev->policy_link = &req->l_sl_policy; @@ -1034,10 +1031,7 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) else ldlm_resource_add_lock(res, &res->lr_granted, lock); - if (lock->l_granted_mode < res->lr_most_restr) - res->lr_most_restr = lock->l_granted_mode; - - if (work_list && lock->l_completion_ast != NULL) + if (work_list && lock->l_completion_ast) ldlm_add_ast_work_item(lock, NULL, work_list); ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock); @@ -1050,7 +1044,7 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list) * comment above ldlm_lock_match */ static struct ldlm_lock *search_queue(struct list_head *queue, - ldlm_mode_t *mode, + enum ldlm_mode *mode, 
ldlm_policy_data_t *policy, struct ldlm_lock *old_lock, __u64 flags, int unref) @@ -1059,7 +1053,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue, struct list_head *tmp; list_for_each(tmp, queue) { - ldlm_mode_t match; + enum ldlm_mode match; lock = list_entry(tmp, struct ldlm_lock, l_res_link); @@ -1067,7 +1061,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue, break; /* Check if this lock can be matched. - * Used by LU-2919(exclusive open) for open lease lock */ + * Used by LU-2919(exclusive open) for open lease lock + */ if (ldlm_is_excl(lock)) continue; @@ -1076,7 +1071,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue, * if it passes in CBPENDING and the lock still has users. * this is generally only going to be used by children * whose parents already hold a lock so forward progress - * can still happen. */ + * can still happen. + */ if (lock->l_flags & LDLM_FL_CBPENDING && !(flags & LDLM_FL_CBPENDING)) continue; @@ -1100,7 +1096,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue, continue; /* We match if we have existing lock with same or wider set - of bits. */ + * of bits. + */ if (lock->l_resource->lr_type == LDLM_IBITS && ((lock->l_policy_data.l_inodebits.bits & policy->l_inodebits.bits) != @@ -1192,16 +1189,18 @@ EXPORT_SYMBOL(ldlm_lock_allow_match); * keep caller code unchanged), the context failure will be discovered by * caller sometime later. 
*/ -ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, - const struct ldlm_res_id *res_id, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh, int unref) +enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, + const struct ldlm_res_id *res_id, + enum ldlm_type type, + ldlm_policy_data_t *policy, + enum ldlm_mode mode, + struct lustre_handle *lockh, int unref) { struct ldlm_resource *res; struct ldlm_lock *lock, *old_lock = NULL; int rc = 0; - if (ns == NULL) { + if (!ns) { old_lock = ldlm_handle2lock(lockh); LASSERT(old_lock); @@ -1212,8 +1211,8 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, } res = ldlm_resource_get(ns, NULL, res_id, type, 0); - if (res == NULL) { - LASSERT(old_lock == NULL); + if (!res) { + LASSERT(!old_lock); return 0; } @@ -1222,7 +1221,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, lock = search_queue(&res->lr_granted, &mode, policy, old_lock, flags, unref); - if (lock != NULL) { + if (lock) { rc = 1; goto out; } @@ -1232,7 +1231,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, } lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, flags, unref); - if (lock != NULL) { + if (lock) { rc = 1; goto out; } @@ -1317,14 +1316,14 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, } EXPORT_SYMBOL(ldlm_lock_match); -ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, - __u64 *bits) +enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh, + __u64 *bits) { struct ldlm_lock *lock; - ldlm_mode_t mode = 0; + enum ldlm_mode mode = 0; lock = ldlm_handle2lock(lockh); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); if (lock->l_flags & LDLM_FL_GONE_MASK) goto out; @@ -1340,7 +1339,7 @@ ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh, } out: - if (lock != NULL) { + if (lock) { unlock_res_and_lock(lock); 
LDLM_LOCK_PUT(lock); } @@ -1354,7 +1353,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, { void *lvb; - LASSERT(data != NULL); + LASSERT(data); LASSERT(size >= 0); switch (lock->l_lvb_type) { @@ -1368,7 +1367,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, lvb = req_capsule_server_swab_get(pill, &RMF_DLM_LVB, lustre_swab_ost_lvb); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1385,7 +1384,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, lvb = req_capsule_server_sized_swab_get(pill, &RMF_DLM_LVB, size, lustre_swab_ost_lvb_v1); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1410,7 +1409,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, lvb = req_capsule_server_swab_get(pill, &RMF_DLM_LVB, lustre_swab_lquota_lvb); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1431,7 +1430,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, lvb = req_capsule_client_get(pill, &RMF_DLM_LVB); else lvb = req_capsule_server_get(pill, &RMF_DLM_LVB); - if (unlikely(lvb == NULL)) { + if (unlikely(!lvb)) { LDLM_ERROR(lock, "no LVB"); return -EPROTO; } @@ -1453,8 +1452,8 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill, */ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, - ldlm_type_t type, - ldlm_mode_t mode, + enum ldlm_type type, + enum ldlm_mode mode, const struct ldlm_callback_suite *cbs, void *data, __u32 lvb_len, enum lvb_type lvb_type) @@ -1463,12 +1462,12 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, struct ldlm_resource *res; res = ldlm_resource_get(ns, NULL, res_id, type, 1); - if (res == NULL) + if (!res) return NULL; lock = ldlm_lock_new(res); - if (lock == NULL) + if (!lock) return NULL; lock->l_req_mode = 
mode; @@ -1483,7 +1482,7 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, lock->l_tree_node = NULL; /* if this is the extent lock, allocate the interval tree node */ if (type == LDLM_EXTENT) { - if (ldlm_interval_alloc(lock) == NULL) + if (!ldlm_interval_alloc(lock)) goto out; } @@ -1514,9 +1513,9 @@ out: * Does not block. As a result of enqueue the lock would be put * into granted or waiting list. */ -ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, - struct ldlm_lock **lockp, - void *cookie, __u64 *flags) +enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns, + struct ldlm_lock **lockp, + void *cookie, __u64 *flags) { struct ldlm_lock *lock = *lockp; struct ldlm_resource *res = lock->l_resource; @@ -1527,7 +1526,8 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, if (lock->l_req_mode == lock->l_granted_mode) { /* The server returned a blocked lock, but it was granted * before we got a chance to actually enqueue it. We don't - * need to do anything else. */ + * need to do anything else. + */ *flags &= ~(LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT); goto out; @@ -1540,7 +1540,8 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, LBUG(); /* Some flags from the enqueue want to make it into the AST, via the - * lock's l_flags. */ + * lock's l_flags. + */ lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA; /* @@ -1621,19 +1622,21 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq) * This can't happen with the blocking_ast, however, because we * will never call the local blocking_ast until we drop our * reader/writer reference, which we won't do until we get the - * reply and finish enqueueing. */ + * reply and finish enqueueing. 
+ */ /* nobody should touch l_cp_ast */ lock_res_and_lock(lock); list_del_init(&lock->l_cp_ast); LASSERT(lock->l_flags & LDLM_FL_CP_REQD); /* save l_completion_ast since it can be changed by - * mds_intent_policy(), see bug 14225 */ + * mds_intent_policy(), see bug 14225 + */ completion_callback = lock->l_completion_ast; lock->l_flags &= ~LDLM_FL_CP_REQD; unlock_res_and_lock(lock); - if (completion_callback != NULL) + if (completion_callback) rc = completion_callback(lock, 0, (void *)arg); LDLM_LOCK_RELEASE(lock); @@ -1749,10 +1752,11 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list, /* We create a ptlrpc request set with flow control extension. * This request set will use the work_ast_lock function to produce new * requests and will send a new request each time one completes in order - * to keep the number of requests in flight to ns_max_parallel_ast */ + * to keep the number of requests in flight to ns_max_parallel_ast + */ arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX, work_ast_lock, arg); - if (arg->set == NULL) { + if (!arg->set) { rc = -ENOMEM; goto out; } @@ -1815,7 +1819,8 @@ void ldlm_lock_cancel(struct ldlm_lock *lock) ns = ldlm_res_to_ns(res); /* Please do not, no matter how tempting, remove this LBUG without - * talking to me first. -phik */ + * talking to me first. 
-phik + */ if (lock->l_readers || lock->l_writers) { LDLM_ERROR(lock, "lock still has references"); LBUG(); @@ -1831,7 +1836,8 @@ void ldlm_lock_cancel(struct ldlm_lock *lock) ldlm_pool_del(&ns->ns_pool, lock); /* Make sure we will not be called again for same lock what is possible - * if not to zero out lock->l_granted_mode */ + * if not to zero out lock->l_granted_mode + */ lock->l_granted_mode = LCK_MINMODE; unlock_res_and_lock(lock); } @@ -1846,7 +1852,7 @@ int ldlm_lock_set_data(struct lustre_handle *lockh, void *data) int rc = -EINVAL; if (lock) { - if (lock->l_ast_data == NULL) + if (!lock->l_ast_data) lock->l_ast_data = data; if (lock->l_ast_data == data) rc = 0; @@ -1874,7 +1880,7 @@ void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh) return; lock = ldlm_handle2lock(lockh); - if (lock == NULL) + if (!lock) return; LDLM_DEBUG_LIMIT(level, lock, "###"); @@ -1900,13 +1906,13 @@ void _ldlm_lock_debug(struct ldlm_lock *lock, if (exp && exp->exp_connection) { nid = libcfs_nid2str(exp->exp_connection->c_peer.nid); - } else if (exp && exp->exp_obd != NULL) { + } else if (exp && exp->exp_obd) { struct obd_import *imp = exp->exp_obd->u.cli.cl_import; nid = libcfs_nid2str(imp->imp_connection->c_peer.nid); } - if (resource == NULL) { + if (!resource) { libcfs_debug_vmsg2(msgdata, fmt, args, " ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? 
flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n", lock, diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c index 79aeb2bf6..ebe9042ad 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c @@ -107,7 +107,7 @@ struct ldlm_bl_work_item { struct list_head blwi_head; int blwi_count; struct completion blwi_comp; - ldlm_cancel_flags_t blwi_flags; + enum ldlm_cancel_flags blwi_flags; int blwi_mem_pressure; }; @@ -136,7 +136,7 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns, CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n", lock, lock->l_blocking_ast); - if (lock->l_blocking_ast != NULL) + if (lock->l_blocking_ast) lock->l_blocking_ast(lock, ld, lock->l_ast_data, LDLM_CB_BLOCKING); } else { @@ -185,7 +185,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, } else if (lvb_len > 0) { if (lock->l_lvb_len > 0) { /* for extent lock, lvb contains ost_lvb{}. 
*/ - LASSERT(lock->l_lvb_data != NULL); + LASSERT(lock->l_lvb_data); if (unlikely(lock->l_lvb_len < lvb_len)) { LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d", @@ -194,7 +194,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, goto out; } } else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has - * variable length */ + * variable length + */ void *lvb_data; lvb_data = kzalloc(lvb_len, GFP_NOFS); @@ -205,7 +206,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, } lock_res_and_lock(lock); - LASSERT(lock->l_lvb_data == NULL); + LASSERT(!lock->l_lvb_data); lock->l_lvb_type = LVB_T_LAYOUT; lock->l_lvb_data = lvb_data; lock->l_lvb_len = lvb_len; @@ -224,7 +225,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, } /* If we receive the completion AST before the actual enqueue returned, - * then we might need to switch lock modes, resources, or extents. */ + * then we might need to switch lock modes, resources, or extents. + */ if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) { lock->l_req_mode = dlm_req->lock_desc.l_granted_mode; LDLM_DEBUG(lock, "completion AST, new lock mode"); @@ -256,7 +258,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, if (dlm_req->lock_flags & LDLM_FL_AST_SENT) { /* BL_AST locks are not needed in LRU. - * Let ldlm_cancel_lru() be fast. */ + * Let ldlm_cancel_lru() be fast. 
+ */ ldlm_lock_remove_from_lru(lock); lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; LDLM_DEBUG(lock, "completion AST includes blocking AST"); @@ -276,8 +279,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req, LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work"); - /* Let Enqueue to call osc_lock_upcall() and initialize - * l_ast_data */ + /* Let Enqueue to call osc_lock_upcall() and initialize l_ast_data */ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2); ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST); @@ -312,10 +314,10 @@ static void ldlm_handle_gl_callback(struct ptlrpc_request *req, LDLM_DEBUG(lock, "client glimpse AST callback handler"); - if (lock->l_glimpse_ast != NULL) + if (lock->l_glimpse_ast) rc = lock->l_glimpse_ast(lock, req); - if (req->rq_repmsg != NULL) { + if (req->rq_repmsg) { ptlrpc_reply(req); } else { req->rq_status = rc; @@ -353,7 +355,7 @@ static int ldlm_callback_reply(struct ptlrpc_request *req, int rc) } static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool; @@ -371,7 +373,8 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, wake_up(&blp->blp_waitq); /* can not check blwi->blwi_flags as blwi could be already freed in - LCF_ASYNC mode */ + * LCF_ASYNC mode + */ if (!(cancel_flags & LCF_ASYNC)) wait_for_completion(&blwi->blwi_comp); @@ -383,7 +386,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi, struct ldlm_lock_desc *ld, struct list_head *cancels, int count, struct ldlm_lock *lock, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { init_completion(&blwi->blwi_comp); INIT_LIST_HEAD(&blwi->blwi_head); @@ -393,7 +396,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi, blwi->blwi_ns = ns; blwi->blwi_flags = cancel_flags; - if (ld != NULL) + if (ld) blwi->blwi_ld = *ld; if (count) { 
list_add(&blwi->blwi_head, cancels); @@ -417,7 +420,7 @@ static int ldlm_bl_to_thread(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct ldlm_lock *lock, struct list_head *cancels, int count, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { if (cancels && count == 0) return 0; @@ -451,7 +454,7 @@ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld, struct list_head *cancels, int count, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags); } @@ -470,14 +473,14 @@ static int ldlm_handle_setinfo(struct ptlrpc_request *req) req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO); key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY); - if (key == NULL) { + if (!key) { DEBUG_REQ(D_IOCTL, req, "no set_info key"); return -EFAULT; } keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY, RCL_CLIENT); val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL); - if (val == NULL) { + if (!val) { DEBUG_REQ(D_IOCTL, req, "no set_info val"); return -EFAULT; } @@ -519,7 +522,7 @@ static int ldlm_handle_qc_callback(struct ptlrpc_request *req) struct client_obd *cli = &req->rq_export->exp_obd->u.cli; oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); - if (oqctl == NULL) { + if (!oqctl) { CERROR("Can't unpack obd_quotactl\n"); return -EPROTO; } @@ -541,7 +544,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) /* Requests arrive in sender's byte order. The ptlrpc service * handler has already checked and, if necessary, byte-swapped the * incoming request message body, but I am responsible for the - * message buffers. */ + * message buffers. 
+ */ /* do nothing for sec context finalize */ if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI) @@ -549,15 +553,14 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) req_capsule_init(&req->rq_pill, req, RCL_SERVER); - if (req->rq_export == NULL) { + if (!req->rq_export) { rc = ldlm_callback_reply(req, -ENOTCONN); ldlm_callback_errmsg(req, "Operate on unconnected server", rc, NULL); return 0; } - LASSERT(req->rq_export != NULL); - LASSERT(req->rq_export->exp_obd != NULL); + LASSERT(req->rq_export->exp_obd); switch (lustre_msg_get_opc(req->rq_reqmsg)) { case LDLM_BL_CALLBACK: @@ -591,12 +594,12 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) } ns = req->rq_export->exp_obd->obd_namespace; - LASSERT(ns != NULL); + LASSERT(ns); req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK); dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ); - if (dlm_req == NULL) { + if (!dlm_req) { rc = ldlm_callback_reply(req, -EPROTO); ldlm_callback_errmsg(req, "Operate without parameter", rc, NULL); @@ -604,7 +607,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) } /* Force a known safe race, send a cancel to the server for a lock - * which the server has already started a blocking callback on. */ + * which the server has already started a blocking callback on. + */ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) && lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) { rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0); @@ -634,7 +638,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) /* If somebody cancels lock and cache is already dropped, * or lock is failed before cp_ast received on client, * we can tell the server we have no lock. Otherwise, we - * should send cancel after dropping the cache. */ + * should send cancel after dropping the cache. 
+ */ if (((lock->l_flags & LDLM_FL_CANCELING) && (lock->l_flags & LDLM_FL_BL_DONE)) || (lock->l_flags & LDLM_FL_FAILED)) { @@ -648,7 +653,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) return 0; } /* BL_AST locks are not needed in LRU. - * Let ldlm_cancel_lru() be fast. */ + * Let ldlm_cancel_lru() be fast. + */ ldlm_lock_remove_from_lru(lock); lock->l_flags |= LDLM_FL_BL_AST; } @@ -661,7 +667,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req) * But we'd also like to be able to indicate in the reply that we're * cancelling right now, because it's unused, or have an intent result * in the reply, so we might have to push the responsibility for sending - * the reply down into the AST handlers, alas. */ + * the reply down into the AST handlers, alas. + */ switch (lustre_msg_get_opc(req->rq_reqmsg)) { case LDLM_BL_CALLBACK: @@ -781,17 +788,17 @@ static int ldlm_bl_thread_main(void *arg) blwi = ldlm_bl_get_work(blp); - if (blwi == NULL) { + if (!blwi) { atomic_dec(&blp->blp_busy_threads); l_wait_event_exclusive(blp->blp_waitq, - (blwi = ldlm_bl_get_work(blp)) != NULL, + (blwi = ldlm_bl_get_work(blp)), &lwi); busy = atomic_inc_return(&blp->blp_busy_threads); } else { busy = atomic_read(&blp->blp_busy_threads); } - if (blwi->blwi_ns == NULL) + if (!blwi->blwi_ns) /* added by ldlm_cleanup() */ break; @@ -810,7 +817,8 @@ static int ldlm_bl_thread_main(void *arg) /* The special case when we cancel locks in LRU * asynchronously, we pass the list of locks here. * Thus locks are marked LDLM_FL_CANCELING, but NOT - * canceled locally yet. */ + * canceled locally yet. 
+ */ count = ldlm_cli_cancel_list_local(&blwi->blwi_head, blwi->blwi_count, LCF_BL_AST); @@ -915,7 +923,7 @@ static int ldlm_setup(void) int rc = 0; int i; - if (ldlm_state != NULL) + if (ldlm_state) return -EALREADY; ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS); @@ -1040,7 +1048,7 @@ static int ldlm_cleanup(void) ldlm_pools_fini(); - if (ldlm_state->ldlm_bl_pool != NULL) { + if (ldlm_state->ldlm_bl_pool) { struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool; while (atomic_read(&blp->blp_num_threads) > 0) { @@ -1059,7 +1067,7 @@ static int ldlm_cleanup(void) kfree(blp); } - if (ldlm_state->ldlm_cb_service != NULL) + if (ldlm_state->ldlm_cb_service) ptlrpc_unregister_service(ldlm_state->ldlm_cb_service); if (ldlm_ns_kset) @@ -1085,13 +1093,13 @@ int ldlm_init(void) ldlm_resource_slab = kmem_cache_create("ldlm_resources", sizeof(struct ldlm_resource), 0, SLAB_HWCACHE_ALIGN, NULL); - if (ldlm_resource_slab == NULL) + if (!ldlm_resource_slab) return -ENOMEM; ldlm_lock_slab = kmem_cache_create("ldlm_locks", sizeof(struct ldlm_lock), 0, SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL); - if (ldlm_lock_slab == NULL) { + if (!ldlm_lock_slab) { kmem_cache_destroy(ldlm_resource_slab); return -ENOMEM; } @@ -1099,7 +1107,7 @@ int ldlm_init(void) ldlm_interval_slab = kmem_cache_create("interval_node", sizeof(struct ldlm_interval), 0, SLAB_HWCACHE_ALIGN, NULL); - if (ldlm_interval_slab == NULL) { + if (!ldlm_interval_slab) { kmem_cache_destroy(ldlm_resource_slab); kmem_cache_destroy(ldlm_lock_slab); return -ENOMEM; @@ -1117,7 +1125,8 @@ void ldlm_exit(void) kmem_cache_destroy(ldlm_resource_slab); /* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call * synchronize_rcu() to wait a grace period elapsed, so that - * ldlm_lock_free() get a chance to be called. */ + * ldlm_lock_free() get a chance to be called. 
+ */ synchronize_rcu(); kmem_cache_destroy(ldlm_lock_slab); kmem_cache_destroy(ldlm_interval_slab); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c index 3d7c137d2..b913ba9cf 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c @@ -107,7 +107,7 @@ /* * 50 ldlm locks for 1MB of RAM. */ -#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50) +#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50) /* * Maximal possible grant step plan in %. @@ -246,7 +246,6 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl) */ obd = container_of(pl, struct ldlm_namespace, ns_pool)->ns_obd; - LASSERT(obd != NULL); read_lock(&obd->obd_pool_lock); pl->pl_server_lock_volume = obd->obd_pool_slv; atomic_set(&pl->pl_limit, obd->obd_pool_limit); @@ -381,7 +380,7 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl) spin_unlock(&pl->pl_lock); recalc: - if (pl->pl_ops->po_recalc != NULL) { + if (pl->pl_ops->po_recalc) { count = pl->pl_ops->po_recalc(pl); lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT, count); @@ -409,7 +408,7 @@ static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask) { int cancel = 0; - if (pl->pl_ops->po_shrink != NULL) { + if (pl->pl_ops->po_shrink) { cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask); if (nr > 0) { lprocfs_counter_add(pl->pl_stats, @@ -643,11 +642,11 @@ static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl) static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl) { - if (pl->pl_stats != NULL) { + if (pl->pl_stats) { lprocfs_free_stats(&pl->pl_stats); pl->pl_stats = NULL; } - if (pl->pl_debugfs_entry != NULL) { + if (pl->pl_debugfs_entry) { ldebugfs_remove(&pl->pl_debugfs_entry); pl->pl_debugfs_entry = NULL; } @@ -834,7 +833,7 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask) continue; } - if (ns_old == NULL) + if (!ns_old) ns_old = ns; 
ldlm_namespace_get(ns); @@ -957,7 +956,7 @@ static int ldlm_pools_recalc(ldlm_side_t client) continue; } - if (ns_old == NULL) + if (!ns_old) ns_old = ns; spin_lock(&ns->ns_lock); @@ -1040,7 +1039,7 @@ static int ldlm_pools_thread_start(void) struct l_wait_info lwi = { 0 }; struct task_struct *task; - if (ldlm_pools_thread != NULL) + if (ldlm_pools_thread) return -EALREADY; ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS); @@ -1065,7 +1064,7 @@ static int ldlm_pools_thread_start(void) static void ldlm_pools_thread_stop(void) { - if (ldlm_pools_thread == NULL) + if (!ldlm_pools_thread) return; thread_set_flags(ldlm_pools_thread, SVC_STOPPING); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c index b9eb37762..74e193e52 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c @@ -94,7 +94,7 @@ static int ldlm_expired_completion_wait(void *data) struct obd_import *imp; struct obd_device *obd; - if (lock->l_conn_export == NULL) { + if (!lock->l_conn_export) { static unsigned long next_dump, last_dump; LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n", @@ -128,7 +128,8 @@ static int ldlm_expired_completion_wait(void *data) } /* We use the same basis for both server side and client side functions - from a single node. */ + * from a single node. + */ static int ldlm_get_enq_timeout(struct ldlm_lock *lock) { int timeout = at_get(ldlm_lock_to_ns_at(lock)); @@ -136,8 +137,9 @@ static int ldlm_get_enq_timeout(struct ldlm_lock *lock) if (AT_OFF) return obd_timeout / 2; /* Since these are non-updating timeouts, we should be conservative. - It would be nice to have some kind of "early reply" mechanism for - lock callbacks too... */ + * It would be nice to have some kind of "early reply" mechanism for + * lock callbacks too... 
+ */ timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */ return max(timeout, ldlm_enqueue_min); } @@ -239,12 +241,13 @@ noreproc: obd = class_exp2obd(lock->l_conn_export); /* if this is a local lock, then there is no import */ - if (obd != NULL) + if (obd) imp = obd->u.cli.cl_import; /* Wait a long time for enqueue - server may have to callback a - lock from another client. Server will evict the other client if it - doesn't respond reasonably, and then give us the lock. */ + * lock from another client. Server will evict the other client if it + * doesn't respond reasonably, and then give us the lock. + */ timeout = ldlm_get_enq_timeout(lock) * 2; lwd.lwd_lock = lock; @@ -258,7 +261,7 @@ noreproc: interrupted_completion_wait, &lwd); } - if (imp != NULL) { + if (imp) { spin_lock(&imp->imp_lock); lwd.lwd_conn_cnt = imp->imp_conn_cnt; spin_unlock(&imp->imp_lock); @@ -296,7 +299,8 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns, !(lock->l_flags & LDLM_FL_FAILED)) { /* Make sure that this lock will not be found by raced * bl_ast and -EINVAL reply is sent to server anyways. - * bug 17645 */ + * bug 17645 + */ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING; need_cancel = 1; @@ -312,11 +316,13 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns, ldlm_lock_decref_internal(lock, mode); /* XXX - HACK because we shouldn't call ldlm_lock_destroy() - * from llite/file.c/ll_file_flock(). */ + * from llite/file.c/ll_file_flock(). + */ /* This code makes for the fact that we do not have blocking handler on * a client for flock locks. As such this is the place where we must * completely kill failed locks. (interrupted and those that - * were waiting to be granted when server evicted us. */ + * were waiting to be granted when server evicted us. 
+ */ if (lock->l_resource->lr_type == LDLM_FLOCK) { lock_res_and_lock(lock); ldlm_resource_unlink_lock(lock); @@ -331,7 +337,8 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns, * Called after receiving reply from server. */ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, - ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode, + enum ldlm_type type, __u8 with_policy, + enum ldlm_mode mode, __u64 *flags, void *lvb, __u32 lvb_len, struct lustre_handle *lockh, int rc) { @@ -363,13 +370,13 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, /* Before we return, swab the reply */ reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto cleanup; } if (lvb_len != 0) { - LASSERT(lvb != NULL); + LASSERT(lvb); size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER); @@ -401,7 +408,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, /* Key change rehash lock in per-export hash with new key */ if (exp->exp_lock_hash) { /* In the function below, .hs_keycmp resolves to - * ldlm_export_lock_keycmp() */ + * ldlm_export_lock_keycmp() + */ /* coverity[overrun-buffer-val] */ cfs_hash_rehash_key(exp->exp_lock_hash, &lock->l_remote_handle, @@ -415,7 +423,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & LDLM_INHERIT_FLAGS); /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match() - * to wait with no timeout as well */ + * to wait with no timeout as well + */ lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & LDLM_FL_NO_TIMEOUT); unlock_res_and_lock(lock); @@ -425,7 +434,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, /* If enqueue returned a blocked lock but the completion handler has * already run, then it fixed up the resource and we don't need to do it - * again. 
*/ + * again. + */ if ((*flags) & LDLM_FL_LOCK_CHANGED) { int newmode = reply->lock_desc.l_req_mode; @@ -445,7 +455,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, rc = ldlm_lock_change_resource(ns, lock, &reply->lock_desc.l_resource.lr_name); - if (rc || lock->l_resource == NULL) { + if (rc || !lock->l_resource) { rc = -ENOMEM; goto cleanup; } @@ -467,7 +477,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, if ((*flags) & LDLM_FL_AST_SENT || /* Cancel extent locks as soon as possible on a liblustre client, * because it cannot handle asynchronous ASTs robustly (see - * bug 7311). */ + * bug 7311). + */ (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) { lock_res_and_lock(lock); lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST; @@ -476,12 +487,14 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, } /* If the lock has already been granted by a completion AST, don't - * clobber the LVB with an older one. */ + * clobber the LVB with an older one. + */ if (lvb_len != 0) { /* We must lock or a racing completion might update lvb without * letting us know and we'll clobber the correct value. 
- * Cannot unlock after the check either, a that still leaves - * a tiny window for completion to get in */ + * Cannot unlock after the check either, as that still leaves + * a tiny window for completion to get in + */ lock_res_and_lock(lock); if (lock->l_req_mode != lock->l_granted_mode) rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER, @@ -495,7 +508,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, if (!is_replay) { rc = ldlm_lock_enqueue(ns, &lock, NULL, flags); - if (lock->l_completion_ast != NULL) { + if (lock->l_completion_ast) { int err = lock->l_completion_ast(lock, *flags, NULL); if (!rc) @@ -505,9 +518,10 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req, } } - if (lvb_len && lvb != NULL) { + if (lvb_len && lvb) { /* Copy the LVB here, and not earlier, because the completion - * AST (if any) can override what we got in the reply */ + * AST (if any) can override what we got in the reply + */ memcpy(lvb, lock->l_lvb_data, lvb_len); } @@ -532,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off) { int avail; - avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size; + avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size; if (likely(avail >= 0)) avail /= (int)sizeof(struct lustre_handle); else @@ -579,7 +593,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, LIST_HEAD(head); int rc; - if (cancels == NULL) + if (!cancels) cancels = &head; if (ns_connect_cancelset(ns)) { /* Estimate the amount of available space in the request. */ @@ -593,7 +607,8 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, /* Cancel LRU locks here _only_ if the server supports * EARLY_CANCEL. Otherwise we have to send extra CANCEL - * RPC, which will make us slower. */ + * RPC, which will make us slower. 
+ */ if (avail > count) count += ldlm_cancel_lru_local(ns, cancels, to_free, avail - count, 0, flags); @@ -618,7 +633,8 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, /* Skip first lock handler in ldlm_request_pack(), * this method will increment @lock_count according * to the lock handle amount actually written to - * the buffer. */ + * the buffer. + */ dlm->lock_count = canceloff; } /* Pack into the request @pack lock handles. */ @@ -665,15 +681,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, int rc, err; struct ptlrpc_request *req; - LASSERT(exp != NULL); - ns = exp->exp_obd->obd_namespace; /* If we're replaying this lock, just check some invariants. - * If we're creating a new lock, get everything all setup nice. */ + * If we're creating a new lock, get everything all setup nicely. + */ if (is_replay) { lock = ldlm_handle2lock_long(lockh, 0); - LASSERT(lock != NULL); + LASSERT(lock); LDLM_DEBUG(lock, "client-side enqueue START"); LASSERT(exp == lock->l_conn_export); } else { @@ -685,16 +700,21 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, lock = ldlm_lock_create(ns, res_id, einfo->ei_type, einfo->ei_mode, &cbs, einfo->ei_cbdata, lvb_len, lvb_type); - if (lock == NULL) + if (!lock) return -ENOMEM; /* for the local lock, add the reference */ ldlm_lock_addref_internal(lock, einfo->ei_mode); ldlm_lock2handle(lock, lockh); - if (policy != NULL) - lock->l_policy_data = *policy; + if (policy) + lock->l_policy_data = *policy; + + if (einfo->ei_type == LDLM_EXTENT) { + /* extent lock without policy is a bug */ + if (!policy) + LBUG(); - if (einfo->ei_type == LDLM_EXTENT) lock->l_req_extent = policy->l_extent; + } LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n", *flags); } @@ -706,12 +726,12 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, /* lock not sent to server yet */ - if (reqp == NULL || *reqp == NULL) { + if (!reqp || !*reqp) { 
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION, LDLM_ENQUEUE); - if (req == NULL) { + if (!req) { failed_lock_cleanup(ns, lock, einfo->ei_mode); LDLM_LOCK_RELEASE(lock); return -ENOMEM; @@ -754,7 +774,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, policy->l_extent.end == OBD_OBJECT_EOF)); if (async) { - LASSERT(reqp != NULL); + LASSERT(reqp); return 0; } @@ -767,13 +787,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, lockh, rc); /* If ldlm_cli_enqueue_fini did not find the lock, we need to free - * one reference that we took */ + * one reference that we took + */ if (err == -ENOLCK) LDLM_LOCK_RELEASE(lock); else rc = err; - if (!req_passed_in && req != NULL) { + if (!req_passed_in && req) { ptlrpc_req_finished(req); if (reqp) *reqp = NULL; @@ -832,7 +853,7 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req, int max, packed = 0; dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ); - LASSERT(dlm != NULL); + LASSERT(dlm); /* Check the room in the request buffer. */ max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) - @@ -843,7 +864,8 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req, /* XXX: it would be better to pack lock handles grouped by resource. * so that the server cancel would call filter_lvbo_update() less - * frequently. */ + * frequently. + */ list_for_each_entry(lock, head, l_bl_ast) { if (!count--) break; @@ -858,17 +880,18 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req, /** * Prepare and send a batched cancel RPC. It will include \a count lock - * handles of locks given in \a cancels list. */ + * handles of locks given in \a cancels list. 
+ */ static int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels, - int count, ldlm_cancel_flags_t flags) + int count, enum ldlm_cancel_flags flags) { struct ptlrpc_request *req = NULL; struct obd_import *imp; int free, sent = 0; int rc = 0; - LASSERT(exp != NULL); + LASSERT(exp); LASSERT(count > 0); CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val); @@ -883,14 +906,14 @@ static int ldlm_cli_cancel_req(struct obd_export *exp, while (1) { imp = class_exp2cliimp(exp); - if (imp == NULL || imp->imp_invalid) { + if (!imp || imp->imp_invalid) { CDEBUG(D_DLMTRACE, "skipping cancel on invalid import %p\n", imp); return count; } req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -946,7 +969,6 @@ out: static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp) { - LASSERT(imp != NULL); return &imp->imp_obd->obd_namespace->ns_pool; } @@ -971,7 +993,8 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req) * is the case when server does not support LRU resize feature. * This is also possible in some recovery cases when server-side * reqs have no reference to the OBD export and thus access to - * server-side namespace is not possible. */ + * server-side namespace is not possible. + */ if (lustre_msg_get_slv(req->rq_repmsg) == 0 || lustre_msg_get_limit(req->rq_repmsg) == 0) { DEBUG_REQ(D_HA, req, @@ -989,7 +1012,8 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req) * to the pool thread. We do not access obd_namespace and pool * directly here as there is no reliable way to make sure that * they are still alive at cleanup time. Evil races are possible - * which may cause Oops at that time. */ + * which may cause Oops at that time. + */ write_lock(&obd->obd_pool_lock); obd->obd_pool_slv = new_slv; obd->obd_pool_limit = new_limit; @@ -1005,7 +1029,7 @@ EXPORT_SYMBOL(ldlm_cli_update_pool); * Lock must not have any readers or writers by this time. 
*/ int ldlm_cli_cancel(struct lustre_handle *lockh, - ldlm_cancel_flags_t cancel_flags) + enum ldlm_cancel_flags cancel_flags) { struct obd_export *exp; int avail, flags, count = 1; @@ -1016,8 +1040,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh, /* concurrent cancels on the same handle can happen */ lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING); - if (lock == NULL) { - LDLM_DEBUG_NOLOCK("lock is already being destroyed\n"); + if (!lock) { + LDLM_DEBUG_NOLOCK("lock is already being destroyed"); return 0; } @@ -1028,7 +1052,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh, } /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL * RPC which goes to canceld portal, so we can cancel other LRU locks - * here and send them all as one LDLM_CANCEL RPC. */ + * here and send them all as one LDLM_CANCEL RPC. + */ LASSERT(list_empty(&lock->l_bl_ast)); list_add(&lock->l_bl_ast, &cancels); @@ -1055,7 +1080,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel); * Return the number of cancelled locks. */ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, - ldlm_cancel_flags_t flags) + enum ldlm_cancel_flags flags) { LIST_HEAD(head); struct ldlm_lock *lock, *next; @@ -1076,7 +1101,8 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count, /* Until we have compound requests and can send LDLM_CANCEL * requests batched with generic RPCs, we need to send cancels * with the LDLM_FL_BL_AST flag in a separate RPC from - * the one being generated now. */ + * the one being generated now. 
+ */ if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) { LDLM_DEBUG(lock, "Cancel lock separately"); list_del_init(&lock->l_bl_ast); @@ -1116,7 +1142,8 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, lock_res_and_lock(lock); /* don't check added & count since we want to process all locks - * from unused list */ + * from unused list + */ switch (lock->l_resource->lr_type) { case LDLM_EXTENT: case LDLM_IBITS: @@ -1152,7 +1179,8 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, unsigned long la; /* Stop LRU processing when we reach past @count or have checked all - * locks in LRU. */ + * locks in LRU. + */ if (count && added >= count) return LDLM_POLICY_KEEP_LOCK; @@ -1166,7 +1194,8 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns, ldlm_pool_set_clv(pl, lv); /* Stop when SLV is not yet come from server or lv is smaller than - * it is. */ + * it is. + */ return (slv == 0 || lv < slv) ? LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; } @@ -1186,7 +1215,8 @@ static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns, int count) { /* Stop LRU processing when we reach past @count or have checked all - * locks in LRU. */ + * locks in LRU. + */ return (added >= count) ? LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; } @@ -1227,7 +1257,8 @@ static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns, int count) { /* Stop LRU processing when we reach past count or have checked all - * locks in LRU. */ + * locks in LRU. + */ return (added >= count) ? 
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK; } @@ -1307,7 +1338,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, count += unused - ns->ns_max_unused; pf = ldlm_cancel_lru_policy(ns, flags); - LASSERT(pf != NULL); + LASSERT(pf); while (!list_empty(&ns->ns_unused_list)) { ldlm_policy_res_t result; @@ -1331,7 +1362,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, continue; /* Somebody is already doing CANCEL. No need for this - * lock in LRU, do not traverse it again. */ + * lock in LRU, do not traverse it again. + */ if (!(lock->l_flags & LDLM_FL_CANCELING)) break; @@ -1380,7 +1412,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, /* Another thread is removing lock from LRU, or * somebody is already doing CANCEL, or there * is a blocking request which will send cancel - * by itself, or the lock is no longer unused. */ + * by itself, or the lock is no longer unused. + */ unlock_res_and_lock(lock); lu_ref_del(&lock->l_reference, __func__, current); @@ -1394,7 +1427,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, * better send cancel notification to server, so that it * frees appropriate state. This might lead to a race * where while we are doing cancel here, server is also - * silently cancelling this lock. */ + * silently cancelling this lock. + */ lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK; /* Setting the CBPENDING flag is a little misleading, @@ -1402,7 +1436,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, * CBPENDING is set, the lock can accumulate no more * readers/writers. Since readers and writers are * already zero here, ldlm_lock_decref() won't see - * this flag and call l_blocking_ast */ + * this flag and call l_blocking_ast + */ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING; /* We can't re-add to l_lru as it confuses the @@ -1410,7 +1445,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, * arrives after we drop lr_lock below. 
We use l_bl_ast * and can't use l_pending_chain as it is used both on * server and client nevertheless bug 5666 says it is - * used only on server */ + * used only on server + */ LASSERT(list_empty(&lock->l_bl_ast)); list_add(&lock->l_bl_ast, cancels); unlock_res_and_lock(lock); @@ -1425,7 +1461,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels, int count, int max, - ldlm_cancel_flags_t cancel_flags, int flags) + enum ldlm_cancel_flags cancel_flags, int flags) { int added; @@ -1444,14 +1480,15 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, * callback will be performed in this function. */ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, - ldlm_cancel_flags_t cancel_flags, + enum ldlm_cancel_flags cancel_flags, int flags) { LIST_HEAD(cancels); int count, rc; /* Just prepare the list of locks, do not actually cancel them yet. - * Locks are cancelled later in a separate thread. */ + * Locks are cancelled later in a separate thread. 
+ */ count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags); rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags); if (rc == 0) @@ -1468,15 +1505,16 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, int ldlm_cancel_resource_local(struct ldlm_resource *res, struct list_head *cancels, ldlm_policy_data_t *policy, - ldlm_mode_t mode, __u64 lock_flags, - ldlm_cancel_flags_t cancel_flags, void *opaque) + enum ldlm_mode mode, __u64 lock_flags, + enum ldlm_cancel_flags cancel_flags, + void *opaque) { struct ldlm_lock *lock; int count = 0; lock_res(res); list_for_each_entry(lock, &res->lr_granted, l_res_link) { - if (opaque != NULL && lock->l_ast_data != opaque) { + if (opaque && lock->l_ast_data != opaque) { LDLM_ERROR(lock, "data %p doesn't match opaque %p", lock->l_ast_data, opaque); continue; @@ -1486,7 +1524,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res, continue; /* If somebody is already doing CANCEL, or blocking AST came, - * skip this lock. */ + * skip this lock. + */ if (lock->l_flags & LDLM_FL_BL_AST || lock->l_flags & LDLM_FL_CANCELING) continue; @@ -1495,7 +1534,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res, continue; /* If policy is given and this is IBITS lock, add to list only - * those locks that match by policy. */ + * those locks that match by policy. + */ if (policy && (lock->l_resource->lr_type == LDLM_IBITS) && !(lock->l_policy_data.l_inodebits.bits & policy->l_inodebits.bits)) @@ -1527,7 +1567,8 @@ EXPORT_SYMBOL(ldlm_cancel_resource_local); * Destroy \a cancels at the end. 
*/ int ldlm_cli_cancel_list(struct list_head *cancels, int count, - struct ptlrpc_request *req, ldlm_cancel_flags_t flags) + struct ptlrpc_request *req, + enum ldlm_cancel_flags flags) { struct ldlm_lock *lock; int res = 0; @@ -1539,7 +1580,8 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count, * Usually it is enough to have just 1 RPC, but it is possible that * there are too many locks to be cancelled in LRU or on a resource. * It would also speed up the case when the server does not support - * the feature. */ + * the feature. + */ while (count > 0) { LASSERT(!list_empty(cancels)); lock = list_entry(cancels->next, struct ldlm_lock, @@ -1577,12 +1619,13 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list); * Cancel all locks on a resource that have 0 readers/writers. * * If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying - * to notify the server. */ + * to notify the server. + */ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { struct ldlm_resource *res; @@ -1591,7 +1634,7 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, int rc; res = ldlm_resource_get(ns, NULL, res_id, 0, 0); - if (res == NULL) { + if (!res) { /* This is not a problem. */ CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]); return 0; @@ -1638,17 +1681,17 @@ static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs, * to notify the server. 
*/ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, - ldlm_cancel_flags_t flags, void *opaque) + enum ldlm_cancel_flags flags, void *opaque) { struct ldlm_cli_cancel_arg arg = { .lc_flags = flags, .lc_opaque = opaque, }; - if (ns == NULL) + if (!ns) return ELDLM_OK; - if (res_id != NULL) { + if (res_id) { return ldlm_cli_cancel_unused_resource(ns, res_id, NULL, LCK_MINMODE, flags, opaque); @@ -1743,13 +1786,13 @@ int ldlm_resource_iterate(struct ldlm_namespace *ns, struct ldlm_resource *res; int rc; - if (ns == NULL) { + if (!ns) { CERROR("must pass in namespace\n"); LBUG(); } res = ldlm_resource_get(ns, NULL, res_id, 0, 0); - if (res == NULL) + if (!res) return 0; LDLM_RESOURCE_ADDREF(res); @@ -1796,7 +1839,7 @@ static int replay_lock_interpret(const struct lu_env *env, goto out; reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto out; } @@ -1815,7 +1858,8 @@ static int replay_lock_interpret(const struct lu_env *env, exp = req->rq_export; if (exp && exp->exp_lock_hash) { /* In the function below, .hs_keycmp resolves to - * ldlm_export_lock_keycmp() */ + * ldlm_export_lock_keycmp() + */ /* coverity[overrun-buffer-val] */ cfs_hash_rehash_key(exp->exp_lock_hash, &lock->l_remote_handle, @@ -1850,7 +1894,8 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) /* If this is reply-less callback lock, we cannot replay it, since * server might have long dropped it, but notification of that event was - * lost by network. (and server granted conflicting lock already) */ + * lost by network. 
(and server granted conflicting lock already) + */ if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) { LDLM_DEBUG(lock, "Not replaying reply-less lock:"); ldlm_lock_cancel(lock); @@ -1882,7 +1927,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION, LDLM_ENQUEUE); - if (req == NULL) + if (!req) return -ENOMEM; /* We're part of recovery, so don't wait for it. */ @@ -1901,7 +1946,8 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) /* notify the server we've replayed all requests. * also, we mark the request to be put on a dedicated * queue to be processed after all request replayes. - * bug 6063 */ + * bug 6063 + */ lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE); LDLM_DEBUG(lock, "replaying lock:"); @@ -1936,7 +1982,8 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns) /* We don't need to care whether or not LRU resize is enabled * because the LDLM_CANCEL_NO_WAIT policy doesn't use the - * count parameter */ + * count parameter + */ canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0, LCF_LOCAL, LDLM_CANCEL_NO_WAIT); diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c index 0ae610015..9dede87ad 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c @@ -56,7 +56,8 @@ LIST_HEAD(ldlm_srv_namespace_list); struct mutex ldlm_cli_namespace_lock; /* Client Namespaces that have active resources in them. 
* Once all resources go away, ldlm_poold moves such namespaces to the - * inactive list */ + * inactive list + */ LIST_HEAD(ldlm_cli_active_namespace_list); /* Client namespaces that don't have any locks in them */ static LIST_HEAD(ldlm_cli_inactive_namespace_list); @@ -66,7 +67,8 @@ static struct dentry *ldlm_ns_debugfs_dir; struct dentry *ldlm_svc_debugfs_dir; /* during debug dump certain amount of granted locks for one resource to avoid - * DDOS. */ + * DDOS. + */ static unsigned int ldlm_dump_granted_max = 256; static ssize_t @@ -275,7 +277,8 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr, ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED); /* Make sure that LRU resize was originally supported before - * turning it on here. */ + * turning it on here. + */ if (lru_resize && (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) { CDEBUG(D_DLMTRACE, @@ -380,7 +383,7 @@ static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns) else ldebugfs_remove(&ns->ns_debugfs_entry); - if (ns->ns_stats != NULL) + if (ns->ns_stats) lprocfs_free_stats(&ns->ns_stats); } @@ -400,7 +403,7 @@ static int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns) "%s", ldlm_ns_name(ns)); ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0); - if (ns->ns_stats == NULL) { + if (!ns->ns_stats) { kobject_put(&ns->ns_kobj); return -ENOMEM; } @@ -420,7 +423,7 @@ static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns) } else { ns_entry = debugfs_create_dir(ldlm_ns_name(ns), ldlm_ns_debugfs_dir); - if (ns_entry == NULL) + if (!ns_entry) return -ENOMEM; ns->ns_debugfs_entry = ns_entry; } @@ -554,7 +557,7 @@ static struct cfs_hash_ops ldlm_ns_fid_hash_ops = { }; struct ldlm_ns_hash_def { - ldlm_ns_type_t nsd_type; + enum ldlm_ns_type nsd_type; /** hash bucket bits */ unsigned nsd_bkt_bits; /** hash bits */ @@ -621,8 +624,8 @@ static void ldlm_namespace_register(struct ldlm_namespace *ns, */ struct ldlm_namespace 
*ldlm_namespace_new(struct obd_device *obd, char *name, ldlm_side_t client, - ldlm_appetite_t apt, - ldlm_ns_type_t ns_type) + enum ldlm_appetite apt, + enum ldlm_ns_type ns_type) { struct ldlm_namespace *ns = NULL; struct ldlm_ns_bucket *nsb; @@ -631,7 +634,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name, int idx; int rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = ldlm_get_ref(); if (rc) { @@ -664,7 +667,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name, CFS_HASH_BIGNAME | CFS_HASH_SPIN_BKTLOCK | CFS_HASH_NO_ITEMREF); - if (ns->ns_rs_hash == NULL) + if (!ns->ns_rs_hash) goto out_ns; cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) { @@ -749,7 +752,8 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q, struct lustre_handle lockh; /* First, we look for non-cleaned-yet lock - * all cleaned locks are marked by CLEANED flag. */ + * all cleaned locks are marked by CLEANED flag. + */ lock_res(res); list_for_each(tmp, q) { lock = list_entry(tmp, struct ldlm_lock, @@ -763,13 +767,14 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q, break; } - if (lock == NULL) { + if (!lock) { unlock_res(res); break; } /* Set CBPENDING so nothing in the cancellation path - * can match this lock. */ + * can match this lock. + */ lock->l_flags |= LDLM_FL_CBPENDING; lock->l_flags |= LDLM_FL_FAILED; lock->l_flags |= flags; @@ -782,7 +787,8 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q, /* This is a little bit gross, but much better than the * alternative: pretend that we got a blocking AST from * the server, so that when the lock is decref'd, it - * will go away ... */ + * will go away ... 
+ */ unlock_res(res); LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY"); if (lock->l_completion_ast) @@ -837,7 +843,7 @@ static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd, */ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags) { - if (ns == NULL) { + if (!ns) { CDEBUG(D_INFO, "NULL ns, skipping cleanup\n"); return ELDLM_OK; } @@ -873,7 +879,8 @@ force_wait: atomic_read(&ns->ns_bref) == 0, &lwi); /* Forced cleanups should be able to reclaim all references, - * so it's safe to wait forever... we can't leak locks... */ + * so it's safe to wait forever... we can't leak locks... + */ if (force && rc == -ETIMEDOUT) { LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n", ldlm_ns_name(ns), @@ -943,7 +950,8 @@ static void ldlm_namespace_unregister(struct ldlm_namespace *ns, LASSERT(!list_empty(&ns->ns_list_chain)); /* Some asserts and possibly other parts of the code are still * using list_empty(&ns->ns_list_chain). This is why it is - * important to use list_del_init() here. */ + * important to use list_del_init() here. + */ list_del_init(&ns->ns_list_chain); ldlm_namespace_nr_dec(client); mutex_unlock(ldlm_namespace_lock(client)); @@ -963,7 +971,8 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns) ldlm_namespace_unregister(ns, ns->ns_client); /* Fini pool _before_ parent proc dir is removed. This is important as * ldlm_pool_fini() removes own proc dir which is child to @dir. - * Removing it after @dir may cause oops. */ + * Removing it after @dir may cause oops. + */ ldlm_pool_fini(&ns->ns_pool); ldlm_namespace_debugfs_unregister(ns); @@ -971,7 +980,8 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns) cfs_hash_putref(ns->ns_rs_hash); /* Namespace \a ns should be not on list at this time, otherwise * this will cause issues related to using freed \a ns in poold - * thread. */ + * thread. 
+ */ LASSERT(list_empty(&ns->ns_list_chain)); kfree(ns); ldlm_put_ref(); @@ -1031,8 +1041,8 @@ static struct ldlm_resource *ldlm_resource_new(void) struct ldlm_resource *res; int idx; - res = kmem_cache_alloc(ldlm_resource_slab, GFP_NOFS | __GFP_ZERO); - if (res == NULL) + res = kmem_cache_zalloc(ldlm_resource_slab, GFP_NOFS); + if (!res) return NULL; INIT_LIST_HEAD(&res->lr_granted); @@ -1050,7 +1060,8 @@ static struct ldlm_resource *ldlm_resource_new(void) lu_ref_init(&res->lr_reference); /* The creator of the resource must unlock the mutex after LVB - * initialization. */ + * initialization. + */ mutex_init(&res->lr_lvb_mutex); mutex_lock(&res->lr_lvb_mutex); @@ -1065,7 +1076,8 @@ static struct ldlm_resource *ldlm_resource_new(void) */ struct ldlm_resource * ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, - const struct ldlm_res_id *name, ldlm_type_t type, int create) + const struct ldlm_res_id *name, enum ldlm_type type, + int create) { struct hlist_node *hnode; struct ldlm_resource *res; @@ -1073,14 +1085,13 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, __u64 version; int ns_refcount = 0; - LASSERT(ns != NULL); - LASSERT(parent == NULL); - LASSERT(ns->ns_rs_hash != NULL); + LASSERT(!parent); + LASSERT(ns->ns_rs_hash); LASSERT(name->name[0] != 0); cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0); hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name); - if (hnode != NULL) { + if (hnode) { cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0); res = hlist_entry(hnode, struct ldlm_resource, lr_hash); /* Synchronize with regard to resource creation. */ @@ -1111,13 +1122,12 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd); res->lr_name = *name; res->lr_type = type; - res->lr_most_restr = LCK_NL; cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1); hnode = (version == cfs_hash_bd_version_get(&bd)) ? 
NULL : cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name); - if (hnode != NULL) { + if (hnode) { /* Someone won the race and already added the resource. */ cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1); /* Clean lu_ref for failed resource. */ @@ -1167,7 +1177,8 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent, /* Let's see if we happened to be the very first resource in this * namespace. If so, and this is a client namespace, we need to move * the namespace into the active namespaces list to be patrolled by - * the ldlm_poold. */ + * the ldlm_poold. + */ if (ns_refcount == 1) { mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT)); ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT); diff --git a/drivers/staging/lustre/lustre/libcfs/Makefile b/drivers/staging/lustre/lustre/libcfs/Makefile deleted file mode 100644 index 03d3f3d7b..000000000 --- a/drivers/staging/lustre/lustre/libcfs/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -obj-$(CONFIG_LUSTRE_FS) += libcfs.o - -libcfs-linux-objs := linux-tracefile.o linux-debug.o -libcfs-linux-objs += linux-prim.o linux-cpu.o -libcfs-linux-objs += linux-curproc.o -libcfs-linux-objs += linux-module.o -libcfs-linux-objs += linux-crypto.o -libcfs-linux-objs += linux-crypto-adler.o -libcfs-linux-objs += linux-mem.o - -libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs)) - -libcfs-all-objs := debug.o fail.o module.o tracefile.o \ - libcfs_string.o hash.o kernel_user_comm.o \ - prng.o workitem.o libcfs_cpu.o \ - libcfs_mem.o libcfs_lock.o - -libcfs-objs := $(libcfs-linux-objs) $(libcfs-all-objs) diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c deleted file mode 100644 index 0b38dad13..000000000 --- a/drivers/staging/lustre/lustre/libcfs/debug.c +++ /dev/null @@ -1,559 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * libcfs/libcfs/debug.c - * - * Author: Phil Schwan <phil@clusterfs.com> - * - */ - -# define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" -#include "tracefile.h" - -static char debug_file_name[1024]; - -unsigned int libcfs_subsystem_debug = ~0; -module_param(libcfs_subsystem_debug, int, 0644); -MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask"); -EXPORT_SYMBOL(libcfs_subsystem_debug); - -unsigned int libcfs_debug = (D_CANTMASK | - D_NETERROR | D_HA | D_CONFIG | D_IOCTL); -module_param(libcfs_debug, int, 0644); -MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask"); -EXPORT_SYMBOL(libcfs_debug); - -static int libcfs_param_debug_mb_set(const char *val, - const struct kernel_param *kp) -{ - int rc; - unsigned num; - - rc = kstrtouint(val, 0, &num); - if (rc < 0) - return rc; - - if (!*((unsigned int *)kp->arg)) { - *((unsigned int *)kp->arg) = num; - return 0; - } - - rc = cfs_trace_set_debug_mb(num); - - if (!rc) - *((unsigned int *)kp->arg) = cfs_trace_get_debug_mb(); - - return rc; -} - -/* While debug_mb setting look like unsigned int, in fact - * it needs quite a bunch of extra processing, so we define special - * debugmb parameter type with corresponding methods to handle this case */ -static struct kernel_param_ops param_ops_debugmb = { - .set = libcfs_param_debug_mb_set, - .get = param_get_uint, -}; - -#define param_check_debugmb(name, p) \ - __param_check(name, p, unsigned int) - -static unsigned int libcfs_debug_mb; -module_param(libcfs_debug_mb, debugmb, 0644); -MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size."); - -unsigned int libcfs_printk = D_CANTMASK; -module_param(libcfs_printk, uint, 0644); -MODULE_PARM_DESC(libcfs_printk, "Lustre kernel debug console mask"); - -unsigned int libcfs_console_ratelimit = 1; -module_param(libcfs_console_ratelimit, uint, 0644); -MODULE_PARM_DESC(libcfs_console_ratelimit, "Lustre kernel debug console ratelimit (0 to disable)"); - -static int 
param_set_delay_minmax(const char *val, - const struct kernel_param *kp, - long min, long max) -{ - long d; - int sec; - int rc; - - rc = kstrtoint(val, 0, &sec); - if (rc) - return -EINVAL; - - d = cfs_time_seconds(sec) / 100; - if (d < min || d > max) - return -EINVAL; - - *((unsigned int *)kp->arg) = d; - - return 0; -} - -static int param_get_delay(char *buffer, const struct kernel_param *kp) -{ - unsigned int d = *(unsigned int *)kp->arg; - - return sprintf(buffer, "%u", (unsigned int)cfs_duration_sec(d * 100)); -} - -unsigned int libcfs_console_max_delay; -unsigned int libcfs_console_min_delay; - -static int param_set_console_max_delay(const char *val, - const struct kernel_param *kp) -{ - return param_set_delay_minmax(val, kp, - libcfs_console_min_delay, INT_MAX); -} - -static struct kernel_param_ops param_ops_console_max_delay = { - .set = param_set_console_max_delay, - .get = param_get_delay, -}; - -#define param_check_console_max_delay(name, p) \ - __param_check(name, p, unsigned int) - -module_param(libcfs_console_max_delay, console_max_delay, 0644); -MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)"); - -static int param_set_console_min_delay(const char *val, - const struct kernel_param *kp) -{ - return param_set_delay_minmax(val, kp, - 1, libcfs_console_max_delay); -} - -static struct kernel_param_ops param_ops_console_min_delay = { - .set = param_set_console_min_delay, - .get = param_get_delay, -}; - -#define param_check_console_min_delay(name, p) \ - __param_check(name, p, unsigned int) - -module_param(libcfs_console_min_delay, console_min_delay, 0644); -MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)"); - -static int param_set_uint_minmax(const char *val, - const struct kernel_param *kp, - unsigned int min, unsigned int max) -{ - unsigned int num; - int ret; - - if (!val) - return -EINVAL; - ret = kstrtouint(val, 0, &num); - if (ret < 0 || num < min || num > max) 
- return -EINVAL; - *((unsigned int *)kp->arg) = num; - return 0; -} - -static int param_set_uintpos(const char *val, const struct kernel_param *kp) -{ - return param_set_uint_minmax(val, kp, 1, -1); -} - -static struct kernel_param_ops param_ops_uintpos = { - .set = param_set_uintpos, - .get = param_get_uint, -}; - -#define param_check_uintpos(name, p) \ - __param_check(name, p, unsigned int) - -unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF; -module_param(libcfs_console_backoff, uintpos, 0644); -MODULE_PARM_DESC(libcfs_console_backoff, "Lustre kernel debug console backoff factor"); - -unsigned int libcfs_debug_binary = 1; - -unsigned int libcfs_stack = 3 * THREAD_SIZE / 4; -EXPORT_SYMBOL(libcfs_stack); - -unsigned int libcfs_catastrophe; -EXPORT_SYMBOL(libcfs_catastrophe); - -unsigned int libcfs_panic_on_lbug = 1; -module_param(libcfs_panic_on_lbug, uint, 0644); -MODULE_PARM_DESC(libcfs_panic_on_lbug, "Lustre kernel panic on LBUG"); - -static wait_queue_head_t debug_ctlwq; - -char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT; - -/* We need to pass a pointer here, but elsewhere this must be a const */ -static char *libcfs_debug_file_path; -module_param(libcfs_debug_file_path, charp, 0644); -MODULE_PARM_DESC(libcfs_debug_file_path, - "Path for dumping debug logs, set 'NONE' to prevent log dumping"); - -int libcfs_panic_in_progress; - -/* libcfs_debug_token2mask() expects the returned - * string in lower-case */ -static const char * -libcfs_debug_subsys2str(int subsys) -{ - switch (1 << subsys) { - default: - return NULL; - case S_UNDEFINED: - return "undefined"; - case S_MDC: - return "mdc"; - case S_MDS: - return "mds"; - case S_OSC: - return "osc"; - case S_OST: - return "ost"; - case S_CLASS: - return "class"; - case S_LOG: - return "log"; - case S_LLITE: - return "llite"; - case S_RPC: - return "rpc"; - case S_LNET: - return "lnet"; - case S_LND: - return "lnd"; - case S_PINGER: - return "pinger"; - case S_FILTER: - return 
"filter"; - case S_ECHO: - return "echo"; - case S_LDLM: - return "ldlm"; - case S_LOV: - return "lov"; - case S_LQUOTA: - return "lquota"; - case S_OSD: - return "osd"; - case S_LMV: - return "lmv"; - case S_SEC: - return "sec"; - case S_GSS: - return "gss"; - case S_MGC: - return "mgc"; - case S_MGS: - return "mgs"; - case S_FID: - return "fid"; - case S_FLD: - return "fld"; - } -} - -/* libcfs_debug_token2mask() expects the returned - * string in lower-case */ -static const char * -libcfs_debug_dbg2str(int debug) -{ - switch (1 << debug) { - default: - return NULL; - case D_TRACE: - return "trace"; - case D_INODE: - return "inode"; - case D_SUPER: - return "super"; - case D_EXT2: - return "ext2"; - case D_MALLOC: - return "malloc"; - case D_CACHE: - return "cache"; - case D_INFO: - return "info"; - case D_IOCTL: - return "ioctl"; - case D_NETERROR: - return "neterror"; - case D_NET: - return "net"; - case D_WARNING: - return "warning"; - case D_BUFFS: - return "buffs"; - case D_OTHER: - return "other"; - case D_DENTRY: - return "dentry"; - case D_NETTRACE: - return "nettrace"; - case D_PAGE: - return "page"; - case D_DLMTRACE: - return "dlmtrace"; - case D_ERROR: - return "error"; - case D_EMERG: - return "emerg"; - case D_HA: - return "ha"; - case D_RPCTRACE: - return "rpctrace"; - case D_VFSTRACE: - return "vfstrace"; - case D_READA: - return "reada"; - case D_MMAP: - return "mmap"; - case D_CONFIG: - return "config"; - case D_CONSOLE: - return "console"; - case D_QUOTA: - return "quota"; - case D_SEC: - return "sec"; - case D_LFSCK: - return "lfsck"; - } -} - -int -libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys) -{ - const char *(*fn)(int bit) = is_subsys ? 
libcfs_debug_subsys2str : - libcfs_debug_dbg2str; - int len = 0; - const char *token; - int i; - - if (mask == 0) { /* "0" */ - if (size > 0) - str[0] = '0'; - len = 1; - } else { /* space-separated tokens */ - for (i = 0; i < 32; i++) { - if ((mask & (1 << i)) == 0) - continue; - - token = fn(i); - if (token == NULL) /* unused bit */ - continue; - - if (len > 0) { /* separator? */ - if (len < size) - str[len] = ' '; - len++; - } - - while (*token != 0) { - if (len < size) - str[len] = *token; - token++; - len++; - } - } - } - - /* terminate 'str' */ - if (len < size) - str[len] = 0; - else - str[size - 1] = 0; - - return len; -} - -int -libcfs_debug_str2mask(int *mask, const char *str, int is_subsys) -{ - const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str : - libcfs_debug_dbg2str; - int m = 0; - int matched; - int n; - int t; - - /* Allow a number for backwards compatibility */ - - for (n = strlen(str); n > 0; n--) - if (!isspace(str[n-1])) - break; - matched = n; - t = sscanf(str, "%i%n", &m, &matched); - if (t >= 1 && matched == n) { - /* don't print warning for lctl set_param debug=0 or -1 */ - if (m != 0 && m != -1) - CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n"); - *mask = m; - return 0; - } - - return cfs_str2mask(str, fn, mask, is_subsys ? 
0 : D_CANTMASK, - 0xffffffff); -} - -/** - * Dump Lustre log to ::debug_file_path by calling tracefile_dump_all_pages() - */ -void libcfs_debug_dumplog_internal(void *arg) -{ - void *journal_info; - - journal_info = current->journal_info; - current->journal_info = NULL; - - if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) { - snprintf(debug_file_name, sizeof(debug_file_name) - 1, - "%s.%lld.%ld", libcfs_debug_file_path_arr, - (s64)ktime_get_real_seconds(), (long_ptr_t)arg); - pr_alert("LustreError: dumping log to %s\n", - debug_file_name); - cfs_tracefile_dump_all_pages(debug_file_name); - libcfs_run_debug_log_upcall(debug_file_name); - } - - current->journal_info = journal_info; -} - -static int libcfs_debug_dumplog_thread(void *arg) -{ - libcfs_debug_dumplog_internal(arg); - wake_up(&debug_ctlwq); - return 0; -} - -void libcfs_debug_dumplog(void) -{ - wait_queue_t wait; - struct task_struct *dumper; - - /* we're being careful to ensure that the kernel thread is - * able to set our state to running as it exits before we - * get to schedule() */ - init_waitqueue_entry(&wait, current); - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&debug_ctlwq, &wait); - - dumper = kthread_run(libcfs_debug_dumplog_thread, - (void *)(long)current_pid(), - "libcfs_debug_dumper"); - if (IS_ERR(dumper)) - pr_err("LustreError: cannot start log dump thread: %ld\n", - PTR_ERR(dumper)); - else - schedule(); - - /* be sure to teardown if cfs_create_thread() failed */ - remove_wait_queue(&debug_ctlwq, &wait); - set_current_state(TASK_RUNNING); -} -EXPORT_SYMBOL(libcfs_debug_dumplog); - -int libcfs_debug_init(unsigned long bufsize) -{ - int rc = 0; - unsigned int max = libcfs_debug_mb; - - init_waitqueue_head(&debug_ctlwq); - - if (libcfs_console_max_delay <= 0 || /* not set by user or */ - libcfs_console_min_delay <= 0 || /* set to invalid values */ - libcfs_console_min_delay >= libcfs_console_max_delay) { - libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY; - 
libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY; - } - - if (libcfs_debug_file_path != NULL) { - strlcpy(libcfs_debug_file_path_arr, - libcfs_debug_file_path, - sizeof(libcfs_debug_file_path_arr)); - } - - /* If libcfs_debug_mb is set to an invalid value or uninitialized - * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */ - if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) { - max = TCD_MAX_PAGES; - } else { - max = max / num_possible_cpus(); - max <<= (20 - PAGE_CACHE_SHIFT); - } - rc = cfs_tracefile_init(max); - - if (rc == 0) { - libcfs_register_panic_notifier(); - libcfs_debug_mb = cfs_trace_get_debug_mb(); - } - - return rc; -} - -int libcfs_debug_cleanup(void) -{ - libcfs_unregister_panic_notifier(); - cfs_tracefile_exit(); - return 0; -} - -int libcfs_debug_clear_buffer(void) -{ - cfs_trace_flush_pages(); - return 0; -} - -/* Debug markers, although printed by S_LNET - * should not be be marked as such. */ -#undef DEBUG_SUBSYSTEM -#define DEBUG_SUBSYSTEM S_UNDEFINED -int libcfs_debug_mark_buffer(const char *text) -{ - CDEBUG(D_TRACE, - "***************************************************\n"); - LCONSOLE(D_WARNING, "DEBUG MARKER: %s\n", text); - CDEBUG(D_TRACE, - "***************************************************\n"); - - return 0; -} - -#undef DEBUG_SUBSYSTEM -#define DEBUG_SUBSYSTEM S_LNET diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c deleted file mode 100644 index 27831432d..000000000 --- a/drivers/staging/lustre/lustre/libcfs/fail.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores, - * CA 94065 USA or visit www.oracle.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Oracle Corporation, Inc. - */ - -#include "../../include/linux/libcfs/libcfs.h" - -unsigned long cfs_fail_loc; -EXPORT_SYMBOL(cfs_fail_loc); - -unsigned int cfs_fail_val; -EXPORT_SYMBOL(cfs_fail_val); - -DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq); -EXPORT_SYMBOL(cfs_race_waitq); - -int cfs_race_state; -EXPORT_SYMBOL(cfs_race_state); - -int __cfs_fail_check_set(__u32 id, __u32 value, int set) -{ - static atomic_t cfs_fail_count = ATOMIC_INIT(0); - - LASSERT(!(id & CFS_FAIL_ONCE)); - - if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) == - (CFS_FAILED | CFS_FAIL_ONCE)) { - atomic_set(&cfs_fail_count, 0); /* paranoia */ - return 0; - } - - /* Fail 1/cfs_fail_val times */ - if (cfs_fail_loc & CFS_FAIL_RAND) { - if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0) - return 0; - } - - /* Skip the first cfs_fail_val, then fail */ - if (cfs_fail_loc & CFS_FAIL_SKIP) { - if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val) - return 0; - } - - /* check cfs_fail_val... 
*/ - if (set == CFS_FAIL_LOC_VALUE) { - if (cfs_fail_val != -1 && cfs_fail_val != value) - return 0; - } - - /* Fail cfs_fail_val times, overridden by FAIL_ONCE */ - if (cfs_fail_loc & CFS_FAIL_SOME && - (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) { - int count = atomic_inc_return(&cfs_fail_count); - - if (count >= cfs_fail_val) { - set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc); - atomic_set(&cfs_fail_count, 0); - /* we are lost race to increase */ - if (count > cfs_fail_val) - return 0; - } - } - - if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) && - (value & CFS_FAIL_ONCE)) - set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc); - /* Lost race to set CFS_FAILED_BIT. */ - if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) { - /* If CFS_FAIL_ONCE is valid, only one process can fail, - * otherwise multi-process can fail at the same time. */ - if (cfs_fail_loc & CFS_FAIL_ONCE) - return 0; - } - - switch (set) { - case CFS_FAIL_LOC_NOSET: - case CFS_FAIL_LOC_VALUE: - break; - case CFS_FAIL_LOC_ORSET: - cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE); - break; - case CFS_FAIL_LOC_RESET: - cfs_fail_loc = value; - break; - default: - LASSERTF(0, "called with bad set %u\n", set); - break; - } - - return 1; -} -EXPORT_SYMBOL(__cfs_fail_check_set); - -int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set) -{ - int ret; - - ret = __cfs_fail_check_set(id, value, set); - if (ret && likely(ms > 0)) { - CERROR("cfs_fail_timeout id %x sleeping for %dms\n", - id, ms); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(ms) / 1000); - CERROR("cfs_fail_timeout id %x awake\n", id); - } - return ret; -} -EXPORT_SYMBOL(__cfs_fail_timeout_set); diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c deleted file mode 100644 index 4d5051043..000000000 --- a/drivers/staging/lustre/lustre/libcfs/hash.c +++ /dev/null @@ -1,2092 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR 
REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/libcfs/hash.c - * - * Implement a hash class for hash process in lustre system. 
- * - * Author: YuZhangyong <yzy@clusterfs.com> - * - * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov> - * - Simplified API and improved documentation - * - Added per-hash feature flags: - * * CFS_HASH_DEBUG additional validation - * * CFS_HASH_REHASH dynamic rehashing - * - Added per-hash statistics - * - General performance enhancements - * - * 2009-07-31: Liang Zhen <zhen.liang@sun.com> - * - move all stuff to libcfs - * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH - * - ignore hs_rwlock if without CFS_HASH_REHASH setting - * - buckets are allocated one by one(instead of contiguous memory), - * to avoid unnecessary cacheline conflict - * - * 2010-03-01: Liang Zhen <zhen.liang@sun.com> - * - "bucket" is a group of hlist_head now, user can specify bucket size - * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share - * one lock for reducing memory overhead. - * - * - support lockless hash, caller will take care of locks: - * avoid lock overhead for hash tables that are already protected - * by locking in the caller for another reason - * - * - support both spin_lock/rwlock for bucket: - * overhead of spinlock contention is lower than read/write - * contention of rwlock, so using spinlock to serialize operations on - * bucket is more reasonable for those frequently changed hash tables - * - * - support one-single lock mode: - * one lock to protect all hash operations to avoid overhead of - * multiple locks if hash table is always small - * - * - removed a lot of unnecessary addref & decref on hash element: - * addref & decref are atomic operations in many use-cases which - * are expensive. - * - * - support non-blocking cfs_hash_add() and cfs_hash_findadd(): - * some lustre use-cases require these functions to be strictly - * non-blocking, we need to schedule required rehash on a different - * thread on those cases. 
- * - * - safer rehash on large hash table - * In old implementation, rehash function will exclusively lock the - * hash table and finish rehash in one batch, it's dangerous on SMP - * system because rehash millions of elements could take long time. - * New implemented rehash can release lock and relax CPU in middle - * of rehash, it's safe for another thread to search/change on the - * hash table even it's in rehasing. - * - * - support two different refcount modes - * . hash table has refcount on element - * . hash table doesn't change refcount on adding/removing element - * - * - support long name hash table (for param-tree) - * - * - fix a bug for cfs_hash_rehash_key: - * in old implementation, cfs_hash_rehash_key could screw up the - * hash-table because @key is overwritten without any protection. - * Now we need user to define hs_keycpy for those rehash enabled - * hash tables, cfs_hash_rehash_key will overwrite hash-key - * inside lock by calling hs_keycpy. - * - * - better hash iteration: - * Now we support both locked iteration & lockless iteration of hash - * table. Also, user can break the iteration by return 1 in callback. 
- */ -#include <linux/seq_file.h> -#include <linux/log2.h> - -#include "../../include/linux/libcfs/libcfs.h" - -#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 -static unsigned int warn_on_depth = 8; -module_param(warn_on_depth, uint, 0644); -MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high."); -#endif - -struct cfs_wi_sched *cfs_sched_rehash; - -static inline void -cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {} - -static inline void -cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {} - -static inline void -cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive) - __acquires(&lock->spin) -{ - spin_lock(&lock->spin); -} - -static inline void -cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive) - __releases(&lock->spin) -{ - spin_unlock(&lock->spin); -} - -static inline void -cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive) - __acquires(&lock->rw) -{ - if (!exclusive) - read_lock(&lock->rw); - else - write_lock(&lock->rw); -} - -static inline void -cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive) - __releases(&lock->rw) -{ - if (!exclusive) - read_unlock(&lock->rw); - else - write_unlock(&lock->rw); -} - -/** No lock hash */ -static struct cfs_hash_lock_ops cfs_hash_nl_lops = { - .hs_lock = cfs_hash_nl_lock, - .hs_unlock = cfs_hash_nl_unlock, - .hs_bkt_lock = cfs_hash_nl_lock, - .hs_bkt_unlock = cfs_hash_nl_unlock, -}; - -/** no bucket lock, one spinlock to protect everything */ -static struct cfs_hash_lock_ops cfs_hash_nbl_lops = { - .hs_lock = cfs_hash_spin_lock, - .hs_unlock = cfs_hash_spin_unlock, - .hs_bkt_lock = cfs_hash_nl_lock, - .hs_bkt_unlock = cfs_hash_nl_unlock, -}; - -/** spin bucket lock, rehash is enabled */ -static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = { - .hs_lock = cfs_hash_rw_lock, - .hs_unlock = cfs_hash_rw_unlock, - .hs_bkt_lock = cfs_hash_spin_lock, - .hs_bkt_unlock = cfs_hash_spin_unlock, -}; - -/** rw bucket lock, rehash is enabled */ -static struct 
cfs_hash_lock_ops cfs_hash_bkt_rw_lops = { - .hs_lock = cfs_hash_rw_lock, - .hs_unlock = cfs_hash_rw_unlock, - .hs_bkt_lock = cfs_hash_rw_lock, - .hs_bkt_unlock = cfs_hash_rw_unlock, -}; - -/** spin bucket lock, rehash is disabled */ -static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = { - .hs_lock = cfs_hash_nl_lock, - .hs_unlock = cfs_hash_nl_unlock, - .hs_bkt_lock = cfs_hash_spin_lock, - .hs_bkt_unlock = cfs_hash_spin_unlock, -}; - -/** rw bucket lock, rehash is disabled */ -static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = { - .hs_lock = cfs_hash_nl_lock, - .hs_unlock = cfs_hash_nl_unlock, - .hs_bkt_lock = cfs_hash_rw_lock, - .hs_bkt_unlock = cfs_hash_rw_unlock, -}; - -static void -cfs_hash_lock_setup(struct cfs_hash *hs) -{ - if (cfs_hash_with_no_lock(hs)) { - hs->hs_lops = &cfs_hash_nl_lops; - - } else if (cfs_hash_with_no_bktlock(hs)) { - hs->hs_lops = &cfs_hash_nbl_lops; - spin_lock_init(&hs->hs_lock.spin); - - } else if (cfs_hash_with_rehash(hs)) { - rwlock_init(&hs->hs_lock.rw); - - if (cfs_hash_with_rw_bktlock(hs)) - hs->hs_lops = &cfs_hash_bkt_rw_lops; - else if (cfs_hash_with_spin_bktlock(hs)) - hs->hs_lops = &cfs_hash_bkt_spin_lops; - else - LBUG(); - } else { - if (cfs_hash_with_rw_bktlock(hs)) - hs->hs_lops = &cfs_hash_nr_bkt_rw_lops; - else if (cfs_hash_with_spin_bktlock(hs)) - hs->hs_lops = &cfs_hash_nr_bkt_spin_lops; - else - LBUG(); - } -} - -/** - * Simple hash head without depth tracking - * new element is always added to head of hlist - */ -struct cfs_hash_head { - struct hlist_head hh_head; /**< entries list */ -}; - -static int -cfs_hash_hh_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_head); -} - -static struct hlist_head * -cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - struct cfs_hash_head *head; - - head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].hh_head; -} - -static int -cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, 
- struct hlist_node *hnode) -{ - hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd)); - return -1; /* unknown depth */ -} - -static int -cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - hlist_del_init(hnode); - return -1; /* unknown depth */ -} - -/** - * Simple hash head with depth tracking - * new element is always added to head of hlist - */ -struct cfs_hash_head_dep { - struct hlist_head hd_head; /**< entries list */ - unsigned int hd_depth; /**< list length */ -}; - -static int -cfs_hash_hd_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_head_dep); -} - -static struct hlist_head * -cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - struct cfs_hash_head_dep *head; - - head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].hd_head; -} - -static int -cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_head_dep *hh; - - hh = container_of(cfs_hash_hd_hhead(hs, bd), - struct cfs_hash_head_dep, hd_head); - hlist_add_head(hnode, &hh->hd_head); - return ++hh->hd_depth; -} - -static int -cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_head_dep *hh; - - hh = container_of(cfs_hash_hd_hhead(hs, bd), - struct cfs_hash_head_dep, hd_head); - hlist_del_init(hnode); - return --hh->hd_depth; -} - -/** - * double links hash head without depth tracking - * new element is always added to tail of hlist - */ -struct cfs_hash_dhead { - struct hlist_head dh_head; /**< entries list */ - struct hlist_node *dh_tail; /**< the last entry */ -}; - -static int -cfs_hash_dh_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_dhead); -} - -static struct hlist_head * -cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - struct cfs_hash_dhead *head; - - head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0]; - 
return &head[bd->bd_offset].dh_head; -} - -static int -cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_dhead *dh; - - dh = container_of(cfs_hash_dh_hhead(hs, bd), - struct cfs_hash_dhead, dh_head); - if (dh->dh_tail != NULL) /* not empty */ - hlist_add_behind(hnode, dh->dh_tail); - else /* empty list */ - hlist_add_head(hnode, &dh->dh_head); - dh->dh_tail = hnode; - return -1; /* unknown depth */ -} - -static int -cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnd) -{ - struct cfs_hash_dhead *dh; - - dh = container_of(cfs_hash_dh_hhead(hs, bd), - struct cfs_hash_dhead, dh_head); - if (hnd->next == NULL) { /* it's the tail */ - dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL : - container_of(hnd->pprev, struct hlist_node, next); - } - hlist_del_init(hnd); - return -1; /* unknown depth */ -} - -/** - * double links hash head with depth tracking - * new element is always added to tail of hlist - */ -struct cfs_hash_dhead_dep { - struct hlist_head dd_head; /**< entries list */ - struct hlist_node *dd_tail; /**< the last entry */ - unsigned int dd_depth; /**< list length */ -}; - -static int -cfs_hash_dd_hhead_size(struct cfs_hash *hs) -{ - return sizeof(struct cfs_hash_dhead_dep); -} - -static struct hlist_head * -cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd) -{ - struct cfs_hash_dhead_dep *head; - - head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0]; - return &head[bd->bd_offset].dd_head; -} - -static int -cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - struct cfs_hash_dhead_dep *dh; - - dh = container_of(cfs_hash_dd_hhead(hs, bd), - struct cfs_hash_dhead_dep, dd_head); - if (dh->dd_tail != NULL) /* not empty */ - hlist_add_behind(hnode, dh->dd_tail); - else /* empty list */ - hlist_add_head(hnode, &dh->dd_head); - dh->dd_tail = hnode; - return ++dh->dd_depth; -} - -static 
int -cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnd) -{ - struct cfs_hash_dhead_dep *dh; - - dh = container_of(cfs_hash_dd_hhead(hs, bd), - struct cfs_hash_dhead_dep, dd_head); - if (hnd->next == NULL) { /* it's the tail */ - dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL : - container_of(hnd->pprev, struct hlist_node, next); - } - hlist_del_init(hnd); - return --dh->dd_depth; -} - -static struct cfs_hash_hlist_ops cfs_hash_hh_hops = { - .hop_hhead = cfs_hash_hh_hhead, - .hop_hhead_size = cfs_hash_hh_hhead_size, - .hop_hnode_add = cfs_hash_hh_hnode_add, - .hop_hnode_del = cfs_hash_hh_hnode_del, -}; - -static struct cfs_hash_hlist_ops cfs_hash_hd_hops = { - .hop_hhead = cfs_hash_hd_hhead, - .hop_hhead_size = cfs_hash_hd_hhead_size, - .hop_hnode_add = cfs_hash_hd_hnode_add, - .hop_hnode_del = cfs_hash_hd_hnode_del, -}; - -static struct cfs_hash_hlist_ops cfs_hash_dh_hops = { - .hop_hhead = cfs_hash_dh_hhead, - .hop_hhead_size = cfs_hash_dh_hhead_size, - .hop_hnode_add = cfs_hash_dh_hnode_add, - .hop_hnode_del = cfs_hash_dh_hnode_del, -}; - -static struct cfs_hash_hlist_ops cfs_hash_dd_hops = { - .hop_hhead = cfs_hash_dd_hhead, - .hop_hhead_size = cfs_hash_dd_hhead_size, - .hop_hnode_add = cfs_hash_dd_hnode_add, - .hop_hnode_del = cfs_hash_dd_hnode_del, -}; - -static void -cfs_hash_hlist_setup(struct cfs_hash *hs) -{ - if (cfs_hash_with_add_tail(hs)) { - hs->hs_hops = cfs_hash_with_depth(hs) ? - &cfs_hash_dd_hops : &cfs_hash_dh_hops; - } else { - hs->hs_hops = cfs_hash_with_depth(hs) ? 
- &cfs_hash_hd_hops : &cfs_hash_hh_hops; - } -} - -static void -cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts, - unsigned int bits, const void *key, struct cfs_hash_bd *bd) -{ - unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1); - - LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits); - - bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)]; - bd->bd_offset = index >> (bits - hs->hs_bkt_bits); -} - -void -cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd) -{ - /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (likely(hs->hs_rehash_buckets == NULL)) { - cfs_hash_bd_from_key(hs, hs->hs_buckets, - hs->hs_cur_bits, key, bd); - } else { - LASSERT(hs->hs_rehash_bits != 0); - cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, - hs->hs_rehash_bits, key, bd); - } -} -EXPORT_SYMBOL(cfs_hash_bd_get); - -static inline void -cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur) -{ - if (likely(dep_cur <= bd->bd_bucket->hsb_depmax)) - return; - - bd->bd_bucket->hsb_depmax = dep_cur; -# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 - if (likely(warn_on_depth == 0 || - max(warn_on_depth, hs->hs_dep_max) >= dep_cur)) - return; - - spin_lock(&hs->hs_dep_lock); - hs->hs_dep_max = dep_cur; - hs->hs_dep_bkt = bd->bd_bucket->hsb_index; - hs->hs_dep_off = bd->bd_offset; - hs->hs_dep_bits = hs->hs_cur_bits; - spin_unlock(&hs->hs_dep_lock); - - cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi); -# endif -} - -void -cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - int rc; - - rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode); - cfs_hash_bd_dep_record(hs, bd, rc); - bd->bd_bucket->hsb_version++; - if (unlikely(bd->bd_bucket->hsb_version == 0)) - bd->bd_bucket->hsb_version++; - bd->bd_bucket->hsb_count++; - - if (cfs_hash_with_counter(hs)) - atomic_inc(&hs->hs_count); - if (!cfs_hash_with_no_itemref(hs)) - 
cfs_hash_get(hs, hnode); -} -EXPORT_SYMBOL(cfs_hash_bd_add_locked); - -void -cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode) -{ - hs->hs_hops->hop_hnode_del(hs, bd, hnode); - - LASSERT(bd->bd_bucket->hsb_count > 0); - bd->bd_bucket->hsb_count--; - bd->bd_bucket->hsb_version++; - if (unlikely(bd->bd_bucket->hsb_version == 0)) - bd->bd_bucket->hsb_version++; - - if (cfs_hash_with_counter(hs)) { - LASSERT(atomic_read(&hs->hs_count) > 0); - atomic_dec(&hs->hs_count); - } - if (!cfs_hash_with_no_itemref(hs)) - cfs_hash_put_locked(hs, hnode); -} -EXPORT_SYMBOL(cfs_hash_bd_del_locked); - -void -cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old, - struct cfs_hash_bd *bd_new, struct hlist_node *hnode) -{ - struct cfs_hash_bucket *obkt = bd_old->bd_bucket; - struct cfs_hash_bucket *nbkt = bd_new->bd_bucket; - int rc; - - if (cfs_hash_bd_compare(bd_old, bd_new) == 0) - return; - - /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops - * in cfs_hash_bd_del/add_locked */ - hs->hs_hops->hop_hnode_del(hs, bd_old, hnode); - rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode); - cfs_hash_bd_dep_record(hs, bd_new, rc); - - LASSERT(obkt->hsb_count > 0); - obkt->hsb_count--; - obkt->hsb_version++; - if (unlikely(obkt->hsb_version == 0)) - obkt->hsb_version++; - nbkt->hsb_count++; - nbkt->hsb_version++; - if (unlikely(nbkt->hsb_version == 0)) - nbkt->hsb_version++; -} - -enum { - /** always set, for sanity (avoid ZERO intent) */ - CFS_HS_LOOKUP_MASK_FIND = BIT(0), - /** return entry with a ref */ - CFS_HS_LOOKUP_MASK_REF = BIT(1), - /** add entry if not existing */ - CFS_HS_LOOKUP_MASK_ADD = BIT(2), - /** delete entry, ignore other masks */ - CFS_HS_LOOKUP_MASK_DEL = BIT(3), -}; - -enum cfs_hash_lookup_intent { - /** return item w/o refcount */ - CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND, - /** return item with refcount */ - CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND | - 
CFS_HS_LOOKUP_MASK_REF), - /** return item w/o refcount if existed, otherwise add */ - CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND | - CFS_HS_LOOKUP_MASK_ADD), - /** return item with refcount if existed, otherwise add */ - CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND | - CFS_HS_LOOKUP_MASK_ADD), - /** delete if existed */ - CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND | - CFS_HS_LOOKUP_MASK_DEL) -}; - -static struct hlist_node * -cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd, - const void *key, struct hlist_node *hnode, - enum cfs_hash_lookup_intent intent) - -{ - struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd); - struct hlist_node *ehnode; - struct hlist_node *match; - int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0; - - /* with this function, we can avoid a lot of useless refcount ops, - * which are expensive atomic operations most time. */ - match = intent_add ? NULL : hnode; - hlist_for_each(ehnode, hhead) { - if (!cfs_hash_keycmp(hs, key, ehnode)) - continue; - - if (match != NULL && match != ehnode) /* can't match */ - continue; - - /* match and ... */ - if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) { - cfs_hash_bd_del_locked(hs, bd, ehnode); - return ehnode; - } - - /* caller wants refcount? 
*/ - if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0) - cfs_hash_get(hs, ehnode); - return ehnode; - } - /* no match item */ - if (!intent_add) - return NULL; - - LASSERT(hnode != NULL); - cfs_hash_bd_add_locked(hs, bd, hnode); - return hnode; -} - -struct hlist_node * -cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - const void *key) -{ - return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, - CFS_HS_LOOKUP_IT_FIND); -} -EXPORT_SYMBOL(cfs_hash_bd_lookup_locked); - -struct hlist_node * -cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - const void *key) -{ - return cfs_hash_bd_lookup_intent(hs, bd, key, NULL, - CFS_HS_LOOKUP_IT_PEEK); -} -EXPORT_SYMBOL(cfs_hash_bd_peek_locked); - -static void -cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, int excl) -{ - struct cfs_hash_bucket *prev = NULL; - int i; - - /** - * bds must be ascendantly ordered by bd->bd_bucket->hsb_index. - * NB: it's possible that several bds point to the same bucket but - * have different bd::bd_offset, so need take care of deadlock. 
- */ - cfs_hash_for_each_bd(bds, n, i) { - if (prev == bds[i].bd_bucket) - continue; - - LASSERT(prev == NULL || - prev->hsb_index < bds[i].bd_bucket->hsb_index); - cfs_hash_bd_lock(hs, &bds[i], excl); - prev = bds[i].bd_bucket; - } -} - -static void -cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, int excl) -{ - struct cfs_hash_bucket *prev = NULL; - int i; - - cfs_hash_for_each_bd(bds, n, i) { - if (prev != bds[i].bd_bucket) { - cfs_hash_bd_unlock(hs, &bds[i], excl); - prev = bds[i].bd_bucket; - } - } -} - -static struct hlist_node * -cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, const void *key) -{ - struct hlist_node *ehnode; - unsigned i; - - cfs_hash_for_each_bd(bds, n, i) { - ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL, - CFS_HS_LOOKUP_IT_FIND); - if (ehnode != NULL) - return ehnode; - } - return NULL; -} - -static struct hlist_node * -cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, const void *key, - struct hlist_node *hnode, int noref) -{ - struct hlist_node *ehnode; - int intent; - unsigned i; - - LASSERT(hnode != NULL); - intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK; - - cfs_hash_for_each_bd(bds, n, i) { - ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, - NULL, intent); - if (ehnode != NULL) - return ehnode; - } - - if (i == 1) { /* only one bucket */ - cfs_hash_bd_add_locked(hs, &bds[0], hnode); - } else { - struct cfs_hash_bd mybd; - - cfs_hash_bd_get(hs, key, &mybd); - cfs_hash_bd_add_locked(hs, &mybd, hnode); - } - - return hnode; -} - -static struct hlist_node * -cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - unsigned n, const void *key, - struct hlist_node *hnode) -{ - struct hlist_node *ehnode; - unsigned int i; - - cfs_hash_for_each_bd(bds, n, i) { - ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode, - CFS_HS_LOOKUP_IT_FINDDEL); - if 
(ehnode != NULL) - return ehnode; - } - return NULL; -} - -static void -cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2) -{ - int rc; - - if (bd2->bd_bucket == NULL) - return; - - if (bd1->bd_bucket == NULL) { - *bd1 = *bd2; - bd2->bd_bucket = NULL; - return; - } - - rc = cfs_hash_bd_compare(bd1, bd2); - if (rc == 0) { - bd2->bd_bucket = NULL; - - } else if (rc > 0) { /* swab bd1 and bd2 */ - struct cfs_hash_bd tmp; - - tmp = *bd2; - *bd2 = *bd1; - *bd1 = tmp; - } -} - -void -cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, - struct cfs_hash_bd *bds) -{ - /* NB: caller should hold hs_lock.rw if REHASH is set */ - cfs_hash_bd_from_key(hs, hs->hs_buckets, - hs->hs_cur_bits, key, &bds[0]); - if (likely(hs->hs_rehash_buckets == NULL)) { - /* no rehash or not rehashing */ - bds[1].bd_bucket = NULL; - return; - } - - LASSERT(hs->hs_rehash_bits != 0); - cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, - hs->hs_rehash_bits, key, &bds[1]); - - cfs_hash_bd_order(&bds[0], &bds[1]); -} - -void -cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl) -{ - cfs_hash_multi_bd_lock(hs, bds, 2, excl); -} - -void -cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl) -{ - cfs_hash_multi_bd_unlock(hs, bds, 2, excl); -} - -struct hlist_node * -cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - const void *key) -{ - return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key); -} - -struct hlist_node * -cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - const void *key, struct hlist_node *hnode, - int noref) -{ - return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key, - hnode, noref); -} - -struct hlist_node * -cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds, - const void *key, struct hlist_node *hnode) -{ - return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode); -} - -static void -cfs_hash_buckets_free(struct 
cfs_hash_bucket **buckets, - int bkt_size, int prev_size, int size) -{ - int i; - - for (i = prev_size; i < size; i++) { - if (buckets[i] != NULL) - LIBCFS_FREE(buckets[i], bkt_size); - } - - LIBCFS_FREE(buckets, sizeof(buckets[0]) * size); -} - -/* - * Create or grow bucket memory. Return old_buckets if no allocation was - * needed, the newly allocated buckets if allocation was needed and - * successful, and NULL on error. - */ -static struct cfs_hash_bucket ** -cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts, - unsigned int old_size, unsigned int new_size) -{ - struct cfs_hash_bucket **new_bkts; - int i; - - LASSERT(old_size == 0 || old_bkts != NULL); - - if (old_bkts != NULL && old_size == new_size) - return old_bkts; - - LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size); - if (new_bkts == NULL) - return NULL; - - if (old_bkts != NULL) { - memcpy(new_bkts, old_bkts, - min(old_size, new_size) * sizeof(*old_bkts)); - } - - for (i = old_size; i < new_size; i++) { - struct hlist_head *hhead; - struct cfs_hash_bd bd; - - LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs)); - if (new_bkts[i] == NULL) { - cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs), - old_size, new_size); - return NULL; - } - - new_bkts[i]->hsb_index = i; - new_bkts[i]->hsb_version = 1; /* shouldn't be zero */ - new_bkts[i]->hsb_depmax = -1; /* unknown */ - bd.bd_bucket = new_bkts[i]; - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) - INIT_HLIST_HEAD(hhead); - - if (cfs_hash_with_no_lock(hs) || - cfs_hash_with_no_bktlock(hs)) - continue; - - if (cfs_hash_with_rw_bktlock(hs)) - rwlock_init(&new_bkts[i]->hsb_lock.rw); - else if (cfs_hash_with_spin_bktlock(hs)) - spin_lock_init(&new_bkts[i]->hsb_lock.spin); - else - LBUG(); /* invalid use-case */ - } - return new_bkts; -} - -/** - * Initialize new libcfs hash, where: - * @name - Descriptive hash name - * @cur_bits - Initial hash table size, in bits - * @max_bits - Maximum allowed hash table resize, in bits - * @ops 
- Registered hash table operations - * @flags - CFS_HASH_REHASH enable synamic hash resizing - * - CFS_HASH_SORT enable chained hash sort - */ -static int cfs_hash_rehash_worker(cfs_workitem_t *wi); - -#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 -static int cfs_hash_dep_print(cfs_workitem_t *wi) -{ - struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi); - int dep; - int bkt; - int off; - int bits; - - spin_lock(&hs->hs_dep_lock); - dep = hs->hs_dep_max; - bkt = hs->hs_dep_bkt; - off = hs->hs_dep_off; - bits = hs->hs_dep_bits; - spin_unlock(&hs->hs_dep_lock); - - LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n", - hs->hs_name, bits, dep, bkt, off); - spin_lock(&hs->hs_dep_lock); - hs->hs_dep_bits = 0; /* mark as workitem done */ - spin_unlock(&hs->hs_dep_lock); - return 0; -} - -static void cfs_hash_depth_wi_init(struct cfs_hash *hs) -{ - spin_lock_init(&hs->hs_dep_lock); - cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print); -} - -static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) -{ - if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi)) - return; - - spin_lock(&hs->hs_dep_lock); - while (hs->hs_dep_bits != 0) { - spin_unlock(&hs->hs_dep_lock); - cond_resched(); - spin_lock(&hs->hs_dep_lock); - } - spin_unlock(&hs->hs_dep_lock); -} - -#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */ - -static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {} -static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {} - -#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */ - -struct cfs_hash * -cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits, - unsigned bkt_bits, unsigned extra_bytes, - unsigned min_theta, unsigned max_theta, - struct cfs_hash_ops *ops, unsigned flags) -{ - struct cfs_hash *hs; - int len; - - CLASSERT(CFS_HASH_THETA_BITS < 15); - - LASSERT(name != NULL); - LASSERT(ops != NULL); - LASSERT(ops->hs_key); - LASSERT(ops->hs_hash); - LASSERT(ops->hs_object); - LASSERT(ops->hs_keycmp); 
- LASSERT(ops->hs_get != NULL); - LASSERT(ops->hs_put_locked != NULL); - - if ((flags & CFS_HASH_REHASH) != 0) - flags |= CFS_HASH_COUNTER; /* must have counter */ - - LASSERT(cur_bits > 0); - LASSERT(cur_bits >= bkt_bits); - LASSERT(max_bits >= cur_bits && max_bits < 31); - LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits)); - LASSERT(ergo((flags & CFS_HASH_REHASH) != 0, - (flags & CFS_HASH_NO_LOCK) == 0)); - LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, - ops->hs_keycpy != NULL)); - - len = (flags & CFS_HASH_BIGNAME) == 0 ? - CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN; - LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len])); - if (hs == NULL) - return NULL; - - strlcpy(hs->hs_name, name, len); - hs->hs_flags = flags; - - atomic_set(&hs->hs_refcount, 1); - atomic_set(&hs->hs_count, 0); - - cfs_hash_lock_setup(hs); - cfs_hash_hlist_setup(hs); - - hs->hs_cur_bits = (__u8)cur_bits; - hs->hs_min_bits = (__u8)cur_bits; - hs->hs_max_bits = (__u8)max_bits; - hs->hs_bkt_bits = (__u8)bkt_bits; - - hs->hs_ops = ops; - hs->hs_extra_bytes = extra_bytes; - hs->hs_rehash_bits = 0; - cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker); - cfs_hash_depth_wi_init(hs); - - if (cfs_hash_with_rehash(hs)) - __cfs_hash_set_theta(hs, min_theta, max_theta); - - hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0, - CFS_HASH_NBKT(hs)); - if (hs->hs_buckets != NULL) - return hs; - - LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len])); - return NULL; -} -EXPORT_SYMBOL(cfs_hash_create); - -/** - * Cleanup libcfs hash @hs. - */ -static void -cfs_hash_destroy(struct cfs_hash *hs) -{ - struct hlist_node *hnode; - struct hlist_node *pos; - struct cfs_hash_bd bd; - int i; - - LASSERT(hs != NULL); - LASSERT(!cfs_hash_is_exiting(hs) && - !cfs_hash_is_iterating(hs)); - - /** - * prohibit further rehashes, don't need any lock because - * I'm the only (last) one can change it. 
- */ - hs->hs_exiting = 1; - if (cfs_hash_with_rehash(hs)) - cfs_hash_rehash_cancel(hs); - - cfs_hash_depth_wi_cancel(hs); - /* rehash should be done/canceled */ - LASSERT(hs->hs_buckets != NULL && - hs->hs_rehash_buckets == NULL); - - cfs_hash_for_each_bucket(hs, &bd, i) { - struct hlist_head *hhead; - - LASSERT(bd.bd_bucket != NULL); - /* no need to take this lock, just for consistent code */ - cfs_hash_bd_lock(hs, &bd, 1); - - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - hlist_for_each_safe(hnode, pos, hhead) { - LASSERTF(!cfs_hash_with_assert_empty(hs), - "hash %s bucket %u(%u) is not empty: %u items left\n", - hs->hs_name, bd.bd_bucket->hsb_index, - bd.bd_offset, bd.bd_bucket->hsb_count); - /* can't assert key valicate, because we - * can interrupt rehash */ - cfs_hash_bd_del_locked(hs, &bd, hnode); - cfs_hash_exit(hs, hnode); - } - } - LASSERT(bd.bd_bucket->hsb_count == 0); - cfs_hash_bd_unlock(hs, &bd, 1); - cond_resched(); - } - - LASSERT(atomic_read(&hs->hs_count) == 0); - - cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs), - 0, CFS_HASH_NBKT(hs)); - i = cfs_hash_with_bigname(hs) ? 
- CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN; - LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i])); -} - -struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs) -{ - if (atomic_inc_not_zero(&hs->hs_refcount)) - return hs; - return NULL; -} -EXPORT_SYMBOL(cfs_hash_getref); - -void cfs_hash_putref(struct cfs_hash *hs) -{ - if (atomic_dec_and_test(&hs->hs_refcount)) - cfs_hash_destroy(hs); -} -EXPORT_SYMBOL(cfs_hash_putref); - -static inline int -cfs_hash_rehash_bits(struct cfs_hash *hs) -{ - if (cfs_hash_with_no_lock(hs) || - !cfs_hash_with_rehash(hs)) - return -EOPNOTSUPP; - - if (unlikely(cfs_hash_is_exiting(hs))) - return -ESRCH; - - if (unlikely(cfs_hash_is_rehashing(hs))) - return -EALREADY; - - if (unlikely(cfs_hash_is_iterating(hs))) - return -EAGAIN; - - /* XXX: need to handle case with max_theta != 2.0 - * and the case with min_theta != 0.5 */ - if ((hs->hs_cur_bits < hs->hs_max_bits) && - (__cfs_hash_theta(hs) > hs->hs_max_theta)) - return hs->hs_cur_bits + 1; - - if (!cfs_hash_with_shrink(hs)) - return 0; - - if ((hs->hs_cur_bits > hs->hs_min_bits) && - (__cfs_hash_theta(hs) < hs->hs_min_theta)) - return hs->hs_cur_bits - 1; - - return 0; -} - -/** - * don't allow inline rehash if: - * - user wants non-blocking change (add/del) on hash table - * - too many elements - */ -static inline int -cfs_hash_rehash_inline(struct cfs_hash *hs) -{ - return !cfs_hash_with_nblk_change(hs) && - atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG; -} - -/** - * Add item @hnode to libcfs hash @hs using @key. The registered - * ops->hs_get function will be called when the item is added. 
- */ -void -cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) -{ - struct cfs_hash_bd bd; - int bits; - - LASSERT(hlist_unhashed(hnode)); - - cfs_hash_lock(hs, 0); - cfs_hash_bd_get_and_lock(hs, key, &bd, 1); - - cfs_hash_key_validate(hs, key, hnode); - cfs_hash_bd_add_locked(hs, &bd, hnode); - - cfs_hash_bd_unlock(hs, &bd, 1); - - bits = cfs_hash_rehash_bits(hs); - cfs_hash_unlock(hs, 0); - if (bits > 0) - cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); -} -EXPORT_SYMBOL(cfs_hash_add); - -static struct hlist_node * -cfs_hash_find_or_add(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode, int noref) -{ - struct hlist_node *ehnode; - struct cfs_hash_bd bds[2]; - int bits = 0; - - LASSERT(hlist_unhashed(hnode)); - - cfs_hash_lock(hs, 0); - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); - - cfs_hash_key_validate(hs, key, hnode); - ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key, - hnode, noref); - cfs_hash_dual_bd_unlock(hs, bds, 1); - - if (ehnode == hnode) /* new item added */ - bits = cfs_hash_rehash_bits(hs); - cfs_hash_unlock(hs, 0); - if (bits > 0) - cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); - - return ehnode; -} - -/** - * Add item @hnode to libcfs hash @hs using @key. The registered - * ops->hs_get function will be called if the item was added. - * Returns 0 on success or -EALREADY on key collisions. - */ -int -cfs_hash_add_unique(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode) -{ - return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ? - -EALREADY : 0; -} -EXPORT_SYMBOL(cfs_hash_add_unique); - -/** - * Add item @hnode to libcfs hash @hs using @key. If this @key - * already exists in the hash then ops->hs_get will be called on the - * conflicting entry and that entry will be returned to the caller. - * Otherwise ops->hs_get is called on the item which was added. 
- */ -void * -cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key, - struct hlist_node *hnode) -{ - hnode = cfs_hash_find_or_add(hs, key, hnode, 0); - - return cfs_hash_object(hs, hnode); -} -EXPORT_SYMBOL(cfs_hash_findadd_unique); - -/** - * Delete item @hnode from the libcfs hash @hs using @key. The @key - * is required to ensure the correct hash bucket is locked since there - * is no direct linkage from the item to the bucket. The object - * removed from the hash will be returned and obs->hs_put is called - * on the removed object. - */ -void * -cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode) -{ - void *obj = NULL; - int bits = 0; - struct cfs_hash_bd bds[2]; - - cfs_hash_lock(hs, 0); - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1); - - /* NB: do nothing if @hnode is not in hash table */ - if (hnode == NULL || !hlist_unhashed(hnode)) { - if (bds[1].bd_bucket == NULL && hnode != NULL) { - cfs_hash_bd_del_locked(hs, &bds[0], hnode); - } else { - hnode = cfs_hash_dual_bd_finddel_locked(hs, bds, - key, hnode); - } - } - - if (hnode != NULL) { - obj = cfs_hash_object(hs, hnode); - bits = cfs_hash_rehash_bits(hs); - } - - cfs_hash_dual_bd_unlock(hs, bds, 1); - cfs_hash_unlock(hs, 0); - if (bits > 0) - cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs)); - - return obj; -} -EXPORT_SYMBOL(cfs_hash_del); - -/** - * Delete item given @key in libcfs hash @hs. The first @key found in - * the hash will be removed, if the key exists multiple times in the hash - * @hs this function must be called once per key. The removed object - * will be returned and ops->hs_put is called on the removed object. - */ -void * -cfs_hash_del_key(struct cfs_hash *hs, const void *key) -{ - return cfs_hash_del(hs, key, NULL); -} -EXPORT_SYMBOL(cfs_hash_del_key); - -/** - * Lookup an item using @key in the libcfs hash @hs and return it. - * If the @key is found in the hash hs->hs_get() is called and the - * matching objects is returned. 
It is the callers responsibility - * to call the counterpart ops->hs_put using the cfs_hash_put() macro - * when when finished with the object. If the @key was not found - * in the hash @hs NULL is returned. - */ -void * -cfs_hash_lookup(struct cfs_hash *hs, const void *key) -{ - void *obj = NULL; - struct hlist_node *hnode; - struct cfs_hash_bd bds[2]; - - cfs_hash_lock(hs, 0); - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0); - - hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key); - if (hnode != NULL) - obj = cfs_hash_object(hs, hnode); - - cfs_hash_dual_bd_unlock(hs, bds, 0); - cfs_hash_unlock(hs, 0); - - return obj; -} -EXPORT_SYMBOL(cfs_hash_lookup); - -static void -cfs_hash_for_each_enter(struct cfs_hash *hs) -{ - LASSERT(!cfs_hash_is_exiting(hs)); - - if (!cfs_hash_with_rehash(hs)) - return; - /* - * NB: it's race on cfs_has_t::hs_iterating, but doesn't matter - * because it's just an unreliable signal to rehash-thread, - * rehash-thread will try to finish rehash ASAP when seeing this. - */ - hs->hs_iterating = 1; - - cfs_hash_lock(hs, 1); - hs->hs_iterators++; - - /* NB: iteration is mostly called by service thread, - * we tend to cancel pending rehash-request, instead of - * blocking service thread, we will relaunch rehash request - * after iteration */ - if (cfs_hash_is_rehashing(hs)) - cfs_hash_rehash_cancel_locked(hs); - cfs_hash_unlock(hs, 1); -} - -static void -cfs_hash_for_each_exit(struct cfs_hash *hs) -{ - int remained; - int bits; - - if (!cfs_hash_with_rehash(hs)) - return; - cfs_hash_lock(hs, 1); - remained = --hs->hs_iterators; - bits = cfs_hash_rehash_bits(hs); - cfs_hash_unlock(hs, 1); - /* NB: it's race on cfs_has_t::hs_iterating, see above */ - if (remained == 0) - hs->hs_iterating = 0; - if (bits > 0) { - cfs_hash_rehash(hs, atomic_read(&hs->hs_count) < - CFS_HASH_LOOP_HOG); - } -} - -/** - * For each item in the libcfs hash @hs call the passed callback @func - * and pass to it as an argument each hash item and the private @data. 
- * - * a) the function may sleep! - * b) during the callback: - * . the bucket lock is held so the callback must never sleep. - * . if @removal_safe is true, use can remove current item by - * cfs_hash_bd_del_locked - */ -static __u64 -cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data, int remove_safe) -{ - struct hlist_node *hnode; - struct hlist_node *pos; - struct cfs_hash_bd bd; - __u64 count = 0; - int excl = !!remove_safe; - int loop = 0; - int i; - - cfs_hash_for_each_enter(hs); - - cfs_hash_lock(hs, 0); - LASSERT(!cfs_hash_is_rehashing(hs)); - - cfs_hash_for_each_bucket(hs, &bd, i) { - struct hlist_head *hhead; - - cfs_hash_bd_lock(hs, &bd, excl); - if (func == NULL) { /* only glimpse size */ - count += bd.bd_bucket->hsb_count; - cfs_hash_bd_unlock(hs, &bd, excl); - continue; - } - - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - hlist_for_each_safe(hnode, pos, hhead) { - cfs_hash_bucket_validate(hs, &bd, hnode); - count++; - loop++; - if (func(hs, &bd, hnode, data)) { - cfs_hash_bd_unlock(hs, &bd, excl); - goto out; - } - } - } - cfs_hash_bd_unlock(hs, &bd, excl); - if (loop < CFS_HASH_LOOP_HOG) - continue; - loop = 0; - cfs_hash_unlock(hs, 0); - cond_resched(); - cfs_hash_lock(hs, 0); - } - out: - cfs_hash_unlock(hs, 0); - - cfs_hash_for_each_exit(hs); - return count; -} - -struct cfs_hash_cond_arg { - cfs_hash_cond_opt_cb_t func; - void *arg; -}; - -static int -cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *data) -{ - struct cfs_hash_cond_arg *cond = data; - - if (cond->func(cfs_hash_object(hs, hnode), cond->arg)) - cfs_hash_bd_del_locked(hs, bd, hnode); - return 0; -} - -/** - * Delete item from the libcfs hash @hs when @func return true. - * The write lock being hold during loop for each bucket to avoid - * any object be reference. 
- */ -void -cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data) -{ - struct cfs_hash_cond_arg arg = { - .func = func, - .arg = data, - }; - - cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1); -} -EXPORT_SYMBOL(cfs_hash_cond_del); - -void -cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - cfs_hash_for_each_tight(hs, func, data, 0); -} -EXPORT_SYMBOL(cfs_hash_for_each); - -void -cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - cfs_hash_for_each_tight(hs, func, data, 1); -} -EXPORT_SYMBOL(cfs_hash_for_each_safe); - -static int -cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd, - struct hlist_node *hnode, void *data) -{ - *(int *)data = 0; - return 1; /* return 1 to break the loop */ -} - -int -cfs_hash_is_empty(struct cfs_hash *hs) -{ - int empty = 1; - - cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0); - return empty; -} -EXPORT_SYMBOL(cfs_hash_is_empty); - -__u64 -cfs_hash_size_get(struct cfs_hash *hs) -{ - return cfs_hash_with_counter(hs) ? - atomic_read(&hs->hs_count) : - cfs_hash_for_each_tight(hs, NULL, NULL, 0); -} -EXPORT_SYMBOL(cfs_hash_size_get); - -/* - * cfs_hash_for_each_relax: - * Iterate the hash table and call @func on each item without - * any lock. This function can't guarantee to finish iteration - * if these features are enabled: - * - * a. if rehash_key is enabled, an item can be moved from - * one bucket to another bucket - * b. user can remove non-zero-ref item from hash-table, - * so the item can be removed from hash-table, even worse, - * it's possible that user changed key and insert to another - * hash bucket. - * there's no way for us to finish iteration correctly on previous - * two cases, so iteration has to be stopped on change. 
- */ -static int -cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - struct hlist_node *hnode; - struct hlist_node *tmp; - struct cfs_hash_bd bd; - __u32 version; - int count = 0; - int stop_on_change; - int rc; - int i; - - stop_on_change = cfs_hash_with_rehash_key(hs) || - !cfs_hash_with_no_itemref(hs) || - hs->hs_ops->hs_put_locked == NULL; - cfs_hash_lock(hs, 0); - LASSERT(!cfs_hash_is_rehashing(hs)); - - cfs_hash_for_each_bucket(hs, &bd, i) { - struct hlist_head *hhead; - - cfs_hash_bd_lock(hs, &bd, 0); - version = cfs_hash_bd_version_get(&bd); - - cfs_hash_bd_for_each_hlist(hs, &bd, hhead) { - for (hnode = hhead->first; hnode != NULL;) { - cfs_hash_bucket_validate(hs, &bd, hnode); - cfs_hash_get(hs, hnode); - cfs_hash_bd_unlock(hs, &bd, 0); - cfs_hash_unlock(hs, 0); - - rc = func(hs, &bd, hnode, data); - if (stop_on_change) - cfs_hash_put(hs, hnode); - cond_resched(); - count++; - - cfs_hash_lock(hs, 0); - cfs_hash_bd_lock(hs, &bd, 0); - if (!stop_on_change) { - tmp = hnode->next; - cfs_hash_put_locked(hs, hnode); - hnode = tmp; - } else { /* bucket changed? 
*/ - if (version != - cfs_hash_bd_version_get(&bd)) - break; - /* safe to continue because no change */ - hnode = hnode->next; - } - if (rc) /* callback wants to break iteration */ - break; - } - if (rc) /* callback wants to break iteration */ - break; - } - cfs_hash_bd_unlock(hs, &bd, 0); - if (rc) /* callback wants to break iteration */ - break; - } - cfs_hash_unlock(hs, 0); - - return count; -} - -int -cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - if (cfs_hash_with_no_lock(hs) || - cfs_hash_with_rehash_key(hs) || - !cfs_hash_with_no_itemref(hs)) - return -EOPNOTSUPP; - - if (hs->hs_ops->hs_get == NULL || - (hs->hs_ops->hs_put == NULL && - hs->hs_ops->hs_put_locked == NULL)) - return -EOPNOTSUPP; - - cfs_hash_for_each_enter(hs); - cfs_hash_for_each_relax(hs, func, data); - cfs_hash_for_each_exit(hs); - - return 0; -} -EXPORT_SYMBOL(cfs_hash_for_each_nolock); - -/** - * For each hash bucket in the libcfs hash @hs call the passed callback - * @func until all the hash buckets are empty. The passed callback @func - * or the previously registered callback hs->hs_put must remove the item - * from the hash. You may either use the cfs_hash_del() or hlist_del() - * functions. No rwlocks will be held during the callback @func it is - * safe to sleep if needed. This function will not terminate until the - * hash is empty. Note it is still possible to concurrently add new - * items in to the hash. It is the callers responsibility to ensure - * the required locking is in place to prevent concurrent insertions. 
- */ -int -cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, - void *data) -{ - unsigned i = 0; - - if (cfs_hash_with_no_lock(hs)) - return -EOPNOTSUPP; - - if (hs->hs_ops->hs_get == NULL || - (hs->hs_ops->hs_put == NULL && - hs->hs_ops->hs_put_locked == NULL)) - return -EOPNOTSUPP; - - cfs_hash_for_each_enter(hs); - while (cfs_hash_for_each_relax(hs, func, data)) { - CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n", - hs->hs_name, i++); - } - cfs_hash_for_each_exit(hs); - return 0; -} -EXPORT_SYMBOL(cfs_hash_for_each_empty); - -void -cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex, - cfs_hash_for_each_cb_t func, void *data) -{ - struct hlist_head *hhead; - struct hlist_node *hnode; - struct cfs_hash_bd bd; - - cfs_hash_for_each_enter(hs); - cfs_hash_lock(hs, 0); - if (hindex >= CFS_HASH_NHLIST(hs)) - goto out; - - cfs_hash_bd_index_set(hs, hindex, &bd); - - cfs_hash_bd_lock(hs, &bd, 0); - hhead = cfs_hash_bd_hhead(hs, &bd); - hlist_for_each(hnode, hhead) { - if (func(hs, &bd, hnode, data)) - break; - } - cfs_hash_bd_unlock(hs, &bd, 0); -out: - cfs_hash_unlock(hs, 0); - cfs_hash_for_each_exit(hs); -} - -EXPORT_SYMBOL(cfs_hash_hlist_for_each); - -/* - * For each item in the libcfs hash @hs which matches the @key call - * the passed callback @func and pass to it as an argument each hash - * item and the private @data. During the callback the bucket lock - * is held so the callback must never sleep. 
- */ -void -cfs_hash_for_each_key(struct cfs_hash *hs, const void *key, - cfs_hash_for_each_cb_t func, void *data) -{ - struct hlist_node *hnode; - struct cfs_hash_bd bds[2]; - unsigned int i; - - cfs_hash_lock(hs, 0); - - cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0); - - cfs_hash_for_each_bd(bds, 2, i) { - struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]); - - hlist_for_each(hnode, hlist) { - cfs_hash_bucket_validate(hs, &bds[i], hnode); - - if (cfs_hash_keycmp(hs, key, hnode)) { - if (func(hs, &bds[i], hnode, data)) - break; - } - } - } - - cfs_hash_dual_bd_unlock(hs, bds, 0); - cfs_hash_unlock(hs, 0); -} -EXPORT_SYMBOL(cfs_hash_for_each_key); - -/** - * Rehash the libcfs hash @hs to the given @bits. This can be used - * to grow the hash size when excessive chaining is detected, or to - * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH - * flag is set in @hs the libcfs hash may be dynamically rehashed - * during addition or removal if the hash's theta value exceeds - * either the hs->hs_min_theta or hs->max_theta values. By default - * these values are tuned to keep the chained hash depth small, and - * this approach assumes a reasonably uniform hashing function. The - * theta thresholds for @hs are tunable via cfs_hash_set_theta(). - */ -void -cfs_hash_rehash_cancel_locked(struct cfs_hash *hs) -{ - int i; - - /* need hold cfs_hash_lock(hs, 1) */ - LASSERT(cfs_hash_with_rehash(hs) && - !cfs_hash_with_no_lock(hs)); - - if (!cfs_hash_is_rehashing(hs)) - return; - - if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) { - hs->hs_rehash_bits = 0; - return; - } - - for (i = 2; cfs_hash_is_rehashing(hs); i++) { - cfs_hash_unlock(hs, 1); - /* raise console warning while waiting too long */ - CDEBUG(is_power_of_2(i >> 3) ? 
D_WARNING : D_INFO, - "hash %s is still rehashing, rescheded %d\n", - hs->hs_name, i - 1); - cond_resched(); - cfs_hash_lock(hs, 1); - } -} - -void -cfs_hash_rehash_cancel(struct cfs_hash *hs) -{ - cfs_hash_lock(hs, 1); - cfs_hash_rehash_cancel_locked(hs); - cfs_hash_unlock(hs, 1); -} - -int -cfs_hash_rehash(struct cfs_hash *hs, int do_rehash) -{ - int rc; - - LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs)); - - cfs_hash_lock(hs, 1); - - rc = cfs_hash_rehash_bits(hs); - if (rc <= 0) { - cfs_hash_unlock(hs, 1); - return rc; - } - - hs->hs_rehash_bits = rc; - if (!do_rehash) { - /* launch and return */ - cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi); - cfs_hash_unlock(hs, 1); - return 0; - } - - /* rehash right now */ - cfs_hash_unlock(hs, 1); - - return cfs_hash_rehash_worker(&hs->hs_rehash_wi); -} - -static int -cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old) -{ - struct cfs_hash_bd new; - struct hlist_head *hhead; - struct hlist_node *hnode; - struct hlist_node *pos; - void *key; - int c = 0; - - /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */ - cfs_hash_bd_for_each_hlist(hs, old, hhead) { - hlist_for_each_safe(hnode, pos, hhead) { - key = cfs_hash_key(hs, hnode); - LASSERT(key != NULL); - /* Validate hnode is in the correct bucket. */ - cfs_hash_bucket_validate(hs, old, hnode); - /* - * Delete from old hash bucket; move to new bucket. - * ops->hs_key must be defined. 
- */ - cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets, - hs->hs_rehash_bits, key, &new); - cfs_hash_bd_move_locked(hs, old, &new, hnode); - c++; - } - } - - return c; -} - -static int -cfs_hash_rehash_worker(cfs_workitem_t *wi) -{ - struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi); - struct cfs_hash_bucket **bkts; - struct cfs_hash_bd bd; - unsigned int old_size; - unsigned int new_size; - int bsize; - int count = 0; - int rc = 0; - int i; - - LASSERT(hs != NULL && cfs_hash_with_rehash(hs)); - - cfs_hash_lock(hs, 0); - LASSERT(cfs_hash_is_rehashing(hs)); - - old_size = CFS_HASH_NBKT(hs); - new_size = CFS_HASH_RH_NBKT(hs); - - cfs_hash_unlock(hs, 0); - - /* - * don't need hs::hs_rwlock for hs::hs_buckets, - * because nobody can change bkt-table except me. - */ - bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets, - old_size, new_size); - cfs_hash_lock(hs, 1); - if (bkts == NULL) { - rc = -ENOMEM; - goto out; - } - - if (bkts == hs->hs_buckets) { - bkts = NULL; /* do nothing */ - goto out; - } - - rc = __cfs_hash_theta(hs); - if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) { - /* free the new allocated bkt-table */ - old_size = new_size; - new_size = CFS_HASH_NBKT(hs); - rc = -EALREADY; - goto out; - } - - LASSERT(hs->hs_rehash_buckets == NULL); - hs->hs_rehash_buckets = bkts; - - rc = 0; - cfs_hash_for_each_bucket(hs, &bd, i) { - if (cfs_hash_is_exiting(hs)) { - rc = -ESRCH; - /* someone wants to destroy the hash, abort now */ - if (old_size < new_size) /* OK to free old bkt-table */ - break; - /* it's shrinking, need free new bkt-table */ - hs->hs_rehash_buckets = NULL; - old_size = new_size; - new_size = CFS_HASH_NBKT(hs); - goto out; - } - - count += cfs_hash_rehash_bd(hs, &bd); - if (count < CFS_HASH_LOOP_HOG || - cfs_hash_is_iterating(hs)) { /* need to finish ASAP */ - continue; - } - - count = 0; - cfs_hash_unlock(hs, 1); - cond_resched(); - cfs_hash_lock(hs, 1); - } - - hs->hs_rehash_count++; - - bkts = hs->hs_buckets; - 
hs->hs_buckets = hs->hs_rehash_buckets; - hs->hs_rehash_buckets = NULL; - - hs->hs_cur_bits = hs->hs_rehash_bits; -out: - hs->hs_rehash_bits = 0; - if (rc == -ESRCH) /* never be scheduled again */ - cfs_wi_exit(cfs_sched_rehash, wi); - bsize = cfs_hash_bkt_size(hs); - cfs_hash_unlock(hs, 1); - /* can't refer to @hs anymore because it could be destroyed */ - if (bkts != NULL) - cfs_hash_buckets_free(bkts, bsize, new_size, old_size); - if (rc != 0) - CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc); - /* return 1 only if cfs_wi_exit is called */ - return rc == -ESRCH; -} - -/** - * Rehash the object referenced by @hnode in the libcfs hash @hs. The - * @old_key must be provided to locate the objects previous location - * in the hash, and the @new_key will be used to reinsert the object. - * Use this function instead of a cfs_hash_add() + cfs_hash_del() - * combo when it is critical that there is no window in time where the - * object is missing from the hash. When an object is being rehashed - * the registered cfs_hash_get() and cfs_hash_put() functions will - * not be called. 
- */ -void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key, - void *new_key, struct hlist_node *hnode) -{ - struct cfs_hash_bd bds[3]; - struct cfs_hash_bd old_bds[2]; - struct cfs_hash_bd new_bd; - - LASSERT(!hlist_unhashed(hnode)); - - cfs_hash_lock(hs, 0); - - cfs_hash_dual_bd_get(hs, old_key, old_bds); - cfs_hash_bd_get(hs, new_key, &new_bd); - - bds[0] = old_bds[0]; - bds[1] = old_bds[1]; - bds[2] = new_bd; - - /* NB: bds[0] and bds[1] are ordered already */ - cfs_hash_bd_order(&bds[1], &bds[2]); - cfs_hash_bd_order(&bds[0], &bds[1]); - - cfs_hash_multi_bd_lock(hs, bds, 3, 1); - if (likely(old_bds[1].bd_bucket == NULL)) { - cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode); - } else { - cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode); - cfs_hash_bd_add_locked(hs, &new_bd, hnode); - } - /* overwrite key inside locks, otherwise may screw up with - * other operations, i.e: rehash */ - cfs_hash_keycpy(hs, hnode, new_key); - - cfs_hash_multi_bd_unlock(hs, bds, 3, 1); - cfs_hash_unlock(hs, 0); -} -EXPORT_SYMBOL(cfs_hash_rehash_key); - -void cfs_hash_debug_header(struct seq_file *m) -{ - seq_printf(m, "%-*s cur min max theta t-min t-max flags rehash count maxdep maxdepb distribution\n", - CFS_HASH_BIGNAME_LEN, "name"); -} -EXPORT_SYMBOL(cfs_hash_debug_header); - -static struct cfs_hash_bucket ** -cfs_hash_full_bkts(struct cfs_hash *hs) -{ - /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (hs->hs_rehash_buckets == NULL) - return hs->hs_buckets; - - LASSERT(hs->hs_rehash_bits != 0); - return hs->hs_rehash_bits > hs->hs_cur_bits ? - hs->hs_rehash_buckets : hs->hs_buckets; -} - -static unsigned int -cfs_hash_full_nbkt(struct cfs_hash *hs) -{ - /* NB: caller should hold hs->hs_rwlock if REHASH is set */ - if (hs->hs_rehash_buckets == NULL) - return CFS_HASH_NBKT(hs); - - LASSERT(hs->hs_rehash_bits != 0); - return hs->hs_rehash_bits > hs->hs_cur_bits ? 
- CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs); -} - -void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m) -{ - int dist[8] = { 0, }; - int maxdep = -1; - int maxdepb = -1; - int total = 0; - int theta; - int i; - - cfs_hash_lock(hs, 0); - theta = __cfs_hash_theta(hs); - - seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ", - CFS_HASH_BIGNAME_LEN, hs->hs_name, - 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits, - 1 << hs->hs_max_bits, - __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta), - __cfs_hash_theta_int(hs->hs_min_theta), - __cfs_hash_theta_frac(hs->hs_min_theta), - __cfs_hash_theta_int(hs->hs_max_theta), - __cfs_hash_theta_frac(hs->hs_max_theta), - hs->hs_flags, hs->hs_rehash_count); - - /* - * The distribution is a summary of the chained hash depth in - * each of the libcfs hash buckets. Each buckets hsb_count is - * divided by the hash theta value and used to generate a - * histogram of the hash distribution. A uniform hash will - * result in all hash buckets being close to the average thus - * only the first few entries in the histogram will be non-zero. - * If you hash function results in a non-uniform hash the will - * be observable by outlier bucks in the distribution histogram. - * - * Uniform hash distribution: 128/128/0/0/0/0/0/0 - * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1 - */ - for (i = 0; i < cfs_hash_full_nbkt(hs); i++) { - struct cfs_hash_bd bd; - - bd.bd_bucket = cfs_hash_full_bkts(hs)[i]; - cfs_hash_bd_lock(hs, &bd, 0); - if (maxdep < bd.bd_bucket->hsb_depmax) { - maxdep = bd.bd_bucket->hsb_depmax; - maxdepb = ffz(~maxdep); - } - total += bd.bd_bucket->hsb_count; - dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++; - cfs_hash_bd_unlock(hs, &bd, 0); - } - - seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb); - for (i = 0; i < 8; i++) - seq_printf(m, "%d%c", dist[i], (i == 7) ? 
'\n' : '/'); - - cfs_hash_unlock(hs, 0); -} -EXPORT_SYMBOL(cfs_hash_debug_str); diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c deleted file mode 100644 index 933525c73..000000000 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * - * GPL HEADER END - */ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * Please see comments in libcfs/include/libcfs/libcfs_cpu.h for introduction - * - * Author: liang@whamcloud.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" - -/** Global CPU partition table */ -struct cfs_cpt_table *cfs_cpt_table __read_mostly; -EXPORT_SYMBOL(cfs_cpt_table); - -#ifndef HAVE_LIBCFS_CPT - -#define CFS_CPU_VERSION_MAGIC 0xbabecafe - -struct cfs_cpt_table * -cfs_cpt_table_alloc(unsigned int ncpt) -{ - struct cfs_cpt_table *cptab; - - if (ncpt != 1) { - CERROR("Can't support cpu partition number %d\n", ncpt); - return NULL; - } - - LIBCFS_ALLOC(cptab, sizeof(*cptab)); - if (cptab != NULL) { - cptab->ctb_version = CFS_CPU_VERSION_MAGIC; - cptab->ctb_nparts = ncpt; - } - - return cptab; -} -EXPORT_SYMBOL(cfs_cpt_table_alloc); - -void -cfs_cpt_table_free(struct cfs_cpt_table *cptab) -{ - LASSERT(cptab->ctb_version == CFS_CPU_VERSION_MAGIC); - - LIBCFS_FREE(cptab, sizeof(*cptab)); -} -EXPORT_SYMBOL(cfs_cpt_table_free); - -#ifdef CONFIG_SMP -int -cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) -{ - int rc; - - rc = snprintf(buf, len, "%d\t: %d\n", 0, 0); - len -= rc; - if (len <= 0) - return -EFBIG; - - return rc; -} -EXPORT_SYMBOL(cfs_cpt_table_print); -#endif /* CONFIG_SMP */ - -int -cfs_cpt_number(struct cfs_cpt_table *cptab) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_number); - -int -cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_weight); - -int -cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_online); - -int -cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_cpu); - -void -cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ -} -EXPORT_SYMBOL(cfs_cpt_unset_cpu); - -int -cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_cpumask); - -void 
-cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ -} -EXPORT_SYMBOL(cfs_cpt_unset_cpumask); - -int -cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_node); - -void -cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ -} -EXPORT_SYMBOL(cfs_cpt_unset_node); - -int -cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_nodemask); - -void -cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) -{ -} -EXPORT_SYMBOL(cfs_cpt_unset_nodemask); - -void -cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) -{ -} -EXPORT_SYMBOL(cfs_cpt_clear); - -int -cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt) -{ - return 0; -} -EXPORT_SYMBOL(cfs_cpt_spread_node); - -int -cfs_cpu_ht_nsiblings(int cpu) -{ - return 1; -} -EXPORT_SYMBOL(cfs_cpu_ht_nsiblings); - -int -cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) -{ - return 0; -} -EXPORT_SYMBOL(cfs_cpt_current); - -int -cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) -{ - return 0; -} -EXPORT_SYMBOL(cfs_cpt_of_cpu); - -int -cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) -{ - return 0; -} -EXPORT_SYMBOL(cfs_cpt_bind); - -void -cfs_cpu_fini(void) -{ - if (cfs_cpt_table != NULL) { - cfs_cpt_table_free(cfs_cpt_table); - cfs_cpt_table = NULL; - } -} - -int -cfs_cpu_init(void) -{ - cfs_cpt_table = cfs_cpt_table_alloc(1); - - return cfs_cpt_table != NULL ? 0 : -1; -} - -#endif /* HAVE_LIBCFS_CPT */ diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c b/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c deleted file mode 100644 index 15782d9e6..000000000 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c +++ /dev/null @@ -1,187 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * - * GPL HEADER END - */ -/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Author: liang@whamcloud.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" - -/** destroy cpu-partition lock, see libcfs_private.h for more detail */ -void -cfs_percpt_lock_free(struct cfs_percpt_lock *pcl) -{ - LASSERT(pcl->pcl_locks != NULL); - LASSERT(!pcl->pcl_locked); - - cfs_percpt_free(pcl->pcl_locks); - LIBCFS_FREE(pcl, sizeof(*pcl)); -} -EXPORT_SYMBOL(cfs_percpt_lock_free); - -/** - * create cpu-partition lock, see libcfs_private.h for more detail. - * - * cpu-partition lock is designed for large-scale SMP system, so we need to - * reduce cacheline conflict as possible as we can, that's the - * reason we always allocate cacheline-aligned memory block. 
- */ -struct cfs_percpt_lock * -cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab) -{ - struct cfs_percpt_lock *pcl; - spinlock_t *lock; - int i; - - /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */ - LIBCFS_ALLOC(pcl, sizeof(*pcl)); - if (!pcl) - return NULL; - - pcl->pcl_cptab = cptab; - pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock)); - if (!pcl->pcl_locks) { - LIBCFS_FREE(pcl, sizeof(*pcl)); - return NULL; - } - - cfs_percpt_for_each(lock, i, pcl->pcl_locks) - spin_lock_init(lock); - - return pcl; -} -EXPORT_SYMBOL(cfs_percpt_lock_alloc); - -/** - * lock a CPU partition - * - * \a index != CFS_PERCPT_LOCK_EX - * hold private lock indexed by \a index - * - * \a index == CFS_PERCPT_LOCK_EX - * exclusively lock @pcl and nobody can take private lock - */ -void -cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index) -{ - int ncpt = cfs_cpt_number(pcl->pcl_cptab); - int i; - - LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt); - - if (ncpt == 1) { - index = 0; - } else { /* serialize with exclusive lock */ - while (pcl->pcl_locked) - cpu_relax(); - } - - if (likely(index != CFS_PERCPT_LOCK_EX)) { - spin_lock(pcl->pcl_locks[index]); - return; - } - - /* exclusive lock request */ - for (i = 0; i < ncpt; i++) { - spin_lock(pcl->pcl_locks[i]); - if (i == 0) { - LASSERT(!pcl->pcl_locked); - /* nobody should take private lock after this - * so I wouldn't starve for too long time */ - pcl->pcl_locked = 1; - } - } -} -EXPORT_SYMBOL(cfs_percpt_lock); - -/** unlock a CPU partition */ -void -cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index) -{ - int ncpt = cfs_cpt_number(pcl->pcl_cptab); - int i; - - index = ncpt == 1 ? 
0 : index; - - if (likely(index != CFS_PERCPT_LOCK_EX)) { - spin_unlock(pcl->pcl_locks[index]); - return; - } - - for (i = ncpt - 1; i >= 0; i--) { - if (i == 0) { - LASSERT(pcl->pcl_locked); - pcl->pcl_locked = 0; - } - spin_unlock(pcl->pcl_locks[i]); - } -} -EXPORT_SYMBOL(cfs_percpt_unlock); - -/** free cpu-partition refcount */ -void -cfs_percpt_atomic_free(atomic_t **refs) -{ - cfs_percpt_free(refs); -} -EXPORT_SYMBOL(cfs_percpt_atomic_free); - -/** allocate cpu-partition refcount with initial value @init_val */ -atomic_t ** -cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val) -{ - atomic_t **refs; - atomic_t *ref; - int i; - - refs = cfs_percpt_alloc(cptab, sizeof(*ref)); - if (!refs) - return NULL; - - cfs_percpt_for_each(ref, i, refs) - atomic_set(ref, init_val); - return refs; -} -EXPORT_SYMBOL(cfs_percpt_atomic_alloc); - -/** return sum of cpu-partition refs */ -int -cfs_percpt_atomic_summary(atomic_t **refs) -{ - atomic_t *ref; - int i; - int val = 0; - - cfs_percpt_for_each(ref, i, refs) - val += atomic_read(ref); - - return val; -} -EXPORT_SYMBOL(cfs_percpt_atomic_summary); diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c b/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c deleted file mode 100644 index 27cf86106..000000000 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c +++ /dev/null @@ -1,200 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * - * GPL HEADER END - */ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * Author: liang@whamcloud.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" - -struct cfs_var_array { - unsigned int va_count; /* # of buffers */ - unsigned int va_size; /* size of each var */ - struct cfs_cpt_table *va_cptab; /* cpu partition table */ - void *va_ptrs[0]; /* buffer addresses */ -}; - -/* - * free per-cpu data, see more detail in cfs_percpt_free - */ -void -cfs_percpt_free(void *vars) -{ - struct cfs_var_array *arr; - int i; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - for (i = 0; i < arr->va_count; i++) { - if (arr->va_ptrs[i] != NULL) - LIBCFS_FREE(arr->va_ptrs[i], arr->va_size); - } - - LIBCFS_FREE(arr, offsetof(struct cfs_var_array, - va_ptrs[arr->va_count])); -} -EXPORT_SYMBOL(cfs_percpt_free); - -/* - * allocate per cpu-partition variables, returned value is an array of pointers, - * variable can be indexed by CPU partition ID, i.e: - * - * arr = cfs_percpt_alloc(cfs_cpu_pt, size); - * then caller can access memory block for CPU 0 by arr[0], - * memory block for CPU 1 by arr[1]... - * memory block for CPU N by arr[N]... - * - * cacheline aligned. 
- */ -void * -cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size) -{ - struct cfs_var_array *arr; - int count; - int i; - - count = cfs_cpt_number(cptab); - - LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count])); - if (!arr) - return NULL; - - arr->va_size = size = L1_CACHE_ALIGN(size); - arr->va_count = count; - arr->va_cptab = cptab; - - for (i = 0; i < count; i++) { - LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size); - if (!arr->va_ptrs[i]) { - cfs_percpt_free((void *)&arr->va_ptrs[0]); - return NULL; - } - } - - return (void *)&arr->va_ptrs[0]; -} -EXPORT_SYMBOL(cfs_percpt_alloc); - -/* - * return number of CPUs (or number of elements in per-cpu data) - * according to cptab of @vars - */ -int -cfs_percpt_number(void *vars) -{ - struct cfs_var_array *arr; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - return arr->va_count; -} -EXPORT_SYMBOL(cfs_percpt_number); - -/* - * return memory block shadowed from current CPU - */ -void * -cfs_percpt_current(void *vars) -{ - struct cfs_var_array *arr; - int cpt; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - cpt = cfs_cpt_current(arr->va_cptab, 0); - if (cpt < 0) - return NULL; - - return arr->va_ptrs[cpt]; -} - -void * -cfs_percpt_index(void *vars, int idx) -{ - struct cfs_var_array *arr; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - LASSERT(idx >= 0 && idx < arr->va_count); - return arr->va_ptrs[idx]; -} - -/* - * free variable array, see more detail in cfs_array_alloc - */ -void -cfs_array_free(void *vars) -{ - struct cfs_var_array *arr; - int i; - - arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); - - for (i = 0; i < arr->va_count; i++) { - if (!arr->va_ptrs[i]) - continue; - - LIBCFS_FREE(arr->va_ptrs[i], arr->va_size); - } - LIBCFS_FREE(arr, offsetof(struct cfs_var_array, - va_ptrs[arr->va_count])); -} -EXPORT_SYMBOL(cfs_array_free); - -/* - * allocate a variable array, returned value is an array of pointers. 
- * Caller can specify length of array by @count, @size is size of each - * memory block in array. - */ -void * -cfs_array_alloc(int count, unsigned int size) -{ - struct cfs_var_array *arr; - int i; - - LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count])); - if (!arr) - return NULL; - - arr->va_count = count; - arr->va_size = size; - - for (i = 0; i < count; i++) { - LIBCFS_ALLOC(arr->va_ptrs[i], size); - - if (!arr->va_ptrs[i]) { - cfs_array_free((void *)&arr->va_ptrs[0]); - return NULL; - } - } - - return (void *)&arr->va_ptrs[0]; -} -EXPORT_SYMBOL(cfs_array_alloc); diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c deleted file mode 100644 index 205a3ed43..000000000 --- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c +++ /dev/null @@ -1,564 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 
- * Use is subject to license terms. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * String manipulation functions. - * - * libcfs/libcfs/libcfs_string.c - * - * Author: Nathan Rutman <nathan.rutman@sun.com> - */ - -#include "../../include/linux/libcfs/libcfs.h" - -/* Convert a text string to a bitmask */ -int cfs_str2mask(const char *str, const char *(*bit2str)(int bit), - int *oldmask, int minmask, int allmask) -{ - const char *debugstr; - char op = '\0'; - int newmask = minmask, i, len, found = 0; - - /* <str> must be a list of tokens separated by whitespace - * and optionally an operator ('+' or '-'). If an operator - * appears first in <str>, '*oldmask' is used as the starting point - * (relative), otherwise minmask is used (absolute). An operator - * applies to all following tokens up to the next operator. */ - while (*str != '\0') { - while (isspace(*str)) - str++; - if (*str == '\0') - break; - if (*str == '+' || *str == '-') { - op = *str++; - if (!found) - /* only if first token is relative */ - newmask = *oldmask; - while (isspace(*str)) - str++; - if (*str == '\0') /* trailing op */ - return -EINVAL; - } - - /* find token length */ - len = 0; - while (str[len] != '\0' && !isspace(str[len]) && - str[len] != '+' && str[len] != '-') - len++; - - /* match token */ - found = 0; - for (i = 0; i < 32; i++) { - debugstr = bit2str(i); - if (debugstr != NULL && - strlen(debugstr) == len && - strncasecmp(str, debugstr, len) == 0) { - if (op == '-') - newmask &= ~(1 << i); - else - newmask |= (1 << i); - found = 1; - break; - } - } - if (!found && len == 3 && - (strncasecmp(str, "ALL", len) == 0)) { - if (op == '-') - newmask = minmask; - else - newmask = allmask; - found = 1; - } - if (!found) { - CWARN("unknown mask '%.*s'.\n" - "mask usage: [+|-]<all|type> ...\n", len, str); - return -EINVAL; - } - str += len; - } - - *oldmask = 
newmask; - return 0; -} - -/* get the first string out of @str */ -char *cfs_firststr(char *str, size_t size) -{ - size_t i = 0; - char *end; - - /* trim leading spaces */ - while (i < size && *str && isspace(*str)) { - ++i; - ++str; - } - - /* string with all spaces */ - if (*str == '\0') - goto out; - - end = str; - while (i < size && *end != '\0' && !isspace(*end)) { - ++i; - ++end; - } - - *end = '\0'; -out: - return str; -} -EXPORT_SYMBOL(cfs_firststr); - -char * -cfs_trimwhite(char *str) -{ - char *end; - - while (isspace(*str)) - str++; - - end = str + strlen(str); - while (end > str) { - if (!isspace(end[-1])) - break; - end--; - } - - *end = 0; - return str; -} -EXPORT_SYMBOL(cfs_trimwhite); - -/** - * Extracts tokens from strings. - * - * Looks for \a delim in string \a next, sets \a res to point to - * substring before the delimiter, sets \a next right after the found - * delimiter. - * - * \retval 1 if \a res points to a string of non-whitespace characters - * \retval 0 otherwise - */ -int -cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res) -{ - char *end; - - if (next->ls_str == NULL) - return 0; - - /* skip leading white spaces */ - while (next->ls_len) { - if (!isspace(*next->ls_str)) - break; - next->ls_str++; - next->ls_len--; - } - - if (next->ls_len == 0) /* whitespaces only */ - return 0; - - if (*next->ls_str == delim) { - /* first non-writespace is the delimiter */ - return 0; - } - - res->ls_str = next->ls_str; - end = memchr(next->ls_str, delim, next->ls_len); - if (end == NULL) { - /* there is no the delimeter in the string */ - end = next->ls_str + next->ls_len; - next->ls_str = NULL; - } else { - next->ls_str = end + 1; - next->ls_len -= (end - res->ls_str + 1); - } - - /* skip ending whitespaces */ - while (--end != res->ls_str) { - if (!isspace(*end)) - break; - } - - res->ls_len = end - res->ls_str + 1; - return 1; -} -EXPORT_SYMBOL(cfs_gettok); - -/** - * Converts string to integer. 
- * - * Accepts decimal and hexadecimal number recordings. - * - * \retval 1 if first \a nob chars of \a str convert to decimal or - * hexadecimal integer in the range [\a min, \a max] - * \retval 0 otherwise - */ -int -cfs_str2num_check(char *str, int nob, unsigned *num, - unsigned min, unsigned max) -{ - char *endp; - - str = cfs_trimwhite(str); - *num = simple_strtoul(str, &endp, 0); - if (endp == str) - return 0; - - for (; endp < str + nob; endp++) { - if (!isspace(*endp)) - return 0; - } - - return (*num >= min && *num <= max); -} -EXPORT_SYMBOL(cfs_str2num_check); - -/** - * Parses \<range_expr\> token of the syntax. If \a bracketed is false, - * \a src should only have a single token which can be \<number\> or \* - * - * \retval pointer to allocated range_expr and initialized - * range_expr::re_lo, range_expr::re_hi and range_expr:re_stride if \a - `* src parses to - * \<number\> | - * \<number\> '-' \<number\> | - * \<number\> '-' \<number\> '/' \<number\> - * \retval 0 will be returned if it can be parsed, otherwise -EINVAL or - * -ENOMEM will be returned. 
- */ -static int -cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max, - int bracketed, struct cfs_range_expr **expr) -{ - struct cfs_range_expr *re; - struct cfs_lstr tok; - - LIBCFS_ALLOC(re, sizeof(*re)); - if (re == NULL) - return -ENOMEM; - - if (src->ls_len == 1 && src->ls_str[0] == '*') { - re->re_lo = min; - re->re_hi = max; - re->re_stride = 1; - goto out; - } - - if (cfs_str2num_check(src->ls_str, src->ls_len, - &re->re_lo, min, max)) { - /* <number> is parsed */ - re->re_hi = re->re_lo; - re->re_stride = 1; - goto out; - } - - if (!bracketed || !cfs_gettok(src, '-', &tok)) - goto failed; - - if (!cfs_str2num_check(tok.ls_str, tok.ls_len, - &re->re_lo, min, max)) - goto failed; - - /* <number> - */ - if (cfs_str2num_check(src->ls_str, src->ls_len, - &re->re_hi, min, max)) { - /* <number> - <number> is parsed */ - re->re_stride = 1; - goto out; - } - - /* go to check <number> '-' <number> '/' <number> */ - if (cfs_gettok(src, '/', &tok)) { - if (!cfs_str2num_check(tok.ls_str, tok.ls_len, - &re->re_hi, min, max)) - goto failed; - - /* <number> - <number> / ... */ - if (cfs_str2num_check(src->ls_str, src->ls_len, - &re->re_stride, min, max)) { - /* <number> - <number> / <number> is parsed */ - goto out; - } - } - - out: - *expr = re; - return 0; - - failed: - LIBCFS_FREE(re, sizeof(*re)); - return -EINVAL; -} - -/** - * Print the range expression \a re into specified \a buffer. - * If \a bracketed is true, expression does not need additional - * brackets. 
- * - * \retval number of characters written - */ -static int -cfs_range_expr_print(char *buffer, int count, struct cfs_range_expr *expr, - bool bracketed) -{ - int i; - char s[] = "["; - char e[] = "]"; - - if (bracketed) - s[0] = e[0] = '\0'; - - if (expr->re_lo == expr->re_hi) - i = scnprintf(buffer, count, "%u", expr->re_lo); - else if (expr->re_stride == 1) - i = scnprintf(buffer, count, "%s%u-%u%s", - s, expr->re_lo, expr->re_hi, e); - else - i = scnprintf(buffer, count, "%s%u-%u/%u%s", - s, expr->re_lo, expr->re_hi, - expr->re_stride, e); - return i; -} - -/** - * Print a list of range expressions (\a expr_list) into specified \a buffer. - * If the list contains several expressions, separate them with comma - * and surround the list with brackets. - * - * \retval number of characters written - */ -int -cfs_expr_list_print(char *buffer, int count, struct cfs_expr_list *expr_list) -{ - struct cfs_range_expr *expr; - int i = 0, j = 0; - int numexprs = 0; - - if (count <= 0) - return 0; - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) - numexprs++; - - if (numexprs > 1) - i += scnprintf(buffer + i, count - i, "["); - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - if (j++ != 0) - i += scnprintf(buffer + i, count - i, ","); - i += cfs_range_expr_print(buffer + i, count - i, expr, - numexprs > 1); - } - - if (numexprs > 1) - i += scnprintf(buffer + i, count - i, "]"); - - return i; -} -EXPORT_SYMBOL(cfs_expr_list_print); - -/** - * Matches value (\a value) against ranges expression list \a expr_list. 
- * - * \retval 1 if \a value matches - * \retval 0 otherwise - */ -int -cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list) -{ - struct cfs_range_expr *expr; - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - if (value >= expr->re_lo && value <= expr->re_hi && - ((value - expr->re_lo) % expr->re_stride) == 0) - return 1; - } - - return 0; -} -EXPORT_SYMBOL(cfs_expr_list_match); - -/** - * Convert express list (\a expr_list) to an array of all matched values - * - * \retval N N is total number of all matched values - * \retval 0 if expression list is empty - * \retval < 0 for failure - */ -int -cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp) -{ - struct cfs_range_expr *expr; - __u32 *val; - int count = 0; - int i; - - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - for (i = expr->re_lo; i <= expr->re_hi; i++) { - if (((i - expr->re_lo) % expr->re_stride) == 0) - count++; - } - } - - if (count == 0) /* empty expression list */ - return 0; - - if (count > max) { - CERROR("Number of values %d exceeds max allowed %d\n", - max, count); - return -EINVAL; - } - - LIBCFS_ALLOC(val, sizeof(val[0]) * count); - if (val == NULL) - return -ENOMEM; - - count = 0; - list_for_each_entry(expr, &expr_list->el_exprs, re_link) { - for (i = expr->re_lo; i <= expr->re_hi; i++) { - if (((i - expr->re_lo) % expr->re_stride) == 0) - val[count++] = i; - } - } - - *valpp = val; - return count; -} -EXPORT_SYMBOL(cfs_expr_list_values); - -/** - * Frees cfs_range_expr structures of \a expr_list. 
- * - * \retval none - */ -void -cfs_expr_list_free(struct cfs_expr_list *expr_list) -{ - while (!list_empty(&expr_list->el_exprs)) { - struct cfs_range_expr *expr; - - expr = list_entry(expr_list->el_exprs.next, - struct cfs_range_expr, re_link); - list_del(&expr->re_link); - LIBCFS_FREE(expr, sizeof(*expr)); - } - - LIBCFS_FREE(expr_list, sizeof(*expr_list)); -} -EXPORT_SYMBOL(cfs_expr_list_free); - -/** - * Parses \<cfs_expr_list\> token of the syntax. - * - * \retval 0 if \a str parses to \<number\> | \<expr_list\> - * \retval -errno otherwise - */ -int -cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max, - struct cfs_expr_list **elpp) -{ - struct cfs_expr_list *expr_list; - struct cfs_range_expr *expr; - struct cfs_lstr src; - int rc; - - LIBCFS_ALLOC(expr_list, sizeof(*expr_list)); - if (expr_list == NULL) - return -ENOMEM; - - src.ls_str = str; - src.ls_len = len; - - INIT_LIST_HEAD(&expr_list->el_exprs); - - if (src.ls_str[0] == '[' && - src.ls_str[src.ls_len - 1] == ']') { - src.ls_str++; - src.ls_len -= 2; - - rc = -EINVAL; - while (src.ls_str != NULL) { - struct cfs_lstr tok; - - if (!cfs_gettok(&src, ',', &tok)) { - rc = -EINVAL; - break; - } - - rc = cfs_range_expr_parse(&tok, min, max, 1, &expr); - if (rc != 0) - break; - - list_add_tail(&expr->re_link, - &expr_list->el_exprs); - } - } else { - rc = cfs_range_expr_parse(&src, min, max, 0, &expr); - if (rc == 0) { - list_add_tail(&expr->re_link, - &expr_list->el_exprs); - } - } - - if (rc != 0) - cfs_expr_list_free(expr_list); - else - *elpp = expr_list; - - return rc; -} -EXPORT_SYMBOL(cfs_expr_list_parse); - -/** - * Frees cfs_expr_list structures of \a list. - * - * For each struct cfs_expr_list structure found on \a list it frees - * range_expr list attached to it and frees the cfs_expr_list itself. 
- * - * \retval none - */ -void -cfs_expr_list_free_list(struct list_head *list) -{ - struct cfs_expr_list *el; - - while (!list_empty(list)) { - el = list_entry(list->next, - struct cfs_expr_list, el_link); - list_del(&el->el_link); - cfs_expr_list_free(el); - } -} -EXPORT_SYMBOL(cfs_expr_list_free_list); diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c deleted file mode 100644 index e52afe35e..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c +++ /dev/null @@ -1,1042 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * - * GPL HEADER END - */ -/* - * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * Author: liang@whamcloud.com - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include <linux/cpu.h> -#include <linux/sched.h> -#include "../../../include/linux/libcfs/libcfs.h" - -#ifdef CONFIG_SMP - -/** - * modparam for setting number of partitions - * - * 0 : estimate best value based on cores or NUMA nodes - * 1 : disable multiple partitions - * >1 : specify number of partitions - */ -static int cpu_npartitions; -module_param(cpu_npartitions, int, 0444); -MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions"); - -/** - * modparam for setting CPU partitions patterns: - * - * i.e: "0[0,1,2,3] 1[4,5,6,7]", number before bracket is CPU partition ID, - * number in bracket is processor ID (core or HT) - * - * i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket - * are NUMA node ID, number before bracket is CPU partition ID. - * - * NB: If user specified cpu_pattern, cpu_npartitions will be ignored - */ -static char *cpu_pattern = ""; -module_param(cpu_pattern, charp, 0444); -MODULE_PARM_DESC(cpu_pattern, "CPU partitions pattern"); - -struct cfs_cpt_data { - /* serialize hotplug etc */ - spinlock_t cpt_lock; - /* reserved for hotplug */ - unsigned long cpt_version; - /* mutex to protect cpt_cpumask */ - struct mutex cpt_mutex; - /* scratch buffer for set/unset_node */ - cpumask_t *cpt_cpumask; -}; - -static struct cfs_cpt_data cpt_data; - -void -cfs_cpt_table_free(struct cfs_cpt_table *cptab) -{ - int i; - - if (cptab->ctb_cpu2cpt != NULL) { - LIBCFS_FREE(cptab->ctb_cpu2cpt, - num_possible_cpus() * - sizeof(cptab->ctb_cpu2cpt[0])); - } - - for (i = 0; cptab->ctb_parts != NULL && i < cptab->ctb_nparts; i++) { - struct cfs_cpu_partition *part = &cptab->ctb_parts[i]; - - if (part->cpt_nodemask != NULL) { - LIBCFS_FREE(part->cpt_nodemask, - sizeof(*part->cpt_nodemask)); - } - - if (part->cpt_cpumask != NULL) - LIBCFS_FREE(part->cpt_cpumask, cpumask_size()); - } - - if (cptab->ctb_parts != NULL) { - LIBCFS_FREE(cptab->ctb_parts, - cptab->ctb_nparts * 
sizeof(cptab->ctb_parts[0])); - } - - if (cptab->ctb_nodemask != NULL) - LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask)); - if (cptab->ctb_cpumask != NULL) - LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size()); - - LIBCFS_FREE(cptab, sizeof(*cptab)); -} -EXPORT_SYMBOL(cfs_cpt_table_free); - -struct cfs_cpt_table * -cfs_cpt_table_alloc(unsigned int ncpt) -{ - struct cfs_cpt_table *cptab; - int i; - - LIBCFS_ALLOC(cptab, sizeof(*cptab)); - if (cptab == NULL) - return NULL; - - cptab->ctb_nparts = ncpt; - - LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size()); - LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask)); - - if (cptab->ctb_cpumask == NULL || cptab->ctb_nodemask == NULL) - goto failed; - - LIBCFS_ALLOC(cptab->ctb_cpu2cpt, - num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0])); - if (cptab->ctb_cpu2cpt == NULL) - goto failed; - - memset(cptab->ctb_cpu2cpt, -1, - num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0])); - - LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0])); - if (cptab->ctb_parts == NULL) - goto failed; - - for (i = 0; i < ncpt; i++) { - struct cfs_cpu_partition *part = &cptab->ctb_parts[i]; - - LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size()); - LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask)); - if (part->cpt_cpumask == NULL || part->cpt_nodemask == NULL) - goto failed; - } - - spin_lock(&cpt_data.cpt_lock); - /* Reserved for hotplug */ - cptab->ctb_version = cpt_data.cpt_version; - spin_unlock(&cpt_data.cpt_lock); - - return cptab; - - failed: - cfs_cpt_table_free(cptab); - return NULL; -} -EXPORT_SYMBOL(cfs_cpt_table_alloc); - -int -cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len) -{ - char *tmp = buf; - int rc = 0; - int i; - int j; - - for (i = 0; i < cptab->ctb_nparts; i++) { - if (len > 0) { - rc = snprintf(tmp, len, "%d\t: ", i); - len -= rc; - } - - if (len <= 0) { - rc = -EFBIG; - goto out; - } - - tmp += rc; - for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) { - rc = 
snprintf(tmp, len, "%d ", j); - len -= rc; - if (len <= 0) { - rc = -EFBIG; - goto out; - } - tmp += rc; - } - - *tmp = '\n'; - tmp++; - len--; - } - - out: - if (rc < 0) - return rc; - - return tmp - buf; -} -EXPORT_SYMBOL(cfs_cpt_table_print); - -int -cfs_cpt_number(struct cfs_cpt_table *cptab) -{ - return cptab->ctb_nparts; -} -EXPORT_SYMBOL(cfs_cpt_number); - -int -cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? - cpumask_weight(cptab->ctb_cpumask) : - cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask); -} -EXPORT_SYMBOL(cfs_cpt_weight); - -int -cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? - cpumask_any_and(cptab->ctb_cpumask, - cpu_online_mask) < nr_cpu_ids : - cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask, - cpu_online_mask) < nr_cpu_ids; -} -EXPORT_SYMBOL(cfs_cpt_online); - -cpumask_t * -cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? - cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask; -} -EXPORT_SYMBOL(cfs_cpt_cpumask); - -nodemask_t * -cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt) -{ - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - return cpt == CFS_CPT_ANY ? 
- cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask; -} -EXPORT_SYMBOL(cfs_cpt_nodemask); - -int -cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ - int node; - - LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts); - - if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) { - CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu); - return 0; - } - - if (cptab->ctb_cpu2cpt[cpu] != -1) { - CDEBUG(D_INFO, "CPU %d is already in partition %d\n", - cpu, cptab->ctb_cpu2cpt[cpu]); - return 0; - } - - cptab->ctb_cpu2cpt[cpu] = cpt; - - LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask)); - LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); - - cpumask_set_cpu(cpu, cptab->ctb_cpumask); - cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask); - - node = cpu_to_node(cpu); - - /* first CPU of @node in this CPT table */ - if (!node_isset(node, *cptab->ctb_nodemask)) - node_set(node, *cptab->ctb_nodemask); - - /* first CPU of @node in this partition */ - if (!node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask)) - node_set(node, *cptab->ctb_parts[cpt].cpt_nodemask); - - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_cpu); - -void -cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu) -{ - int node; - int i; - - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - if (cpu < 0 || cpu >= nr_cpu_ids) { - CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu); - return; - } - - if (cpt == CFS_CPT_ANY) { - /* caller doesn't know the partition ID */ - cpt = cptab->ctb_cpu2cpt[cpu]; - if (cpt < 0) { /* not set in this CPT-table */ - CDEBUG(D_INFO, "Try to unset cpu %d which is not in CPT-table %p\n", - cpt, cptab); - return; - } - - } else if (cpt != cptab->ctb_cpu2cpt[cpu]) { - CDEBUG(D_INFO, - "CPU %d is not in cpu-partition %d\n", cpu, cpt); - return; - } - - LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask)); - LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask)); - - cpumask_clear_cpu(cpu, 
cptab->ctb_parts[cpt].cpt_cpumask); - cpumask_clear_cpu(cpu, cptab->ctb_cpumask); - cptab->ctb_cpu2cpt[cpu] = -1; - - node = cpu_to_node(cpu); - - LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask)); - LASSERT(node_isset(node, *cptab->ctb_nodemask)); - - for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) { - /* this CPT has other CPU belonging to this node? */ - if (cpu_to_node(i) == node) - break; - } - - if (i >= nr_cpu_ids) - node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask); - - for_each_cpu(i, cptab->ctb_cpumask) { - /* this CPT-table has other CPU belonging to this node? */ - if (cpu_to_node(i) == node) - break; - } - - if (i >= nr_cpu_ids) - node_clear(node, *cptab->ctb_nodemask); - - return; -} -EXPORT_SYMBOL(cfs_cpt_unset_cpu); - -int -cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ - int i; - - if (cpumask_weight(mask) == 0 || - cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) { - CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n", - cpt); - return 0; - } - - for_each_cpu(i, mask) { - if (!cfs_cpt_set_cpu(cptab, cpt, i)) - return 0; - } - - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_cpumask); - -void -cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask) -{ - int i; - - for_each_cpu(i, mask) - cfs_cpt_unset_cpu(cptab, cpt, i); -} -EXPORT_SYMBOL(cfs_cpt_unset_cpumask); - -int -cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ - cpumask_t *mask; - int rc; - - if (node < 0 || node >= MAX_NUMNODES) { - CDEBUG(D_INFO, - "Invalid NUMA id %d for CPU partition %d\n", node, cpt); - return 0; - } - - mutex_lock(&cpt_data.cpt_mutex); - - mask = cpt_data.cpt_cpumask; - cpumask_copy(mask, cpumask_of_node(node)); - - rc = cfs_cpt_set_cpumask(cptab, cpt, mask); - - mutex_unlock(&cpt_data.cpt_mutex); - - return rc; -} -EXPORT_SYMBOL(cfs_cpt_set_node); - -void -cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node) -{ - cpumask_t *mask; - - if 
(node < 0 || node >= MAX_NUMNODES) { - CDEBUG(D_INFO, - "Invalid NUMA id %d for CPU partition %d\n", node, cpt); - return; - } - - mutex_lock(&cpt_data.cpt_mutex); - - mask = cpt_data.cpt_cpumask; - cpumask_copy(mask, cpumask_of_node(node)); - - cfs_cpt_unset_cpumask(cptab, cpt, mask); - - mutex_unlock(&cpt_data.cpt_mutex); -} -EXPORT_SYMBOL(cfs_cpt_unset_node); - -int -cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) -{ - int i; - - for_each_node_mask(i, *mask) { - if (!cfs_cpt_set_node(cptab, cpt, i)) - return 0; - } - - return 1; -} -EXPORT_SYMBOL(cfs_cpt_set_nodemask); - -void -cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask) -{ - int i; - - for_each_node_mask(i, *mask) - cfs_cpt_unset_node(cptab, cpt, i); -} -EXPORT_SYMBOL(cfs_cpt_unset_nodemask); - -void -cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt) -{ - int last; - int i; - - if (cpt == CFS_CPT_ANY) { - last = cptab->ctb_nparts - 1; - cpt = 0; - } else { - last = cpt; - } - - for (; cpt <= last; cpt++) { - for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) - cfs_cpt_unset_cpu(cptab, cpt, i); - } -} -EXPORT_SYMBOL(cfs_cpt_clear); - -int -cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt) -{ - nodemask_t *mask; - int weight; - int rotor; - int node; - - /* convert CPU partition ID to HW node id */ - - if (cpt < 0 || cpt >= cptab->ctb_nparts) { - mask = cptab->ctb_nodemask; - rotor = cptab->ctb_spread_rotor++; - } else { - mask = cptab->ctb_parts[cpt].cpt_nodemask; - rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++; - } - - weight = nodes_weight(*mask); - LASSERT(weight > 0); - - rotor %= weight; - - for_each_node_mask(node, *mask) { - if (rotor-- == 0) - return node; - } - - LBUG(); - return 0; -} -EXPORT_SYMBOL(cfs_cpt_spread_node); - -int -cfs_cpt_current(struct cfs_cpt_table *cptab, int remap) -{ - int cpu = smp_processor_id(); - int cpt = cptab->ctb_cpu2cpt[cpu]; - - if (cpt < 0) { - if (!remap) - return cpt; - - /* don't return 
negative value for safety of upper layer, - * instead we shadow the unknown cpu to a valid partition ID */ - cpt = cpu % cptab->ctb_nparts; - } - - return cpt; -} -EXPORT_SYMBOL(cfs_cpt_current); - -int -cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) -{ - LASSERT(cpu >= 0 && cpu < nr_cpu_ids); - - return cptab->ctb_cpu2cpt[cpu]; -} -EXPORT_SYMBOL(cfs_cpt_of_cpu); - -int -cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) -{ - cpumask_t *cpumask; - nodemask_t *nodemask; - int rc; - int i; - - LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts)); - - if (cpt == CFS_CPT_ANY) { - cpumask = cptab->ctb_cpumask; - nodemask = cptab->ctb_nodemask; - } else { - cpumask = cptab->ctb_parts[cpt].cpt_cpumask; - nodemask = cptab->ctb_parts[cpt].cpt_nodemask; - } - - if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) { - CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n", - cpt); - return -EINVAL; - } - - for_each_online_cpu(i) { - if (cpumask_test_cpu(i, cpumask)) - continue; - - rc = set_cpus_allowed_ptr(current, cpumask); - set_mems_allowed(*nodemask); - if (rc == 0) - schedule(); /* switch to allowed CPU */ - - return rc; - } - - /* don't need to set affinity because all online CPUs are covered */ - return 0; -} -EXPORT_SYMBOL(cfs_cpt_bind); - -/** - * Choose max to \a number CPUs from \a node and set them in \a cpt. - * We always prefer to choose CPU in the same core/socket. 
- */ -static int -cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt, - cpumask_t *node, int number) -{ - cpumask_t *socket = NULL; - cpumask_t *core = NULL; - int rc = 0; - int cpu; - - LASSERT(number > 0); - - if (number >= cpumask_weight(node)) { - while (!cpumask_empty(node)) { - cpu = cpumask_first(node); - - rc = cfs_cpt_set_cpu(cptab, cpt, cpu); - if (!rc) - return -EINVAL; - cpumask_clear_cpu(cpu, node); - } - return 0; - } - - /* allocate scratch buffer */ - LIBCFS_ALLOC(socket, cpumask_size()); - LIBCFS_ALLOC(core, cpumask_size()); - if (socket == NULL || core == NULL) { - rc = -ENOMEM; - goto out; - } - - while (!cpumask_empty(node)) { - cpu = cpumask_first(node); - - /* get cpumask for cores in the same socket */ - cpumask_copy(socket, topology_core_cpumask(cpu)); - cpumask_and(socket, socket, node); - - LASSERT(!cpumask_empty(socket)); - - while (!cpumask_empty(socket)) { - int i; - - /* get cpumask for hts in the same core */ - cpumask_copy(core, topology_sibling_cpumask(cpu)); - cpumask_and(core, core, node); - - LASSERT(!cpumask_empty(core)); - - for_each_cpu(i, core) { - cpumask_clear_cpu(i, socket); - cpumask_clear_cpu(i, node); - - rc = cfs_cpt_set_cpu(cptab, cpt, i); - if (!rc) { - rc = -EINVAL; - goto out; - } - - if (--number == 0) - goto out; - } - cpu = cpumask_first(socket); - } - } - - out: - if (socket != NULL) - LIBCFS_FREE(socket, cpumask_size()); - if (core != NULL) - LIBCFS_FREE(core, cpumask_size()); - return rc; -} - -#define CPT_WEIGHT_MIN 4u - -static unsigned int -cfs_cpt_num_estimate(void) -{ - unsigned nnode = num_online_nodes(); - unsigned ncpu = num_online_cpus(); - unsigned ncpt; - - if (ncpu <= CPT_WEIGHT_MIN) { - ncpt = 1; - goto out; - } - - /* generate reasonable number of CPU partitions based on total number - * of CPUs, Preferred N should be power2 and match this condition: - * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 */ - for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1) - ; - - if (ncpt <= nnode) { /* fat numa system 
*/ - while (nnode > ncpt) - nnode >>= 1; - - } else { /* ncpt > nnode */ - while ((nnode << 1) <= ncpt) - nnode <<= 1; - } - - ncpt = nnode; - - out: -#if (BITS_PER_LONG == 32) - /* config many CPU partitions on 32-bit system could consume - * too much memory */ - ncpt = min(2U, ncpt); -#endif - while (ncpu % ncpt != 0) - ncpt--; /* worst case is 1 */ - - return ncpt; -} - -static struct cfs_cpt_table * -cfs_cpt_table_create(int ncpt) -{ - struct cfs_cpt_table *cptab = NULL; - cpumask_t *mask = NULL; - int cpt = 0; - int num; - int rc; - int i; - - rc = cfs_cpt_num_estimate(); - if (ncpt <= 0) - ncpt = rc; - - if (ncpt > num_online_cpus() || ncpt > 4 * rc) { - CWARN("CPU partition number %d is larger than suggested value (%d), your system may have performance issue or run out of memory while under pressure\n", - ncpt, rc); - } - - if (num_online_cpus() % ncpt != 0) { - CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n", - (int)num_online_cpus(), ncpt); - goto failed; - } - - cptab = cfs_cpt_table_alloc(ncpt); - if (cptab == NULL) { - CERROR("Failed to allocate CPU map(%d)\n", ncpt); - goto failed; - } - - num = num_online_cpus() / ncpt; - if (num == 0) { - CERROR("CPU changed while setting CPU partition\n"); - goto failed; - } - - LIBCFS_ALLOC(mask, cpumask_size()); - if (mask == NULL) { - CERROR("Failed to allocate scratch cpumask\n"); - goto failed; - } - - for_each_online_node(i) { - cpumask_copy(mask, cpumask_of_node(i)); - - while (!cpumask_empty(mask)) { - struct cfs_cpu_partition *part; - int n; - - if (cpt >= ncpt) - goto failed; - - part = &cptab->ctb_parts[cpt]; - - n = num - cpumask_weight(part->cpt_cpumask); - LASSERT(n > 0); - - rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n); - if (rc < 0) - goto failed; - - LASSERT(num >= cpumask_weight(part->cpt_cpumask)); - if (num == cpumask_weight(part->cpt_cpumask)) - cpt++; - } - } - - if (cpt != ncpt || - num != 
cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) { - CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n", - cptab->ctb_nparts, num, cpt, - cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)); - goto failed; - } - - LIBCFS_FREE(mask, cpumask_size()); - - return cptab; - - failed: - CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n", - ncpt, num_online_nodes(), num_online_cpus()); - - if (mask != NULL) - LIBCFS_FREE(mask, cpumask_size()); - - if (cptab != NULL) - cfs_cpt_table_free(cptab); - - return NULL; -} - -static struct cfs_cpt_table * -cfs_cpt_table_create_pattern(char *pattern) -{ - struct cfs_cpt_table *cptab; - char *str = pattern; - int node = 0; - int high; - int ncpt; - int c; - - for (ncpt = 0;; ncpt++) { /* quick scan bracket */ - str = strchr(str, '['); - if (str == NULL) - break; - str++; - } - - str = cfs_trimwhite(pattern); - if (*str == 'n' || *str == 'N') { - pattern = str + 1; - node = 1; - } - - if (ncpt == 0 || - (node && ncpt > num_online_nodes()) || - (!node && ncpt > num_online_cpus())) { - CERROR("Invalid pattern %s, or too many partitions %d\n", - pattern, ncpt); - return NULL; - } - - high = node ? 
MAX_NUMNODES - 1 : nr_cpu_ids - 1; - - cptab = cfs_cpt_table_alloc(ncpt); - if (cptab == NULL) { - CERROR("Failed to allocate cpu partition table\n"); - return NULL; - } - - for (str = cfs_trimwhite(pattern), c = 0;; c++) { - struct cfs_range_expr *range; - struct cfs_expr_list *el; - char *bracket = strchr(str, '['); - int cpt; - int rc; - int i; - int n; - - if (bracket == NULL) { - if (*str != 0) { - CERROR("Invalid pattern %s\n", str); - goto failed; - } else if (c != ncpt) { - CERROR("expect %d partitions but found %d\n", - ncpt, c); - goto failed; - } - break; - } - - if (sscanf(str, "%d%n", &cpt, &n) < 1) { - CERROR("Invalid cpu pattern %s\n", str); - goto failed; - } - - if (cpt < 0 || cpt >= ncpt) { - CERROR("Invalid partition id %d, total partitions %d\n", - cpt, ncpt); - goto failed; - } - - if (cfs_cpt_weight(cptab, cpt) != 0) { - CERROR("Partition %d has already been set.\n", cpt); - goto failed; - } - - str = cfs_trimwhite(str + n); - if (str != bracket) { - CERROR("Invalid pattern %s\n", str); - goto failed; - } - - bracket = strchr(str, ']'); - if (bracket == NULL) { - CERROR("missing right bracket for cpt %d, %s\n", - cpt, str); - goto failed; - } - - if (cfs_expr_list_parse(str, (bracket - str) + 1, - 0, high, &el) != 0) { - CERROR("Can't parse number range: %s\n", str); - goto failed; - } - - list_for_each_entry(range, &el->el_exprs, re_link) { - for (i = range->re_lo; i <= range->re_hi; i++) { - if ((i - range->re_lo) % range->re_stride != 0) - continue; - - rc = node ? 
cfs_cpt_set_node(cptab, cpt, i) : - cfs_cpt_set_cpu(cptab, cpt, i); - if (!rc) { - cfs_expr_list_free(el); - goto failed; - } - } - } - - cfs_expr_list_free(el); - - if (!cfs_cpt_online(cptab, cpt)) { - CERROR("No online CPU is found on partition %d\n", cpt); - goto failed; - } - - str = cfs_trimwhite(bracket + 1); - } - - return cptab; - - failed: - cfs_cpt_table_free(cptab); - return NULL; -} - -#ifdef CONFIG_HOTPLUG_CPU -static int -cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - bool warn; - - switch (action) { - case CPU_DEAD: - case CPU_DEAD_FROZEN: - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - spin_lock(&cpt_data.cpt_lock); - cpt_data.cpt_version++; - spin_unlock(&cpt_data.cpt_lock); - default: - if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) { - CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n", - cpu, action); - break; - } - - mutex_lock(&cpt_data.cpt_mutex); - /* if all HTs in a core are offline, it may break affinity */ - cpumask_copy(cpt_data.cpt_cpumask, - topology_sibling_cpumask(cpu)); - warn = cpumask_any_and(cpt_data.cpt_cpumask, - cpu_online_mask) >= nr_cpu_ids; - mutex_unlock(&cpt_data.cpt_mutex); - CDEBUG(warn ? 
D_WARNING : D_INFO, - "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n", - cpu, action); - } - - return NOTIFY_OK; -} - -static struct notifier_block cfs_cpu_notifier = { - .notifier_call = cfs_cpu_notify, - .priority = 0 -}; - -#endif - -void -cfs_cpu_fini(void) -{ - if (cfs_cpt_table != NULL) - cfs_cpt_table_free(cfs_cpt_table); - -#ifdef CONFIG_HOTPLUG_CPU - unregister_hotcpu_notifier(&cfs_cpu_notifier); -#endif - if (cpt_data.cpt_cpumask != NULL) - LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size()); -} - -int -cfs_cpu_init(void) -{ - LASSERT(cfs_cpt_table == NULL); - - memset(&cpt_data, 0, sizeof(cpt_data)); - - LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size()); - if (cpt_data.cpt_cpumask == NULL) { - CERROR("Failed to allocate scratch buffer\n"); - return -1; - } - - spin_lock_init(&cpt_data.cpt_lock); - mutex_init(&cpt_data.cpt_mutex); - -#ifdef CONFIG_HOTPLUG_CPU - register_hotcpu_notifier(&cfs_cpu_notifier); -#endif - - if (*cpu_pattern != 0) { - cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern); - if (cfs_cpt_table == NULL) { - CERROR("Failed to create cptab from pattern %s\n", - cpu_pattern); - goto failed; - } - - } else { - cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions); - if (cfs_cpt_table == NULL) { - CERROR("Failed to create ptable with npartitions %d\n", - cpu_npartitions); - goto failed; - } - } - - spin_lock(&cpt_data.cpt_lock); - if (cfs_cpt_table->ctb_version != cpt_data.cpt_version) { - spin_unlock(&cpt_data.cpt_lock); - CERROR("CPU hotplug/unplug during setup\n"); - goto failed; - } - spin_unlock(&cpt_data.cpt_lock); - - LCONSOLE(0, "HW CPU cores: %d, npartitions: %d\n", - num_online_cpus(), cfs_cpt_number(cfs_cpt_table)); - return 0; - - failed: - cfs_cpu_fini(); - return -1; -} - -#endif diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c deleted file mode 100644 index 
db0572733..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c +++ /dev/null @@ -1,137 +0,0 @@ -/* GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - -/* - * Copyright 2012 Xyratex Technology Limited - */ - -/* - * This is crypto api shash wrappers to zlib_adler32. 
- */ - -#include <linux/module.h> -#include <linux/zutil.h> -#include <crypto/internal/hash.h> -#include "linux-crypto.h" - -#define CHKSUM_BLOCK_SIZE 1 -#define CHKSUM_DIGEST_SIZE 4 - -static int adler32_cra_init(struct crypto_tfm *tfm) -{ - u32 *key = crypto_tfm_ctx(tfm); - - *key = 1; - - return 0; -} - -static int adler32_setkey(struct crypto_shash *hash, const u8 *key, - unsigned int keylen) -{ - u32 *mctx = crypto_shash_ctx(hash); - - if (keylen != sizeof(u32)) { - crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - *mctx = *(u32 *)key; - return 0; -} - -static int adler32_init(struct shash_desc *desc) -{ - u32 *mctx = crypto_shash_ctx(desc->tfm); - u32 *cksump = shash_desc_ctx(desc); - - *cksump = *mctx; - - return 0; -} - -static int adler32_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - u32 *cksump = shash_desc_ctx(desc); - - *cksump = zlib_adler32(*cksump, data, len); - return 0; -} - -static int __adler32_finup(u32 *cksump, const u8 *data, unsigned int len, - u8 *out) -{ - *(u32 *)out = zlib_adler32(*cksump, data, len); - return 0; -} - -static int adler32_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __adler32_finup(shash_desc_ctx(desc), data, len, out); -} - -static int adler32_final(struct shash_desc *desc, u8 *out) -{ - u32 *cksump = shash_desc_ctx(desc); - - *(u32 *)out = *cksump; - return 0; -} - -static int adler32_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __adler32_finup(crypto_shash_ctx(desc->tfm), data, len, - out); -} - -static struct shash_alg alg = { - .setkey = adler32_setkey, - .init = adler32_init, - .update = adler32_update, - .final = adler32_final, - .finup = adler32_finup, - .digest = adler32_digest, - .descsize = sizeof(u32), - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "adler32", - .cra_driver_name = "adler32-zlib", - .cra_priority = 100, - .cra_blocksize = 
CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(u32), - .cra_module = THIS_MODULE, - .cra_init = adler32_cra_init, - } -}; - -int cfs_crypto_adler32_register(void) -{ - return crypto_register_shash(&alg); -} - -void cfs_crypto_adler32_unregister(void) -{ - crypto_unregister_shash(&alg); -} diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c deleted file mode 100644 index 079d50ebf..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c +++ /dev/null @@ -1,290 +0,0 @@ -/* GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - -/* - * Copyright 2012 Xyratex Technology Limited - * - * Copyright (c) 2012, Intel Corporation. 
- */ - -#include <linux/crypto.h> -#include <linux/scatterlist.h> -#include "../../../include/linux/libcfs/libcfs.h" -#include "linux-crypto.h" -/** - * Array of hash algorithm speed in MByte per second - */ -static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX]; - -static int cfs_crypto_hash_alloc(unsigned char alg_id, - const struct cfs_crypto_hash_type **type, - struct hash_desc *desc, unsigned char *key, - unsigned int key_len) -{ - int err = 0; - - *type = cfs_crypto_hash_type(alg_id); - - if (*type == NULL) { - CWARN("Unsupported hash algorithm id = %d, max id is %d\n", - alg_id, CFS_HASH_ALG_MAX); - return -EINVAL; - } - desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0); - - if (desc->tfm == NULL) - return -EINVAL; - - if (IS_ERR(desc->tfm)) { - CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n", - (*type)->cht_name); - return PTR_ERR(desc->tfm); - } - - desc->flags = 0; - - /** Shash have different logic for initialization then digest - * shash: crypto_hash_setkey, crypto_hash_init - * digest: crypto_digest_init, crypto_digest_setkey - * Skip this function for digest, because we use shash logic at - * cfs_crypto_hash_alloc. 
- */ - if (key != NULL) - err = crypto_hash_setkey(desc->tfm, key, key_len); - else if ((*type)->cht_key != 0) - err = crypto_hash_setkey(desc->tfm, - (unsigned char *)&((*type)->cht_key), - (*type)->cht_size); - - if (err != 0) { - crypto_free_hash(desc->tfm); - return err; - } - - CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n", - (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name, - (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name, - cfs_crypto_hash_speeds[alg_id]); - - return crypto_hash_init(desc); -} - -int cfs_crypto_hash_digest(unsigned char alg_id, - const void *buf, unsigned int buf_len, - unsigned char *key, unsigned int key_len, - unsigned char *hash, unsigned int *hash_len) -{ - struct scatterlist sl; - struct hash_desc hdesc; - int err; - const struct cfs_crypto_hash_type *type; - - if (buf == NULL || buf_len == 0 || hash_len == NULL) - return -EINVAL; - - err = cfs_crypto_hash_alloc(alg_id, &type, &hdesc, key, key_len); - if (err != 0) - return err; - - if (hash == NULL || *hash_len < type->cht_size) { - *hash_len = type->cht_size; - crypto_free_hash(hdesc.tfm); - return -ENOSPC; - } - sg_init_one(&sl, buf, buf_len); - - hdesc.flags = 0; - err = crypto_hash_digest(&hdesc, &sl, sl.length, hash); - crypto_free_hash(hdesc.tfm); - - return err; -} -EXPORT_SYMBOL(cfs_crypto_hash_digest); - -struct cfs_crypto_hash_desc * - cfs_crypto_hash_init(unsigned char alg_id, - unsigned char *key, unsigned int key_len) -{ - - struct hash_desc *hdesc; - int err; - const struct cfs_crypto_hash_type *type; - - hdesc = kmalloc(sizeof(*hdesc), 0); - if (hdesc == NULL) - return ERR_PTR(-ENOMEM); - - err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len); - - if (err) { - kfree(hdesc); - return ERR_PTR(err); - } - return (struct cfs_crypto_hash_desc *)hdesc; -} -EXPORT_SYMBOL(cfs_crypto_hash_init); - -int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc, - struct page *page, unsigned int offset, - unsigned int len) -{ - struct 
scatterlist sl; - - sg_init_table(&sl, 1); - sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK); - - return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length); -} -EXPORT_SYMBOL(cfs_crypto_hash_update_page); - -int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc, - const void *buf, unsigned int buf_len) -{ - struct scatterlist sl; - - sg_init_one(&sl, buf, buf_len); - - return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length); -} -EXPORT_SYMBOL(cfs_crypto_hash_update); - -/* If hash_len pointer is NULL - destroy descriptor. */ -int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, - unsigned char *hash, unsigned int *hash_len) -{ - int err; - int size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm); - - if (hash_len == NULL) { - crypto_free_hash(((struct hash_desc *)hdesc)->tfm); - kfree(hdesc); - return 0; - } - if (hash == NULL || *hash_len < size) { - *hash_len = size; - return -ENOSPC; - } - err = crypto_hash_final((struct hash_desc *) hdesc, hash); - - if (err < 0) { - /* May be caller can fix error */ - return err; - } - crypto_free_hash(((struct hash_desc *)hdesc)->tfm); - kfree(hdesc); - return err; -} -EXPORT_SYMBOL(cfs_crypto_hash_final); - -static void cfs_crypto_performance_test(unsigned char alg_id, - const unsigned char *buf, - unsigned int buf_len) -{ - unsigned long start, end; - int bcount, err = 0; - int sec = 1; /* do test only 1 sec */ - unsigned char hash[64]; - unsigned int hash_len = 64; - - for (start = jiffies, end = start + sec * HZ, bcount = 0; - time_before(jiffies, end); bcount++) { - err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0, - hash, &hash_len); - if (err) - break; - - } - end = jiffies; - - if (err) { - cfs_crypto_hash_speeds[alg_id] = -1; - CDEBUG(D_INFO, "Crypto hash algorithm %s, err = %d\n", - cfs_crypto_hash_name(alg_id), err); - } else { - unsigned long tmp; - - tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) * - 1000) / (1024 * 1024); - 
cfs_crypto_hash_speeds[alg_id] = (int)tmp; - } - CDEBUG(D_INFO, "Crypto hash algorithm %s speed = %d MB/s\n", - cfs_crypto_hash_name(alg_id), cfs_crypto_hash_speeds[alg_id]); -} - -int cfs_crypto_hash_speed(unsigned char hash_alg) -{ - if (hash_alg < CFS_HASH_ALG_MAX) - return cfs_crypto_hash_speeds[hash_alg]; - else - return -1; -} -EXPORT_SYMBOL(cfs_crypto_hash_speed); - -/** - * Do performance test for all hash algorithms. - */ -static int cfs_crypto_test_hashes(void) -{ - unsigned char i; - unsigned char *data; - unsigned int j; - /* Data block size for testing hash. Maximum - * kmalloc size for 2.6.18 kernel is 128K */ - unsigned int data_len = 1 * 128 * 1024; - - data = kmalloc(data_len, 0); - if (data == NULL) { - CERROR("Failed to allocate mem\n"); - return -ENOMEM; - } - - for (j = 0; j < data_len; j++) - data[j] = j & 0xff; - - for (i = 0; i < CFS_HASH_ALG_MAX; i++) - cfs_crypto_performance_test(i, data, data_len); - - kfree(data); - return 0; -} - -static int adler32; - -int cfs_crypto_register(void) -{ - request_module("crc32c"); - - adler32 = cfs_crypto_adler32_register(); - - /* check all algorithms and do performance test */ - cfs_crypto_test_hashes(); - return 0; -} - -void cfs_crypto_unregister(void) -{ - if (adler32 == 0) - cfs_crypto_adler32_unregister(); - - return; -} diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h deleted file mode 100644 index 18e8cd4d8..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h +++ /dev/null @@ -1,29 +0,0 @@ - /* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - -/** - * Functions for start/stop shash adler32 algorithm. - */ -int cfs_crypto_adler32_register(void); -void cfs_crypto_adler32_unregister(void); diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c deleted file mode 100644 index 68515d913..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c +++ /dev/null @@ -1,112 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2015, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/libcfs/linux/linux-curproc.c - * - * Lustre curproc API implementation for Linux kernel - * - * Author: Nikita Danilov <nikita@clusterfs.com> - */ - -#include <linux/sched.h> -#include <linux/fs_struct.h> - -#include <linux/compat.h> -#include <linux/thread_info.h> - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../../include/linux/libcfs/libcfs.h" - -/* - * Implementation of cfs_curproc API (see portals/include/libcfs/curproc.h) - * for Linux kernel. 
- */ - -void cfs_cap_raise(cfs_cap_t cap) -{ - struct cred *cred; - - cred = prepare_creds(); - if (cred) { - cap_raise(cred->cap_effective, cap); - commit_creds(cred); - } -} - -void cfs_cap_lower(cfs_cap_t cap) -{ - struct cred *cred; - - cred = prepare_creds(); - if (cred) { - cap_lower(cred->cap_effective, cap); - commit_creds(cred); - } -} - -int cfs_cap_raised(cfs_cap_t cap) -{ - return cap_raised(current_cap(), cap); -} - -static void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap) -{ - /* XXX lost high byte */ - *cap = kcap.cap[0]; -} - -cfs_cap_t cfs_curproc_cap_pack(void) -{ - cfs_cap_t cap; - - cfs_kernel_cap_pack(current_cap(), &cap); - return cap; -} - -EXPORT_SYMBOL(cfs_cap_raise); -EXPORT_SYMBOL(cfs_cap_lower); -EXPORT_SYMBOL(cfs_cap_raised); -EXPORT_SYMBOL(cfs_curproc_cap_pack); - -/* - * Local variables: - * c-indentation-style: "K&R" - * c-basic-offset: 8 - * tab-width: 8 - * fill-column: 80 - * scroll-step: 1 - * End: - */ diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c deleted file mode 100644 index 59c7bf3cb..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c +++ /dev/null @@ -1,199 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/libcfs/linux/linux-debug.c - * - * Author: Phil Schwan <phil@clusterfs.com> - */ - -#include <linux/module.h> -#include <linux/kmod.h> -#include <linux/notifier.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/string.h> -#include <linux/stat.h> -#include <linux/errno.h> -#include <linux/unistd.h> -#include <linux/interrupt.h> -#include <linux/completion.h> -#include <linux/fs.h> -#include <linux/uaccess.h> -#include <linux/miscdevice.h> - -# define DEBUG_SUBSYSTEM S_LNET - -#include "../../../include/linux/libcfs/libcfs.h" - -#include "../tracefile.h" - -#include <linux/kallsyms.h> - -char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall"; -char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall"; - -/** - * Upcall function once a Lustre log has been dumped. 
- * - * \param file path of the dumped log - */ -void libcfs_run_debug_log_upcall(char *file) -{ - char *argv[3]; - int rc; - char *envp[] = { - "HOME=/", - "PATH=/sbin:/bin:/usr/sbin:/usr/bin", - NULL}; - - argv[0] = lnet_debug_log_upcall; - - LASSERTF(file != NULL, "called on a null filename\n"); - argv[1] = file; /* only need to pass the path of the file */ - - argv[2] = NULL; - - rc = call_usermodehelper(argv[0], argv, envp, 1); - if (rc < 0 && rc != -ENOENT) { - CERROR("Error %d invoking LNET debug log upcall %s %s; check /proc/sys/lnet/debug_log_upcall\n", - rc, argv[0], argv[1]); - } else { - CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n", - argv[0], argv[1]); - } -} - -void libcfs_run_upcall(char **argv) -{ - int rc; - int argc; - char *envp[] = { - "HOME=/", - "PATH=/sbin:/bin:/usr/sbin:/usr/bin", - NULL}; - - argv[0] = lnet_upcall; - argc = 1; - while (argv[argc] != NULL) - argc++; - - LASSERT(argc >= 2); - - rc = call_usermodehelper(argv[0], argv, envp, 1); - if (rc < 0 && rc != -ENOENT) { - CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /proc/sys/lnet/upcall\n", - rc, argv[0], argv[1], - argc < 3 ? "" : ",", argc < 3 ? "" : argv[2], - argc < 4 ? "" : ",", argc < 4 ? "" : argv[3], - argc < 5 ? "" : ",", argc < 5 ? "" : argv[4], - argc < 6 ? "" : ",..."); - } else { - CDEBUG(D_HA, "Invoked LNET upcall %s %s%s%s%s%s%s%s%s\n", - argv[0], argv[1], - argc < 3 ? "" : ",", argc < 3 ? "" : argv[2], - argc < 4 ? "" : ",", argc < 4 ? "" : argv[3], - argc < 5 ? "" : ",", argc < 5 ? "" : argv[4], - argc < 6 ? 
"" : ",..."); - } -} - -void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata) -{ - char *argv[6]; - char buf[32]; - - snprintf(buf, sizeof(buf), "%d", msgdata->msg_line); - - argv[1] = "LBUG"; - argv[2] = (char *)msgdata->msg_file; - argv[3] = (char *)msgdata->msg_fn; - argv[4] = buf; - argv[5] = NULL; - - libcfs_run_upcall (argv); -} - -/* coverity[+kill] */ -void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata) -{ - libcfs_catastrophe = 1; - libcfs_debug_msg(msgdata, "LBUG\n"); - - if (in_interrupt()) { - panic("LBUG in interrupt.\n"); - /* not reached */ - } - - dump_stack(); - if (!libcfs_panic_on_lbug) - libcfs_debug_dumplog(); - libcfs_run_lbug_upcall(msgdata); - if (libcfs_panic_on_lbug) - panic("LBUG"); - set_task_state(current, TASK_UNINTERRUPTIBLE); - while (1) - schedule(); -} - -static int panic_notifier(struct notifier_block *self, unsigned long unused1, - void *unused2) -{ - if (libcfs_panic_in_progress) - return 0; - - libcfs_panic_in_progress = 1; - mb(); - - return 0; -} - -static struct notifier_block libcfs_panic_notifier = { - .notifier_call = panic_notifier, - .next = NULL, - .priority = 10000, -}; - -void libcfs_register_panic_notifier(void) -{ - atomic_notifier_chain_register(&panic_notifier_list, &libcfs_panic_notifier); -} - -void libcfs_unregister_panic_notifier(void) -{ - atomic_notifier_chain_unregister(&panic_notifier_list, &libcfs_panic_notifier); -} - -EXPORT_SYMBOL(libcfs_run_lbug_upcall); -EXPORT_SYMBOL(lbug_with_loc); diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c deleted file mode 100644 index 025e2f002..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c +++ /dev/null @@ -1,59 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - */ -/* - * This file creates a memory allocation primitive for Lustre, that - * allows to fallback to vmalloc allocations should regular kernel allocations - * fail due to size or system memory fragmentation. - * - * Author: Oleg Drokin <green@linuxhacker.ru> - * - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Seagate Technology. - */ -#include <linux/slab.h> -#include <linux/vmalloc.h> - -#include "../../../include/linux/libcfs/libcfs.h" - -void *libcfs_kvzalloc(size_t size, gfp_t flags) -{ - void *ret; - - ret = kzalloc(size, flags | __GFP_NOWARN); - if (!ret) - ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL); - return ret; -} -EXPORT_SYMBOL(libcfs_kvzalloc); - -void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size, - gfp_t flags) -{ - void *ret; - - ret = kzalloc_node(size, flags | __GFP_NOWARN, - cfs_cpt_spread_node(cptab, cpt)); - if (!ret) { - WARN_ON(!(flags & (__GFP_FS|__GFP_HIGH))); - ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt)); - } - - return ret; -} -EXPORT_SYMBOL(libcfs_kvzalloc_cpt); diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c deleted file mode 100644 index 70a99cf01..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c +++ /dev/null @@ -1,180 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE 
HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../../include/linux/libcfs/libcfs.h" - -#define LNET_MINOR 240 - -int libcfs_ioctl_getdata(char *buf, char *end, void *arg) -{ - struct libcfs_ioctl_hdr *hdr; - struct libcfs_ioctl_data *data; - int orig_len; - - hdr = (struct libcfs_ioctl_hdr *)buf; - data = (struct libcfs_ioctl_data *)buf; - - if (copy_from_user(buf, arg, sizeof(*hdr))) - return -EFAULT; - - if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) { - CERROR("PORTALS: version mismatch kernel vs application\n"); - return -EINVAL; - } - - if (hdr->ioc_len >= end - buf) { - CERROR("PORTALS: user buffer exceeds kernel buffer\n"); - return -EINVAL; - } - - if (hdr->ioc_len < sizeof(struct libcfs_ioctl_data)) { - CERROR("PORTALS: user buffer too small for ioctl\n"); - return -EINVAL; - } - - orig_len = hdr->ioc_len; - if (copy_from_user(buf, arg, hdr->ioc_len)) - return -EFAULT; - if (orig_len != data->ioc_len) - return -EINVAL; - - if (libcfs_ioctl_is_invalid(data)) { - CERROR("PORTALS: ioctl not correctly formatted\n"); - return -EINVAL; - } - - if (data->ioc_inllen1) - data->ioc_inlbuf1 = &data->ioc_bulk[0]; - - if (data->ioc_inllen2) - data->ioc_inlbuf2 = &data->ioc_bulk[0] + - cfs_size_round(data->ioc_inllen1); - - return 0; -} - -int libcfs_ioctl_popdata(void *arg, void *data, int size) -{ - if (copy_to_user((char *)arg, data, size)) - return -EFAULT; - return 0; -} - -static int -libcfs_psdev_open(struct inode *inode, struct file *file) -{ - struct libcfs_device_userstate **pdu = NULL; - int rc = 0; - - if (!inode) - return -EINVAL; - pdu = (struct libcfs_device_userstate **)&file->private_data; - if (libcfs_psdev_ops.p_open != NULL) - rc = libcfs_psdev_ops.p_open(0, (void *)pdu); - else - return -EPERM; - return rc; -} - -/* called when closing /dev/device */ -static int -libcfs_psdev_release(struct inode *inode, struct file *file) -{ - struct libcfs_device_userstate *pdu; - int rc = 0; - - if (!inode) - return -EINVAL; - pdu = file->private_data; - 
if (libcfs_psdev_ops.p_close != NULL) - rc = libcfs_psdev_ops.p_close(0, (void *)pdu); - else - rc = -EPERM; - return rc; -} - -static long libcfs_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct cfs_psdev_file pfile; - int rc = 0; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (_IOC_TYPE(cmd) != IOC_LIBCFS_TYPE || - _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR || - _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) { - CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n", - _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd)); - return -EINVAL; - } - - /* Handle platform-dependent IOC requests */ - switch (cmd) { - case IOC_LIBCFS_PANIC: - if (!capable(CFS_CAP_SYS_BOOT)) - return -EPERM; - panic("debugctl-invoked panic"); - return 0; - case IOC_LIBCFS_MEMHOG: - if (!capable(CFS_CAP_SYS_ADMIN)) - return -EPERM; - /* go thought */ - } - - pfile.off = 0; - pfile.private_data = file->private_data; - if (libcfs_psdev_ops.p_ioctl != NULL) - rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void *)arg); - else - rc = -EPERM; - return rc; -} - -static const struct file_operations libcfs_fops = { - .unlocked_ioctl = libcfs_ioctl, - .open = libcfs_psdev_open, - .release = libcfs_psdev_release, -}; - -struct miscdevice libcfs_dev = { - .minor = LNET_MINOR, - .name = "lnet", - .fops = &libcfs_fops, -}; diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c deleted file mode 100644 index 890844602..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c +++ /dev/null @@ -1,147 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_SUBSYSTEM S_LNET -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/fs_struct.h> -#include <linux/sched.h> - -#include "../../../include/linux/libcfs/libcfs.h" - -#if defined(CONFIG_KGDB) -#include <linux/kgdb.h> -#endif - -/** - * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively - * waiting threads, which is not always desirable because all threads will - * be waken up again and again, even user only needs a few of them to be - * active most time. This is not good for performance because cache can - * be polluted by different threads. - * - * LIFO list can resolve this problem because we always wakeup the most - * recent active thread by default. - * - * NB: please don't call non-exclusive & exclusive wait on the same - * waitq if add_wait_queue_exclusive_head is used. 
- */ -void -add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link) -{ - unsigned long flags; - - spin_lock_irqsave(&waitq->lock, flags); - __add_wait_queue_exclusive(waitq, link); - spin_unlock_irqrestore(&waitq->lock, flags); -} -EXPORT_SYMBOL(add_wait_queue_exclusive_head); - -sigset_t -cfs_block_allsigs(void) -{ - unsigned long flags; - sigset_t old; - - spin_lock_irqsave(¤t->sighand->siglock, flags); - old = current->blocked; - sigfillset(¤t->blocked); - recalc_sigpending(); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); - - return old; -} -EXPORT_SYMBOL(cfs_block_allsigs); - -sigset_t cfs_block_sigs(unsigned long sigs) -{ - unsigned long flags; - sigset_t old; - - spin_lock_irqsave(¤t->sighand->siglock, flags); - old = current->blocked; - sigaddsetmask(¤t->blocked, sigs); - recalc_sigpending(); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); - return old; -} -EXPORT_SYMBOL(cfs_block_sigs); - -/* Block all signals except for the @sigs */ -sigset_t cfs_block_sigsinv(unsigned long sigs) -{ - unsigned long flags; - sigset_t old; - - spin_lock_irqsave(¤t->sighand->siglock, flags); - old = current->blocked; - sigaddsetmask(¤t->blocked, ~sigs); - recalc_sigpending(); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); - - return old; -} -EXPORT_SYMBOL(cfs_block_sigsinv); - -void -cfs_restore_sigs(sigset_t old) -{ - unsigned long flags; - - spin_lock_irqsave(¤t->sighand->siglock, flags); - current->blocked = old; - recalc_sigpending(); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); -} -EXPORT_SYMBOL(cfs_restore_sigs); - -int -cfs_signal_pending(void) -{ - return signal_pending(current); -} -EXPORT_SYMBOL(cfs_signal_pending); - -void -cfs_clear_sigpending(void) -{ - unsigned long flags; - - spin_lock_irqsave(¤t->sighand->siglock, flags); - clear_tsk_thread_flag(current, TIF_SIGPENDING); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); -} -EXPORT_SYMBOL(cfs_clear_sigpending); diff --git 
a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c deleted file mode 100644 index 64a136cd5..000000000 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c +++ /dev/null @@ -1,272 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_SUBSYSTEM S_LNET -#define LUSTRE_TRACEFILE_PRIVATE - -#include "../../../include/linux/libcfs/libcfs.h" -#include "../tracefile.h" - -/* percents to share the total debug memory for each type */ -static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = { - 80, /* 80% pages for CFS_TCD_TYPE_PROC */ - 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */ - 10 /* 10% pages for CFS_TCD_TYPE_IRQ */ -}; - -char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX]; - -static DECLARE_RWSEM(cfs_tracefile_sem); - -int cfs_tracefile_init_arch(void) -{ - int i; - int j; - struct cfs_trace_cpu_data *tcd; - - /* initialize trace_data */ - memset(cfs_trace_data, 0, sizeof(cfs_trace_data)); - for (i = 0; i < CFS_TCD_TYPE_MAX; i++) { - cfs_trace_data[i] = - kmalloc(sizeof(union cfs_trace_data_union) * - num_possible_cpus(), GFP_KERNEL); - if (cfs_trace_data[i] == NULL) - goto out; - - } - - /* arch related info initialized */ - cfs_tcd_for_each(tcd, i, j) { - spin_lock_init(&tcd->tcd_lock); - tcd->tcd_pages_factor = pages_factor[i]; - tcd->tcd_type = i; - tcd->tcd_cpu = j; - } - - for (i = 0; i < num_possible_cpus(); i++) - for (j = 0; j < 3; j++) { - cfs_trace_console_buffers[i][j] = - kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE, - GFP_KERNEL); - - if (cfs_trace_console_buffers[i][j] == NULL) - goto out; - } - - return 0; - -out: - cfs_tracefile_fini_arch(); - printk(KERN_ERR "lnet: Not enough memory\n"); - return -ENOMEM; -} - -void cfs_tracefile_fini_arch(void) -{ - int i; - int j; - - for (i = 0; i < num_possible_cpus(); i++) - for (j = 0; j < 3; j++) { - kfree(cfs_trace_console_buffers[i][j]); - cfs_trace_console_buffers[i][j] = NULL; - } - - for (i = 0; cfs_trace_data[i] != NULL; i++) { - kfree(cfs_trace_data[i]); - cfs_trace_data[i] = NULL; - } -} - -void cfs_tracefile_read_lock(void) -{ - down_read(&cfs_tracefile_sem); -} - -void cfs_tracefile_read_unlock(void) -{ - up_read(&cfs_tracefile_sem); -} - -void cfs_tracefile_write_lock(void) -{ - 
down_write(&cfs_tracefile_sem); -} - -void cfs_tracefile_write_unlock(void) -{ - up_write(&cfs_tracefile_sem); -} - -cfs_trace_buf_type_t cfs_trace_buf_idx_get(void) -{ - if (in_irq()) - return CFS_TCD_TYPE_IRQ; - else if (in_softirq()) - return CFS_TCD_TYPE_SOFTIRQ; - else - return CFS_TCD_TYPE_PROC; -} - -/* - * The walking argument indicates the locking comes from all tcd types - * iterator and we must lock it and dissable local irqs to avoid deadlocks - * with other interrupt locks that might be happening. See LU-1311 - * for details. - */ -int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking) - __acquires(&tcd->tc_lock) -{ - __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX); - if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) - spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags); - else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) - spin_lock_bh(&tcd->tcd_lock); - else if (unlikely(walking)) - spin_lock_irq(&tcd->tcd_lock); - else - spin_lock(&tcd->tcd_lock); - return 1; -} - -void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking) - __releases(&tcd->tcd_lock) -{ - __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX); - if (tcd->tcd_type == CFS_TCD_TYPE_IRQ) - spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags); - else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ) - spin_unlock_bh(&tcd->tcd_lock); - else if (unlikely(walking)) - spin_unlock_irq(&tcd->tcd_lock); - else - spin_unlock(&tcd->tcd_lock); -} - -int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd, - struct cfs_trace_page *tage) -{ - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. 
- */ - return tcd->tcd_cpu == tage->cpu; -} - -void -cfs_set_ptldebug_header(struct ptldebug_header *header, - struct libcfs_debug_msg_data *msgdata, - unsigned long stack) -{ - struct timespec64 ts; - - ktime_get_real_ts64(&ts); - - header->ph_subsys = msgdata->msg_subsys; - header->ph_mask = msgdata->msg_mask; - header->ph_cpu_id = smp_processor_id(); - header->ph_type = cfs_trace_buf_idx_get(); - /* y2038 safe since all user space treats this as unsigned, but - * will overflow in 2106 */ - header->ph_sec = (u32)ts.tv_sec; - header->ph_usec = ts.tv_nsec / NSEC_PER_USEC; - header->ph_stack = stack; - header->ph_pid = current->pid; - header->ph_line_num = msgdata->msg_line; - header->ph_extern_pid = 0; - return; -} - -static char * -dbghdr_to_err_string(struct ptldebug_header *hdr) -{ - switch (hdr->ph_subsys) { - case S_LND: - case S_LNET: - return "LNetError"; - default: - return "LustreError"; - } -} - -static char * -dbghdr_to_info_string(struct ptldebug_header *hdr) -{ - switch (hdr->ph_subsys) { - case S_LND: - case S_LNET: - return "LNet"; - default: - return "Lustre"; - } -} - -void cfs_print_to_console(struct ptldebug_header *hdr, int mask, - const char *buf, int len, const char *file, - const char *fn) -{ - char *prefix = "Lustre", *ptype = NULL; - - if ((mask & D_EMERG) != 0) { - prefix = dbghdr_to_err_string(hdr); - ptype = KERN_EMERG; - } else if ((mask & D_ERROR) != 0) { - prefix = dbghdr_to_err_string(hdr); - ptype = KERN_ERR; - } else if ((mask & D_WARNING) != 0) { - prefix = dbghdr_to_info_string(hdr); - ptype = KERN_WARNING; - } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) { - prefix = dbghdr_to_info_string(hdr); - ptype = KERN_INFO; - } - - if ((mask & D_CONSOLE) != 0) { - printk("%s%s: %.*s", ptype, prefix, len, buf); - } else { - printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix, - hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num, - fn, len, buf); - } - return; -} - -int cfs_trace_max_debug_mb(void) -{ - int total_mb = 
(totalram_pages >> (20 - PAGE_SHIFT)); - - return max(512, (total_mb * 80)/100); -} diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c deleted file mode 100644 index 329d78ce2..000000000 --- a/drivers/staging/lustre/lustre/libcfs/module.c +++ /dev/null @@ -1,765 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, 2015 Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/string.h> -#include <linux/stat.h> -#include <linux/errno.h> -#include <linux/unistd.h> -#include <net/sock.h> -#include <linux/uio.h> - -#include <linux/uaccess.h> - -#include <linux/fs.h> -#include <linux/file.h> -#include <linux/list.h> - -#include <linux/sysctl.h> -#include <linux/debugfs.h> - -# define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" -#include <asm/div64.h> - -#include "../../include/linux/libcfs/libcfs_crypto.h" -#include "../../include/linux/lnet/lib-lnet.h" -#include "../../include/linux/lnet/lnet.h" -#include "tracefile.h" - -MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Portals v3.1"); -MODULE_LICENSE("GPL"); - -static struct dentry *lnet_debugfs_root; - -static void kportal_memhog_free(struct libcfs_device_userstate *ldu) -{ - struct page **level0p = &ldu->ldu_memhog_root_page; - struct page **level1p; - struct page **level2p; - int count1; - int count2; - - if (*level0p != NULL) { - - level1p = (struct page **)page_address(*level0p); - count1 = 0; - - while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) && - *level1p != NULL) { - - level2p = (struct page **)page_address(*level1p); - count2 = 0; - - while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) && - *level2p != NULL) { - - __free_page(*level2p); - ldu->ldu_memhog_pages--; - level2p++; - count2++; - } - - __free_page(*level1p); - ldu->ldu_memhog_pages--; - level1p++; - count1++; - } - - __free_page(*level0p); - ldu->ldu_memhog_pages--; - - *level0p = NULL; - } - - LASSERT(ldu->ldu_memhog_pages == 0); -} - -static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages, - gfp_t flags) -{ - struct page **level0p; - struct page **level1p; - struct page **level2p; - int count1; - int count2; - - LASSERT(ldu->ldu_memhog_pages == 0); - LASSERT(ldu->ldu_memhog_root_page == NULL); - - if (npages < 0) - return -EINVAL; 
- - if (npages == 0) - return 0; - - level0p = &ldu->ldu_memhog_root_page; - *level0p = alloc_page(flags); - if (*level0p == NULL) - return -ENOMEM; - ldu->ldu_memhog_pages++; - - level1p = (struct page **)page_address(*level0p); - count1 = 0; - memset(level1p, 0, PAGE_CACHE_SIZE); - - while (ldu->ldu_memhog_pages < npages && - count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) { - - if (cfs_signal_pending()) - return -EINTR; - - *level1p = alloc_page(flags); - if (*level1p == NULL) - return -ENOMEM; - ldu->ldu_memhog_pages++; - - level2p = (struct page **)page_address(*level1p); - count2 = 0; - memset(level2p, 0, PAGE_CACHE_SIZE); - - while (ldu->ldu_memhog_pages < npages && - count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) { - - if (cfs_signal_pending()) - return -EINTR; - - *level2p = alloc_page(flags); - if (*level2p == NULL) - return -ENOMEM; - ldu->ldu_memhog_pages++; - - level2p++; - count2++; - } - - level1p++; - count1++; - } - - return 0; -} - -/* called when opening /dev/device */ -static int libcfs_psdev_open(unsigned long flags, void *args) -{ - struct libcfs_device_userstate *ldu; - - try_module_get(THIS_MODULE); - - LIBCFS_ALLOC(ldu, sizeof(*ldu)); - if (ldu != NULL) { - ldu->ldu_memhog_pages = 0; - ldu->ldu_memhog_root_page = NULL; - } - *(struct libcfs_device_userstate **)args = ldu; - - return 0; -} - -/* called when closing /dev/device */ -static int libcfs_psdev_release(unsigned long flags, void *args) -{ - struct libcfs_device_userstate *ldu; - - ldu = (struct libcfs_device_userstate *)args; - if (ldu != NULL) { - kportal_memhog_free(ldu); - LIBCFS_FREE(ldu, sizeof(*ldu)); - } - - module_put(THIS_MODULE); - return 0; -} - -static DECLARE_RWSEM(ioctl_list_sem); -static LIST_HEAD(ioctl_list); - -int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand) -{ - int rc = 0; - - down_write(&ioctl_list_sem); - if (!list_empty(&hand->item)) - rc = -EBUSY; - else - list_add_tail(&hand->item, &ioctl_list); - up_write(&ioctl_list_sem); - - return rc; -} 
-EXPORT_SYMBOL(libcfs_register_ioctl); - -int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand) -{ - int rc = 0; - - down_write(&ioctl_list_sem); - if (list_empty(&hand->item)) - rc = -ENOENT; - else - list_del_init(&hand->item); - up_write(&ioctl_list_sem); - - return rc; -} -EXPORT_SYMBOL(libcfs_deregister_ioctl); - -static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd, - void *arg, struct libcfs_ioctl_data *data) -{ - int err = -EINVAL; - - switch (cmd) { - case IOC_LIBCFS_CLEAR_DEBUG: - libcfs_debug_clear_buffer(); - return 0; - /* - * case IOC_LIBCFS_PANIC: - * Handled in arch/cfs_module.c - */ - case IOC_LIBCFS_MARK_DEBUG: - if (data->ioc_inlbuf1 == NULL || - data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') - return -EINVAL; - libcfs_debug_mark_buffer(data->ioc_inlbuf1); - return 0; - case IOC_LIBCFS_MEMHOG: - if (pfile->private_data == NULL) { - err = -EINVAL; - } else { - kportal_memhog_free(pfile->private_data); - /* XXX The ioc_flags is not GFP flags now, need to be fixed */ - err = kportal_memhog_alloc(pfile->private_data, - data->ioc_count, - data->ioc_flags); - if (err != 0) - kportal_memhog_free(pfile->private_data); - } - break; - - default: { - struct libcfs_ioctl_handler *hand; - - err = -EINVAL; - down_read(&ioctl_list_sem); - list_for_each_entry(hand, &ioctl_list, item) { - err = hand->handle_ioctl(cmd, data); - if (err != -EINVAL) { - if (err == 0) - err = libcfs_ioctl_popdata(arg, - data, sizeof(*data)); - break; - } - } - up_read(&ioctl_list_sem); - break; - } - } - - return err; -} - -static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg) -{ - char *buf; - struct libcfs_ioctl_data *data; - int err = 0; - - LIBCFS_ALLOC_GFP(buf, 1024, GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - - /* 'cmd' and permissions get checked in our arch-specific caller */ - if (libcfs_ioctl_getdata(buf, buf + 800, arg)) { - CERROR("PORTALS ioctl: data error\n"); - err = -EINVAL; - goto out; - } - 
data = (struct libcfs_ioctl_data *)buf; - - err = libcfs_ioctl_int(pfile, cmd, arg, data); - -out: - LIBCFS_FREE(buf, 1024); - return err; -} - -struct cfs_psdev_ops libcfs_psdev_ops = { - libcfs_psdev_open, - libcfs_psdev_release, - NULL, - NULL, - libcfs_ioctl -}; - -static int proc_call_handler(void *data, int write, loff_t *ppos, - void __user *buffer, size_t *lenp, - int (*handler)(void *data, int write, - loff_t pos, void __user *buffer, int len)) -{ - int rc = handler(data, write, *ppos, buffer, *lenp); - - if (rc < 0) - return rc; - - if (write) { - *ppos += *lenp; - } else { - *lenp = rc; - *ppos += rc; - } - return 0; -} - -static int __proc_dobitmasks(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - const int tmpstrlen = 512; - char *tmpstr; - int rc; - unsigned int *mask = data; - int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0; - int is_printk = (mask == &libcfs_printk) ? 1 : 0; - - rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen); - if (rc < 0) - return rc; - - if (!write) { - libcfs_debug_mask2str(tmpstr, tmpstrlen, *mask, is_subsys); - rc = strlen(tmpstr); - - if (pos >= rc) { - rc = 0; - } else { - rc = cfs_trace_copyout_string(buffer, nob, - tmpstr + pos, "\n"); - } - } else { - rc = cfs_trace_copyin_string(tmpstr, tmpstrlen, buffer, nob); - if (rc < 0) { - kfree(tmpstr); - return rc; - } - - rc = libcfs_debug_str2mask(mask, tmpstr, is_subsys); - /* Always print LBUG/LASSERT to console, so keep this mask */ - if (is_printk) - *mask |= D_EMERG; - } - - kfree(tmpstr); - return rc; -} - -static int proc_dobitmasks(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_dobitmasks); -} - -static int __proc_dump_kernel(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - if (!write) - return 0; - - return cfs_trace_dump_debug_buffer_usrstr(buffer, nob); -} - -static int 
proc_dump_kernel(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_dump_kernel); -} - -static int __proc_daemon_file(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - if (!write) { - int len = strlen(cfs_tracefile); - - if (pos >= len) - return 0; - - return cfs_trace_copyout_string(buffer, nob, - cfs_tracefile + pos, "\n"); - } - - return cfs_trace_daemon_command_usrstr(buffer, nob); -} - -static int proc_daemon_file(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_daemon_file); -} - -static int libcfs_force_lbug(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - if (write) - LBUG(); - return 0; -} - -static int proc_fail_loc(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - int rc; - long old_fail_loc = cfs_fail_loc; - - rc = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); - if (old_fail_loc != cfs_fail_loc) - wake_up(&cfs_race_waitq); - return rc; -} - -static int __proc_cpt_table(void *data, int write, - loff_t pos, void __user *buffer, int nob) -{ - char *buf = NULL; - int len = 4096; - int rc = 0; - - if (write) - return -EPERM; - - LASSERT(cfs_cpt_table != NULL); - - while (1) { - LIBCFS_ALLOC(buf, len); - if (buf == NULL) - return -ENOMEM; - - rc = cfs_cpt_table_print(cfs_cpt_table, buf, len); - if (rc >= 0) - break; - - if (rc == -EFBIG) { - LIBCFS_FREE(buf, len); - len <<= 1; - continue; - } - goto out; - } - - if (pos >= rc) { - rc = 0; - goto out; - } - - rc = cfs_trace_copyout_string(buffer, nob, buf + pos, NULL); - out: - if (buf != NULL) - LIBCFS_FREE(buf, len); - return rc; -} - -static int proc_cpt_table(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return 
proc_call_handler(table->data, write, ppos, buffer, lenp, - __proc_cpt_table); -} - -static struct ctl_table lnet_table[] = { - /* - * NB No .strategy entries have been provided since sysctl(8) prefers - * to go via /proc for portability. - */ - { - .procname = "debug", - .data = &libcfs_debug, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dobitmasks, - }, - { - .procname = "subsystem_debug", - .data = &libcfs_subsystem_debug, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dobitmasks, - }, - { - .procname = "printk", - .data = &libcfs_printk, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dobitmasks, - }, - { - .procname = "cpu_partition_table", - .maxlen = 128, - .mode = 0444, - .proc_handler = &proc_cpt_table, - }, - - { - .procname = "upcall", - .data = lnet_upcall, - .maxlen = sizeof(lnet_upcall), - .mode = 0644, - .proc_handler = &proc_dostring, - }, - { - .procname = "debug_log_upcall", - .data = lnet_debug_log_upcall, - .maxlen = sizeof(lnet_debug_log_upcall), - .mode = 0644, - .proc_handler = &proc_dostring, - }, - { - .procname = "catastrophe", - .data = &libcfs_catastrophe, - .maxlen = sizeof(int), - .mode = 0444, - .proc_handler = &proc_dointvec, - }, - { - .procname = "dump_kernel", - .maxlen = 256, - .mode = 0200, - .proc_handler = &proc_dump_kernel, - }, - { - .procname = "daemon_file", - .mode = 0644, - .maxlen = 256, - .proc_handler = &proc_daemon_file, - }, - { - .procname = "force_lbug", - .data = NULL, - .maxlen = 0, - .mode = 0200, - .proc_handler = &libcfs_force_lbug - }, - { - .procname = "fail_loc", - .data = &cfs_fail_loc, - .maxlen = sizeof(cfs_fail_loc), - .mode = 0644, - .proc_handler = &proc_fail_loc - }, - { - .procname = "fail_val", - .data = &cfs_fail_val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec - }, - { - } -}; - -static const struct lnet_debugfs_symlink_def lnet_debugfs_symlinks[] = { - { "console_ratelimit", - 
"/sys/module/libcfs/parameters/libcfs_console_ratelimit"}, - { "debug_path", - "/sys/module/libcfs/parameters/libcfs_debug_file_path"}, - { "panic_on_lbug", - "/sys/module/libcfs/parameters/libcfs_panic_on_lbug"}, - { "libcfs_console_backoff", - "/sys/module/libcfs/parameters/libcfs_console_backoff"}, - { "debug_mb", - "/sys/module/libcfs/parameters/libcfs_debug_mb"}, - { "console_min_delay_centisecs", - "/sys/module/libcfs/parameters/libcfs_console_min_delay"}, - { "console_max_delay_centisecs", - "/sys/module/libcfs/parameters/libcfs_console_max_delay"}, - {}, -}; - -static ssize_t lnet_debugfs_read(struct file *filp, char __user *buf, - size_t count, loff_t *ppos) -{ - struct ctl_table *table = filp->private_data; - int error; - - error = table->proc_handler(table, 0, (void __user *)buf, &count, ppos); - if (!error) - error = count; - - return error; -} - -static ssize_t lnet_debugfs_write(struct file *filp, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct ctl_table *table = filp->private_data; - int error; - - error = table->proc_handler(table, 1, (void __user *)buf, &count, ppos); - if (!error) - error = count; - - return error; -} - -static const struct file_operations lnet_debugfs_file_operations = { - .open = simple_open, - .read = lnet_debugfs_read, - .write = lnet_debugfs_write, - .llseek = default_llseek, -}; - -void lustre_insert_debugfs(struct ctl_table *table, - const struct lnet_debugfs_symlink_def *symlinks) -{ - struct dentry *entry; - - if (lnet_debugfs_root == NULL) - lnet_debugfs_root = debugfs_create_dir("lnet", NULL); - - /* Even if we cannot create, just ignore it altogether) */ - if (IS_ERR_OR_NULL(lnet_debugfs_root)) - return; - - for (; table->procname; table++) - entry = debugfs_create_file(table->procname, table->mode, - lnet_debugfs_root, table, - &lnet_debugfs_file_operations); - - for (; symlinks && symlinks->name; symlinks++) - entry = debugfs_create_symlink(symlinks->name, - lnet_debugfs_root, - symlinks->target); - 
-} -EXPORT_SYMBOL_GPL(lustre_insert_debugfs); - -static void lustre_remove_debugfs(void) -{ - if (lnet_debugfs_root != NULL) - debugfs_remove_recursive(lnet_debugfs_root); - - lnet_debugfs_root = NULL; -} - -static int init_libcfs_module(void) -{ - int rc; - - rc = libcfs_debug_init(5 * 1024 * 1024); - if (rc < 0) { - pr_err("LustreError: libcfs_debug_init: %d\n", rc); - return rc; - } - - rc = cfs_cpu_init(); - if (rc != 0) - goto cleanup_debug; - - rc = misc_register(&libcfs_dev); - if (rc) { - CERROR("misc_register: error %d\n", rc); - goto cleanup_cpu; - } - - rc = cfs_wi_startup(); - if (rc) { - CERROR("initialize workitem: error %d\n", rc); - goto cleanup_deregister; - } - - /* max to 4 threads, should be enough for rehash */ - rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4); - rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY, - rc, &cfs_sched_rehash); - if (rc != 0) { - CERROR("Startup workitem scheduler: error: %d\n", rc); - goto cleanup_deregister; - } - - rc = cfs_crypto_register(); - if (rc) { - CERROR("cfs_crypto_register: error %d\n", rc); - goto cleanup_wi; - } - - lustre_insert_debugfs(lnet_table, lnet_debugfs_symlinks); - - CDEBUG(D_OTHER, "portals setup OK\n"); - return 0; - cleanup_wi: - cfs_wi_shutdown(); - cleanup_deregister: - misc_deregister(&libcfs_dev); -cleanup_cpu: - cfs_cpu_fini(); - cleanup_debug: - libcfs_debug_cleanup(); - return rc; -} - -static void exit_libcfs_module(void) -{ - int rc; - - lustre_remove_debugfs(); - - if (cfs_sched_rehash) { - cfs_wi_sched_destroy(cfs_sched_rehash); - cfs_sched_rehash = NULL; - } - - cfs_crypto_unregister(); - cfs_wi_shutdown(); - - misc_deregister(&libcfs_dev); - - cfs_cpu_fini(); - - rc = libcfs_debug_cleanup(); - if (rc) - pr_err("LustreError: libcfs_debug_cleanup: %d\n", rc); -} - -MODULE_VERSION("1.0.0"); - -module_init(init_libcfs_module); -module_exit(exit_libcfs_module); diff --git a/drivers/staging/lustre/lustre/libcfs/prng.c 
b/drivers/staging/lustre/lustre/libcfs/prng.c deleted file mode 100644 index 4147664ff..000000000 --- a/drivers/staging/lustre/lustre/libcfs/prng.c +++ /dev/null @@ -1,139 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/libcfs/prng.c - * - * concatenation of following two 16-bit multiply with carry generators - * x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16, - * number and carry packed within the same 32 bit integer. 
- * algorithm recommended by Marsaglia -*/ - -#include "../../include/linux/libcfs/libcfs.h" - -/* -From: George Marsaglia <geo@stat.fsu.edu> -Newsgroups: sci.math -Subject: Re: A RANDOM NUMBER GENERATOR FOR C -Date: Tue, 30 Sep 1997 05:29:35 -0700 - - * You may replace the two constants 36969 and 18000 by any - * pair of distinct constants from this list: - * 18000 18030 18273 18513 18879 19074 19098 19164 19215 19584 - * 19599 19950 20088 20508 20544 20664 20814 20970 21153 21243 - * 21423 21723 21954 22125 22188 22293 22860 22938 22965 22974 - * 23109 23124 23163 23208 23508 23520 23553 23658 23865 24114 - * 24219 24660 24699 24864 24948 25023 25308 25443 26004 26088 - * 26154 26550 26679 26838 27183 27258 27753 27795 27810 27834 - * 27960 28320 28380 28689 28710 28794 28854 28959 28980 29013 - * 29379 29889 30135 30345 30459 30714 30903 30963 31059 31083 - * (or any other 16-bit constants k for which both k*2^16-1 - * and k*2^15-1 are prime) */ - -#define RANDOM_CONST_A 18030 -#define RANDOM_CONST_B 29013 - -static unsigned int seed_x = 521288629; -static unsigned int seed_y = 362436069; - -/** - * cfs_rand - creates new seeds - * - * First it creates new seeds from the previous seeds. Then it generates a - * new pseudo random number for use. - * - * Returns a pseudo-random 32-bit integer - */ -unsigned int cfs_rand(void) -{ - seed_x = RANDOM_CONST_A * (seed_x & 65535) + (seed_x >> 16); - seed_y = RANDOM_CONST_B * (seed_y & 65535) + (seed_y >> 16); - - return ((seed_x << 16) + (seed_y & 65535)); -} -EXPORT_SYMBOL(cfs_rand); - -/** - * cfs_srand - sets the initial seed - * @seed1 : (seed_x) should have the most entropy in the low bits of the word - * @seed2 : (seed_y) should have the most entropy in the high bits of the word - * - * Replaces the original seeds with new values. Used to generate a new pseudo - * random numbers. 
- */ -void cfs_srand(unsigned int seed1, unsigned int seed2) -{ - if (seed1) - seed_x = seed1; /* use default seeds if parameter is 0 */ - if (seed2) - seed_y = seed2; -} -EXPORT_SYMBOL(cfs_srand); - -/** - * cfs_get_random_bytes - generate a bunch of random numbers - * @buf : buffer to fill with random numbers - * @size: size of passed in buffer - * - * Fills a buffer with random bytes - */ -void cfs_get_random_bytes(void *buf, int size) -{ - int *p = buf; - int rem, tmp; - - LASSERT(size >= 0); - - rem = min((int)((unsigned long)buf & (sizeof(int) - 1)), size); - if (rem) { - get_random_bytes(&tmp, sizeof(tmp)); - tmp ^= cfs_rand(); - memcpy(buf, &tmp, rem); - p = buf + rem; - size -= rem; - } - - while (size >= sizeof(int)) { - get_random_bytes(&tmp, sizeof(tmp)); - *p = cfs_rand() ^ tmp; - size -= sizeof(int); - p++; - } - buf = p; - if (size) { - get_random_bytes(&tmp, sizeof(tmp)); - tmp ^= cfs_rand(); - memcpy(buf, &tmp, size); - } -} -EXPORT_SYMBOL(cfs_get_random_bytes); diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c deleted file mode 100644 index 65c4f1ab0..000000000 --- a/drivers/staging/lustre/lustre/libcfs/tracefile.c +++ /dev/null @@ -1,1165 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - * - * libcfs/libcfs/tracefile.c - * - * Author: Zach Brown <zab@clusterfs.com> - * Author: Phil Schwan <phil@clusterfs.com> - */ - -#define DEBUG_SUBSYSTEM S_LNET -#define LUSTRE_TRACEFILE_PRIVATE -#include "tracefile.h" - -#include "../../include/linux/libcfs/libcfs.h" - -/* XXX move things up to the top, comment */ -union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned; - -char cfs_tracefile[TRACEFILE_NAME_SIZE]; -long long cfs_tracefile_size = CFS_TRACEFILE_SIZE; -static struct tracefiled_ctl trace_tctl; -static DEFINE_MUTEX(cfs_trace_thread_mutex); -static int thread_running; - -static atomic_t cfs_tage_allocated = ATOMIC_INIT(0); - -static void put_pages_on_tcd_daemon_list(struct page_collection *pc, - struct cfs_trace_cpu_data *tcd); - -static inline struct cfs_trace_page * -cfs_tage_from_list(struct list_head *list) -{ - return list_entry(list, struct cfs_trace_page, linkage); -} - -static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp) -{ - struct page *page; - struct cfs_trace_page *tage; - - /* My caller is trying to free memory */ - if (!in_interrupt() && memory_pressure_get()) - return NULL; - - /* - * Don't spam console with allocation failures: they will be reported - * by upper layer anyway. 
- */ - gfp |= __GFP_NOWARN; - page = alloc_page(gfp); - if (page == NULL) - return NULL; - - tage = kmalloc(sizeof(*tage), gfp); - if (tage == NULL) { - __free_page(page); - return NULL; - } - - tage->page = page; - atomic_inc(&cfs_tage_allocated); - return tage; -} - -static void cfs_tage_free(struct cfs_trace_page *tage) -{ - __LASSERT(tage != NULL); - __LASSERT(tage->page != NULL); - - __free_page(tage->page); - kfree(tage); - atomic_dec(&cfs_tage_allocated); -} - -static void cfs_tage_to_tail(struct cfs_trace_page *tage, - struct list_head *queue) -{ - __LASSERT(tage != NULL); - __LASSERT(queue != NULL); - - list_move_tail(&tage->linkage, queue); -} - -int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp, - struct list_head *stock) -{ - int i; - - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ - - for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES ; ++i) { - struct cfs_trace_page *tage; - - tage = cfs_tage_alloc(gfp); - if (tage == NULL) - break; - list_add_tail(&tage->linkage, stock); - } - return i; -} - -/* return a page that has 'len' bytes left at the end */ -static struct cfs_trace_page * -cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) -{ - struct cfs_trace_page *tage; - - if (tcd->tcd_cur_pages > 0) { - __LASSERT(!list_empty(&tcd->tcd_pages)); - tage = cfs_tage_from_list(tcd->tcd_pages.prev); - if (tage->used + len <= PAGE_CACHE_SIZE) - return tage; - } - - if (tcd->tcd_cur_pages < tcd->tcd_max_pages) { - if (tcd->tcd_cur_stock_pages > 0) { - tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev); - --tcd->tcd_cur_stock_pages; - list_del_init(&tage->linkage); - } else { - tage = cfs_tage_alloc(GFP_ATOMIC); - if (unlikely(tage == NULL)) { - if ((!memory_pressure_get() || - in_interrupt()) && printk_ratelimit()) - printk(KERN_WARNING - "cannot allocate a tage (%ld)\n", - tcd->tcd_cur_pages); - return NULL; - } - } - - 
tage->used = 0; - tage->cpu = smp_processor_id(); - tage->type = tcd->tcd_type; - list_add_tail(&tage->linkage, &tcd->tcd_pages); - tcd->tcd_cur_pages++; - - if (tcd->tcd_cur_pages > 8 && thread_running) { - struct tracefiled_ctl *tctl = &trace_tctl; - /* - * wake up tracefiled to process some pages. - */ - wake_up(&tctl->tctl_waitq); - } - return tage; - } - return NULL; -} - -static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd) -{ - int pgcount = tcd->tcd_cur_pages / 10; - struct page_collection pc; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ - - if (printk_ratelimit()) - printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n", - pgcount + 1, tcd->tcd_cur_pages); - - INIT_LIST_HEAD(&pc.pc_pages); - - list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) { - if (pgcount-- == 0) - break; - - list_move_tail(&tage->linkage, &pc.pc_pages); - tcd->tcd_cur_pages--; - } - put_pages_on_tcd_daemon_list(&pc, tcd); -} - -/* return a page that has 'len' bytes left at the end */ -static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd, - unsigned long len) -{ - struct cfs_trace_page *tage; - - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ - - if (len > PAGE_CACHE_SIZE) { - pr_err("cowardly refusing to write %lu bytes in a page\n", len); - return NULL; - } - - tage = cfs_trace_get_tage_try(tcd, len); - if (tage != NULL) - return tage; - if (thread_running) - cfs_tcd_shrink(tcd); - if (tcd->tcd_cur_pages > 0) { - tage = cfs_tage_from_list(tcd->tcd_pages.next); - tage->used = 0; - cfs_tage_to_tail(tage, &tcd->tcd_pages); - } - return tage; -} - -int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata, - const char *format, ...) 
-{ - va_list args; - int rc; - - va_start(args, format); - rc = libcfs_debug_vmsg2(msgdata, format, args, NULL); - va_end(args); - - return rc; -} -EXPORT_SYMBOL(libcfs_debug_msg); - -int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, - const char *format1, va_list args, - const char *format2, ...) -{ - struct cfs_trace_cpu_data *tcd = NULL; - struct ptldebug_header header = {0}; - struct cfs_trace_page *tage; - /* string_buf is used only if tcd != NULL, and is always set then */ - char *string_buf = NULL; - char *debug_buf; - int known_size; - int needed = 85; /* average message length */ - int max_nob; - va_list ap; - int depth; - int i; - int remain; - int mask = msgdata->msg_mask; - const char *file = kbasename(msgdata->msg_file); - struct cfs_debug_limit_state *cdls = msgdata->msg_cdls; - - tcd = cfs_trace_get_tcd(); - - /* cfs_trace_get_tcd() grabs a lock, which disables preemption and - * pins us to a particular CPU. This avoids an smp_processor_id() - * warning on Linux when debugging is enabled. */ - cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK()); - - if (tcd == NULL) /* arch may not log in IRQ context */ - goto console; - - if (tcd->tcd_cur_pages == 0) - header.ph_flags |= PH_FLAG_FIRST_RECORD; - - if (tcd->tcd_shutting_down) { - cfs_trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - depth = __current_nesting_level(); - known_size = strlen(file) + 1 + depth; - if (msgdata->msg_fn) - known_size += strlen(msgdata->msg_fn) + 1; - - if (libcfs_debug_binary) - known_size += sizeof(header); - - /*/ - * '2' used because vsnprintf return real size required for output - * _without_ terminating NULL. - * if needed is to small for this format. 
- */ - for (i = 0; i < 2; i++) { - tage = cfs_trace_get_tage(tcd, needed + known_size + 1); - if (tage == NULL) { - if (needed + known_size > PAGE_CACHE_SIZE) - mask |= D_ERROR; - - cfs_trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - string_buf = (char *)page_address(tage->page) + - tage->used + known_size; - - max_nob = PAGE_CACHE_SIZE - tage->used - known_size; - if (max_nob <= 0) { - printk(KERN_EMERG "negative max_nob: %d\n", - max_nob); - mask |= D_ERROR; - cfs_trace_put_tcd(tcd); - tcd = NULL; - goto console; - } - - needed = 0; - if (format1) { - va_copy(ap, args); - needed = vsnprintf(string_buf, max_nob, format1, ap); - va_end(ap); - } - - if (format2) { - remain = max_nob - needed; - if (remain < 0) - remain = 0; - - va_start(ap, format2); - needed += vsnprintf(string_buf + needed, remain, - format2, ap); - va_end(ap); - } - - if (needed < max_nob) /* well. printing ok.. */ - break; - } - - if (*(string_buf+needed-1) != '\n') - printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n", - file, msgdata->msg_line, msgdata->msg_fn); - - header.ph_len = known_size + needed; - debug_buf = (char *)page_address(tage->page) + tage->used; - - if (libcfs_debug_binary) { - memcpy(debug_buf, &header, sizeof(header)); - tage->used += sizeof(header); - debug_buf += sizeof(header); - } - - /* indent message according to the nesting level */ - while (depth-- > 0) { - *(debug_buf++) = '.'; - ++tage->used; - } - - strcpy(debug_buf, file); - tage->used += strlen(file) + 1; - debug_buf += strlen(file) + 1; - - if (msgdata->msg_fn) { - strcpy(debug_buf, msgdata->msg_fn); - tage->used += strlen(msgdata->msg_fn) + 1; - debug_buf += strlen(msgdata->msg_fn) + 1; - } - - __LASSERT(debug_buf == string_buf); - - tage->used += needed; - __LASSERT (tage->used <= PAGE_CACHE_SIZE); - -console: - if ((mask & libcfs_printk) == 0) { - /* no console output requested */ - if (tcd != NULL) - cfs_trace_put_tcd(tcd); - return 1; - } - - if (cdls != NULL) { - if 
(libcfs_console_ratelimit && - cdls->cdls_next != 0 && /* not first time ever */ - !cfs_time_after(cfs_time_current(), cdls->cdls_next)) { - /* skipping a console message */ - cdls->cdls_count++; - if (tcd != NULL) - cfs_trace_put_tcd(tcd); - return 1; - } - - if (cfs_time_after(cfs_time_current(), cdls->cdls_next + - libcfs_console_max_delay - + cfs_time_seconds(10))) { - /* last timeout was a long time ago */ - cdls->cdls_delay /= libcfs_console_backoff * 4; - } else { - cdls->cdls_delay *= libcfs_console_backoff; - } - - if (cdls->cdls_delay < libcfs_console_min_delay) - cdls->cdls_delay = libcfs_console_min_delay; - else if (cdls->cdls_delay > libcfs_console_max_delay) - cdls->cdls_delay = libcfs_console_max_delay; - - /* ensure cdls_next is never zero after it's been seen */ - cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1; - } - - if (tcd != NULL) { - cfs_print_to_console(&header, mask, string_buf, needed, file, - msgdata->msg_fn); - cfs_trace_put_tcd(tcd); - } else { - string_buf = cfs_trace_get_console_buffer(); - - needed = 0; - if (format1 != NULL) { - va_copy(ap, args); - needed = vsnprintf(string_buf, - CFS_TRACE_CONSOLE_BUFFER_SIZE, - format1, ap); - va_end(ap); - } - if (format2 != NULL) { - remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed; - if (remain > 0) { - va_start(ap, format2); - needed += vsnprintf(string_buf+needed, remain, - format2, ap); - va_end(ap); - } - } - cfs_print_to_console(&header, mask, - string_buf, needed, file, msgdata->msg_fn); - - put_cpu(); - } - - if (cdls != NULL && cdls->cdls_count != 0) { - string_buf = cfs_trace_get_console_buffer(); - - needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, - "Skipped %d previous similar message%s\n", - cdls->cdls_count, - (cdls->cdls_count > 1) ? 
"s" : ""); - - cfs_print_to_console(&header, mask, - string_buf, needed, file, msgdata->msg_fn); - - put_cpu(); - cdls->cdls_count = 0; - } - - return 0; -} -EXPORT_SYMBOL(libcfs_debug_vmsg2); - -void -cfs_trace_assertion_failed(const char *str, - struct libcfs_debug_msg_data *msgdata) -{ - struct ptldebug_header hdr; - - libcfs_panic_in_progress = 1; - libcfs_catastrophe = 1; - mb(); - - cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK()); - - cfs_print_to_console(&hdr, D_EMERG, str, strlen(str), - msgdata->msg_file, msgdata->msg_fn); - - panic("Lustre debug assertion failure\n"); - - /* not reached */ -} - -static void -panic_collect_pages(struct page_collection *pc) -{ - /* Do the collect_pages job on a single CPU: assumes that all other - * CPUs have been stopped during a panic. If this isn't true for some - * arch, this will have to be implemented separately in each arch. */ - int i; - int j; - struct cfs_trace_cpu_data *tcd; - - INIT_LIST_HEAD(&pc->pc_pages); - - cfs_tcd_for_each(tcd, i, j) { - list_splice_init(&tcd->tcd_pages, &pc->pc_pages); - tcd->tcd_cur_pages = 0; - - if (pc->pc_want_daemon_pages) { - list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); - tcd->tcd_cur_daemon_pages = 0; - } - } -} - -static void collect_pages_on_all_cpus(struct page_collection *pc) -{ - struct cfs_trace_cpu_data *tcd; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - list_splice_init(&tcd->tcd_pages, &pc->pc_pages); - tcd->tcd_cur_pages = 0; - if (pc->pc_want_daemon_pages) { - list_splice_init(&tcd->tcd_daemon_pages, - &pc->pc_pages); - tcd->tcd_cur_daemon_pages = 0; - } - } - } -} - -static void collect_pages(struct page_collection *pc) -{ - INIT_LIST_HEAD(&pc->pc_pages); - - if (libcfs_panic_in_progress) - panic_collect_pages(pc); - else - collect_pages_on_all_cpus(pc); -} - -static void put_pages_back_on_all_cpus(struct page_collection *pc) -{ - struct cfs_trace_cpu_data *tcd; - struct list_head *cur_head; - struct 
cfs_trace_page *tage; - struct cfs_trace_page *tmp; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - cur_head = tcd->tcd_pages.next; - - list_for_each_entry_safe(tage, tmp, &pc->pc_pages, - linkage) { - - __LASSERT_TAGE_INVARIANT(tage); - - if (tage->cpu != cpu || tage->type != i) - continue; - - cfs_tage_to_tail(tage, cur_head); - tcd->tcd_cur_pages++; - } - } - } -} - -static void put_pages_back(struct page_collection *pc) -{ - if (!libcfs_panic_in_progress) - put_pages_back_on_all_cpus(pc); -} - -/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that - * we have a good amount of data at all times for dumping during an LBUG, even - * if we have been steadily writing (and otherwise discarding) pages via the - * debug daemon. */ -static void put_pages_on_tcd_daemon_list(struct page_collection *pc, - struct cfs_trace_cpu_data *tcd) -{ - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - - list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) { - - __LASSERT_TAGE_INVARIANT(tage); - - if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type) - continue; - - cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages); - tcd->tcd_cur_daemon_pages++; - - if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) { - struct cfs_trace_page *victim; - - __LASSERT(!list_empty(&tcd->tcd_daemon_pages)); - victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next); - - __LASSERT_TAGE_INVARIANT(victim); - - list_del(&victim->linkage); - cfs_tage_free(victim); - tcd->tcd_cur_daemon_pages--; - } - } -} - -static void put_pages_on_daemon_list(struct page_collection *pc) -{ - struct cfs_trace_cpu_data *tcd; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) - put_pages_on_tcd_daemon_list(pc, tcd); - } -} - -void cfs_trace_debug_print(void) -{ - struct page_collection pc; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - - pc.pc_want_daemon_pages = 1; - 
collect_pages(&pc); - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { - char *p, *file, *fn; - struct page *page; - - __LASSERT_TAGE_INVARIANT(tage); - - page = tage->page; - p = page_address(page); - while (p < ((char *)page_address(page) + tage->used)) { - struct ptldebug_header *hdr; - int len; - - hdr = (void *)p; - p += sizeof(*hdr); - file = p; - p += strlen(file) + 1; - fn = p; - p += strlen(fn) + 1; - len = hdr->ph_len - (int)(p - (char *)hdr); - - cfs_print_to_console(hdr, D_EMERG, p, len, file, fn); - - p += len; - } - - list_del(&tage->linkage); - cfs_tage_free(tage); - } -} - -int cfs_tracefile_dump_all_pages(char *filename) -{ - struct page_collection pc; - struct file *filp; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - char *buf; - int rc; - - DECL_MMSPACE; - - cfs_tracefile_write_lock(); - - filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600); - if (IS_ERR(filp)) { - rc = PTR_ERR(filp); - filp = NULL; - pr_err("LustreError: can't open %s for dump: rc %d\n", - filename, rc); - goto out; - } - - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - if (list_empty(&pc.pc_pages)) { - rc = 0; - goto close; - } - - /* ok, for now, just write the pages. 
in the future we'll be building - * iobufs with the pages and calling generic_direct_IO */ - MMSPACE_OPEN; - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { - - __LASSERT_TAGE_INVARIANT(tage); - - buf = kmap(tage->page); - rc = vfs_write(filp, (__force const char __user *)buf, - tage->used, &filp->f_pos); - kunmap(tage->page); - - if (rc != (int)tage->used) { - printk(KERN_WARNING "wanted to write %u but wrote %d\n", - tage->used, rc); - put_pages_back(&pc); - __LASSERT(list_empty(&pc.pc_pages)); - break; - } - list_del(&tage->linkage); - cfs_tage_free(tage); - } - MMSPACE_CLOSE; - rc = vfs_fsync(filp, 1); - if (rc) - pr_err("sync returns %d\n", rc); -close: - filp_close(filp, NULL); -out: - cfs_tracefile_write_unlock(); - return rc; -} - -void cfs_trace_flush_pages(void) -{ - struct page_collection pc; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - - pc.pc_want_daemon_pages = 1; - collect_pages(&pc); - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) { - - __LASSERT_TAGE_INVARIANT(tage); - - list_del(&tage->linkage); - cfs_tage_free(tage); - } -} - -int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, - const char __user *usr_buffer, int usr_buffer_nob) -{ - int nob; - - if (usr_buffer_nob > knl_buffer_nob) - return -EOVERFLOW; - - if (copy_from_user((void *)knl_buffer, - usr_buffer, usr_buffer_nob)) - return -EFAULT; - - nob = strnlen(knl_buffer, usr_buffer_nob); - while (nob-- >= 0) /* strip trailing whitespace */ - if (!isspace(knl_buffer[nob])) - break; - - if (nob < 0) /* empty string */ - return -EINVAL; - - if (nob == knl_buffer_nob) /* no space to terminate */ - return -EOVERFLOW; - - knl_buffer[nob + 1] = 0; /* terminate */ - return 0; -} -EXPORT_SYMBOL(cfs_trace_copyin_string); - -int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, - const char *knl_buffer, char *append) -{ - /* NB if 'append' != NULL, it's a single character to append to the - * copied out string - usually "\n", 
for /proc entries and "" (i.e. a - * terminating zero byte) for sysctl entries */ - int nob = strlen(knl_buffer); - - if (nob > usr_buffer_nob) - nob = usr_buffer_nob; - - if (copy_to_user(usr_buffer, knl_buffer, nob)) - return -EFAULT; - - if (append != NULL && nob < usr_buffer_nob) { - if (copy_to_user(usr_buffer + nob, append, 1)) - return -EFAULT; - - nob++; - } - - return nob; -} -EXPORT_SYMBOL(cfs_trace_copyout_string); - -int cfs_trace_allocate_string_buffer(char **str, int nob) -{ - if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */ - return -EINVAL; - - *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); - if (*str == NULL) - return -ENOMEM; - - return 0; -} - -int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob) -{ - char *str; - int rc; - - rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc != 0) - return rc; - - rc = cfs_trace_copyin_string(str, usr_str_nob + 1, - usr_str, usr_str_nob); - if (rc != 0) - goto out; - - if (str[0] != '/') { - rc = -EINVAL; - goto out; - } - rc = cfs_tracefile_dump_all_pages(str); -out: - kfree(str); - return rc; -} - -int cfs_trace_daemon_command(char *str) -{ - int rc = 0; - - cfs_tracefile_write_lock(); - - if (strcmp(str, "stop") == 0) { - cfs_tracefile_write_unlock(); - cfs_trace_stop_thread(); - cfs_tracefile_write_lock(); - memset(cfs_tracefile, 0, sizeof(cfs_tracefile)); - - } else if (strncmp(str, "size=", 5) == 0) { - cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0); - if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480) - cfs_tracefile_size = CFS_TRACEFILE_SIZE; - else - cfs_tracefile_size <<= 20; - - } else if (strlen(str) >= sizeof(cfs_tracefile)) { - rc = -ENAMETOOLONG; - } else if (str[0] != '/') { - rc = -EINVAL; - } else { - strcpy(cfs_tracefile, str); - - printk(KERN_INFO - "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n", - cfs_tracefile, - (long)(cfs_tracefile_size >> 10)); - - cfs_trace_start_thread(); - } - - 
cfs_tracefile_write_unlock(); - return rc; -} - -int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob) -{ - char *str; - int rc; - - rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1); - if (rc != 0) - return rc; - - rc = cfs_trace_copyin_string(str, usr_str_nob + 1, - usr_str, usr_str_nob); - if (rc == 0) - rc = cfs_trace_daemon_command(str); - - kfree(str); - return rc; -} - -int cfs_trace_set_debug_mb(int mb) -{ - int i; - int j; - int pages; - int limit = cfs_trace_max_debug_mb(); - struct cfs_trace_cpu_data *tcd; - - if (mb < num_possible_cpus()) { - printk(KERN_WARNING - "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n", - mb, num_possible_cpus()); - mb = num_possible_cpus(); - } - - if (mb > limit) { - printk(KERN_WARNING - "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n", - mb, limit); - mb = limit; - } - - mb /= num_possible_cpus(); - pages = mb << (20 - PAGE_CACHE_SHIFT); - - cfs_tracefile_write_lock(); - - cfs_tcd_for_each(tcd, i, j) - tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100; - - cfs_tracefile_write_unlock(); - - return 0; -} - -int cfs_trace_get_debug_mb(void) -{ - int i; - int j; - struct cfs_trace_cpu_data *tcd; - int total_pages = 0; - - cfs_tracefile_read_lock(); - - cfs_tcd_for_each(tcd, i, j) - total_pages += tcd->tcd_max_pages; - - cfs_tracefile_read_unlock(); - - return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1; -} - -static int tracefiled(void *arg) -{ - struct page_collection pc; - struct tracefiled_ctl *tctl = arg; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - struct file *filp; - char *buf; - int last_loop = 0; - int rc; - - DECL_MMSPACE; - - /* we're started late enough that we pick up init's fs context */ - /* this is so broken in uml? what on earth is going on? 
*/ - - complete(&tctl->tctl_start); - - while (1) { - wait_queue_t __wait; - - pc.pc_want_daemon_pages = 0; - collect_pages(&pc); - if (list_empty(&pc.pc_pages)) - goto end_loop; - - filp = NULL; - cfs_tracefile_read_lock(); - if (cfs_tracefile[0] != 0) { - filp = filp_open(cfs_tracefile, - O_CREAT | O_RDWR | O_LARGEFILE, - 0600); - if (IS_ERR(filp)) { - rc = PTR_ERR(filp); - filp = NULL; - printk(KERN_WARNING "couldn't open %s: %d\n", - cfs_tracefile, rc); - } - } - cfs_tracefile_read_unlock(); - if (filp == NULL) { - put_pages_on_daemon_list(&pc); - __LASSERT(list_empty(&pc.pc_pages)); - goto end_loop; - } - - MMSPACE_OPEN; - - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, - linkage) { - static loff_t f_pos; - - __LASSERT_TAGE_INVARIANT(tage); - - if (f_pos >= (off_t)cfs_tracefile_size) - f_pos = 0; - else if (f_pos > i_size_read(file_inode(filp))) - f_pos = i_size_read(file_inode(filp)); - - buf = kmap(tage->page); - rc = vfs_write(filp, (__force const char __user *)buf, - tage->used, &f_pos); - kunmap(tage->page); - - if (rc != (int)tage->used) { - printk(KERN_WARNING "wanted to write %u but wrote %d\n", - tage->used, rc); - put_pages_back(&pc); - __LASSERT(list_empty(&pc.pc_pages)); - break; - } - } - MMSPACE_CLOSE; - - filp_close(filp, NULL); - put_pages_on_daemon_list(&pc); - if (!list_empty(&pc.pc_pages)) { - int i; - - printk(KERN_ALERT "Lustre: trace pages aren't empty\n"); - pr_err("total cpus(%d): ", - num_possible_cpus()); - for (i = 0; i < num_possible_cpus(); i++) - if (cpu_online(i)) - pr_cont("%d(on) ", i); - else - pr_cont("%d(off) ", i); - pr_cont("\n"); - - i = 0; - list_for_each_entry_safe(tage, tmp, &pc.pc_pages, - linkage) - pr_err("page %d belongs to cpu %d\n", - ++i, tage->cpu); - pr_err("There are %d pages unwritten\n", i); - } - __LASSERT(list_empty(&pc.pc_pages)); -end_loop: - if (atomic_read(&tctl->tctl_shutdown)) { - if (last_loop == 0) { - last_loop = 1; - continue; - } else { - break; - } - } - init_waitqueue_entry(&__wait, 
current); - add_wait_queue(&tctl->tctl_waitq, &__wait); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1)); - remove_wait_queue(&tctl->tctl_waitq, &__wait); - } - complete(&tctl->tctl_stop); - return 0; -} - -int cfs_trace_start_thread(void) -{ - struct tracefiled_ctl *tctl = &trace_tctl; - int rc = 0; - - mutex_lock(&cfs_trace_thread_mutex); - if (thread_running) - goto out; - - init_completion(&tctl->tctl_start); - init_completion(&tctl->tctl_stop); - init_waitqueue_head(&tctl->tctl_waitq); - atomic_set(&tctl->tctl_shutdown, 0); - - if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) { - rc = -ECHILD; - goto out; - } - - wait_for_completion(&tctl->tctl_start); - thread_running = 1; -out: - mutex_unlock(&cfs_trace_thread_mutex); - return rc; -} - -void cfs_trace_stop_thread(void) -{ - struct tracefiled_ctl *tctl = &trace_tctl; - - mutex_lock(&cfs_trace_thread_mutex); - if (thread_running) { - printk(KERN_INFO - "Lustre: shutting down debug daemon thread...\n"); - atomic_set(&tctl->tctl_shutdown, 1); - wait_for_completion(&tctl->tctl_stop); - thread_running = 0; - } - mutex_unlock(&cfs_trace_thread_mutex); -} - -int cfs_tracefile_init(int max_pages) -{ - struct cfs_trace_cpu_data *tcd; - int i; - int j; - int rc; - int factor; - - rc = cfs_tracefile_init_arch(); - if (rc != 0) - return rc; - - cfs_tcd_for_each(tcd, i, j) { - /* tcd_pages_factor is initialized int tracefile_init_arch. 
*/ - factor = tcd->tcd_pages_factor; - INIT_LIST_HEAD(&tcd->tcd_pages); - INIT_LIST_HEAD(&tcd->tcd_stock_pages); - INIT_LIST_HEAD(&tcd->tcd_daemon_pages); - tcd->tcd_cur_pages = 0; - tcd->tcd_cur_stock_pages = 0; - tcd->tcd_cur_daemon_pages = 0; - tcd->tcd_max_pages = (max_pages * factor) / 100; - LASSERT(tcd->tcd_max_pages > 0); - tcd->tcd_shutting_down = 0; - } - - return 0; -} - -static void trace_cleanup_on_all_cpus(void) -{ - struct cfs_trace_cpu_data *tcd; - struct cfs_trace_page *tage; - struct cfs_trace_page *tmp; - int i, cpu; - - for_each_possible_cpu(cpu) { - cfs_tcd_for_each_type_lock(tcd, i, cpu) { - tcd->tcd_shutting_down = 1; - - list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, - linkage) { - __LASSERT_TAGE_INVARIANT(tage); - - list_del(&tage->linkage); - cfs_tage_free(tage); - } - - tcd->tcd_cur_pages = 0; - } - } -} - -static void cfs_trace_cleanup(void) -{ - struct page_collection pc; - - INIT_LIST_HEAD(&pc.pc_pages); - - trace_cleanup_on_all_cpus(); - - cfs_tracefile_fini_arch(); -} - -void cfs_tracefile_exit(void) -{ - cfs_trace_stop_thread(); - cfs_trace_cleanup(); -} diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.h b/drivers/staging/lustre/lustre/libcfs/tracefile.h deleted file mode 100644 index 7bf1471a5..000000000 --- a/drivers/staging/lustre/lustre/libcfs/tracefile.h +++ /dev/null @@ -1,319 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). 
- * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#ifndef __LIBCFS_TRACEFILE_H__ -#define __LIBCFS_TRACEFILE_H__ - -#include "../../include/linux/libcfs/libcfs.h" - -typedef enum { - CFS_TCD_TYPE_PROC = 0, - CFS_TCD_TYPE_SOFTIRQ, - CFS_TCD_TYPE_IRQ, - CFS_TCD_TYPE_MAX -} cfs_trace_buf_type_t; - -/* trace file lock routines */ - -#define TRACEFILE_NAME_SIZE 1024 -extern char cfs_tracefile[TRACEFILE_NAME_SIZE]; -extern long long cfs_tracefile_size; - -void libcfs_run_debug_log_upcall(char *file); - -int cfs_tracefile_init_arch(void); -void cfs_tracefile_fini_arch(void); - -void cfs_tracefile_read_lock(void); -void cfs_tracefile_read_unlock(void); -void cfs_tracefile_write_lock(void); -void cfs_tracefile_write_unlock(void); - -int cfs_tracefile_dump_all_pages(char *filename); -void cfs_trace_debug_print(void); -void cfs_trace_flush_pages(void); -int cfs_trace_start_thread(void); -void cfs_trace_stop_thread(void); -int cfs_tracefile_init(int max_pages); -void cfs_tracefile_exit(void); - -int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, - const char __user *usr_buffer, int usr_buffer_nob); -int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob, - const char *knl_str, char *append); -int cfs_trace_allocate_string_buffer(char **str, int nob); -int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int 
usr_str_nob); -int cfs_trace_daemon_command(char *str); -int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob); -int cfs_trace_set_debug_mb(int mb); -int cfs_trace_get_debug_mb(void); - -void libcfs_debug_dumplog_internal(void *arg); -void libcfs_register_panic_notifier(void); -void libcfs_unregister_panic_notifier(void); -extern int libcfs_panic_in_progress; -int cfs_trace_max_debug_mb(void); - -#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) -#define TCD_STOCK_PAGES (TCD_MAX_PAGES) -#define CFS_TRACEFILE_SIZE (500 << 20) - -#ifdef LUSTRE_TRACEFILE_PRIVATE - -/* - * Private declare for tracefile - */ -#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) -#define TCD_STOCK_PAGES (TCD_MAX_PAGES) - -#define CFS_TRACEFILE_SIZE (500 << 20) - -/* Size of a buffer for sprinting console messages if we can't get a page - * from system */ -#define CFS_TRACE_CONSOLE_BUFFER_SIZE 1024 - -union cfs_trace_data_union { - struct cfs_trace_cpu_data { - /* - * Even though this structure is meant to be per-CPU, locking - * is needed because in some places the data may be accessed - * from other CPUs. This lock is directly used in trace_get_tcd - * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and - * tcd_for_each_type_lock - */ - spinlock_t tcd_lock; - unsigned long tcd_lock_flags; - - /* - * pages with trace records not yet processed by tracefiled. - */ - struct list_head tcd_pages; - /* number of pages on ->tcd_pages */ - unsigned long tcd_cur_pages; - - /* - * pages with trace records already processed by - * tracefiled. These pages are kept in memory, so that some - * portion of log can be written in the event of LBUG. This - * list is maintained in LRU order. - * - * Pages are moved to ->tcd_daemon_pages by tracefiled() - * (put_pages_on_daemon_list()). LRU pages from this list are - * discarded when list grows too large. 
- */ - struct list_head tcd_daemon_pages; - /* number of pages on ->tcd_daemon_pages */ - unsigned long tcd_cur_daemon_pages; - - /* - * Maximal number of pages allowed on ->tcd_pages and - * ->tcd_daemon_pages each. - * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current - * implementation. - */ - unsigned long tcd_max_pages; - - /* - * preallocated pages to write trace records into. Pages from - * ->tcd_stock_pages are moved to ->tcd_pages by - * portals_debug_msg(). - * - * This list is necessary, because on some platforms it's - * impossible to perform efficient atomic page allocation in a - * non-blockable context. - * - * Such platforms fill ->tcd_stock_pages "on occasion", when - * tracing code is entered in blockable context. - * - * trace_get_tage_try() tries to get a page from - * ->tcd_stock_pages first and resorts to atomic page - * allocation only if this queue is empty. ->tcd_stock_pages - * is replenished when tracing code is entered in blocking - * context (darwin-tracefile.c:trace_get_tcd()). We try to - * maintain TCD_STOCK_PAGES (40 by default) pages in this - * queue. Atomic allocation is only required if more than - * TCD_STOCK_PAGES pagesful are consumed by trace records all - * emitted in non-blocking contexts. Which is quite unlikely. - */ - struct list_head tcd_stock_pages; - /* number of pages on ->tcd_stock_pages */ - unsigned long tcd_cur_stock_pages; - - unsigned short tcd_shutting_down; - unsigned short tcd_cpu; - unsigned short tcd_type; - /* The factors to share debug memory. 
*/ - unsigned short tcd_pages_factor; - } tcd; - char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))]; -}; - -#define TCD_MAX_TYPES 8 -extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS]; - -#define cfs_tcd_for_each(tcd, i, j) \ - for (i = 0; cfs_trace_data[i] != NULL; i++) \ - for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \ - j < num_possible_cpus(); \ - j++, (tcd) = &(*cfs_trace_data[i])[j].tcd) - -#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \ - for (i = 0; cfs_trace_data[i] && \ - (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \ - cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++) - -/* XXX nikita: this declaration is internal to tracefile.c and should probably - * be moved there */ -struct page_collection { - struct list_head pc_pages; - /* - * if this flag is set, collect_pages() will spill both - * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise, - * only ->tcd_pages are spilled. - */ - int pc_want_daemon_pages; -}; - -/* XXX nikita: this declaration is internal to tracefile.c and should probably - * be moved there */ -struct tracefiled_ctl { - struct completion tctl_start; - struct completion tctl_stop; - wait_queue_head_t tctl_waitq; - pid_t tctl_pid; - atomic_t tctl_shutdown; -}; - -/* - * small data-structure for each page owned by tracefiled. 
- */ -/* XXX nikita: this declaration is internal to tracefile.c and should probably - * be moved there */ -struct cfs_trace_page { - /* - * page itself - */ - struct page *page; - /* - * linkage into one of the lists in trace_data_union or - * page_collection - */ - struct list_head linkage; - /* - * number of bytes used within this page - */ - unsigned int used; - /* - * cpu that owns this page - */ - unsigned short cpu; - /* - * type(context) of this page - */ - unsigned short type; -}; - -void cfs_set_ptldebug_header(struct ptldebug_header *header, - struct libcfs_debug_msg_data *m, - unsigned long stack); -void cfs_print_to_console(struct ptldebug_header *hdr, int mask, - const char *buf, int len, const char *file, - const char *fn); - -int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking); -void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking); - -extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX]; -cfs_trace_buf_type_t cfs_trace_buf_idx_get(void); - -static inline char * -cfs_trace_get_console_buffer(void) -{ - unsigned int i = get_cpu(); - unsigned int j = cfs_trace_buf_idx_get(); - - return cfs_trace_console_buffers[i][j]; -} - -static inline struct cfs_trace_cpu_data * -cfs_trace_get_tcd(void) -{ - struct cfs_trace_cpu_data *tcd = - &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd; - - cfs_trace_lock_tcd(tcd, 0); - - return tcd; -} - -static inline void -cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd) -{ - cfs_trace_unlock_tcd(tcd, 0); - - put_cpu(); -} - -int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp, - struct list_head *stock); - -int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd, - struct cfs_trace_page *tage); - -void cfs_trace_assertion_failed(const char *str, - struct libcfs_debug_msg_data *m); - -/* ASSERTION that is safe to use within the debug system */ -#define __LASSERT(cond) \ -do { \ - if (unlikely(!(cond))) { \ - LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, 
NULL); \ - cfs_trace_assertion_failed("ASSERTION("#cond") failed", \ - &msgdata); \ - } \ -} while (0) - -#define __LASSERT_TAGE_INVARIANT(tage) \ -do { \ - __LASSERT(tage != NULL); \ - __LASSERT(tage->page != NULL); \ - __LASSERT(tage->used <= PAGE_CACHE_SIZE); \ - __LASSERT(page_count(tage->page) > 0); \ -} while (0) - -#endif /* LUSTRE_TRACEFILE_PRIVATE */ - -#endif /* __LIBCFS_TRACEFILE_H__ */ diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c deleted file mode 100644 index 60bb88a00..000000000 --- a/drivers/staging/lustre/lustre/libcfs/workitem.c +++ /dev/null @@ -1,465 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- * - * libcfs/libcfs/workitem.c - * - * Author: Isaac Huang <isaac@clusterfs.com> - * Liang Zhen <zhen.liang@sun.com> - */ - -#define DEBUG_SUBSYSTEM S_LNET - -#include "../../include/linux/libcfs/libcfs.h" - -#define CFS_WS_NAME_LEN 16 - -struct cfs_wi_sched { - struct list_head ws_list; /* chain on global list */ - /** serialised workitems */ - spinlock_t ws_lock; - /** where schedulers sleep */ - wait_queue_head_t ws_waitq; - /** concurrent workitems */ - struct list_head ws_runq; - /** rescheduled running-workitems, a workitem can be rescheduled - * while running in wi_action(), but we don't to execute it again - * unless it returns from wi_action(), so we put it on ws_rerunq - * while rescheduling, and move it to runq after it returns - * from wi_action() */ - struct list_head ws_rerunq; - /** CPT-table for this scheduler */ - struct cfs_cpt_table *ws_cptab; - /** CPT id for affinity */ - int ws_cpt; - /** number of scheduled workitems */ - int ws_nscheduled; - /** started scheduler thread, protected by cfs_wi_data::wi_glock */ - unsigned int ws_nthreads:30; - /** shutting down, protected by cfs_wi_data::wi_glock */ - unsigned int ws_stopping:1; - /** serialize starting thread, protected by cfs_wi_data::wi_glock */ - unsigned int ws_starting:1; - /** scheduler name */ - char ws_name[CFS_WS_NAME_LEN]; -}; - -static struct cfs_workitem_data { - /** serialize */ - spinlock_t wi_glock; - /** list of all schedulers */ - struct list_head wi_scheds; - /** WI module is initialized */ - int wi_init; - /** shutting down the whole WI module */ - int wi_stopping; -} cfs_wi_data; - -static inline int -cfs_wi_sched_cansleep(struct cfs_wi_sched *sched) -{ - spin_lock(&sched->ws_lock); - if (sched->ws_stopping) { - spin_unlock(&sched->ws_lock); - return 0; - } - - if (!list_empty(&sched->ws_runq)) { - spin_unlock(&sched->ws_lock); - return 0; - } - spin_unlock(&sched->ws_lock); - return 1; -} - -/* XXX: - * 0. it only works when called from wi->wi_action. - * 1. 
when it returns no one shall try to schedule the workitem. - */ -void -cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi) -{ - LASSERT(!in_interrupt()); /* because we use plain spinlock */ - LASSERT(!sched->ws_stopping); - - spin_lock(&sched->ws_lock); - - LASSERT(wi->wi_running); - if (wi->wi_scheduled) { /* cancel pending schedules */ - LASSERT(!list_empty(&wi->wi_list)); - list_del_init(&wi->wi_list); - - LASSERT(sched->ws_nscheduled > 0); - sched->ws_nscheduled--; - } - - LASSERT(list_empty(&wi->wi_list)); - - wi->wi_scheduled = 1; /* LBUG future schedule attempts */ - spin_unlock(&sched->ws_lock); - - return; -} -EXPORT_SYMBOL(cfs_wi_exit); - -/** - * cancel schedule request of workitem \a wi - */ -int -cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) -{ - int rc; - - LASSERT(!in_interrupt()); /* because we use plain spinlock */ - LASSERT(!sched->ws_stopping); - - /* - * return 0 if it's running already, otherwise return 1, which - * means the workitem will not be scheduled and will not have - * any race with wi_action. - */ - spin_lock(&sched->ws_lock); - - rc = !(wi->wi_running); - - if (wi->wi_scheduled) { /* cancel pending schedules */ - LASSERT(!list_empty(&wi->wi_list)); - list_del_init(&wi->wi_list); - - LASSERT(sched->ws_nscheduled > 0); - sched->ws_nscheduled--; - - wi->wi_scheduled = 0; - } - - LASSERT (list_empty(&wi->wi_list)); - - spin_unlock(&sched->ws_lock); - return rc; -} -EXPORT_SYMBOL(cfs_wi_deschedule); - -/* - * Workitem scheduled with (serial == 1) is strictly serialised not only with - * itself, but also with others scheduled this way. - * - * Now there's only one static serialised queue, but in the future more might - * be added, and even dynamic creation of serialised queues might be supported. 
- */ -void -cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi) -{ - LASSERT(!in_interrupt()); /* because we use plain spinlock */ - LASSERT(!sched->ws_stopping); - - spin_lock(&sched->ws_lock); - - if (!wi->wi_scheduled) { - LASSERT (list_empty(&wi->wi_list)); - - wi->wi_scheduled = 1; - sched->ws_nscheduled++; - if (!wi->wi_running) { - list_add_tail(&wi->wi_list, &sched->ws_runq); - wake_up(&sched->ws_waitq); - } else { - list_add(&wi->wi_list, &sched->ws_rerunq); - } - } - - LASSERT (!list_empty(&wi->wi_list)); - spin_unlock(&sched->ws_lock); - return; -} -EXPORT_SYMBOL(cfs_wi_schedule); - -static int -cfs_wi_scheduler (void *arg) -{ - struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg; - - cfs_block_allsigs(); - - /* CPT affinity scheduler? */ - if (sched->ws_cptab != NULL) - if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0) - CWARN("Failed to bind %s on CPT %d\n", - sched->ws_name, sched->ws_cpt); - - spin_lock(&cfs_wi_data.wi_glock); - - LASSERT(sched->ws_starting == 1); - sched->ws_starting--; - sched->ws_nthreads++; - - spin_unlock(&cfs_wi_data.wi_glock); - - spin_lock(&sched->ws_lock); - - while (!sched->ws_stopping) { - int nloops = 0; - int rc; - cfs_workitem_t *wi; - - while (!list_empty(&sched->ws_runq) && - nloops < CFS_WI_RESCHED) { - wi = list_entry(sched->ws_runq.next, - cfs_workitem_t, wi_list); - LASSERT(wi->wi_scheduled && !wi->wi_running); - - list_del_init(&wi->wi_list); - - LASSERT(sched->ws_nscheduled > 0); - sched->ws_nscheduled--; - - wi->wi_running = 1; - wi->wi_scheduled = 0; - - spin_unlock(&sched->ws_lock); - nloops++; - - rc = (*wi->wi_action) (wi); - - spin_lock(&sched->ws_lock); - if (rc != 0) /* WI should be dead, even be freed! 
*/ - continue; - - wi->wi_running = 0; - if (list_empty(&wi->wi_list)) - continue; - - LASSERT(wi->wi_scheduled); - /* wi is rescheduled, should be on rerunq now, we - * move it to runq so it can run action now */ - list_move_tail(&wi->wi_list, &sched->ws_runq); - } - - if (!list_empty(&sched->ws_runq)) { - spin_unlock(&sched->ws_lock); - /* don't sleep because some workitems still - * expect me to come back soon */ - cond_resched(); - spin_lock(&sched->ws_lock); - continue; - } - - spin_unlock(&sched->ws_lock); - rc = wait_event_interruptible_exclusive(sched->ws_waitq, - !cfs_wi_sched_cansleep(sched)); - spin_lock(&sched->ws_lock); - } - - spin_unlock(&sched->ws_lock); - - spin_lock(&cfs_wi_data.wi_glock); - sched->ws_nthreads--; - spin_unlock(&cfs_wi_data.wi_glock); - - return 0; -} - -void -cfs_wi_sched_destroy(struct cfs_wi_sched *sched) -{ - int i; - - LASSERT(cfs_wi_data.wi_init); - LASSERT(!cfs_wi_data.wi_stopping); - - spin_lock(&cfs_wi_data.wi_glock); - if (sched->ws_stopping) { - CDEBUG(D_INFO, "%s is in progress of stopping\n", - sched->ws_name); - spin_unlock(&cfs_wi_data.wi_glock); - return; - } - - LASSERT(!list_empty(&sched->ws_list)); - sched->ws_stopping = 1; - - spin_unlock(&cfs_wi_data.wi_glock); - - i = 2; - wake_up_all(&sched->ws_waitq); - - spin_lock(&cfs_wi_data.wi_glock); - while (sched->ws_nthreads > 0) { - CDEBUG(is_power_of_2(++i) ? 
D_WARNING : D_NET, - "waiting for %d threads of WI sched[%s] to terminate\n", - sched->ws_nthreads, sched->ws_name); - - spin_unlock(&cfs_wi_data.wi_glock); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1) / 20); - spin_lock(&cfs_wi_data.wi_glock); - } - - list_del(&sched->ws_list); - - spin_unlock(&cfs_wi_data.wi_glock); - LASSERT(sched->ws_nscheduled == 0); - - LIBCFS_FREE(sched, sizeof(*sched)); -} -EXPORT_SYMBOL(cfs_wi_sched_destroy); - -int -cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, - int cpt, int nthrs, struct cfs_wi_sched **sched_pp) -{ - struct cfs_wi_sched *sched; - int rc; - - LASSERT(cfs_wi_data.wi_init); - LASSERT(!cfs_wi_data.wi_stopping); - LASSERT(cptab == NULL || cpt == CFS_CPT_ANY || - (cpt >= 0 && cpt < cfs_cpt_number(cptab))); - - LIBCFS_ALLOC(sched, sizeof(*sched)); - if (sched == NULL) - return -ENOMEM; - - strlcpy(sched->ws_name, name, CFS_WS_NAME_LEN); - - sched->ws_cptab = cptab; - sched->ws_cpt = cpt; - - spin_lock_init(&sched->ws_lock); - init_waitqueue_head(&sched->ws_waitq); - INIT_LIST_HEAD(&sched->ws_runq); - INIT_LIST_HEAD(&sched->ws_rerunq); - INIT_LIST_HEAD(&sched->ws_list); - - rc = 0; - while (nthrs > 0) { - char name[16]; - struct task_struct *task; - - spin_lock(&cfs_wi_data.wi_glock); - while (sched->ws_starting > 0) { - spin_unlock(&cfs_wi_data.wi_glock); - schedule(); - spin_lock(&cfs_wi_data.wi_glock); - } - - sched->ws_starting++; - spin_unlock(&cfs_wi_data.wi_glock); - - if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) { - snprintf(name, sizeof(name), "%s_%02d_%02u", - sched->ws_name, sched->ws_cpt, - sched->ws_nthreads); - } else { - snprintf(name, sizeof(name), "%s_%02u", - sched->ws_name, sched->ws_nthreads); - } - - task = kthread_run(cfs_wi_scheduler, sched, "%s", name); - if (!IS_ERR(task)) { - nthrs--; - continue; - } - rc = PTR_ERR(task); - - CERROR("Failed to create thread for WI scheduler %s: %d\n", - name, rc); - - spin_lock(&cfs_wi_data.wi_glock); - - /* 
make up for cfs_wi_sched_destroy */ - list_add(&sched->ws_list, &cfs_wi_data.wi_scheds); - sched->ws_starting--; - - spin_unlock(&cfs_wi_data.wi_glock); - - cfs_wi_sched_destroy(sched); - return rc; - } - spin_lock(&cfs_wi_data.wi_glock); - list_add(&sched->ws_list, &cfs_wi_data.wi_scheds); - spin_unlock(&cfs_wi_data.wi_glock); - - *sched_pp = sched; - return 0; -} -EXPORT_SYMBOL(cfs_wi_sched_create); - -int -cfs_wi_startup(void) -{ - memset(&cfs_wi_data, 0, sizeof(cfs_wi_data)); - - spin_lock_init(&cfs_wi_data.wi_glock); - INIT_LIST_HEAD(&cfs_wi_data.wi_scheds); - cfs_wi_data.wi_init = 1; - - return 0; -} - -void -cfs_wi_shutdown(void) -{ - struct cfs_wi_sched *sched; - - spin_lock(&cfs_wi_data.wi_glock); - cfs_wi_data.wi_stopping = 1; - spin_unlock(&cfs_wi_data.wi_glock); - - /* nobody should contend on this list */ - list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { - sched->ws_stopping = 1; - wake_up_all(&sched->ws_waitq); - } - - list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) { - spin_lock(&cfs_wi_data.wi_glock); - - while (sched->ws_nthreads != 0) { - spin_unlock(&cfs_wi_data.wi_glock); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1) / 20); - spin_lock(&cfs_wi_data.wi_glock); - } - spin_unlock(&cfs_wi_data.wi_glock); - } - while (!list_empty(&cfs_wi_data.wi_scheds)) { - sched = list_entry(cfs_wi_data.wi_scheds.next, - struct cfs_wi_sched, ws_list); - list_del(&sched->ws_list); - LIBCFS_FREE(sched, sizeof(*sched)); - } - - cfs_wi_data.wi_stopping = 0; - cfs_wi_data.wi_init = 0; -} diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c index 3d6745e63..dd1c82701 100644 --- a/drivers/staging/lustre/lustre/llite/dcache.c +++ b/drivers/staging/lustre/lustre/llite/dcache.c @@ -60,9 +60,9 @@ static void ll_release(struct dentry *de) { struct ll_dentry_data *lld; - LASSERT(de != NULL); + LASSERT(de); lld = ll_d2d(de); - if (lld == NULL) /* NFS copies the 
de->d_op methods (bug 4655) */ + if (!lld) /* NFS copies the de->d_op methods (bug 4655) */ return; if (lld->lld_it) { @@ -80,7 +80,8 @@ static void ll_release(struct dentry *de) * This avoids a race where ll_lookup_it() instantiates a dentry, but we get * an AST before calling d_revalidate_it(). The dentry still exists (marked * INVALID) so d_lookup() matches it, but we have no lock on it (so - * lock_match() fails) and we spin around real_lookup(). */ + * lock_match() fails) and we spin around real_lookup(). + */ static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) @@ -117,7 +118,8 @@ static inline int return_if_equal(struct ldlm_lock *lock, void *data) /* find any ldlm lock of the inode in mdc and lov * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ static int find_cbdata(struct inode *inode) { struct ll_sb_info *sbi = ll_i2sbi(inode); @@ -131,7 +133,7 @@ static int find_cbdata(struct inode *inode) return rc; lsm = ccc_inode_lsm_get(inode); - if (lsm == NULL) + if (!lsm) return rc; rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL); @@ -163,10 +165,12 @@ static int ll_ddelete(const struct dentry *de) /* Disable this piece of code temporarily because this is called * inside dcache_lock so it's not appropriate to do lots of work * here. ATTENTION: Before this piece of code enabling, LU-2487 must be - * resolved. */ + * resolved. 
+ */ #if 0 /* if not ldlm lock for this inode, set i_nlink to 0 so that - * this inode can be recycled later b=20433 */ + * this inode can be recycled later b=20433 + */ if (d_really_is_positive(de) && !find_cbdata(d_inode(de))) clear_nlink(d_inode(de)); #endif @@ -178,19 +182,16 @@ static int ll_ddelete(const struct dentry *de) int ll_d_init(struct dentry *de) { - LASSERT(de != NULL); - CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n", - de, de, de->d_parent, d_inode(de), - d_count(de)); + de, de, de->d_parent, d_inode(de), d_count(de)); - if (de->d_fsdata == NULL) { + if (!de->d_fsdata) { struct ll_dentry_data *lld; lld = kzalloc(sizeof(*lld), GFP_NOFS); if (likely(lld)) { spin_lock(&de->d_lock); - if (likely(de->d_fsdata == NULL)) { + if (likely(!de->d_fsdata)) { de->d_fsdata = lld; __d_lustre_invalidate(de); } else { @@ -218,7 +219,8 @@ void ll_intent_drop_lock(struct lookup_intent *it) ldlm_lock_decref(&handle, it->d.lustre.it_lock_mode); /* bug 494: intent_release may be called multiple times, from - * this thread and we don't want to double-decref this lock */ + * this thread and we don't want to double-decref this lock + */ it->d.lustre.it_lock_mode = 0; if (it->d.lustre.it_remote_lock_mode != 0) { handle.cookie = it->d.lustre.it_remote_lock_handle; @@ -251,8 +253,6 @@ void ll_invalidate_aliases(struct inode *inode) { struct dentry *dentry; - LASSERT(inode != NULL); - CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n", inode->i_ino, inode->i_generation, inode); @@ -286,9 +286,7 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request, void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode) { - LASSERT(it != NULL); - - if (it->d.lustre.it_lock_mode && inode != NULL) { + if (it->d.lustre.it_lock_mode && inode) { struct ll_sb_info *sbi = ll_i2sbi(inode); CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n", @@ -300,7 +298,8 @@ void ll_lookup_finish_locks(struct lookup_intent *it, struct inode 
*inode) if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) { /* on 2.6 there are situation when several lookups and * revalidations may be requested during single operation. - * therefore, we don't release intent here -bzzz */ + * therefore, we don't release intent here -bzzz + */ ll_intent_drop_lock(it); } } @@ -328,7 +327,7 @@ static int ll_revalidate_dentry(struct dentry *dentry, if (lookup_flags & LOOKUP_RCU) return -ECHILD; - do_statahead_enter(dir, &dentry, d_inode(dentry) == NULL); + do_statahead_enter(dir, &dentry, !d_inode(dentry)); ll_statahead_mark(dir, dentry); return 1; } diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 8982f7d1b..e4c82883e 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c @@ -55,6 +55,7 @@ #include "../include/lustre_lite.h" #include "../include/lustre_dlm.h" #include "../include/lustre_fid.h" +#include "../include/lustre_kernelcomm.h" #include "llite_internal.h" /* @@ -133,9 +134,8 @@ * a header lu_dirpage which describes the start/end hash, and whether this * page is empty (contains no dir entry) or hash collide with next page. * After client receives reply, several pages will be integrated into dir page - * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the - * lu_dirpage for this integrated page will be adjusted. See - * lmv_adjust_dirpages(). + * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage + * for this integrated page will be adjusted. See lmv_adjust_dirpages(). 
* */ @@ -152,7 +152,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) struct page **page_pool; struct page *page; struct lu_dirpage *dp; - int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT; + int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT; int nrdpgs = 0; /* number of pages read actually */ int npages; int i; @@ -189,13 +189,11 @@ static int ll_dir_filler(void *_hash, struct page *page0) } else if (rc == 0) { body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); /* Checked by mdc_readpage() */ - LASSERT(body != NULL); - if (body->valid & OBD_MD_FLSIZE) cl_isize_write(inode, body->size); - nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1) - >> PAGE_CACHE_SHIFT; + nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1) + >> PAGE_SHIFT; SetPageUptodate(page0); } unlock_page(page0); @@ -210,7 +208,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) page = page_pool[i]; if (rc < 0 || i >= nrdpgs) { - page_cache_release(page); + put_page(page); continue; } @@ -231,7 +229,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n", offset, ret); } - page_cache_release(page); + put_page(page); } if (page_pool != &page0) @@ -244,11 +242,11 @@ void ll_release_page(struct page *page, int remove) kunmap(page); if (remove) { lock_page(page); - if (likely(page->mapping != NULL)) + if (likely(page->mapping)) truncate_complete_page(page->mapping, page); unlock_page(page); } - page_cache_release(page); + put_page(page); } /* @@ -274,7 +272,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash, if (found > 0 && !radix_tree_exceptional_entry(page)) { struct lu_dirpage *dp; - page_cache_get(page); + get_page(page); spin_unlock_irq(&mapping->tree_lock); /* * In contrast to find_lock_page() we are sure that directory @@ -314,7 +312,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash, page = 
NULL; } } else { - page_cache_release(page); + put_page(page); page = ERR_PTR(-EIO); } @@ -333,7 +331,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, struct lustre_handle lockh; struct lu_dirpage *dp; struct page *page; - ldlm_mode_t mode; + enum ldlm_mode mode; int rc; __u64 start = 0; __u64 end = 0; @@ -356,7 +354,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, struct md_op_data *op_data; op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); + LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) return (void *)op_data; @@ -369,8 +367,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, if (request) ptlrpc_req_finished(request); if (rc < 0) { - CERROR("lock enqueue: "DFID" at %llu: rc %d\n", - PFID(ll_inode2fid(dir)), hash, rc); + CERROR("lock enqueue: " DFID " at %llu: rc %d\n", + PFID(ll_inode2fid(dir)), hash, rc); return ERR_PTR(rc); } @@ -380,7 +378,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, &it.d.lustre.it_lock_handle, dir, NULL); } else { /* for cross-ref object, l_ast_data of the lock may not be set, - * we reset it here */ + * we reset it here + */ md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie, dir, NULL); } @@ -392,7 +391,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash, CERROR("dir page locate: "DFID" at %llu: rc %ld\n", PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page)); goto out_unlock; - } else if (page != NULL) { + } else if (page) { /* * XXX nikita: not entirely correct handling of a corner case: * suppose hash chain of entries with hash value HASH crosses @@ -498,7 +497,7 @@ int ll_dir_read(struct inode *inode, struct dir_context *ctx) __u64 next; dp = page_address(page); - for (ent = lu_dirent_start(dp); ent != NULL && !done; + for (ent = lu_dirent_start(dp); ent && !done; ent = lu_dirent_next(ent)) { __u16 type; int namelen; @@ -688,7 +687,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, struct obd_device *mgc = 
lsi->lsi_mgc; int lum_size; - if (lump != NULL) { + if (lump) { /* * This is coming from userspace, so should be in * local endian. But the MDS would like it in little @@ -724,7 +723,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, if (IS_ERR(op_data)) return PTR_ERR(op_data); - if (lump != NULL && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC)) + if (lump && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC)) op_data->op_cli_flags |= CLI_SET_MEA; /* swabbing is done in lov_setstripe() on server side */ @@ -738,8 +737,9 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump, } /* In the following we use the fact that LOV_USER_MAGIC_V1 and - LOV_USER_MAGIC_V3 have the same initial fields so we do not - need to make the distinction between the 2 versions */ + * LOV_USER_MAGIC_V3 have the same initial fields so we do not + * need to make the distinction between the 2 versions + */ if (set_default && mgc->u.cli.cl_mgc_mgsexp) { char *param = NULL; char *buf; @@ -811,7 +811,6 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp, } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); lmmsize = body->eadatasize; @@ -823,7 +822,6 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp, lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize); - LASSERT(lmm != NULL); /* * This is coming from the MDS, so is probably in @@ -879,7 +877,7 @@ int ll_get_mdt_idx(struct inode *inode) /** * Generic handler to do any pre-copy work. * - * It send a first hsm_progress (with extent length == 0) to coordinator as a + * It sends a first hsm_progress (with extent length == 0) to coordinator as a * first information for it that real work has started. 
* * Moreover, for a ARCHIVE request, it will sample the file data version and @@ -931,8 +929,9 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy) goto progress; } - /* Store it the hsm_copy for later copytool use. - * Always modified even if no lsm. */ + /* Store in the hsm_copy for later copytool use. + * Always modified even if no lsm. + */ copy->hc_data_version = data_version; } @@ -1008,12 +1007,14 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy) goto progress; } - /* Store it the hsm_copy for later copytool use. - * Always modified even if no lsm. */ + /* Store in the hsm_copy for later copytool use. + * Always modified even if no lsm. + */ hpk.hpk_data_version = data_version; /* File could have been stripped during archiving, so we need - * to check anyway. */ + * to check anyway. + */ if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) && (copy->hc_data_version != data_version)) { CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. " @@ -1025,7 +1026,8 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy) * the cdt will loop on retried archive requests. * The policy engine will ask for a new archive later * when the file will not be modified for some tunable - * time */ + * time + */ /* we do not notify caller */ hpk.hpk_flags &= ~HP_FLAG_RETRY; /* hpk_errval must be >= 0 */ @@ -1153,7 +1155,8 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl) return rc; } /* If QIF_SPACE is not set, client should collect the - * space usage from OSSs by itself */ + * space usage from OSSs by itself + */ if (cmd == Q_GETQUOTA && !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) && !oqctl->qc_dqblk.dqb_curspace) { @@ -1204,7 +1207,8 @@ out: /* This function tries to get a single name component, * to send to the server. 
No actual path traversal involved, - * so we limit to NAME_MAX */ + * so we limit to NAME_MAX + */ static char *ll_getname(const char __user *filename) { int ret = 0, len; @@ -1252,7 +1256,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ll_iocontrol(inode, file, cmd, arg); case FSFILT_IOC_GETVERSION_OLD: case FSFILT_IOC_GETVERSION: - return put_user(inode->i_generation, (int *)arg); + return put_user(inode->i_generation, (int __user *)arg); /* We need to special case any other ioctls we want to handle, * to send them to the MDS/OST as appropriate and to properly * network encode the arg field. @@ -1266,7 +1270,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (mdtidx < 0) return mdtidx; - if (put_user((int)mdtidx, (int *)arg)) + if (put_user((int)mdtidx, (int __user *)arg)) return -EFAULT; return 0; @@ -1278,7 +1282,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) char *filename; struct md_op_data *op_data; - rc = obd_ioctl_getdata(&buf, &len, (void *)arg); + rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg); if (rc) return rc; data = (void *)buf; @@ -1320,12 +1324,12 @@ out_free: int len; int rc; - rc = obd_ioctl_getdata(&buf, &len, (void *)arg); + rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg); if (rc) return rc; data = (void *)buf; - if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL || + if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 || data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) { rc = -EINVAL; goto lmv_out_free; @@ -1363,8 +1367,8 @@ lmv_out_free: case LL_IOC_LOV_SETSTRIPE: { struct lov_user_md_v3 lumv3; struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3; - struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg; - struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg; + struct lov_user_md_v1 __user *lumv1p = (void __user *)arg; + struct lov_user_md_v3 __user *lumv3p = (void __user *)arg; int 
set_default = 0; @@ -1389,7 +1393,7 @@ lmv_out_free: return rc; } case LL_IOC_LMV_GETSTRIPE: { - struct lmv_user_md *lump = (struct lmv_user_md *)arg; + struct lmv_user_md __user *lump = (void __user *)arg; struct lmv_user_md lum; struct lmv_user_md *tmp; int lum_size; @@ -1422,7 +1426,7 @@ lmv_out_free: tmp->lum_objects[0].lum_mds = mdtindex; memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode), sizeof(struct lu_fid)); - if (copy_to_user((void *)arg, tmp, lum_size)) { + if (copy_to_user((void __user *)arg, tmp, lum_size)) { rc = -EFAULT; goto free_lmv; } @@ -1433,13 +1437,13 @@ free_lmv: case LL_IOC_LOV_SWAP_LAYOUTS: return -EPERM; case LL_IOC_OBD_STATFS: - return ll_obd_statfs(inode, (void *)arg); + return ll_obd_statfs(inode, (void __user *)arg); case LL_IOC_LOV_GETSTRIPE: case LL_IOC_MDC_GETINFO: case IOC_MDC_GETFILEINFO: case IOC_MDC_GETFILESTRIPE: { struct ptlrpc_request *request = NULL; - struct lov_user_md *lump; + struct lov_user_md __user *lump; struct lov_mds_md *lmm = NULL; struct mdt_body *body; char *filename = NULL; @@ -1447,7 +1451,7 @@ free_lmv: if (cmd == IOC_MDC_GETFILEINFO || cmd == IOC_MDC_GETFILESTRIPE) { - filename = ll_getname((const char *)arg); + filename = ll_getname((const char __user *)arg); if (IS_ERR(filename)) return PTR_ERR(filename); @@ -1460,7 +1464,7 @@ free_lmv: if (request) { body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); + LASSERT(body); } else { goto out_req; } @@ -1476,11 +1480,11 @@ free_lmv: if (cmd == IOC_MDC_GETFILESTRIPE || cmd == LL_IOC_LOV_GETSTRIPE) { - lump = (struct lov_user_md *)arg; + lump = (struct lov_user_md __user *)arg; } else { - struct lov_user_mds_data *lmdp; + struct lov_user_mds_data __user *lmdp; - lmdp = (struct lov_user_mds_data *)arg; + lmdp = (struct lov_user_mds_data __user *)arg; lump = &lmdp->lmd_lmm; } if (copy_to_user(lump, lmm, lmmsize)) { @@ -1492,7 +1496,7 @@ free_lmv: } skip_lmm: if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) { - 
struct lov_user_mds_data *lmdp; + struct lov_user_mds_data __user *lmdp; lstat_t st = { 0 }; st.st_dev = inode->i_sb->s_dev; @@ -1502,14 +1506,14 @@ skip_lmm: st.st_gid = body->gid; st.st_rdev = body->rdev; st.st_size = body->size; - st.st_blksize = PAGE_CACHE_SIZE; + st.st_blksize = PAGE_SIZE; st.st_blocks = body->blocks; st.st_atime = body->atime; st.st_mtime = body->mtime; st.st_ctime = body->ctime; st.st_ino = inode->i_ino; - lmdp = (struct lov_user_mds_data *)arg; + lmdp = (struct lov_user_mds_data __user *)arg; if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) { rc = -EFAULT; goto out_req; @@ -1523,14 +1527,14 @@ out_req: return rc; } case IOC_LOV_GETINFO: { - struct lov_user_mds_data *lumd; + struct lov_user_mds_data __user *lumd; struct lov_stripe_md *lsm; - struct lov_user_md *lum; + struct lov_user_md __user *lum; struct lov_mds_md *lmm; int lmmsize; lstat_t st; - lumd = (struct lov_user_mds_data *)arg; + lumd = (struct lov_user_mds_data __user *)arg; lum = &lumd->lmd_lmm; rc = ll_get_max_mdsize(sbi, &lmmsize); @@ -1538,7 +1542,7 @@ out_req: return rc; lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS); - if (lmm == NULL) + if (!lmm) return -ENOMEM; if (copy_from_user(lmm, lum, lmmsize)) { rc = -EFAULT; @@ -1636,8 +1640,8 @@ free_lmm: NULL); if (rc) { CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc); - if (copy_to_user((void *)arg, check, - sizeof(*check))) + if (copy_to_user((void __user *)arg, check, + sizeof(*check))) CDEBUG(D_QUOTA, "copy_to_user failed\n"); goto out_poll; } @@ -1646,8 +1650,8 @@ free_lmm: NULL); if (rc) { CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc); - if (copy_to_user((void *)arg, check, - sizeof(*check))) + if (copy_to_user((void __user *)arg, check, + sizeof(*check))) CDEBUG(D_QUOTA, "copy_to_user failed\n"); goto out_poll; } @@ -1662,14 +1666,15 @@ out_poll: if (!qctl) return -ENOMEM; - if (copy_from_user(qctl, (void *)arg, sizeof(*qctl))) { + if (copy_from_user(qctl, (void __user *)arg, sizeof(*qctl))) { rc = -EFAULT; goto 
out_quotactl; } rc = quotactl_ioctl(sbi, qctl); - if (rc == 0 && copy_to_user((void *)arg, qctl, sizeof(*qctl))) + if (rc == 0 && copy_to_user((void __user *)arg, qctl, + sizeof(*qctl))) rc = -EFAULT; out_quotactl: @@ -1686,7 +1691,6 @@ out_quotactl: if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) { struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - LASSERT(fd != NULL); rc = rct_add(&sbi->ll_rct, current_pid(), arg); if (!rc) fd->fd_flags |= LL_FILE_RMTACL; @@ -1699,7 +1703,7 @@ out_quotactl: int count, vallen; struct obd_export *exp; - if (copy_from_user(&count, (int *)arg, sizeof(int))) + if (copy_from_user(&count, (int __user *)arg, sizeof(int))) return -EFAULT; /* get ost count when count is zero, get mdt count otherwise */ @@ -1712,34 +1716,35 @@ out_quotactl: return rc; } - if (copy_to_user((int *)arg, &count, sizeof(int))) + if (copy_to_user((int __user *)arg, &count, sizeof(int))) return -EFAULT; return 0; } case LL_IOC_PATH2FID: - if (copy_to_user((void *)arg, ll_inode2fid(inode), - sizeof(struct lu_fid))) + if (copy_to_user((void __user *)arg, ll_inode2fid(inode), + sizeof(struct lu_fid))) return -EFAULT; return 0; case LL_IOC_GET_CONNECT_FLAGS: { - return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void *)arg); + return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, + (void __user *)arg); } case OBD_IOC_CHANGELOG_SEND: case OBD_IOC_CHANGELOG_CLEAR: if (!capable(CFS_CAP_SYS_ADMIN)) return -EPERM; - rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg, + rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg, sizeof(struct ioc_changelog)); return rc; case OBD_IOC_FID2PATH: - return ll_fid2path(inode, (void *)arg); + return ll_fid2path(inode, (void __user *)arg); case LL_IOC_HSM_REQUEST: { struct hsm_user_request *hur; ssize_t totalsize; - hur = memdup_user((void *)arg, sizeof(*hur)); + hur = memdup_user((void __user *)arg, sizeof(*hur)); if (IS_ERR(hur)) return PTR_ERR(hur); @@ -1754,11 +1759,11 @@ out_quotactl: return -E2BIG; hur = 
libcfs_kvzalloc(totalsize, GFP_NOFS); - if (hur == NULL) + if (!hur) return -ENOMEM; /* Copy the whole struct */ - if (copy_from_user(hur, (void *)arg, totalsize)) { + if (copy_from_user(hur, (void __user *)arg, totalsize)) { kvfree(hur); return -EFAULT; } @@ -1794,7 +1799,7 @@ out_quotactl: struct hsm_progress_kernel hpk; struct hsm_progress hp; - if (copy_from_user(&hp, (void *)arg, sizeof(hp))) + if (copy_from_user(&hp, (void __user *)arg, sizeof(hp))) return -EFAULT; hpk.hpk_fid = hp.hp_fid; @@ -1805,13 +1810,14 @@ out_quotactl: hpk.hpk_data_version = 0; /* File may not exist in Lustre; all progress - * reported to Lustre root */ + * reported to Lustre root + */ rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk, NULL); return rc; } case LL_IOC_HSM_CT_START: - rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg, + rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg, sizeof(struct lustre_kernelcomm)); return rc; @@ -1819,12 +1825,12 @@ out_quotactl: struct hsm_copy *copy; int rc; - copy = memdup_user((char *)arg, sizeof(*copy)); + copy = memdup_user((char __user *)arg, sizeof(*copy)); if (IS_ERR(copy)) return PTR_ERR(copy); rc = ll_ioc_copy_start(inode->i_sb, copy); - if (copy_to_user((char *)arg, copy, sizeof(*copy))) + if (copy_to_user((char __user *)arg, copy, sizeof(*copy))) rc = -EFAULT; kfree(copy); @@ -1834,19 +1840,20 @@ out_quotactl: struct hsm_copy *copy; int rc; - copy = memdup_user((char *)arg, sizeof(*copy)); + copy = memdup_user((char __user *)arg, sizeof(*copy)); if (IS_ERR(copy)) return PTR_ERR(copy); rc = ll_ioc_copy_end(inode->i_sb, copy); - if (copy_to_user((char *)arg, copy, sizeof(*copy))) + if (copy_to_user((char __user *)arg, copy, sizeof(*copy))) rc = -EFAULT; kfree(copy); return rc; } default: - return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg); + return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, + (void __user *)arg); } } diff --git a/drivers/staging/lustre/lustre/llite/file.c 
b/drivers/staging/lustre/lustre/llite/file.c index 39e2ffd5f..cf619af3c 100644 --- a/drivers/staging/lustre/lustre/llite/file.c +++ b/drivers/staging/lustre/lustre/llite/file.c @@ -64,8 +64,8 @@ static struct ll_file_data *ll_file_data_get(void) { struct ll_file_data *fd; - fd = kmem_cache_alloc(ll_file_data_slab, GFP_NOFS | __GFP_ZERO); - if (fd == NULL) + fd = kmem_cache_zalloc(ll_file_data_slab, GFP_NOFS); + if (!fd) return NULL; fd->fd_write_failed = false; return fd; @@ -73,7 +73,7 @@ static struct ll_file_data *ll_file_data_get(void) static void ll_file_data_put(struct ll_file_data *fd) { - if (fd != NULL) + if (fd) kmem_cache_free(ll_file_data_slab, fd); } @@ -134,7 +134,7 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, int epoch_close = 1; int rc; - if (obd == NULL) { + if (!obd) { /* * XXX: in case of LMV, is this correct to access * ->exp_handle? @@ -153,7 +153,7 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, } ll_prepare_close(inode, op_data, och); - if (data_version != NULL) { + if (data_version) { /* Pass in data_version implies release. */ op_data->op_bias |= MDS_HSM_RELEASE; op_data->op_data_version = *data_version; @@ -166,7 +166,8 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, /* This close must have the epoch closed. */ LASSERT(epoch_close); /* MDS has instructed us to obtain Size-on-MDS attribute from - * OSTs and send setattr to back to MDS. */ + * OSTs and send setattr to back to MDS. + */ rc = ll_som_update(inode, op_data); if (rc) { CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n", @@ -179,7 +180,8 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp, } /* DATA_MODIFIED flag was successfully sent on close, cancel data - * modification flag. */ + * modification flag. 
+ */ if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) { struct ll_inode_info *lli = ll_i2info(inode); @@ -242,7 +244,8 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode) mutex_lock(&lli->lli_och_mutex); if (*och_usecount > 0) { /* There are still users of this handle, so skip - * freeing it. */ + * freeing it. + */ mutex_unlock(&lli->lli_och_mutex); return 0; } @@ -251,9 +254,10 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode) *och_p = NULL; mutex_unlock(&lli->lli_och_mutex); - if (och != NULL) { + if (och) { /* There might be a race and this handle may already - be closed. */ + * be closed. + */ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och, NULL); } @@ -276,26 +280,29 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode, if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED)) ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid); - if (fd->fd_lease_och != NULL) { + if (fd->fd_lease_och) { bool lease_broken; /* Usually the lease is not released when the - * application crashed, we need to release here. */ + * application crashed, we need to release here. + */ rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken); - CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n", - PFID(&lli->lli_fid), rc, lease_broken); + CDEBUG(rc ? 
D_ERROR : D_INODE, + "Clean up lease " DFID " %d/%d\n", + PFID(&lli->lli_fid), rc, lease_broken); fd->fd_lease_och = NULL; } - if (fd->fd_och != NULL) { + if (fd->fd_och) { rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL); fd->fd_och = NULL; goto out; } /* Let's see if we have good enough OPEN lock on the file and if - we can skip talking to MDS */ + * we can skip talking to MDS + */ mutex_lock(&lli->lli_och_mutex); if (fd->fd_omode & FMODE_WRITE) { @@ -343,7 +350,6 @@ int ll_file_release(struct inode *inode, struct file *file) if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) { struct ll_file_data *fd = LUSTRE_FPRIVATE(file); - LASSERT(fd != NULL); if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) { fd->fd_flags &= ~LL_FILE_RMTACL; rct_del(&sbi->ll_rct, current_pid()); @@ -355,11 +361,12 @@ int ll_file_release(struct inode *inode, struct file *file) if (!is_root_inode(inode)) ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1); fd = LUSTRE_FPRIVATE(file); - LASSERT(fd != NULL); + LASSERT(fd); - /* The last ref on @file, maybe not the owner pid of statahead. + /* The last ref on @file, maybe not be the owner pid of statahead. * Different processes can open the same dir, "ll_opendir_key" means: - * it is me that should stop the statahead thread. */ + * it is me that should stop the statahead thread. + */ if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd && lli->lli_opendir_pid != 0) ll_stop_statahead(inode, lli->lli_opendir_key); @@ -396,16 +403,16 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm, __u32 opc = LUSTRE_OPC_ANY; int rc; - /* Usually we come here only for NFSD, and we want open lock. - But we can also get here with pre 2.6.15 patchless kernels, and in - that case that lock is also ok */ + /* Usually we come here only for NFSD, and we want open lock. */ /* We can also get here if there was cached open handle in revalidate_it * but it disappeared while we were getting from there to ll_file_open. 
* But this means this file was closed and immediately opened which - * makes a good candidate for using OPEN lock */ + * makes a good candidate for using OPEN lock + */ /* If lmmsize & lmm are not 0, we are just setting stripe info - * parameters. No need for the open lock */ - if (lmm == NULL && lmmsize == 0) { + * parameters. No need for the open lock + */ + if (!lmm && lmmsize == 0) { itp->it_flags |= MDS_OPEN_LOCK; if (itp->it_flags & FMODE_WRITE) opc = LUSTRE_OPC_CREATE; @@ -426,7 +433,7 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm, * with messages with -ESTALE errors. */ if (!it_disposition(itp, DISP_OPEN_OPEN) || - it_open_error(DISP_OPEN_OPEN, itp)) + it_open_error(DISP_OPEN_OPEN, itp)) goto out; ll_release_openhandle(inode, itp); goto out; @@ -492,7 +499,7 @@ static int ll_local_open(struct file *file, struct lookup_intent *it, LASSERT(!LUSTRE_FPRIVATE(file)); - LASSERT(fd != NULL); + LASSERT(fd); if (och) { struct ptlrpc_request *req = it->d.lustre.it_data; @@ -543,7 +550,7 @@ int ll_file_open(struct inode *inode, struct file *file) file->private_data = NULL; /* prevent ll_local_open assertion */ fd = ll_file_data_get(); - if (fd == NULL) { + if (!fd) { rc = -ENOMEM; goto out_openerr; } @@ -551,7 +558,7 @@ int ll_file_open(struct inode *inode, struct file *file) fd->fd_file = file; if (S_ISDIR(inode->i_mode)) { spin_lock(&lli->lli_sa_lock); - if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL && + if (!lli->lli_opendir_key && !lli->lli_sai && lli->lli_opendir_pid == 0) { lli->lli_opendir_key = fd; lli->lli_opendir_pid = current_pid(); @@ -568,7 +575,8 @@ int ll_file_open(struct inode *inode, struct file *file) if (!it || !it->d.lustre.it_disposition) { /* Convert f_flags into access mode. 
We cannot use file->f_mode, * because everything but O_ACCMODE mask was stripped from - * there */ + * there + */ if ((oit.it_flags + 1) & O_ACCMODE) oit.it_flags++; if (file->f_flags & O_TRUNC) @@ -577,17 +585,20 @@ int ll_file_open(struct inode *inode, struct file *file) /* kernel only call f_op->open in dentry_open. filp_open calls * dentry_open after call to open_namei that checks permissions. * Only nfsd_open call dentry_open directly without checking - * permissions and because of that this code below is safe. */ + * permissions and because of that this code below is safe. + */ if (oit.it_flags & (FMODE_WRITE | FMODE_READ)) oit.it_flags |= MDS_OPEN_OWNEROVERRIDE; /* We do not want O_EXCL here, presumably we opened the file - * already? XXX - NFS implications? */ + * already? XXX - NFS implications? + */ oit.it_flags &= ~O_EXCL; /* bug20584, if "it_flags" contains O_CREAT, the file will be * created if necessary, then "IT_CREAT" should be set to keep - * consistent with it */ + * consistent with it + */ if (oit.it_flags & O_CREAT) oit.it_op |= IT_CREAT; @@ -611,7 +622,8 @@ restart: if (*och_p) { /* Open handle is present */ if (it_disposition(it, DISP_OPEN_OPEN)) { /* Well, there's extra open request that we do not need, - let's close it somehow. This will decref request. */ + * let's close it somehow. This will decref request. 
+ */ rc = it_open_error(DISP_OPEN_OPEN, it); if (rc) { mutex_unlock(&lli->lli_och_mutex); @@ -632,10 +644,11 @@ restart: LASSERT(*och_usecount == 0); if (!it->d.lustre.it_disposition) { /* We cannot just request lock handle now, new ELC code - means that one of other OPEN locks for this file - could be cancelled, and since blocking ast handler - would attempt to grab och_mutex as well, that would - result in a deadlock */ + * means that one of other OPEN locks for this file + * could be cancelled, and since blocking ast handler + * would attempt to grab och_mutex as well, that would + * result in a deadlock + */ mutex_unlock(&lli->lli_och_mutex); it->it_create_mode |= M_CHECK_STALE; rc = ll_intent_file_open(file->f_path.dentry, NULL, 0, it); @@ -655,9 +668,11 @@ restart: /* md_intent_lock() didn't get a request ref if there was an * open error, so don't do cleanup on the request here - * (bug 3430) */ + * (bug 3430) + */ /* XXX (green): Should not we bail out on any error here, not - * just open error? */ + * just open error? 
+ */ rc = it_open_error(DISP_OPEN_OPEN, it); if (rc) goto out_och_free; @@ -672,8 +687,9 @@ restart: fd = NULL; /* Must do this outside lli_och_mutex lock to prevent deadlock where - different kind of OPEN lock for this same inode gets cancelled - by ldlm_cancel_lru */ + * different kind of OPEN lock for this same inode gets cancelled + * by ldlm_cancel_lru + */ if (!S_ISREG(inode->i_mode)) goto out_och_free; @@ -712,7 +728,8 @@ out_openerr: } static int ll_md_blocking_lease_ast(struct ldlm_lock *lock, - struct ldlm_lock_desc *desc, void *data, int flag) + struct ldlm_lock_desc *desc, + void *data, int flag) { int rc; struct lustre_handle lockh; @@ -752,7 +769,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, if (fmode != FMODE_WRITE && fmode != FMODE_READ) return ERR_PTR(-EINVAL); - if (file != NULL) { + if (file) { struct ll_inode_info *lli = ll_i2info(inode); struct ll_file_data *fd = LUSTRE_FPRIVATE(file); struct obd_client_handle **och_p; @@ -764,18 +781,18 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, /* Get the openhandle of the file */ rc = -EBUSY; mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och != NULL) { + if (fd->fd_lease_och) { mutex_unlock(&lli->lli_och_mutex); return ERR_PTR(rc); } - if (fd->fd_och == NULL) { + if (!fd->fd_och) { if (file->f_mode & FMODE_WRITE) { - LASSERT(lli->lli_mds_write_och != NULL); + LASSERT(lli->lli_mds_write_och); och_p = &lli->lli_mds_write_och; och_usecount = &lli->lli_open_fd_write_count; } else { - LASSERT(lli->lli_mds_read_och != NULL); + LASSERT(lli->lli_mds_read_och); och_p = &lli->lli_mds_read_och; och_usecount = &lli->lli_open_fd_read_count; } @@ -790,7 +807,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, if (rc < 0) /* more than 1 opener */ return ERR_PTR(rc); - LASSERT(fd->fd_och != NULL); + LASSERT(fd->fd_och); old_handle = fd->fd_och->och_fh; } @@ -799,7 +816,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t 
fmode, return ERR_PTR(-ENOMEM); op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0, - LUSTRE_OPC_ANY, NULL); + LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) { rc = PTR_ERR(op_data); goto out; @@ -811,13 +828,14 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, it.it_flags = fmode | open_flags; it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE; rc = md_intent_lock(sbi->ll_md_exp, op_data, NULL, 0, &it, 0, &req, - ll_md_blocking_lease_ast, + ll_md_blocking_lease_ast, /* LDLM_FL_NO_LRU: To not put the lease lock into LRU list, otherwise * it can be cancelled which may mislead applications that the lease is * broken; * LDLM_FL_EXCL: Set this flag so that it won't be matched by normal * open in ll_md_blocking_ast(). Otherwise as ll_md_blocking_lease_ast - * doesn't deal with openhandle, so normal openhandle will be leaked. */ + * doesn't deal with openhandle, so normal openhandle will be leaked. + */ LDLM_FL_NO_LRU | LDLM_FL_EXCL); ll_finish_md_op_data(op_data); ptlrpc_req_finished(req); @@ -847,8 +865,8 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode, it.d.lustre.it_lock_bits != MDS_INODELOCK_OPEN) { /* open lock must return for lease */ CERROR(DFID "lease granted but no open lock, %d/%llu.\n", - PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode, - it.d.lustre.it_lock_bits); + PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode, + it.d.lustre.it_lock_bits); rc = -EPROTO; goto out_close; } @@ -864,7 +882,7 @@ out_close: /* cancel open lock */ if (it.d.lustre.it_lock_mode != 0) { ldlm_lock_decref_and_cancel(&och->och_lease_handle, - it.d.lustre.it_lock_mode); + it.d.lustre.it_lock_mode); it.d.lustre.it_lock_mode = 0; } out_release_it: @@ -886,19 +904,19 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode, int rc; lock = ldlm_handle2lock(&och->och_lease_handle); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); cancelled = ldlm_is_cancel(lock); 
unlock_res_and_lock(lock); ldlm_lock_put(lock); } - CDEBUG(D_INODE, "lease for "DFID" broken? %d\n", - PFID(&ll_i2info(inode)->lli_fid), cancelled); + CDEBUG(D_INODE, "lease for " DFID " broken? %d\n", + PFID(&ll_i2info(inode)->lli_fid), cancelled); if (!cancelled) ldlm_cli_cancel(&och->och_lease_handle, 0); - if (lease_broken != NULL) + if (lease_broken) *lease_broken = cancelled; rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och, @@ -914,7 +932,7 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp, struct obd_info oinfo = { }; int rc; - LASSERT(lsm != NULL); + LASSERT(lsm); oinfo.oi_md = lsm; oinfo.oi_oa = obdo; @@ -933,8 +951,8 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp, } set = ptlrpc_prep_set(); - if (set == NULL) { - CERROR("can't allocate ptlrpc set\n"); + if (!set) { + CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM); rc = -ENOMEM; } else { rc = obd_getattr_async(exp, &oinfo, set); @@ -986,7 +1004,8 @@ int ll_merge_lvb(const struct lu_env *env, struct inode *inode) ll_inode_size_lock(inode); /* merge timestamps the most recently obtained from mds with - timestamps obtained from osts */ + * timestamps obtained from osts + */ LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime; LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime; LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime; @@ -1009,8 +1028,8 @@ int ll_merge_lvb(const struct lu_env *env, struct inode *inode) if (lvb.lvb_mtime < attr->cat_mtime) lvb.lvb_mtime = attr->cat_mtime; - CDEBUG(D_VFSTRACE, DFID" updating i_size %llu\n", - PFID(&lli->lli_fid), attr->cat_size); + CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n", + PFID(&lli->lli_fid), attr->cat_size); cl_isize_write_nolock(inode, attr->cat_size); inode->i_blocks = attr->cat_blocks; @@ -1155,12 +1174,13 @@ restart: out: cl_io_fini(env, io); /* If any bit been read/written (result != 0), we just return - * short read/write instead of restart io. 
*/ + * short read/write instead of restart io. + */ if ((result == 0 || result == -ENODATA) && io->ci_need_restart) { CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zd\n", iot == CIT_READ ? "read" : "write", file, *ppos, count); - LASSERTF(io->ci_nob == 0, "%zd", io->ci_nob); + LASSERTF(io->ci_nob == 0, "%zd\n", io->ci_nob); goto restart; } @@ -1221,7 +1241,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from) args->u.normal.via_iocb = iocb; result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE, - &iocb->ki_pos, iov_iter_count(from)); + &iocb->ki_pos, iov_iter_count(from)); cl_env_put(env, &refcheck); return result; } @@ -1260,8 +1280,8 @@ static int ll_lov_recreate(struct inode *inode, struct ost_id *oi, u32 ost_idx) int rc = 0; struct lov_stripe_md *lsm = NULL, *lsm2; - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (oa == NULL) + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!oa) return -ENOMEM; lsm = ccc_inode_lsm_get(inode); @@ -1274,7 +1294,7 @@ static int ll_lov_recreate(struct inode *inode, struct ost_id *oi, u32 ost_idx) (lsm->lsm_stripe_count)); lsm2 = libcfs_kvzalloc(lsm_size, GFP_NOFS); - if (lsm2 == NULL) { + if (!lsm2) { rc = -ENOMEM; goto out; } @@ -1307,7 +1327,7 @@ static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg) if (!capable(CFS_CAP_SYS_ADMIN)) return -EPERM; - if (copy_from_user(&ucreat, (struct ll_recreate_obj *)arg, + if (copy_from_user(&ucreat, (struct ll_recreate_obj __user *)arg, sizeof(ucreat))) return -EFAULT; @@ -1325,7 +1345,7 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg) if (!capable(CFS_CAP_SYS_ADMIN)) return -EPERM; - if (copy_from_user(&fid, (struct lu_fid *)arg, sizeof(fid))) + if (copy_from_user(&fid, (struct lu_fid __user *)arg, sizeof(fid))) return -EFAULT; fid_to_ostid(&fid, &oi); @@ -1341,7 +1361,7 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry, int rc = 0; lsm = 
ccc_inode_lsm_get(inode); - if (lsm != NULL) { + if (lsm) { ccc_inode_lsm_put(inode, lsm); CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n", inode->i_ino); @@ -1401,18 +1421,16 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); /* checked by mdc_getattr_name */ lmmsize = body->eadatasize; if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) || - lmmsize == 0) { + lmmsize == 0) { rc = -ENODATA; goto out; } lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize); - LASSERT(lmm != NULL); if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) && (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) { @@ -1433,7 +1451,8 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename, stripe_count = 0; /* if function called for directory - we should - * avoid swab not existent lsm objects */ + * avoid swab not existent lsm objects + */ if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) { lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm); if (S_ISREG(body->mode)) @@ -1457,7 +1476,7 @@ out: } static int ll_lov_setea(struct inode *inode, struct file *file, - unsigned long arg) + unsigned long arg) { int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE; struct lov_user_md *lump; @@ -1469,16 +1488,16 @@ static int ll_lov_setea(struct inode *inode, struct file *file, return -EPERM; lump = libcfs_kvzalloc(lum_size, GFP_NOFS); - if (lump == NULL) + if (!lump) return -ENOMEM; - if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) { + if (copy_from_user(lump, (struct lov_user_md __user *)arg, lum_size)) { kvfree(lump); return -EFAULT; } rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, lump, - lum_size); + lum_size); cl_lov_delay_create_clear(&file->f_flags); kvfree(lump); @@ -1488,12 +1507,12 @@ static int ll_lov_setea(struct inode *inode, struct file *file, static int ll_lov_setstripe(struct inode *inode, struct file *file, 
unsigned long arg) { - struct lov_user_md_v3 lumv3; - struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3; - struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg; - struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg; - int lum_size, rc; - int flags = FMODE_WRITE; + struct lov_user_md_v3 lumv3; + struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3; + struct lov_user_md_v1 __user *lumv1p = (void __user *)arg; + struct lov_user_md_v3 __user *lumv3p = (void __user *)arg; + int lum_size, rc; + int flags = FMODE_WRITE; /* first try with v1 which is smaller than v3 */ lum_size = sizeof(struct lov_user_md_v1); @@ -1518,7 +1537,7 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file, ll_layout_refresh(inode, &gen); lsm = ccc_inode_lsm_get(inode); rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), - 0, lsm, (void *)arg); + 0, lsm, (void __user *)arg); ccc_inode_lsm_put(inode, lsm); } return rc; @@ -1530,9 +1549,9 @@ static int ll_lov_getstripe(struct inode *inode, unsigned long arg) int rc = -ENODATA; lsm = ccc_inode_lsm_get(inode); - if (lsm != NULL) + if (lsm) rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0, - lsm, (void *)arg); + lsm, (void __user *)arg); ccc_inode_lsm_put(inode, lsm); return rc; } @@ -1560,7 +1579,7 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg) spin_unlock(&lli->lli_lock); return -EINVAL; } - LASSERT(fd->fd_grouplock.cg_lock == NULL); + LASSERT(!fd->fd_grouplock.cg_lock); spin_unlock(&lli->lli_lock); rc = cl_get_grouplock(cl_i2info(inode)->lli_clob, @@ -1597,11 +1616,11 @@ static int ll_put_grouplock(struct inode *inode, struct file *file, CWARN("no group lock held\n"); return -EINVAL; } - LASSERT(fd->fd_grouplock.cg_lock != NULL); + LASSERT(fd->fd_grouplock.cg_lock); if (fd->fd_grouplock.cg_gid != arg) { CWARN("group lock %lu doesn't match current id %lu\n", - arg, fd->fd_grouplock.cg_gid); + arg, fd->fd_grouplock.cg_gid); 
spin_unlock(&lli->lli_lock); return -EINVAL; } @@ -1688,7 +1707,7 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap, } lsm = ccc_inode_lsm_get(inode); - if (lsm == NULL) + if (!lsm) return -ENOENT; /* If the stripe_count > 1 and the application does not understand @@ -1782,9 +1801,10 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) int rc = 0; /* Get the extent count so we can calculate the size of - * required fiemap buffer */ + * required fiemap buffer + */ if (get_user(extent_count, - &((struct ll_user_fiemap __user *)arg)->fm_extent_count)) + &((struct ll_user_fiemap __user *)arg)->fm_extent_count)) return -EFAULT; if (extent_count >= @@ -1794,7 +1814,7 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) sizeof(struct ll_fiemap_extent)); fiemap_s = libcfs_kvzalloc(num_bytes, GFP_NOFS); - if (fiemap_s == NULL) + if (!fiemap_s) return -ENOMEM; /* get the fiemap value */ @@ -1806,11 +1826,12 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) /* If fm_extent_count is non-zero, read the first extent since * it is used to calculate end_offset and device from previous - * fiemap call. */ + * fiemap call. + */ if (extent_count) { if (copy_from_user(&fiemap_s->fm_extents[0], - (char __user *)arg + sizeof(*fiemap_s), - sizeof(struct ll_fiemap_extent))) { + (char __user *)arg + sizeof(*fiemap_s), + sizeof(struct ll_fiemap_extent))) { rc = -EFAULT; goto error; } @@ -1826,7 +1847,7 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg) ret_bytes += (fiemap_s->fm_mapped_extents * sizeof(struct ll_fiemap_extent)); - if (copy_to_user((void *)arg, fiemap_s, ret_bytes)) + if (copy_to_user((void __user *)arg, fiemap_s, ret_bytes)) rc = -EFAULT; error: @@ -1917,13 +1938,14 @@ int ll_hsm_release(struct inode *inode) /* Release the file. * NB: lease lock handle is released in mdc_hsm_release_pack() because - * we still need it to pack l_remote_handle to MDT. 
*/ + * we still need it to pack l_remote_handle to MDT. + */ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och, &data_version); och = NULL; out: - if (och != NULL && !IS_ERR(och)) /* close the file */ + if (och && !IS_ERR(och)) /* close the file */ ll_lease_close(och, inode, NULL); return rc; @@ -2007,7 +2029,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, } /* to be able to restore mtime and atime after swap - * we need to first save them */ + * we need to first save them + */ if (lsl->sl_flags & (SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) { llss->ia1.ia_mtime = llss->inode1->i_mtime; @@ -2019,7 +2042,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, } /* ultimate check, before swapping the layouts we check if - * dataversion has changed (if requested) */ + * dataversion has changed (if requested) + */ if (llss->check_dv1) { rc = ll_data_version(llss->inode1, &dv, 0); if (rc) @@ -2042,9 +2066,11 @@ static int ll_swap_layouts(struct file *file1, struct file *file2, /* struct md_op_data is used to send the swap args to the mdt * only flags is missing, so we use struct mdc_swap_layouts - * through the md_op_data->op_data */ + * through the md_op_data->op_data + */ /* flags from user space have to be converted before they are send to - * server, no flag is sent today, they are only used on the client */ + * server, no flag is sent today, they are only used on the client + */ msl.msl_flags = 0; rc = -ENOMEM; op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0, @@ -2113,7 +2139,8 @@ static int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss) return -EINVAL; /* Non-root users are forbidden to set or clear flags which are - * NOT defined in HSM_USER_MASK. */ + * NOT defined in HSM_USER_MASK. 
+ */ if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) && !capable(CFS_CAP_SYS_ADMIN)) return -EPERM; @@ -2211,14 +2238,14 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) switch (cmd) { case LL_IOC_GETFLAGS: /* Get the current value of the file flags */ - return put_user(fd->fd_flags, (int *)arg); + return put_user(fd->fd_flags, (int __user *)arg); case LL_IOC_SETFLAGS: case LL_IOC_CLRFLAGS: /* Set or clear specific file flags */ /* XXX This probably needs checks to ensure the flags are * not abused, and to handle any flag side effects. */ - if (get_user(flags, (int *) arg)) + if (get_user(flags, (int __user *)arg)) return -EFAULT; if (cmd == LL_IOC_SETFLAGS) { @@ -2242,15 +2269,15 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct file *file2; struct lustre_swap_layouts lsl; - if (copy_from_user(&lsl, (char *)arg, - sizeof(struct lustre_swap_layouts))) + if (copy_from_user(&lsl, (char __user *)arg, + sizeof(struct lustre_swap_layouts))) return -EFAULT; if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */ return -EPERM; file2 = fget(lsl.sl_fd); - if (file2 == NULL) + if (!file2) return -EBADF; rc = -EPERM; @@ -2272,13 +2299,13 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return ll_iocontrol(inode, file, cmd, arg); case FSFILT_IOC_GETVERSION_OLD: case FSFILT_IOC_GETVERSION: - return put_user(inode->i_generation, (int *)arg); + return put_user(inode->i_generation, (int __user *)arg); case LL_IOC_GROUP_LOCK: return ll_get_grouplock(inode, file, arg); case LL_IOC_GROUP_UNLOCK: return ll_put_grouplock(inode, file, arg); case IOC_OBD_STATFS: - return ll_obd_statfs(inode, (void *)arg); + return ll_obd_statfs(inode, (void __user *)arg); /* We need to special case any other ioctls we want to handle, * to send them to the MDS/OST as appropriate and to properly @@ -2289,25 +2316,26 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case LL_IOC_FLUSHCTX: return 
ll_flush_ctx(inode); case LL_IOC_PATH2FID: { - if (copy_to_user((void *)arg, ll_inode2fid(inode), + if (copy_to_user((void __user *)arg, ll_inode2fid(inode), sizeof(struct lu_fid))) return -EFAULT; return 0; } case OBD_IOC_FID2PATH: - return ll_fid2path(inode, (void *)arg); + return ll_fid2path(inode, (void __user *)arg); case LL_IOC_DATA_VERSION: { struct ioc_data_version idv; int rc; - if (copy_from_user(&idv, (char *)arg, sizeof(idv))) + if (copy_from_user(&idv, (char __user *)arg, sizeof(idv))) return -EFAULT; rc = ll_data_version(inode, &idv.idv_version, - !(idv.idv_flags & LL_DV_NOFLUSH)); + !(idv.idv_flags & LL_DV_NOFLUSH)); - if (rc == 0 && copy_to_user((char *) arg, &idv, sizeof(idv))) + if (rc == 0 && copy_to_user((char __user *)arg, &idv, + sizeof(idv))) return -EFAULT; return rc; @@ -2320,7 +2348,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (mdtidx < 0) return mdtidx; - if (put_user((int)mdtidx, (int *)arg)) + if (put_user(mdtidx, (int __user *)arg)) return -EFAULT; return 0; @@ -2347,7 +2375,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data), op_data, NULL); - if (copy_to_user((void *)arg, hus, sizeof(*hus))) + if (copy_to_user((void __user *)arg, hus, sizeof(*hus))) rc = -EFAULT; ll_finish_md_op_data(op_data); @@ -2358,7 +2386,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct hsm_state_set *hss; int rc; - hss = memdup_user((char *)arg, sizeof(*hss)); + hss = memdup_user((char __user *)arg, sizeof(*hss)); if (IS_ERR(hss)) return PTR_ERR(hss); @@ -2386,7 +2414,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data), op_data, NULL); - if (copy_to_user((char *)arg, hca, sizeof(*hca))) + if (copy_to_user((char __user *)arg, hca, sizeof(*hca))) rc = -EFAULT; ll_finish_md_op_data(op_data); @@ -2412,13 +2440,13 @@ ll_file_ioctl(struct 
file *file, unsigned int cmd, unsigned long arg) break; case F_UNLCK: mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och != NULL) { + if (fd->fd_lease_och) { och = fd->fd_lease_och; fd->fd_lease_och = NULL; } mutex_unlock(&lli->lli_och_mutex); - if (och != NULL) { + if (och) { mode = och->och_flags & (FMODE_READ|FMODE_WRITE); rc = ll_lease_close(och, inode, &lease_broken); @@ -2443,12 +2471,12 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = 0; mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och == NULL) { + if (!fd->fd_lease_och) { fd->fd_lease_och = och; och = NULL; } mutex_unlock(&lli->lli_och_mutex); - if (och != NULL) { + if (och) { /* impossible now that only excl is supported for now */ ll_lease_close(och, inode, &lease_broken); rc = -EBUSY; @@ -2461,11 +2489,11 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = 0; mutex_lock(&lli->lli_och_mutex); - if (fd->fd_lease_och != NULL) { + if (fd->fd_lease_och) { struct obd_client_handle *och = fd->fd_lease_och; lock = ldlm_handle2lock(&och->och_lease_handle); - if (lock != NULL) { + if (lock) { lock_res_and_lock(lock); if (!ldlm_is_cancel(lock)) rc = och->och_flags & @@ -2480,7 +2508,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case LL_IOC_HSM_IMPORT: { struct hsm_user_import *hui; - hui = memdup_user((void *)arg, sizeof(*hui)); + hui = memdup_user((void __user *)arg, sizeof(*hui)); if (IS_ERR(hui)) return PTR_ERR(hui); @@ -2497,7 +2525,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return err; return obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL, - (void *)arg); + (void __user *)arg); } } } @@ -2536,15 +2564,17 @@ static int ll_flush(struct file *file, fl_owner_t id) LASSERT(!S_ISDIR(inode->i_mode)); /* catch async errors that were recorded back when async writeback - * failed for pages in this mapping. */ + * failed for pages in this mapping. 
+ */ rc = lli->lli_async_rc; lli->lli_async_rc = 0; err = lov_read_and_clear_async_rc(lli->lli_clob); if (rc == 0) rc = err; - /* The application has been told write failure already. - * Do not report failure again. */ + /* The application has been told about write failure already. + * Do not report failure again. + */ if (fd->fd_write_failed) return 0; return rc ? -EIO : 0; @@ -2612,7 +2642,8 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync) inode_lock(inode); /* catch async errors that were recorded back when async writeback - * failed for pages in this mapping. */ + * failed for pages in this mapping. + */ if (!S_ISDIR(inode->i_mode)) { err = lli->lli_async_rc; lli->lli_async_rc = 0; @@ -2683,7 +2714,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) * I guess between lockd processes) and then compares pid. * As such we assign pid to the owner field to make it all work, * conflict with normal locks is unlikely since pid space and - * pointer space for current->files are not intersecting */ + * pointer space for current->files are not intersecting + */ if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner) flock.l_flock.owner = (unsigned long)file_lock->fl_pid; @@ -2699,7 +2731,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) * order to process an unlock request we need all of the same * information that is given with a normal read or write record * lock request. To avoid creating another ldlm unlock (cancel) - * message we'll treat a LCK_NL flock request as an unlock. */ + * message we'll treat a LCK_NL flock request as an unlock. 
+ */ einfo.ei_mode = LCK_NL; break; case F_WRLCK: @@ -2707,7 +2740,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) break; default: CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n", - file_lock->fl_type); + file_lock->fl_type); return -ENOTSUPP; } @@ -2730,7 +2763,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) #endif flags = LDLM_FL_TEST_LOCK; /* Save the old mode so that if the mode in the lock changes we - * can decrement the appropriate reader or writer refcount. */ + * can decrement the appropriate reader or writer refcount. + */ file_lock->fl_type = einfo.ei_mode; break; default: @@ -2757,7 +2791,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock) if (rc2 && file_lock->fl_type != F_UNLCK) { einfo.ei_mode = LCK_NL; md_enqueue(sbi->ll_md_exp, &einfo, NULL, - op_data, &lockh, &flock, 0, NULL /* req */, flags); + op_data, &lockh, &flock, 0, NULL /* req */, flags); rc = rc2; } @@ -2782,11 +2816,12 @@ ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock) * \param l_req_mode [IN] searched lock mode * \retval boolean, true iff all bits are found */ -int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode) +int ll_have_md_lock(struct inode *inode, __u64 *bits, + enum ldlm_mode l_req_mode) { struct lustre_handle lockh; ldlm_policy_data_t policy; - ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ? + enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ? 
(LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode; struct lu_fid *fid; __u64 flags; @@ -2822,13 +2857,13 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode) return *bits == 0; } -ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits, - struct lustre_handle *lockh, __u64 flags, - ldlm_mode_t mode) +enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits, + struct lustre_handle *lockh, __u64 flags, + enum ldlm_mode mode) { ldlm_policy_data_t policy = { .l_inodebits = {bits} }; struct lu_fid *fid; - ldlm_mode_t rc; + enum ldlm_mode rc; fid = &ll_i2info(inode)->lli_fid; CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid)); @@ -2866,8 +2901,6 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) struct obd_export *exp; int rc = 0; - LASSERT(inode != NULL); - CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%pd\n", inode->i_ino, inode->i_generation, inode, dentry); @@ -2875,7 +2908,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) /* XXX: Enable OBD_CONNECT_ATTRFID to reduce unnecessary getattr RPC. * But under CMD case, it caused some lock issues, should be fixed - * with new CMD ibits lock. See bug 12718 */ + * with new CMD ibits lock. See bug 12718 + */ if (exp_connect_flags(exp) & OBD_CONNECT_ATTRFID) { struct lookup_intent oit = { .it_op = IT_GETATTR }; struct md_op_data *op_data; @@ -2893,7 +2927,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) oit.it_create_mode |= M_CHECK_STALE; rc = md_intent_lock(exp, op_data, NULL, 0, /* we are not interested in name - based lookup */ + * based lookup + */ &oit, 0, &req, ll_md_blocking_ast, 0); ll_finish_md_op_data(op_data); @@ -2910,9 +2945,10 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) } /* Unlinked? Unhash dentry, so it is not picked up later by - do_lookup() -> ll_revalidate_it(). We cannot use d_drop - here to preserve get_cwd functionality on 2.6. 
- Bug 10503 */ + * do_lookup() -> ll_revalidate_it(). We cannot use d_drop + * here to preserve get_cwd functionality on 2.6. + * Bug 10503 + */ if (!d_inode(dentry)->i_nlink) d_lustre_invalidate(dentry, 0); @@ -3026,26 +3062,33 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, sizeof(struct ll_fiemap_extent)); fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS); - if (fiemap == NULL) + if (!fiemap) return -ENOMEM; fiemap->fm_flags = fieinfo->fi_flags; fiemap->fm_extent_count = fieinfo->fi_extents_max; fiemap->fm_start = start; fiemap->fm_length = len; - if (extent_count > 0) - memcpy(&fiemap->fm_extents[0], fieinfo->fi_extents_start, - sizeof(struct ll_fiemap_extent)); + if (extent_count > 0 && + copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start, + sizeof(struct ll_fiemap_extent)) != 0) { + rc = -EFAULT; + goto out; + } rc = ll_do_fiemap(inode, fiemap, num_bytes); fieinfo->fi_flags = fiemap->fm_flags; fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents; - if (extent_count > 0) - memcpy(fieinfo->fi_extents_start, &fiemap->fm_extents[0], - fiemap->fm_mapped_extents * - sizeof(struct ll_fiemap_extent)); + if (extent_count > 0 && + copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0], + fiemap->fm_mapped_extents * + sizeof(struct ll_fiemap_extent)) != 0) { + rc = -EFAULT; + goto out; + } +out: kvfree(fiemap); return rc; } @@ -3067,13 +3110,12 @@ int ll_inode_permission(struct inode *inode, int mask) { int rc = 0; -#ifdef MAY_NOT_BLOCK if (mask & MAY_NOT_BLOCK) return -ECHILD; -#endif /* as root inode are NOT getting validated in lookup operation, - * need to do it before permission check. */ + * need to do it before permission check. 
+ */ if (is_root_inode(inode)) { rc = __ll_inode_revalidate(inode->i_sb->s_root, @@ -3173,8 +3215,7 @@ void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd) unsigned int size; struct llioc_data *in_data = NULL; - if (cb == NULL || cmd == NULL || - count > LLIOC_MAX_CMD || count < 0) + if (!cb || !cmd || count > LLIOC_MAX_CMD || count < 0) return NULL; size = sizeof(*in_data) + count * sizeof(unsigned int); @@ -3200,7 +3241,7 @@ void ll_iocontrol_unregister(void *magic) { struct llioc_data *tmp; - if (magic == NULL) + if (!magic) return; down_write(&llioc.ioc_sem); @@ -3254,7 +3295,7 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf) struct lu_env *env; int result; - if (lli->lli_clob == NULL) + if (!lli->lli_clob) return 0; env = cl_env_nested_get(&nest); @@ -3267,13 +3308,14 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf) if (conf->coc_opc == OBJECT_CONF_SET) { struct ldlm_lock *lock = conf->coc_lock; - LASSERT(lock != NULL); + LASSERT(lock); LASSERT(ldlm_has_layout(lock)); if (result == 0) { /* it can only be allowed to match after layout is * applied to inode otherwise false layout would be * seen. Applying layout should happen before dropping - * the intent lock. */ + * the intent lock. + */ ldlm_lock_allow_match(lock); } } @@ -3296,14 +3338,15 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock) PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY), lock->l_lvb_data, lock->l_lvb_len); - if ((lock->l_lvb_data != NULL) && (lock->l_flags & LDLM_FL_LVB_READY)) + if (lock->l_lvb_data && (lock->l_flags & LDLM_FL_LVB_READY)) return 0; /* if layout lock was granted right away, the layout is returned * within DLM_LVB of dlm reply; otherwise if the lock was ever * blocked and then granted via completion ast, we have to fetch * layout here. 
Please note that we can't use the LVB buffer in - * completion AST because it doesn't have a large enough buffer */ + * completion AST because it doesn't have a large enough buffer + */ rc = ll_get_default_mdsize(sbi, &lmmsize); if (rc == 0) rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), @@ -3313,7 +3356,7 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock) return rc; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -3325,20 +3368,20 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock) } lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize); - if (lmm == NULL) { + if (!lmm) { rc = -EFAULT; goto out; } lvbdata = libcfs_kvzalloc(lmmsize, GFP_NOFS); - if (lvbdata == NULL) { + if (!lvbdata) { rc = -ENOMEM; goto out; } memcpy(lvbdata, lmm, lmmsize); lock_res_and_lock(lock); - if (lock->l_lvb_data != NULL) + if (lock->l_lvb_data) kvfree(lock->l_lvb_data); lock->l_lvb_data = lvbdata; @@ -3354,8 +3397,8 @@ out: * Apply the layout to the inode. Layout lock is held and will be released * in this function. 
*/ -static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, - struct inode *inode, __u32 *gen, bool reconf) +static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode, + struct inode *inode, __u32 *gen, bool reconf) { struct ll_inode_info *lli = ll_i2info(inode); struct ll_sb_info *sbi = ll_i2sbi(inode); @@ -3369,10 +3412,10 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, LASSERT(lustre_handle_is_used(lockh)); lock = ldlm_handle2lock(lockh); - LASSERT(lock != NULL); + LASSERT(lock); LASSERT(ldlm_has_layout(lock)); - LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d.\n", + LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d", inode, PFID(&lli->lli_fid), reconf); /* in case this is a caching lock and reinstate with new inode */ @@ -3382,12 +3425,14 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY); unlock_res_and_lock(lock); /* checking lvb_ready is racy but this is okay. The worst case is - * that multi processes may configure the file on the same time. */ + * that multi processes may configure the file on the same time. + */ if (lvb_ready || !reconf) { rc = -ENODATA; if (lvb_ready) { /* layout_gen must be valid if layout lock is not - * cancelled and stripe has already set */ + * cancelled and stripe has already set + */ *gen = ll_layout_version_get(lli); rc = 0; } @@ -3401,26 +3446,28 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, /* for layout lock, lmm is returned in lock's lvb. * lvb_data is immutable if the lock is held so it's safe to access it * without res lock. 
See the description in ldlm_lock_decref_internal() - * for the condition to free lvb_data of layout lock */ - if (lock->l_lvb_data != NULL) { + * for the condition to free lvb_data of layout lock + */ + if (lock->l_lvb_data) { rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm, lock->l_lvb_data, lock->l_lvb_len); if (rc >= 0) { *gen = LL_LAYOUT_GEN_EMPTY; - if (md.lsm != NULL) + if (md.lsm) *gen = md.lsm->lsm_layout_gen; rc = 0; } else { - CERROR("%s: file "DFID" unpackmd error: %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(&lli->lli_fid), rc); + CERROR("%s: file " DFID " unpackmd error: %d\n", + ll_get_fsname(inode->i_sb, NULL, 0), + PFID(&lli->lli_fid), rc); } } if (rc < 0) goto out; /* set layout to file. Unlikely this will fail as old layout was - * surely eliminated */ + * surely eliminated + */ memset(&conf, 0, sizeof(conf)); conf.coc_opc = OBJECT_CONF_SET; conf.coc_inode = inode; @@ -3428,7 +3475,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode, conf.u.coc_md = &md; rc = ll_layout_conf(inode, &conf); - if (md.lsm != NULL) + if (md.lsm) obd_free_memmd(sbi->ll_dt_exp, &md.lsm); /* refresh layout failed, need to wait */ @@ -3440,9 +3487,9 @@ out: /* wait for IO to complete if it's still being used. 
*/ if (wait_layout) { - CDEBUG(D_INODE, "%s: %p/"DFID" wait for layout reconf.\n", - ll_get_fsname(inode->i_sb, NULL, 0), - inode, PFID(&lli->lli_fid)); + CDEBUG(D_INODE, "%s: %p/" DFID " wait for layout reconf.\n", + ll_get_fsname(inode->i_sb, NULL, 0), + inode, PFID(&lli->lli_fid)); memset(&conf, 0, sizeof(conf)); conf.coc_opc = OBJECT_CONF_WAIT; @@ -3451,8 +3498,8 @@ out: if (rc == 0) rc = -EAGAIN; - CDEBUG(D_INODE, "file: "DFID" waiting layout return: %d.\n", - PFID(&lli->lli_fid), rc); + CDEBUG(D_INODE, "file: " DFID " waiting layout return: %d.\n", + PFID(&lli->lli_fid), rc); } return rc; } @@ -3477,7 +3524,7 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen) struct md_op_data *op_data; struct lookup_intent it; struct lustre_handle lockh; - ldlm_mode_t mode; + enum ldlm_mode mode; struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS, .ei_mode = LCK_CR, @@ -3499,7 +3546,8 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen) again: /* mostly layout lock is caching on the local side, so try to match - * it before grabbing layout lock mutex. */ + * it before grabbing layout lock mutex. 
+ */ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0, LCK_CR | LCK_CW | LCK_PR | LCK_PW); if (mode != 0) { /* hit cached lock */ @@ -3512,7 +3560,7 @@ again: } op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, - 0, 0, LUSTRE_OPC_ANY, NULL); + 0, 0, LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) { mutex_unlock(&lli->lli_layout_mutex); return PTR_ERR(op_data); @@ -3523,14 +3571,13 @@ again: it.it_op = IT_LAYOUT; lockh.cookie = 0ULL; - LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/"DFID".\n", - ll_get_fsname(inode->i_sb, NULL, 0), inode, + LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/" DFID "", + ll_get_fsname(inode->i_sb, NULL, 0), inode, PFID(&lli->lli_fid)); rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh, NULL, 0, NULL, 0); - if (it.d.lustre.it_data != NULL) - ptlrpc_req_finished(it.d.lustre.it_data); + ptlrpc_req_finished(it.d.lustre.it_data); it.d.lustre.it_data = NULL; ll_finish_md_op_data(op_data); diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c index 3f348a3aa..a55ac4dcc 100644 --- a/drivers/staging/lustre/lustre/llite/llite_close.c +++ b/drivers/staging/lustre/lustre/llite/llite_close.c @@ -52,9 +52,8 @@ void vvp_write_pending(struct ccc_object *club, struct ccc_page *page) spin_lock(&lli->lli_lock); lli->lli_flags |= LLIF_SOM_DIRTY; - if (page != NULL && list_empty(&page->cpg_pending_linkage)) - list_add(&page->cpg_pending_linkage, - &club->cob_pending_list); + if (page && list_empty(&page->cpg_pending_linkage)) + list_add(&page->cpg_pending_linkage, &club->cob_pending_list); spin_unlock(&lli->lli_lock); } @@ -65,7 +64,7 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page) int rc = 0; spin_lock(&lli->lli_lock); - if (page != NULL && !list_empty(&page->cpg_pending_linkage)) { + if (page && !list_empty(&page->cpg_pending_linkage)) { list_del_init(&page->cpg_pending_linkage); rc = 1; } @@ -76,7 +75,8 @@ void 
vvp_write_complete(struct ccc_object *club, struct ccc_page *page) /** Queues DONE_WRITING if * - done writing is allowed; - * - inode has no no dirty pages; */ + * - inode has no no dirty pages; + */ void ll_queue_done_writing(struct inode *inode, unsigned long flags) { struct ll_inode_info *lli = ll_i2info(inode); @@ -106,7 +106,8 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags) * close() happen, epoch is closed as the inode is marked as * LLIF_EPOCH_PENDING. When pages are written inode should not * be inserted into the queue again, clear this flag to avoid - * it. */ + * it. + */ lli->lli_flags &= ~LLIF_DONE_WRITING; wake_up(&lcq->lcq_waitq); @@ -144,10 +145,11 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data, spin_lock(&lli->lli_lock); if (!(list_empty(&club->cob_pending_list))) { if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) { - LASSERT(*och != NULL); - LASSERT(lli->lli_pending_och == NULL); + LASSERT(*och); + LASSERT(!lli->lli_pending_och); /* Inode is dirty and there is no pending write done - * request yet, DONE_WRITE is to be sent later. */ + * request yet, DONE_WRITE is to be sent later. + */ lli->lli_flags |= LLIF_EPOCH_PENDING; lli->lli_pending_och = *och; spin_unlock(&lli->lli_lock); @@ -159,7 +161,8 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data, if (flags & LLIF_DONE_WRITING) { /* Some pages are still dirty, it is early to send * DONE_WRITE. Wait until all pages will be flushed - * and try DONE_WRITE again later. */ + * and try DONE_WRITE again later. + */ LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING)); lli->lli_flags |= LLIF_DONE_WRITING; spin_unlock(&lli->lli_lock); @@ -187,7 +190,8 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data, } /* There is a pending DONE_WRITE -- close epoch with no - * attribute change. */ + * attribute change. 
+ */ if (lli->lli_flags & LLIF_EPOCH_PENDING) { spin_unlock(&lli->lli_lock); goto out; @@ -215,13 +219,13 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data) struct obdo *oa; int rc; - LASSERT(op_data != NULL); + LASSERT(op_data); if (lli->lli_flags & LLIF_MDS_SIZE_LOCK) CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n", inode->i_ino, inode->i_generation, lli->lli_flags); - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); if (!oa) { CERROR("can't allocate memory for Size-on-MDS update.\n"); return -ENOMEM; @@ -266,7 +270,7 @@ static void ll_prepare_done_writing(struct inode *inode, { ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING); /* If there is no @och, we do not do D_W yet. */ - if (*och == NULL) + if (!*och) return; ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh); @@ -289,13 +293,14 @@ static void ll_done_writing(struct inode *inode) ll_prepare_done_writing(inode, op_data, &och); /* If there is no @och, we do not do D_W yet. */ - if (och == NULL) + if (!och) goto out; rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL); if (rc == -EAGAIN) /* MDS has instructed us to obtain Size-on-MDS attribute from - * OSTs and send setattr to back to MDS. */ + * OSTs and send setattr to back to MDS. 
+ */ rc = ll_som_update(inode, op_data); else if (rc) CERROR("inode %lu mdc done_writing failed: rc = %d\n", @@ -316,7 +321,7 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq) if (!list_empty(&lcq->lcq_head)) { lli = list_entry(lcq->lcq_head.next, struct ll_inode_info, - lli_close_list); + lli_close_list); list_del_init(&lli->lli_close_list); } else if (atomic_read(&lcq->lcq_stop)) lli = ERR_PTR(-EALREADY); diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index 845e992ca..e3c0f1dd4 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -93,9 +93,10 @@ struct ll_remote_perm { gid_t lrp_gid; uid_t lrp_fsuid; gid_t lrp_fsgid; - int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this - is access permission with - lrp_fsuid/lrp_fsgid. */ + int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this + * is access permission with + * lrp_fsuid/lrp_fsgid. + */ }; enum lli_flags { @@ -106,7 +107,8 @@ enum lli_flags { /* DONE WRITING is allowed. */ LLIF_DONE_WRITING = (1 << 2), /* Sizeon-on-MDS attributes are changed. An attribute update needs to - * be sent to MDS. */ + * be sent to MDS. + */ LLIF_SOM_DIRTY = (1 << 3), /* File data is modified. */ LLIF_DATA_MODIFIED = (1 << 4), @@ -130,22 +132,23 @@ struct ll_inode_info { /* identifying fields for both metadata and data stacks. */ struct lu_fid lli_fid; /* Parent fid for accessing default stripe data on parent directory - * for allocating OST objects after a mknod() and later open-by-FID. */ + * for allocating OST objects after a mknod() and later open-by-FID. 
+ */ struct lu_fid lli_pfid; - struct list_head lli_close_list; - /* open count currently used by capability only, indicate whether - * capability needs renewal */ - atomic_t lli_open_count; + struct list_head lli_close_list; + unsigned long lli_rmtperm_time; /* handle is to be sent to MDS later on done_writing and setattr. * Open handle data are needed for the recovery to reconstruct - * the inode state on the MDS. XXX: recovery is not ready yet. */ + * the inode state on the MDS. XXX: recovery is not ready yet. + */ struct obd_client_handle *lli_pending_och; /* We need all three because every inode may be opened in different - * modes */ + * modes + */ struct obd_client_handle *lli_mds_read_och; struct obd_client_handle *lli_mds_write_och; struct obd_client_handle *lli_mds_exec_och; @@ -162,7 +165,8 @@ struct ll_inode_info { spinlock_t lli_agl_lock; /* Try to make the d::member and f::member are aligned. Before using - * these members, make clear whether it is directory or not. */ + * these members, make clear whether it is directory or not. + */ union { /* for directory */ struct { @@ -173,13 +177,15 @@ struct ll_inode_info { /* since parent-child threads can share the same @file * struct, "opendir_key" is the token when dir close for * case of parent exit before child -- it is me should - * cleanup the dir readahead. */ + * cleanup the dir readahead. + */ void *d_opendir_key; struct ll_statahead_info *d_sai; /* protect statahead stuff. */ spinlock_t d_sa_lock; - /* "opendir_pid" is the token when lookup/revalid - * -- I am the owner of dir statahead. */ + /* "opendir_pid" is the token when lookup/revalidate + * -- I am the owner of dir statahead. 
+ */ pid_t d_opendir_pid; } d; @@ -281,11 +287,8 @@ static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen) int ll_xattr_cache_destroy(struct inode *inode); -int ll_xattr_cache_get(struct inode *inode, - const char *name, - char *buffer, - size_t size, - __u64 valid); +int ll_xattr_cache_get(struct inode *inode, const char *name, + char *buffer, size_t size, __u64 valid); /* * Locking to guarantee consistency of non-atomic updates to long long i_size, @@ -305,11 +308,12 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode) } /* default to about 40meg of readahead on a given system. That much tied - * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */ -#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT)) + * up in 512k readahead requests serviced at 40ms each is about 1GB/s. + */ +#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT)) /* default to read-ahead full files smaller than 2MB on the second read */ -#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT)) +#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT)) enum ra_stat { RA_STAT_HIT = 0, @@ -344,11 +348,13 @@ struct ra_io_arg { unsigned long ria_end; /* end offset of read-ahead*/ /* If stride read pattern is detected, ria_stoff means where * stride read is started. Note: for normal read-ahead, the - * value here is meaningless, and also it will not be accessed*/ + * value here is meaningless, and also it will not be accessed + */ pgoff_t ria_stoff; /* ria_length and ria_pages are the length and pages length in the * stride I/O mode. And they will also be used to check whether - * it is stride I/O read-ahead in the read-ahead pages*/ + * it is stride I/O read-ahead in the read-ahead pages + */ unsigned long ria_length; unsigned long ria_pages; }; @@ -455,7 +461,8 @@ struct eacl_table { struct ll_sb_info { /* this protects pglist and ra_info. 
It isn't safe to - * grab from interrupt contexts */ + * grab from interrupt contexts + */ spinlock_t ll_lock; spinlock_t ll_pp_extent_lock; /* pp_extent entry*/ spinlock_t ll_process_lock; /* ll_rw_process_info */ @@ -468,10 +475,8 @@ struct ll_sb_info { int ll_flags; unsigned int ll_umounting:1, ll_xattr_cache_enabled:1; - struct list_head ll_conn_chain; /* per-conn chain of SBs */ struct lustre_client_ocd ll_lco; - struct list_head ll_orphan_dentry_list; /*please don't ask -p*/ struct ll_close_queue *ll_lcq; struct lprocfs_stats *ll_stats; /* lprocfs stats counter */ @@ -502,13 +507,16 @@ struct ll_sb_info { /* metadata stat-ahead */ unsigned int ll_sa_max; /* max statahead RPCs */ atomic_t ll_sa_total; /* statahead thread started - * count */ + * count + */ atomic_t ll_sa_wrong; /* statahead thread stopped for - * low hit ratio */ + * low hit ratio + */ atomic_t ll_agl_total; /* AGL thread started count */ - dev_t ll_sdev_orig; /* save s_dev before assign for - * clustered nfs */ + dev_t ll_sdev_orig; /* save s_dev before assign for + * clustered nfs + */ struct rmtacl_ctl_table ll_rct; struct eacl_table ll_et; __kernel_fsid_t ll_fsid; @@ -619,13 +627,15 @@ struct ll_file_data { __u32 fd_flags; fmode_t fd_omode; /* openhandle if lease exists for this file. - * Borrow lli->lli_och_mutex to protect assignment */ + * Borrow lli->lli_och_mutex to protect assignment + */ struct obd_client_handle *fd_lease_och; struct obd_client_handle *fd_och; struct file *fd_file; /* Indicate whether need to report failure when close. * true: failure is known, not report again. - * false: unknown failure, should report. */ + * false: unknown failure, should report. 
+ */ bool fd_write_failed; }; @@ -647,7 +657,7 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi) #if BITS_PER_LONG == 32 return 1; #elif defined(CONFIG_COMPAT) - return unlikely(is_compat_task() || (sbi->ll_flags & LL_SBI_32BIT_API)); + return unlikely(in_compat_syscall() || (sbi->ll_flags & LL_SBI_32BIT_API)); #else return unlikely(sbi->ll_flags & LL_SBI_32BIT_API); #endif @@ -705,10 +715,10 @@ extern struct file_operations ll_file_operations_flock; extern struct file_operations ll_file_operations_noflock; extern const struct inode_operations ll_file_inode_operations; int ll_have_md_lock(struct inode *inode, __u64 *bits, - ldlm_mode_t l_req_mode); -ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits, - struct lustre_handle *lockh, __u64 flags, - ldlm_mode_t mode); + enum ldlm_mode l_req_mode); +enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits, + struct lustre_handle *lockh, __u64 flags, + enum ldlm_mode mode); int ll_file_open(struct inode *inode, struct file *file); int ll_file_release(struct inode *inode, struct file *file); int ll_glimpse_ioctl(struct ll_sb_info *sbi, @@ -782,7 +792,7 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry); void ll_dirty_page_discard_warn(struct page *page, int ioret); int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, struct super_block *, struct lookup_intent *); -int ll_obd_statfs(struct inode *inode, void *arg); +int ll_obd_statfs(struct inode *inode, void __user *arg); int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize); int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize); int ll_process_config(struct lustre_cfg *lcfg); @@ -796,7 +806,7 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen); void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req); /* llite/llite_nfs.c */ -extern struct export_operations lustre_export_operations; +extern const struct export_operations lustre_export_operations; 
__u32 get_uuid2int(const char *name, int len); void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid); struct inode *search_inode_for_lustre(struct super_block *sb, @@ -913,7 +923,7 @@ static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env) struct vvp_thread_info *info; info = lu_context_key_get(&env->le_ctx, &vvp_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -937,7 +947,7 @@ static inline struct vvp_session *vvp_env_session(const struct lu_env *env) struct vvp_session *ses; ses = lu_context_key_get(env->le_ses, &vvp_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -957,21 +967,21 @@ int ll_close_thread_start(struct ll_close_queue **lcq_ret); int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last); int ll_file_mmap(struct file *file, struct vm_area_struct *vma); -void policy_from_vma(ldlm_policy_data_t *policy, - struct vm_area_struct *vma, unsigned long addr, size_t count); +void policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma, + unsigned long addr, size_t count); struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, size_t count); static inline void ll_invalidate_page(struct page *vmpage) { struct address_space *mapping = vmpage->mapping; - loff_t offset = vmpage->index << PAGE_CACHE_SHIFT; + loff_t offset = vmpage->index << PAGE_SHIFT; LASSERT(PageLocked(vmpage)); - if (mapping == NULL) + if (!mapping) return; - ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE); + ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE); truncate_complete_page(mapping, vmpage); } @@ -993,7 +1003,7 @@ static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi) { struct obd_device *obd = sbi->ll_md_exp->exp_obd; - if (obd == NULL) + if (!obd) LBUG(); return &obd->u.cli; } @@ -1018,7 +1028,7 @@ static inline struct lu_fid *ll_inode2fid(struct inode *inode) { struct lu_fid *fid; - LASSERT(inode != NULL); + LASSERT(inode); fid = 
&ll_i2info(inode)->lli_fid; return fid; @@ -1107,39 +1117,44 @@ static inline u64 rce_ops2valid(int ops) struct ll_statahead_info { struct inode *sai_inode; atomic_t sai_refcount; /* when access this struct, hold - * refcount */ + * refcount + */ unsigned int sai_generation; /* generation for statahead */ unsigned int sai_max; /* max ahead of lookup */ __u64 sai_sent; /* stat requests sent count */ __u64 sai_replied; /* stat requests which received - * reply */ + * reply + */ __u64 sai_index; /* index of statahead entry */ __u64 sai_index_wait; /* index of entry which is the - * caller is waiting for */ + * caller is waiting for + */ __u64 sai_hit; /* hit count */ __u64 sai_miss; /* miss count: - * for "ls -al" case, it includes - * hidden dentry miss; - * for "ls -l" case, it does not - * include hidden dentry miss. - * "sai_miss_hidden" is used for - * the later case. - */ + * for "ls -al" case, it includes + * hidden dentry miss; + * for "ls -l" case, it does not + * include hidden dentry miss. + * "sai_miss_hidden" is used for + * the later case. 
+ */ unsigned int sai_consecutive_miss; /* consecutive miss */ unsigned int sai_miss_hidden;/* "ls -al", but first dentry - * is not a hidden one */ + * is not a hidden one + */ unsigned int sai_skip_hidden;/* skipped hidden dentry count */ unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for - * hidden entries */ + * hidden entries + */ sai_agl_valid:1;/* AGL is valid for the dir */ - wait_queue_head_t sai_waitq; /* stat-ahead wait queue */ + wait_queue_head_t sai_waitq; /* stat-ahead wait queue */ struct ptlrpc_thread sai_thread; /* stat-ahead thread */ struct ptlrpc_thread sai_agl_thread; /* AGL thread */ - struct list_head sai_entries; /* entry list */ - struct list_head sai_entries_received; /* entries returned */ - struct list_head sai_entries_stated; /* entries stated */ - struct list_head sai_entries_agl; /* AGL entries to be sent */ - struct list_head sai_cache[LL_SA_CACHE_SIZE]; + struct list_head sai_entries; /* entry list */ + struct list_head sai_entries_received; /* entries returned */ + struct list_head sai_entries_stated; /* entries stated */ + struct list_head sai_entries_agl; /* AGL entries to be sent */ + struct list_head sai_cache[LL_SA_CACHE_SIZE]; spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE]; atomic_t sai_cache_count; /* entry count in cache */ }; @@ -1171,8 +1186,8 @@ ll_statahead_mark(struct inode *dir, struct dentry *dentry) if (lli->lli_opendir_pid != current_pid()) return; - LASSERT(ldd != NULL); - if (sai != NULL) + LASSERT(ldd); + if (sai) ldd->lld_sa_generation = sai->sai_generation; } @@ -1191,7 +1206,7 @@ d_need_statahead(struct inode *dir, struct dentry *dentryp) return -EAGAIN; /* statahead has been stopped */ - if (lli->lli_opendir_key == NULL) + if (!lli->lli_opendir_key) return -EAGAIN; ldd = ll_d2d(dentryp); @@ -1313,13 +1328,15 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end, /** direct write pages */ struct ll_dio_pages { /** page array to be written. 
we don't support - * partial pages except the last one. */ + * partial pages except the last one. + */ struct page **ldp_pages; /* offset of each page */ loff_t *ldp_offsets; /** if ldp_offsets is NULL, it means a sequential * pages to be written, then this is the file offset - * of the * first page. */ + * of the first page. + */ loff_t ldp_start_offset; /** how many bytes are to be written. */ size_t ldp_size; @@ -1345,7 +1362,6 @@ static inline int ll_file_nolock(const struct file *file) struct ll_file_data *fd = LUSTRE_FPRIVATE(file); struct inode *inode = file_inode(file); - LASSERT(fd != NULL); return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) || (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK)); } @@ -1362,7 +1378,8 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode, * remote MDT, where the object is, will grant * UPDATE|PERM lock. The inode will be attached to both * LOOKUP and PERM locks, so revoking either locks will - * case the dcache being cleared */ + * case the dcache being cleared + */ if (it->d.lustre.it_remote_lock_mode) { handle.cookie = it->d.lustre.it_remote_lock_handle; CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n", @@ -1383,7 +1400,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode, it->d.lustre.it_lock_set = 1; } - if (bits != NULL) + if (bits) *bits = it->d.lustre.it_lock_bits; } @@ -1401,14 +1418,14 @@ static inline int d_lustre_invalid(const struct dentry *dentry) { struct ll_dentry_data *lld = ll_d2d(dentry); - return (lld == NULL) || lld->lld_invalid; + return !lld || lld->lld_invalid; } static inline void __d_lustre_invalidate(struct dentry *dentry) { struct ll_dentry_data *lld = ll_d2d(dentry); - if (lld != NULL) + if (lld) lld->lld_invalid = 1; } @@ -1442,7 +1459,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested) static inline void d_lustre_revalidate(struct dentry *dentry) { spin_lock(&dentry->d_lock); - 
LASSERT(ll_d2d(dentry) != NULL); + LASSERT(ll_d2d(dentry)); ll_d2d(dentry)->lld_invalid = 0; spin_unlock(&dentry->d_lock); } diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index b2fc5b378..b57a99268 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -85,7 +85,7 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb) si_meminfo(&si); pages = si.totalram - si.totalhigh; - if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) + if (pages >> (20 - PAGE_SHIFT) < 512) lru_page_max = pages / 2; else lru_page_max = (pages / 4) * 3; @@ -102,8 +102,6 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb) sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file; sbi->ll_ra_info.ra_max_read_ahead_whole_pages = SBI_DEFAULT_READAHEAD_WHOLE_MAX; - INIT_LIST_HEAD(&sbi->ll_conn_chain); - INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list); ll_generate_random_uuid(uuid); class_uuid_unparse(uuid, &sbi->ll_sb_uuid); @@ -171,7 +169,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, return -ENOMEM; } - if (llite_root != NULL) { + if (llite_root) { err = ldebugfs_register_mountpoint(llite_root, sb, dt, md); if (err < 0) CERROR("could not register mount in <debugfs>/lustre/llite\n"); @@ -204,7 +202,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT)) /* flag mdc connection as lightweight, only used for test - * purpose, use with care */ + * purpose, use with care + */ data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT; data->ocd_ibits_known = MDS_INODELOCK_FULL; @@ -252,10 +251,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, /* For mount, we only need fs info from MDT0, and also in DNE, it * can make sure the client can be mounted as long as MDT0 is - * available */ + * available + */ err = 
obd_statfs(NULL, sbi->ll_md_exp, osfs, - cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), - OBD_STATFS_FOR_MDT0); + cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), + OBD_STATFS_FOR_MDT0); if (err) goto out_md_fid; @@ -265,18 +265,19 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, * we can access the MDC export directly and exp_connect_flags will * be non-zero, but if accessing an upgraded 2.1 server it will * have the correct flags filled in. - * XXX: fill in the LMV exp_connect_flags from MDC(s). */ + * XXX: fill in the LMV exp_connect_flags from MDC(s). + */ valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD; if (exp_connect_flags(sbi->ll_md_exp) != 0 && valid != CLIENT_CONNECT_MDT_REQD) { char *buf; - buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + buf = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) { err = -ENOMEM; goto out_md_fid; } - obd_connect_flags2str(buf, PAGE_CACHE_SIZE, + obd_connect_flags2str(buf, PAGE_SIZE, valid ^ CLIENT_CONNECT_MDT_REQD, ","); LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). 
Please upgrade server or downgrade client.\n", sbi->ll_md_exp->exp_obd->obd_name, buf); @@ -308,15 +309,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, } if (data->ocd_connect_flags & OBD_CONNECT_ACL) { -#ifdef MS_POSIXACL sb->s_flags |= MS_POSIXACL; -#endif sbi->ll_flags |= LL_SBI_ACL; } else { LCONSOLE_INFO("client wants to enable acl, but mdt not!\n"); -#ifdef MS_POSIXACL sb->s_flags &= ~MS_POSIXACL; -#endif sbi->ll_flags &= ~LL_SBI_ACL; } @@ -338,7 +335,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) sbi->ll_md_brw_size = data->ocd_brw_size; else - sbi->ll_md_brw_size = PAGE_CACHE_SIZE; + sbi->ll_md_brw_size = PAGE_SIZE; if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) { LCONSOLE_INFO("Layout lock feature supported.\n"); @@ -382,7 +379,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, /* OBD_CONNECT_CKSUM should always be set, even if checksums are * disabled by default, because it can still be enabled on the * fly via /sys. As a consequence, we still need to come to an - * agreement on the supported algorithms at connect time */ + * agreement on the supported algorithms at connect time + */ data->ocd_connect_flags |= OBD_CONNECT_CKSUM; if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY)) @@ -453,7 +451,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, #endif /* make root inode - * XXX: move this to after cbd setup? */ + * XXX: move this to after cbd setup? 
+ */ valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS; if (sbi->ll_flags & LL_SBI_RMT_CLIENT) valid |= OBD_MD_FLRMTPERM; @@ -493,7 +492,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, md_free_lustre_md(sbi->ll_md_exp, &lmd); ptlrpc_req_finished(request); - if (root == NULL || IS_ERR(root)) { + if (!(root)) { if (lmd.lsm) obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm); #ifdef CONFIG_FS_POSIX_ACL @@ -502,8 +501,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, lmd.posix_acl = NULL; } #endif - err = IS_ERR(root) ? PTR_ERR(root) : -EBADF; - root = NULL; + err = -EBADF; CERROR("lustre_lite: bad iget4 for root\n"); goto out_root; } @@ -532,9 +530,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, &sbi->ll_cache, NULL); sb->s_root = d_make_root(root); - if (sb->s_root == NULL) { + if (!sb->s_root) { CERROR("%s: can't make root dentry\n", - ll_get_fsname(sb, NULL, 0)); + ll_get_fsname(sb, NULL, 0)); err = -ENOMEM; goto out_lock_cn_cb; } @@ -543,11 +541,13 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, /* We set sb->s_dev equal on all lustre clients in order to support * NFS export clustering. NFSD requires that the FSID be the same - * on all clients. */ + * on all clients. + */ /* s_dev is also used in lt_compare() to compare two fs, but that is - * only a node-local comparison. */ + * only a node-local comparison. 
+ */ uuid = obd_get_uuid(sbi->ll_md_exp); - if (uuid != NULL) { + if (uuid) { sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid)); get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid); } @@ -597,7 +597,7 @@ int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize) size = sizeof(int); rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE), - KEY_DEFAULT_EASIZE, &size, lmmsize, NULL); + KEY_DEFAULT_EASIZE, &size, lmmsize, NULL); if (rc) CERROR("Get default mdsize error rc %d\n", rc); @@ -619,13 +619,12 @@ static void client_common_put_super(struct super_block *sb) cl_sb_fini(sb); - list_del(&sbi->ll_conn_chain); - obd_fid_fini(sbi->ll_dt_exp->exp_obd); obd_disconnect(sbi->ll_dt_exp); sbi->ll_dt_exp = NULL; /* wait till all OSCs are gone, since cl_cache is accessing sbi. - * see LU-2543. */ + * see LU-2543. + */ obd_zombie_barrier(); ldebugfs_unregister_mountpoint(sbi); @@ -646,7 +645,8 @@ void ll_kill_super(struct super_block *sb) sbi = ll_s2sbi(sb); /* we need to restore s_dev from changed for clustered NFS before * put_super because new kernels have cached s_dev and change sb->s_dev - * in put_super not affected real removing devices */ + * in put_super not affected real removing devices + */ if (sbi) { sb->s_dev = sbi->ll_sdev_orig; sbi->ll_umounting = 1; @@ -777,7 +777,7 @@ static int ll_options(char *options, int *flags) next: /* Find next opt */ s2 = strchr(s1, ','); - if (s2 == NULL) + if (!s2) break; s1 = s2 + 1; } @@ -797,7 +797,6 @@ void ll_lli_init(struct ll_inode_info *lli) /* Do not set lli_fid, it has been initialized already. 
*/ fid_zero(&lli->lli_pfid); INIT_LIST_HEAD(&lli->lli_close_list); - atomic_set(&lli->lli_open_count, 0); lli->lli_rmtperm_time = 0; lli->lli_pending_och = NULL; lli->lli_mds_read_och = NULL; @@ -890,8 +889,9 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) sb->s_d_op = &ll_d_ops; /* Generate a string unique to this super, in case some joker tries - to mount the same fs at two mount points. - Use the address of the super itself.*/ + * to mount the same fs at two mount points. + * Use the address of the super itself. + */ cfg->cfg_instance = sb; cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid; cfg->cfg_callback = class_config_llog_handler; @@ -904,7 +904,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */ lprof = class_get_profile(profilenm); - if (lprof == NULL) { + if (!lprof) { LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n", profilenm); err = -EINVAL; @@ -964,7 +964,8 @@ void ll_put_super(struct super_block *sb) } /* We need to set force before the lov_disconnect in - lustre_common_put_super, since l_d cleans up osc's as well. */ + * lustre_common_put_super, since l_d cleans up osc's as well. 
+ */ if (force) { next = 0; while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, @@ -1036,8 +1037,8 @@ void ll_clear_inode(struct inode *inode) if (S_ISDIR(inode->i_mode)) { /* these should have been cleared in ll_file_release */ - LASSERT(lli->lli_opendir_key == NULL); - LASSERT(lli->lli_sai == NULL); + LASSERT(!lli->lli_opendir_key); + LASSERT(!lli->lli_sai); LASSERT(lli->lli_opendir_pid == 0); } @@ -1065,7 +1066,7 @@ void ll_clear_inode(struct inode *inode) ll_xattr_cache_destroy(inode); if (sbi->ll_flags & LL_SBI_RMT_CLIENT) { - LASSERT(lli->lli_posix_acl == NULL); + LASSERT(!lli->lli_posix_acl); if (lli->lli_remote_perms) { free_rmtperm_hash(lli->lli_remote_perms); lli->lli_remote_perms = NULL; @@ -1074,7 +1075,7 @@ void ll_clear_inode(struct inode *inode) #ifdef CONFIG_FS_POSIX_ACL else if (lli->lli_posix_acl) { LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1); - LASSERT(lli->lli_remote_perms == NULL); + LASSERT(!lli->lli_remote_perms); posix_acl_release(lli->lli_posix_acl); lli->lli_posix_acl = NULL; } @@ -1095,7 +1096,7 @@ void ll_clear_inode(struct inode *inode) #define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET) static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, - struct md_open_data **mod) + struct md_open_data **mod) { struct lustre_md md; struct inode *inode = d_inode(dentry); @@ -1115,7 +1116,8 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, if (rc == -ENOENT) { clear_nlink(inode); /* Unlinked special device node? Or just a race? - * Pretend we done everything. */ + * Pretend we did everything. + */ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) { ia_valid = op_data->op_attr.ia_valid; @@ -1138,7 +1140,8 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data, ia_valid = op_data->op_attr.ia_valid; /* inode size will be in cl_setattr_ost, can't do it now since dirty - * cache is not cleared yet. */ + * cache is not cleared yet. 
+ */ op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE); rc = simple_setattr(dentry, &op_data->op_attr); op_data->op_attr.ia_valid = ia_valid; @@ -1161,7 +1164,6 @@ static int ll_setattr_done_writing(struct inode *inode, struct ll_inode_info *lli = ll_i2info(inode); int rc = 0; - LASSERT(op_data != NULL); if (!S_ISREG(inode->i_mode)) return 0; @@ -1175,7 +1177,8 @@ static int ll_setattr_done_writing(struct inode *inode, rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod); if (rc == -EAGAIN) /* MDS has instructed us to obtain Size-on-MDS attribute - * from OSTs and send setattr to back to MDS. */ + * from OSTs and send setattr to back to MDS. + */ rc = ll_som_update(inode, op_data); else if (rc) CERROR("inode %lu mdc truncate failed: rc = %d\n", @@ -1208,11 +1211,11 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) int rc = 0, rc1 = 0; CDEBUG(D_VFSTRACE, - "%s: setattr inode %p/fid:"DFID - " from %llu to %llu, valid %x, hsm_import %d\n", - ll_get_fsname(inode->i_sb, NULL, 0), inode, - PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size, - attr->ia_valid, hsm_import); + "%s: setattr inode %p/fid:" DFID + " from %llu to %llu, valid %x, hsm_import %d\n", + ll_get_fsname(inode->i_sb, NULL, 0), inode, + PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size, + attr->ia_valid, hsm_import); if (attr->ia_valid & ATTR_SIZE) { /* Check new size against VFS/VM file size limit and rlimit */ @@ -1222,7 +1225,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) /* The maximum Lustre file size is variable, based on the * OST maximum object size and number of stripes. This - * needs another check in addition to the VFS check above. */ + * needs another check in addition to the VFS check above. 
+ */ if (attr->ia_size > ll_file_maxbytes(inode)) { CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n", PFID(&lli->lli_fid), attr->ia_size, @@ -1270,7 +1274,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) } /* We always do an MDS RPC, even if we're only changing the size; - * only the MDS knows whether truncate() should fail with -ETXTBUSY */ + * only the MDS knows whether truncate() should fail with -ETXTBUSY + */ op_data = kzalloc(sizeof(*op_data), GFP_NOFS); if (!op_data) @@ -1304,7 +1309,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) /* if not in HSM import mode, clear size attr for released file * we clear the attribute send to MDT in op_data, not the original * received from caller in attr which is used later to - * decide return code */ + * decide return code + */ if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import) op_data->op_attr.ia_valid &= ~ATTR_SIZE; @@ -1322,7 +1328,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) } /* RPC to MDT is sent, cancel data modification flag */ - if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) { + if (op_data->op_bias & MDS_DATA_MODIFIED) { spin_lock(&lli->lli_lock); lli->lli_flags &= ~LLIF_DATA_MODIFIED; spin_unlock(&lli->lli_lock); @@ -1342,7 +1348,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) * extent lock (new_size:EOF for truncate). 
It may seem * excessive to send mtime/atime updates to OSTs when not * setting times to past, but it is necessary due to possible - * time de-synchronization between MDT inode and OST objects */ + * time de-synchronization between MDT inode and OST objects + */ if (attr->ia_valid & ATTR_SIZE) down_write(&lli->lli_trunc_sem); rc = cl_setattr_ost(inode, attr); @@ -1470,7 +1477,8 @@ int ll_statfs(struct dentry *de, struct kstatfs *sfs) /* We need to downshift for all 32-bit kernels, because we can't * tell if the kernel is being called via sys_statfs64() or not. * Stop before overflowing f_bsize - in which case it is better - * to just risk EOVERFLOW if caller is using old sys_statfs(). */ + * to just risk EOVERFLOW if caller is using old sys_statfs(). + */ if (sizeof(long) < 8) { while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) { sfs->f_bsize <<= 1; @@ -1514,7 +1522,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) struct ll_sb_info *sbi = ll_i2sbi(inode); LASSERT((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0)); - if (lsm != NULL) { + if (lsm) { if (!lli->lli_has_smd && !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK)) cl_file_inode_init(inode, md); @@ -1599,12 +1607,13 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) if (exp_connect_som(ll_i2mdexp(inode)) && S_ISREG(inode->i_mode)) { struct lustre_handle lockh; - ldlm_mode_t mode; + enum ldlm_mode mode; /* As it is possible a blocking ast has been processed * by this time, we need to check there is an UPDATE * lock on the client and set LLIF_MDS_SIZE_LOCK holding - * it. */ + * it. 
+ */ mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE, &lockh, LDLM_FL_CBPENDING, LCK_CR | LCK_CW | @@ -1617,7 +1626,8 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) inode->i_ino, lli->lli_flags); } else { /* Use old size assignment to avoid - * deadlock bz14138 & bz14326 */ + * deadlock bz14138 & bz14326 + */ i_size_write(inode, body->size); spin_lock(&lli->lli_lock); lli->lli_flags |= LLIF_MDS_SIZE_LOCK; @@ -1627,7 +1637,8 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md) } } else { /* Use old size assignment to avoid - * deadlock bz14138 & bz14326 */ + * deadlock bz14138 & bz14326 + */ i_size_write(inode, body->size); CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n", @@ -1657,7 +1668,8 @@ void ll_read_inode2(struct inode *inode, void *opaque) /* Core attributes from the MDS first. This is a new inode, and * the VFS doesn't zero times in the core inode so we have to do * it ourselves. They will be overwritten by either MDS or OST - * attributes - we just need to make sure they aren't newer. */ + * attributes - we just need to make sure they aren't newer. + */ LTIME_S(inode->i_mtime) = 0; LTIME_S(inode->i_atime) = 0; LTIME_S(inode->i_ctime) = 0; @@ -1689,9 +1701,10 @@ void ll_delete_inode(struct inode *inode) { struct cl_inode_info *lli = cl_i2info(inode); - if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL) + if (S_ISREG(inode->i_mode) && lli->lli_clob) /* discard all dirty pages before truncating them, required by - * osc_extent implementation at LU-1030. */ + * osc_extent implementation at LU-1030. 
+ */ cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_DISCARD, 1); @@ -1744,14 +1757,14 @@ int ll_iocontrol(struct inode *inode, struct file *file, ptlrpc_req_finished(req); - return put_user(flags, (int *)arg); + return put_user(flags, (int __user *)arg); } case FSFILT_IOC_SETFLAGS: { struct lov_stripe_md *lsm; struct obd_info oinfo = { }; struct md_op_data *op_data; - if (get_user(flags, (int *)arg)) + if (get_user(flags, (int __user *)arg)) return -EFAULT; op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, @@ -1776,8 +1789,7 @@ int ll_iocontrol(struct inode *inode, struct file *file, return 0; } - oinfo.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); + oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); if (!oinfo.oi_oa) { ccc_inode_lsm_put(inode, lsm); return -ENOMEM; @@ -1809,7 +1821,7 @@ int ll_flush_ctx(struct inode *inode) struct ll_sb_info *sbi = ll_i2sbi(inode); CDEBUG(D_SEC, "flush context for user %d\n", - from_kuid(&init_user_ns, current_uid())); + from_kuid(&init_user_ns, current_uid())); obd_set_info_async(NULL, sbi->ll_md_exp, sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX, @@ -1831,7 +1843,7 @@ void ll_umount_begin(struct super_block *sb) sb->s_count, atomic_read(&sb->s_active)); obd = class_exp2obd(sbi->ll_md_exp); - if (obd == NULL) { + if (!obd) { CERROR("Invalid MDC connection handle %#llx\n", sbi->ll_md_exp->exp_handle.h_cookie); return; @@ -1839,7 +1851,7 @@ void ll_umount_begin(struct super_block *sb) obd->obd_force = 1; obd = class_exp2obd(sbi->ll_dt_exp); - if (obd == NULL) { + if (!obd) { CERROR("Invalid LOV connection handle %#llx\n", sbi->ll_dt_exp->exp_handle.h_cookie); return; @@ -1920,13 +1932,8 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req) body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY); op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (!op_data) { - CWARN("%s: cannot allocate op_data to release open handle for " - DFID "\n", - ll_get_fsname(sb, NULL, 0), 
PFID(&body->fid1)); - + if (!op_data) return; - } op_data->op_fid1 = body->fid1; op_data->op_ioepoch = body->ioepoch; @@ -1941,7 +1948,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, struct super_block *sb, struct lookup_intent *it) { struct ll_sb_info *sbi = NULL; - struct lustre_md md; + struct lustre_md md = { NULL }; int rc; LASSERT(*inode || sb); @@ -1954,7 +1961,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, if (*inode) { ll_update_inode(*inode, &md); } else { - LASSERT(sb != NULL); + LASSERT(sb); /* * At this point server returns to client's same fid as client @@ -1965,15 +1972,14 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1, sbi->ll_flags & LL_SBI_32BIT_API), &md); - if (*inode == NULL || IS_ERR(*inode)) { + if (!*inode) { #ifdef CONFIG_FS_POSIX_ACL if (md.posix_acl) { posix_acl_release(md.posix_acl); md.posix_acl = NULL; } #endif - rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM; - *inode = NULL; + rc = -ENOMEM; CERROR("new_inode -fatal: rc %d\n", rc); goto out; } @@ -1986,14 +1992,15 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, * 1. proc1: mdt returns a lsm but not granting layout * 2. layout was changed by another client * 3. proc2: refresh layout and layout lock granted - * 4. proc1: to apply a stale layout */ - if (it != NULL && it->d.lustre.it_lock_mode != 0) { + * 4. 
proc1: to apply a stale layout + */ + if (it && it->d.lustre.it_lock_mode != 0) { struct lustre_handle lockh; struct ldlm_lock *lock; lockh.cookie = it->d.lustre.it_lock_handle; lock = ldlm_handle2lock(&lockh); - LASSERT(lock != NULL); + LASSERT(lock); if (ldlm_has_layout(lock)) { struct cl_object_conf conf; @@ -2008,7 +2015,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, } out: - if (md.lsm != NULL) + if (md.lsm) obd_free_memmd(sbi->ll_dt_exp, &md.lsm); md_free_lustre_md(sbi->ll_md_exp, &md); @@ -2019,14 +2026,13 @@ cleanup: return rc; } -int ll_obd_statfs(struct inode *inode, void *arg) +int ll_obd_statfs(struct inode *inode, void __user *arg) { struct ll_sb_info *sbi = NULL; struct obd_export *exp; char *buf = NULL; struct obd_ioctl_data *data = NULL; __u32 type; - __u32 flags; int len = 0, rc; if (!inode) { @@ -2069,8 +2075,7 @@ int ll_obd_statfs(struct inode *inode, void *arg) goto out_statfs; } - flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0; - rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags); + rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL); if (rc) goto out_statfs; out_statfs: @@ -2101,7 +2106,8 @@ int ll_process_config(struct lustre_cfg *lcfg) LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC); /* Note we have not called client_common_fill_super yet, so - proc fns must be able to handle that! */ + * proc fns must be able to handle that! + */ rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars, lcfg, sb); if (rc > 0) @@ -2111,19 +2117,17 @@ int ll_process_config(struct lustre_cfg *lcfg) /* this function prepares md_op_data hint for passing ot down to MD stack. 
*/ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, - struct inode *i1, struct inode *i2, - const char *name, int namelen, - int mode, __u32 opc, void *data) + struct inode *i1, struct inode *i2, + const char *name, int namelen, + int mode, __u32 opc, void *data) { - LASSERT(i1 != NULL); - if (namelen > ll_i2sbi(i1)->ll_namelen) return ERR_PTR(-ENAMETOOLONG); - if (op_data == NULL) + if (!op_data) op_data = kzalloc(sizeof(*op_data), GFP_NOFS); - if (op_data == NULL) + if (!op_data) return ERR_PTR(-ENOMEM); ll_i2gids(op_data->op_suppgids, i1, i2); @@ -2143,8 +2147,8 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, op_data->op_cap = cfs_curproc_cap_pack(); op_data->op_bias = 0; op_data->op_cli_flags = 0; - if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) && - filename_is_volatile(name, namelen, NULL)) + if ((opc == LUSTRE_OPC_CREATE) && name && + filename_is_volatile(name, namelen, NULL)) op_data->op_bias |= MDS_CREATE_VOLATILE; op_data->op_opc = opc; op_data->op_mds = 0; @@ -2152,7 +2156,8 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data, /* If the file is being opened after mknod() (normally due to NFS) * try to use the default stripe data from parent directory for - * allocating OST objects. Try to pass the parent FID to MDS. */ + * allocating OST objects. Try to pass the parent FID to MDS. 
+ */ if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) && !ll_i2info(i2)->lli_has_smd) { struct ll_inode_info *lli = ll_i2info(i2); @@ -2179,7 +2184,7 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry) { struct ll_sb_info *sbi; - LASSERT((seq != NULL) && (dentry != NULL)); + LASSERT(seq && dentry); sbi = ll_s2sbi(dentry->d_sb); if (sbi->ll_flags & LL_SBI_NOLCK) @@ -2221,8 +2226,8 @@ int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg) if (!obd) return -ENOENT; - if (copy_to_user((void *)arg, obd->obd_name, - strlen(obd->obd_name) + 1)) + if (copy_to_user((void __user *)arg, obd->obd_name, + strlen(obd->obd_name) + 1)) return -EFAULT; return 0; @@ -2240,10 +2245,11 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen) char *ptr; int len; - if (buf == NULL) { + if (!buf) { /* this means the caller wants to use static buffer * and it doesn't care about race. Usually this is - * in error reporting path */ + * in error reporting path + */ buf = fsname_static; buflen = sizeof(fsname_static); } @@ -2269,9 +2275,9 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret) /* this can be called inside spin lock so use GFP_ATOMIC. */ buf = (char *)__get_free_page(GFP_ATOMIC); - if (buf != NULL) { + if (buf) { dentry = d_find_alias(page->mapping->host); - if (dentry != NULL) + if (dentry) path = dentry_path_raw(dentry, buf, PAGE_SIZE); } @@ -2282,9 +2288,9 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret) PFID(&obj->cob_header.coh_lu.loh_fid), (path && !IS_ERR(path)) ? 
path : "", ioret); - if (dentry != NULL) + if (dentry) dput(dentry); - if (buf != NULL) + if (buf) free_page((unsigned long)buf); } diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c index bbae95c9f..5b484e62f 100644 --- a/drivers/staging/lustre/lustre/llite/llite_mmap.c +++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c @@ -54,11 +54,11 @@ static const struct vm_operations_struct ll_file_vm_ops; void policy_from_vma(ldlm_policy_data_t *policy, - struct vm_area_struct *vma, unsigned long addr, - size_t count) + struct vm_area_struct *vma, unsigned long addr, + size_t count) { policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + - (vma->vm_pgoff << PAGE_CACHE_SHIFT); + (vma->vm_pgoff << PAGE_SHIFT); policy->l_extent.end = (policy->l_extent.start + count - 1) | ~CFS_PAGE_MASK; } @@ -72,7 +72,7 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, LASSERT(!down_write_trylock(&mm->mmap_sem)); for (vma = find_vma(mm, addr); - vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) { + vma && vma->vm_start < (addr + count); vma = vma->vm_next) { if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops && vma->vm_flags & VM_SHARED) { ret = vma; @@ -119,13 +119,13 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, */ env = cl_env_nested_get(nest); if (IS_ERR(env)) - return ERR_PTR(-EINVAL); + return ERR_PTR(-EINVAL); *env_ret = env; io = ccc_env_thread_io(env); io->ci_obj = ll_i2info(inode)->lli_clob; - LASSERT(io->ci_obj != NULL); + LASSERT(io->ci_obj); fio = &io->u.ci_fault; fio->ft_index = index; @@ -136,7 +136,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, * the kernel will not read other pages not covered by ldlm in * filemap_nopage. we do our readahead in ll_readpage. 
*/ - if (ra_flags != NULL) + if (ra_flags) *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ); vma->vm_flags &= ~VM_SEQ_READ; vma->vm_flags |= VM_RAND_READ; @@ -151,8 +151,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, LASSERT(cio->cui_cl.cis_io == io); - /* mmap lock must be MANDATORY it has to cache - * pages. */ + /* mmap lock must be MANDATORY it has to cache pages. */ io->ci_lockreq = CILR_MANDATORY; cio->cui_fd = fd; } else { @@ -178,8 +177,6 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, struct inode *inode; struct ll_inode_info *lli; - LASSERT(vmpage != NULL); - io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL); if (IS_ERR(io)) { result = PTR_ERR(io); @@ -201,7 +198,8 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, /* we grab lli_trunc_sem to exclude truncate case. * Otherwise, we could add dirty pages into osc cache - * while truncate is on-going. */ + * while truncate is on-going. + */ inode = ccc_object_inode(io->ci_obj); lli = ll_i2info(inode); down_read(&lli->lli_trunc_sem); @@ -217,12 +215,13 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, struct ll_inode_info *lli = ll_i2info(inode); lock_page(vmpage); - if (vmpage->mapping == NULL) { + if (!vmpage->mapping) { unlock_page(vmpage); /* page was truncated and lock was cancelled, return * ENODATA so that VM_FAULT_NOPAGE will be returned - * to handle_mm_fault(). */ + * to handle_mm_fault(). 
+ */ if (result == 0) result = -ENODATA; } else if (!PageDirty(vmpage)) { @@ -315,13 +314,14 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) result = cl_io_loop(env, io); /* ft_flags are only valid if we reached - * the call to filemap_fault */ + * the call to filemap_fault + */ if (vio->u.fault.fault.ft_flags_valid) fault_ret = vio->u.fault.fault.ft_flags; vmpage = vio->u.fault.ft_vmpage; - if (result != 0 && vmpage != NULL) { - page_cache_release(vmpage); + if (result != 0 && vmpage) { + put_page(vmpage); vmf->page = NULL; } } @@ -344,9 +344,10 @@ static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf) int result; sigset_t set; - /* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite + /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite * so that it can be killed by admin but not cause segfault by - * other signals. */ + * other signals. + */ set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM)); restart: @@ -357,9 +358,9 @@ restart: /* check if this page has been truncated */ lock_page(vmpage); - if (unlikely(vmpage->mapping == NULL)) { /* unlucky */ + if (unlikely(!vmpage->mapping)) { /* unlucky */ unlock_page(vmpage); - page_cache_release(vmpage); + put_page(vmpage); vmf->page = NULL; if (!printed && ++count > 16) { @@ -447,7 +448,8 @@ static void ll_vm_close(struct vm_area_struct *vma) } /* XXX put nice comment here. 
talk about __free_pte -> dirty pages and - * nopage's reference passing to the pte */ + * nopage's reference passing to the pte + */ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last) { int rc = -ENOENT; @@ -455,7 +457,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last) LASSERTF(last > first, "last %llu first %llu\n", last, first); if (mapping_mapped(mapping)) { rc = 0; - unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1, + unmap_mapping_range(mapping, first + PAGE_SIZE - 1, last - first + 1, 0); } diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c index 18aab25f9..193aab879 100644 --- a/drivers/staging/lustre/lustre/llite/llite_nfs.c +++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c @@ -105,7 +105,8 @@ struct inode *search_inode_for_lustre(struct super_block *sb, return ERR_PTR(rc); /* Because inode is NULL, ll_prep_md_op_data can not - * be used here. So we allocate op_data ourselves */ + * be used here. So we allocate op_data ourselves + */ op_data = kzalloc(sizeof(*op_data), GFP_NOFS); if (!op_data) return ERR_PTR(-ENOMEM); @@ -141,10 +142,11 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren struct inode *inode; struct dentry *result; - CDEBUG(D_INFO, "Get dentry for fid: "DFID"\n", PFID(fid)); if (!fid_is_sane(fid)) return ERR_PTR(-ESTALE); + CDEBUG(D_INFO, "Get dentry for fid: " DFID "\n", PFID(fid)); + inode = search_inode_for_lustre(sb, fid); if (IS_ERR(inode)) return ERR_CAST(inode); @@ -160,7 +162,7 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren * We have to find the parent to tell MDS how to init lov objects. 
*/ if (S_ISREG(inode->i_mode) && !ll_i2info(inode)->lli_has_smd && - parent != NULL) { + parent && !fid_is_zero(parent)) { struct ll_inode_info *lli = ll_i2info(inode); spin_lock(&lli->lli_lock); @@ -174,8 +176,6 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren return result; } -#define LUSTRE_NFS_FID 0x97 - /** * \a connectable - is nfsd will connect himself or this should be done * at lustre @@ -188,20 +188,25 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen, struct inode *parent) { + int fileid_len = sizeof(struct lustre_nfs_fid) / 4; struct lustre_nfs_fid *nfs_fid = (void *)fh; - CDEBUG(D_INFO, "encoding for (%lu,"DFID") maxlen=%d minlen=%d\n", - inode->i_ino, PFID(ll_inode2fid(inode)), *plen, - (int)sizeof(struct lustre_nfs_fid)); + CDEBUG(D_INFO, "encoding for (%lu," DFID ") maxlen=%d minlen=%d\n", + inode->i_ino, PFID(ll_inode2fid(inode)), *plen, fileid_len); - if (*plen < sizeof(struct lustre_nfs_fid) / 4) - return 255; + if (*plen < fileid_len) { + *plen = fileid_len; + return FILEID_INVALID; + } nfs_fid->lnf_child = *ll_inode2fid(inode); - nfs_fid->lnf_parent = *ll_inode2fid(parent); - *plen = sizeof(struct lustre_nfs_fid) / 4; + if (parent) + nfs_fid->lnf_parent = *ll_inode2fid(parent); + else + fid_zero(&nfs_fid->lnf_parent); + *plen = fileid_len; - return LUSTRE_NFS_FID; + return FILEID_LUSTRE; } static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name, @@ -209,7 +214,8 @@ static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name, unsigned type) { /* It is hack to access lde_fid for comparison with lgd_fid. - * So the input 'name' must be part of the 'lu_dirent'. */ + * So the input 'name' must be part of the 'lu_dirent'. 
+ */ struct lu_dirent *lde = container_of0(name, struct lu_dirent, lde_name); struct ll_getname_data *lgd = container_of(ctx, struct ll_getname_data, ctx); @@ -259,7 +265,7 @@ static struct dentry *ll_fh_to_dentry(struct super_block *sb, struct fid *fid, { struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid; - if (fh_type != LUSTRE_NFS_FID) + if (fh_type != FILEID_LUSTRE) return ERR_PTR(-EPROTO); return ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent); @@ -270,7 +276,7 @@ static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid, { struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid; - if (fh_type != LUSTRE_NFS_FID) + if (fh_type != FILEID_LUSTRE) return ERR_PTR(-EPROTO); return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL); @@ -292,8 +298,8 @@ static struct dentry *ll_get_parent(struct dentry *dchild) sbi = ll_s2sbi(dir->i_sb); - CDEBUG(D_INFO, "getting parent for (%lu,"DFID")\n", - dir->i_ino, PFID(ll_inode2fid(dir))); + CDEBUG(D_INFO, "getting parent for (%lu," DFID ")\n", + dir->i_ino, PFID(ll_inode2fid(dir))); rc = ll_get_default_mdsize(sbi, &lmmsize); if (rc != 0) @@ -314,8 +320,8 @@ static struct dentry *ll_get_parent(struct dentry *dchild) body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); LASSERT(body->valid & OBD_MD_FLID); - CDEBUG(D_INFO, "parent for "DFID" is "DFID"\n", - PFID(ll_inode2fid(dir)), PFID(&body->fid1)); + CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n", + PFID(ll_inode2fid(dir)), PFID(&body->fid1)); result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL); @@ -323,10 +329,10 @@ static struct dentry *ll_get_parent(struct dentry *dchild) return result; } -struct export_operations lustre_export_operations = { - .get_parent = ll_get_parent, - .encode_fh = ll_encode_fh, - .get_name = ll_get_name, +const struct export_operations lustre_export_operations = { + .get_parent = ll_get_parent, + .encode_fh = ll_encode_fh, + .get_name = ll_get_name, .fh_to_dentry = ll_fh_to_dentry, 
.fh_to_parent = ll_fh_to_parent, }; diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c index b27c3f2fc..8509b07cb 100644 --- a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c +++ b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c @@ -98,7 +98,7 @@ static void rce_free(struct rmtacl_ctl_entry *rce) } static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct, - pid_t key) + pid_t key) { struct rmtacl_ctl_entry *rce; struct list_head *head = &rct->rct_entries[rce_hashfunc(key)]; @@ -125,12 +125,12 @@ int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops) struct rmtacl_ctl_entry *rce, *e; rce = rce_alloc(key, ops); - if (rce == NULL) + if (!rce) return -ENOMEM; spin_lock(&rct->rct_lock); e = __rct_search(rct, key); - if (unlikely(e != NULL)) { + if (unlikely(e)) { CWARN("Unexpected stale rmtacl_entry found: [key: %d] [ops: %d]\n", (int)key, ops); rce_free(e); @@ -172,7 +172,7 @@ void rct_fini(struct rmtacl_ctl_table *rct) for (i = 0; i < RCE_HASHES; i++) while (!list_empty(&rct->rct_entries[i])) { rce = list_entry(rct->rct_entries[i].next, - struct rmtacl_ctl_entry, rce_list); + struct rmtacl_ctl_entry, rce_list); rce_free(rce); } spin_unlock(&rct->rct_lock); @@ -208,12 +208,12 @@ void ee_free(struct eacl_entry *ee) } static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key, - struct lu_fid *fid, int type) + struct lu_fid *fid, int type) { struct eacl_entry *ee; struct list_head *head = &et->et_entries[ee_hashfunc(key)]; - LASSERT(fid != NULL); + LASSERT(fid); list_for_each_entry(ee, head, ee_list) if (ee->ee_key == key) { if (lu_fid_eq(&ee->ee_fid, fid) && @@ -256,12 +256,12 @@ int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type, struct eacl_entry *ee, *e; ee = ee_alloc(key, fid, type, header); - if (ee == NULL) + if (!ee) return -ENOMEM; spin_lock(&et->et_lock); e = __et_search_del(et, key, fid, type); - if (unlikely(e != NULL)) { + if 
(unlikely(e)) { CWARN("Unexpected stale eacl_entry found: [key: %d] [fid: " DFID "] [type: %d]\n", (int)key, PFID(fid), type); ee_free(e); @@ -290,7 +290,7 @@ void et_fini(struct eacl_table *et) for (i = 0; i < EE_HASHES; i++) while (!list_empty(&et->et_entries[i])) { ee = list_entry(et->et_entries[i].next, - struct eacl_entry, ee_list); + struct eacl_entry, ee_list); ee_free(ee); } spin_unlock(&et->et_lock); diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c index 871924b3f..f169c0db6 100644 --- a/drivers/staging/lustre/lustre/llite/lloop.c +++ b/drivers/staging/lustre/lustre/llite/lloop.c @@ -211,15 +211,14 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) return io->ci_result; io->ci_lockreq = CILR_NEVER; - LASSERT(head != NULL); rw = head->bi_rw; - for (bio = head; bio != NULL; bio = bio->bi_next) { + for (bio = head; bio ; bio = bio->bi_next) { LASSERT(rw == bio->bi_rw); offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; bio_for_each_segment(bvec, bio, iter) { BUG_ON(bvec.bv_offset != 0); - BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); + BUG_ON(bvec.bv_len != PAGE_SIZE); pages[page_count] = bvec.bv_page; offsets[page_count] = offset; @@ -233,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) (rw == WRITE) ? 
LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ, page_count); - pvec->ldp_size = page_count << PAGE_CACHE_SHIFT; + pvec->ldp_size = page_count << PAGE_SHIFT; pvec->ldp_nr = page_count; /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to @@ -297,7 +296,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req) spin_lock_irq(&lo->lo_lock); first = lo->lo_bio; - if (unlikely(first == NULL)) { + if (unlikely(!first)) { spin_unlock_irq(&lo->lo_lock); return 0; } @@ -308,7 +307,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req) rw = first->bi_rw; bio = &lo->lo_bio; while (*bio && (*bio)->bi_rw == rw) { - CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n", + CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u\n", (unsigned long long)(*bio)->bi_iter.bi_sector, (*bio)->bi_iter.bi_size, page_count, (*bio)->bi_vcnt); @@ -458,7 +457,7 @@ static int loop_thread(void *data) total_count, times, total_count / times); } - LASSERT(bio != NULL); + LASSERT(bio); LASSERT(count <= atomic_read(&lo->lo_pending)); loop_handle_bio(lo, bio); atomic_sub(count, &lo->lo_pending); @@ -508,7 +507,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused, set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); - lo->lo_blocksize = PAGE_CACHE_SIZE; + lo->lo_blocksize = PAGE_SIZE; lo->lo_device = bdev; lo->lo_flags = lo_flags; lo->lo_backing_file = file; @@ -526,11 +525,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused, lo->lo_queue->queuedata = lo; /* queue parameters */ - CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8))); + CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8))); blk_queue_logical_block_size(lo->lo_queue, - (unsigned short)PAGE_CACHE_SIZE); + (unsigned short)PAGE_SIZE); blk_queue_max_hw_sectors(lo->lo_queue, - LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9)); + LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9)); blk_queue_max_segments(lo->lo_queue, 
LLOOP_MAX_SEGMENTS); set_capacity(disks[lo->lo_number], size); @@ -560,7 +559,7 @@ static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev, if (lo->lo_refcnt > count) /* we needed one fd for the ioctl */ return -EBUSY; - if (filp == NULL) + if (!filp) return -EINVAL; spin_lock_irq(&lo->lo_lock); @@ -625,18 +624,18 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, case LL_IOC_LLOOP_INFO: { struct lu_fid fid; - if (lo->lo_backing_file == NULL) { + if (!lo->lo_backing_file) { err = -ENOENT; break; } - if (inode == NULL) + if (!inode) inode = file_inode(lo->lo_backing_file); if (lo->lo_state == LLOOP_BOUND) fid = ll_i2info(inode)->lli_fid; else fid_zero(&fid); - if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid))) + if (copy_to_user((void __user *)arg, &fid, sizeof(fid))) err = -EFAULT; break; } @@ -676,7 +675,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file, if (magic != ll_iocontrol_magic) return LLIOC_CONT; - if (disks == NULL) { + if (!disks) { err = -ENODEV; goto out1; } @@ -708,7 +707,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file, dev = MKDEV(lloop_major, lo->lo_number); /* quit if the used pointer is writable */ - if (put_user((long)old_encode_dev(dev), (long *)arg)) { + if (put_user((long)old_encode_dev(dev), (long __user *)arg)) { err = -EFAULT; goto out; } @@ -793,7 +792,7 @@ static int __init lloop_init(void) lloop_major, max_loop); ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist); - if (ll_iocontrol_magic == NULL) + if (!ll_iocontrol_magic) goto out_mem1; loop_dev = kcalloc(max_loop, sizeof(*loop_dev), GFP_KERNEL); @@ -872,11 +871,12 @@ static void lloop_exit(void) kfree(loop_dev); } -module_init(lloop_init); -module_exit(lloop_exit); - module_param(max_loop, int, 0444); MODULE_PARM_DESC(max_loop, "maximum of lloop_device"); MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); MODULE_DESCRIPTION("Lustre virtual block device"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); + +module_init(lloop_init); +module_exit(lloop_exit); diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c index f134ad9d2..27ab12614 100644 --- a/drivers/staging/lustre/lustre/llite/lproc_llite.c +++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c @@ -43,7 +43,7 @@ #include "llite_internal.h" #include "vvp_internal.h" -/* /proc/lustre/llite mount point registration */ +/* debugfs llite mount point registration */ static struct file_operations ll_rw_extents_stats_fops; static struct file_operations ll_rw_extents_stats_pp_fops; static struct file_operations ll_rw_offset_stats_fops; @@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj, pages_number = sbi->ll_ra_info.ra_max_pages; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); } @@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj, if (rc) return rc; - pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */ + pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */ if (pages_number > totalram_pages / 2) { CERROR("can't set file readahead more than %lu MB\n", - totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/ + totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/ return -ERANGE; } @@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj, pages_number = sbi->ll_ra_info.ra_max_pages_per_file; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); } @@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj, pages_number = 
sbi->ll_ra_info.ra_max_read_ahead_whole_pages; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); } @@ -345,10 +345,11 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj, return rc; /* Cap this at the current max readahead window size, the readahead - * algorithm does this anyway so it's pointless to set it larger. */ + * algorithm does this anyway so it's pointless to set it larger. + */ if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) { CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n", - sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT)); + sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT)); return -ERANGE; } @@ -365,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v) struct super_block *sb = m->private; struct ll_sb_info *sbi = ll_s2sbi(sb); struct cl_client_cache *cache = &sbi->ll_cache; - int shift = 20 - PAGE_CACHE_SHIFT; + int shift = 20 - PAGE_SHIFT; int max_cached_mb; int unused_mb; @@ -404,7 +405,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, return -EFAULT; kernbuf[count] = 0; - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) - kernbuf; rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); @@ -414,7 +415,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, if (pages_number < 0 || pages_number > totalram_pages) { CERROR("%s: can't set max cache more than %lu MB\n", ll_get_fsname(sb, NULL, 0), - totalram_pages >> (20 - PAGE_CACHE_SHIFT)); + totalram_pages >> (20 - PAGE_SHIFT)); return -ERANGE; } @@ -453,7 +454,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, if (diff <= 0) break; - if (sbi->ll_dt_exp == NULL) { /* being initialized */ + if (!sbi->ll_dt_exp) { /* being 
initialized */ rc = -ENODEV; break; } @@ -461,9 +462,9 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, /* difficult - have to ask OSCs to drop LRU slots. */ tmp = diff << 1; rc = obd_set_info_async(NULL, sbi->ll_dt_exp, - sizeof(KEY_CACHE_LRU_SHRINK), - KEY_CACHE_LRU_SHRINK, - sizeof(tmp), &tmp, NULL); + sizeof(KEY_CACHE_LRU_SHRINK), + KEY_CACHE_LRU_SHRINK, + sizeof(tmp), &tmp, NULL); if (rc < 0) break; } @@ -966,9 +967,9 @@ int ldebugfs_register_mountpoint(struct dentry *parent, name[MAX_STRING_SIZE] = '\0'; - LASSERT(sbi != NULL); - LASSERT(mdc != NULL); - LASSERT(osc != NULL); + LASSERT(sbi); + LASSERT(mdc); + LASSERT(osc); /* Get fsname */ len = strlen(lsi->lsi_lmd->lmd_profile); @@ -999,7 +1000,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent, CWARN("Error adding the extent_stats file\n"); rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, - "extents_stats_per_process", + "extents_stats_per_process", 0644, &ll_rw_extents_stats_pp_fops, sbi); if (rc) CWARN("Error adding the extents_stats_per_process file\n"); @@ -1012,7 +1013,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent, /* File operations stats */ sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES, LPROCFS_STATS_FLAG_NONE); - if (sbi->ll_stats == NULL) { + if (!sbi->ll_stats) { err = -ENOMEM; goto out; } @@ -1033,13 +1034,13 @@ int ldebugfs_register_mountpoint(struct dentry *parent, llite_opcode_table[id].opname, ptr); } err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats", - sbi->ll_stats); + sbi->ll_stats); if (err) goto out; sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string), LPROCFS_STATS_FLAG_NONE); - if (sbi->ll_ra_stats == NULL) { + if (!sbi->ll_ra_stats) { err = -ENOMEM; goto out; } @@ -1049,7 +1050,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent, ra_stat_string[id], "pages"); err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats", - sbi->ll_ra_stats); + sbi->ll_ra_stats); if (err) goto out; @@ -1103,7 
+1104,7 @@ void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi) #define pct(a, b) (b ? a * 100 / b : 0) static void ll_display_extents_info(struct ll_rw_extents_info *io_extents, - struct seq_file *seq, int which) + struct seq_file *seq, int which) { unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum; unsigned long start, end, r, w; @@ -1503,5 +1504,5 @@ LPROC_SEQ_FOPS(ll_rw_offset_stats); void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars) { - lvars->obd_vars = lprocfs_llite_obd_vars; + lvars->obd_vars = lprocfs_llite_obd_vars; } diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c index da5f443a0..f8f98e4e8 100644 --- a/drivers/staging/lustre/lustre/llite/namei.c +++ b/drivers/staging/lustre/lustre/llite/namei.c @@ -118,16 +118,16 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash, ll_read_inode2(inode, md); if (S_ISREG(inode->i_mode) && - ll_i2info(inode)->lli_clob == NULL) { + !ll_i2info(inode)->lli_clob) { CDEBUG(D_INODE, - "%s: apply lsm %p to inode "DFID".\n", - ll_get_fsname(sb, NULL, 0), md->lsm, - PFID(ll_inode2fid(inode))); + "%s: apply lsm %p to inode " DFID ".\n", + ll_get_fsname(sb, NULL, 0), md->lsm, + PFID(ll_inode2fid(inode))); rc = cl_file_inode_init(inode, md); } if (rc != 0) { iget_failed(inode); - inode = ERR_PTR(rc); + inode = NULL; } else unlock_new_inode(inode); } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) @@ -180,10 +180,11 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, __u64 bits = lock->l_policy_data.l_inodebits.bits; /* Inode is set to lock->l_resource->lr_lvb_inode - * for mdc - bug 24555 */ - LASSERT(lock->l_ast_data == NULL); + * for mdc - bug 24555 + */ + LASSERT(!lock->l_ast_data); - if (inode == NULL) + if (!inode) break; /* Invalidate all dentries associated with this inode */ @@ -202,7 +203,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, } /* For OPEN locks we 
differentiate between lock modes - * LCK_CR, LCK_CW, LCK_PR - bug 22891 */ + * LCK_CR, LCK_CW, LCK_PR - bug 22891 + */ if (bits & MDS_INODELOCK_OPEN) ll_have_md_lock(inode, &bits, lock->l_req_mode); @@ -260,7 +262,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, } if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) && - inode->i_sb->s_root != NULL && + inode->i_sb->s_root && !is_root_inode(inode)) ll_invalidate_aliases(inode); @@ -285,15 +287,11 @@ __u32 ll_i2suppgid(struct inode *i) /* Pack the required supplementary groups into the supplied groups array. * If we don't need to use the groups from the target inode(s) then we * instead pack one or more groups from the user's supplementary group - * array in case it might be useful. Not needed if doing an MDS-side upcall. */ + * array in case it might be useful. Not needed if doing an MDS-side upcall. + */ void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2) { -#if 0 - int i; -#endif - - LASSERT(i1 != NULL); - LASSERT(suppgids != NULL); + LASSERT(i1); suppgids[0] = ll_i2suppgid(i1); @@ -301,22 +299,6 @@ void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2) suppgids[1] = ll_i2suppgid(i2); else suppgids[1] = -1; - -#if 0 - for (i = 0; i < current_ngroups; i++) { - if (suppgids[0] == -1) { - if (current_groups[i] != suppgids[1]) - suppgids[0] = current_groups[i]; - continue; - } - if (suppgids[1] == -1) { - if (current_groups[i] != suppgids[0]) - suppgids[1] = current_groups[i]; - continue; - } - break; - } -#endif } /* @@ -409,7 +391,8 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request, int rc = 0; /* NB 1 request reference will be taken away by ll_intent_lock() - * when I return */ + * when I return + */ CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it, it->d.lustre.it_disposition); if (!it_disposition(it, DISP_LOOKUP_NEG)) { @@ -420,13 +403,14 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request, 
ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits); /* We used to query real size from OSTs here, but actually - this is not needed. For stat() calls size would be updated - from subsequent do_revalidate()->ll_inode_revalidate_it() in - 2.4 and - vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6 - Everybody else who needs correct file size would call - ll_glimpse_size or some equivalent themselves anyway. - Also see bug 7198. */ + * this is not needed. For stat() calls size would be updated + * from subsequent do_revalidate()->ll_inode_revalidate_it() in + * 2.4 and + * vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6 + * Everybody else who needs correct file size would call + * ll_glimpse_size or some equivalent themselves anyway. + * Also see bug 7198. + */ } /* Only hash *de if it is unhashed (new dentry). @@ -443,9 +427,10 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request, *de = alias; } else if (!it_disposition(it, DISP_LOOKUP_NEG) && !it_disposition(it, DISP_OPEN_CREATE)) { - /* With DISP_OPEN_CREATE dentry will - instantiated in ll_create_it. */ - LASSERT(d_inode(*de) == NULL); + /* With DISP_OPEN_CREATE dentry will be + * instantiated in ll_create_it. 
+ */ + LASSERT(!d_inode(*de)); d_instantiate(*de, inode); } @@ -498,7 +483,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, if (d_mountpoint(dentry)) CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it)); - if (it == NULL || it->it_op == IT_GETXATTR) + if (!it || it->it_op == IT_GETXATTR) it = &lookup_it; if (it->it_op == IT_GETATTR) { @@ -557,7 +542,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, out: if (req) ptlrpc_req_finished(req); - if (it->it_op == IT_GETATTR && (retval == NULL || retval == dentry)) + if (it->it_op == IT_GETATTR && (!retval || retval == dentry)) ll_statahead_mark(parent, dentry); return retval; } @@ -582,7 +567,7 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry, itp = ⁢ de = ll_lookup_it(parent, dentry, itp, 0); - if (itp != NULL) + if (itp) ll_intent_release(itp); return de; @@ -622,7 +607,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry, de = ll_lookup_it(dir, dentry, it, lookup_flags); if (IS_ERR(de)) rc = PTR_ERR(de); - else if (de != NULL) + else if (de) dentry = de; if (!rc) { @@ -631,7 +616,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry, rc = ll_create_it(dir, dentry, mode, it); if (rc) { /* We dget in ll_splice_alias. */ - if (de != NULL) + if (de) dput(de); goto out_release; } @@ -655,7 +640,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry, /* We dget in ll_splice_alias. finish_open takes * care of dget for fd open. */ - if (de != NULL) + if (de) dput(de); } } else { @@ -693,7 +678,8 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it) /* We asked for a lock on the directory, but were granted a * lock on the inode. Since we finally have an inode pointer, - * stuff it in the lock. */ + * stuff it in the lock. 
+ */ CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n", inode, inode->i_ino, inode->i_generation); ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL); @@ -767,7 +753,7 @@ static int ll_new_node(struct inode *dir, struct dentry *dentry, int tgt_len = 0; int err; - if (unlikely(tgt != NULL)) + if (unlikely(tgt)) tgt_len = strlen(tgt) + 1; op_data = ll_prep_md_op_data(NULL, dir, NULL, @@ -888,10 +874,11 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir) /* The MDS sent back the EA because we unlinked the last reference * to this file. Use this EA to unlink the objects on the OST. * It's opaque so we don't swab here; we leave it to obd_unpackmd() to - * check it is complete and sensible. */ + * check it is complete and sensible. + */ eadata = req_capsule_server_sized_get(&request->rq_pill, &RMF_MDT_MD, body->eadatasize); - LASSERT(eadata != NULL); + LASSERT(eadata); rc = obd_unpackmd(ll_i2dtexp(dir), &lsm, eadata, body->eadatasize); if (rc < 0) { @@ -900,8 +887,8 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir) } LASSERT(rc >= sizeof(*lsm)); - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (oa == NULL) { + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!oa) { rc = -ENOMEM; goto out_free_memmd; } @@ -917,7 +904,7 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir) &RMF_LOGCOOKIES, sizeof(struct llog_cookie) * lsm->lsm_stripe_count); - if (oti.oti_logcookies == NULL) { + if (!oti.oti_logcookies) { oa->o_valid &= ~OBD_MD_FLCOOKIE; body->valid &= ~OBD_MD_FLCOOKIE; } @@ -938,7 +925,8 @@ out: /* ll_unlink() doesn't update the inode with the new link count. * Instead, ll_ddelete() and ll_d_iput() will update it based upon if there * is any lock existing. They will recycle dentries and inodes based upon locks - * too. b=20433 */ + * too. 
b=20433 + */ static int ll_unlink(struct inode *dir, struct dentry *dentry) { struct ptlrpc_request *request = NULL; @@ -1028,7 +1016,7 @@ static int ll_symlink(struct inode *dir, struct dentry *dentry, dir, 3000, oldname); err = ll_new_node(dir, dentry, oldname, S_IFLNK | S_IRWXUGO, - 0, LUSTRE_OPC_SYMLINK); + 0, LUSTRE_OPC_SYMLINK); if (!err) ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1); diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c index fe4a72268..e9d25317c 100644 --- a/drivers/staging/lustre/lustre/llite/remote_perm.c +++ b/drivers/staging/lustre/lustre/llite/remote_perm.c @@ -61,7 +61,7 @@ static inline struct ll_remote_perm *alloc_ll_remote_perm(void) { struct ll_remote_perm *lrp; - lrp = kmem_cache_alloc(ll_remote_perm_cachep, GFP_KERNEL | __GFP_ZERO); + lrp = kmem_cache_zalloc(ll_remote_perm_cachep, GFP_KERNEL); if (lrp) INIT_HLIST_NODE(&lrp->lrp_list); return lrp; @@ -82,7 +82,7 @@ static struct hlist_head *alloc_rmtperm_hash(void) struct hlist_head *hash; int i; - hash = kmem_cache_alloc(ll_rmtperm_hash_cachep, GFP_NOFS | __GFP_ZERO); + hash = kmem_cache_zalloc(ll_rmtperm_hash_cachep, GFP_NOFS); if (!hash) return NULL; diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c index f35547496..edab6c5b7 100644 --- a/drivers/staging/lustre/lustre/llite/rw.c +++ b/drivers/staging/lustre/lustre/llite/rw.c @@ -70,9 +70,9 @@ static void ll_cl_fini(struct ll_cl_context *lcc) struct cl_page *page = lcc->lcc_page; LASSERT(lcc->lcc_cookie == current); - LASSERT(env != NULL); + LASSERT(env); - if (page != NULL) { + if (page) { lu_ref_del(&page->cp_reference, "cl_io", io); cl_page_put(env, page); } @@ -97,7 +97,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file, int result = 0; clob = ll_i2info(vmpage->mapping->host)->lli_clob; - LASSERT(clob != NULL); + LASSERT(clob); env = cl_env_get(&refcheck); if (IS_ERR(env)) @@ -111,7 +111,7 @@ 
static struct ll_cl_context *ll_cl_init(struct file *file, cio = ccc_env_io(env); io = cio->cui_cl.cis_io; - if (io == NULL && create) { + if (!io && create) { struct inode *inode = vmpage->mapping->host; loff_t pos; @@ -120,7 +120,8 @@ static struct ll_cl_context *ll_cl_init(struct file *file, /* this is too bad. Someone is trying to write the * page w/o holding inode mutex. This means we can - * add dirty pages into cache during truncate */ + * add dirty pages into cache during truncate + */ CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n", current->comm); dump_stack(); @@ -145,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file, */ io->ci_lockreq = CILR_NEVER; - pos = vmpage->index << PAGE_CACHE_SHIFT; + pos = vmpage->index << PAGE_SHIFT; /* Create a temp IO to serve write. */ - result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE); + result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE); if (result == 0) { cio->cui_fd = LUSTRE_FPRIVATE(file); cio->cui_iter = NULL; @@ -163,12 +164,11 @@ static struct ll_cl_context *ll_cl_init(struct file *file, } lcc->lcc_io = io; - if (io == NULL) + if (!io) result = -EIO; if (result == 0) { struct cl_page *page; - LASSERT(io != NULL); LASSERT(io->ci_state == CIS_IO_GOING); LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file)); page = cl_page_find(env, clob, vmpage->index, vmpage, @@ -240,7 +240,8 @@ int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from, ll_cl_fini(lcc); } /* returning 0 in prepare assumes commit must be called - * afterwards */ + * afterwards + */ } else { result = PTR_ERR(lcc); } @@ -296,8 +297,8 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which); * to get an ra budget that is larger than the remaining readahead pages * and reach here at exactly the same time. 
They will compute /a ret to * consume the remaining pages, but will fail at atomic_add_return() and - * get a zero ra window, although there is still ra space remaining. - Jay */ - + * get a zero ra window, although there is still ra space remaining. - Jay + */ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, struct ra_io_arg *ria, unsigned long pages) @@ -307,7 +308,8 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, /* If read-ahead pages left are less than 1M, do not do read-ahead, * otherwise it will form small read RPC(< 1M), which hurt server - * performance a lot. */ + * performance a lot. + */ ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages); if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) { ret = 0; @@ -324,7 +326,8 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, * branch is more expensive than subtracting zero from the result. * * Strided read is left unaligned to avoid small fragments beyond - * the RPC boundary from needing an extra read RPC. */ + * the RPC boundary from needing an extra read RPC. 
+ */ if (ria->ria_pages == 0) { long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES; @@ -364,7 +367,7 @@ void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which) #define RAS_CDEBUG(ras) \ CDEBUG(D_READA, \ "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu" \ - "csr %lu sf %lu sp %lu sl %lu \n", \ + "csr %lu sf %lu sp %lu sl %lu\n", \ ras->ras_last_readpage, ras->ras_consecutive_requests, \ ras->ras_consecutive_pages, ras->ras_window_start, \ ras->ras_window_len, ras->ras_next_readahead, \ @@ -378,9 +381,9 @@ static int index_in_window(unsigned long index, unsigned long point, unsigned long start = point - before, end = point + after; if (start > point) - start = 0; + start = 0; if (end < point) - end = ~0; + end = ~0; return start <= index && index <= end; } @@ -473,7 +476,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, const char *msg = NULL; vmpage = grab_cache_page_nowait(mapping, index); - if (vmpage != NULL) { + if (vmpage) { /* Check if vmpage was truncated or reclaimed */ if (vmpage->mapping == mapping) { page = cl_page_find(env, clob, vmpage->index, @@ -495,12 +498,12 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, } if (rc != 1) unlock_page(vmpage); - page_cache_release(vmpage); + put_page(vmpage); } else { which = RA_STAT_FAILED_GRAB_PAGE; msg = "g_c_p_n failed"; } - if (msg != NULL) { + if (msg) { ll_ra_stats_inc(mapping, which); CDEBUG(D_READA, "%s\n", msg); } @@ -515,14 +518,16 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, /* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't * know what the actual RPC size is. If this needs to change, it makes more * sense to tune the i_blkbits value for the file based on the OSTs it is - * striped over, rather than having a constant value for all files here. */ + * striped over, rather than having a constant value for all files here. 
+ */ -/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)). +/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)). * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled * by default, this should be adjusted corresponding with max_read_ahead_mb * and max_read_ahead_per_file_mb otherwise the readahead budget can be used - * up quickly which will affect read performance significantly. See LU-2816 */ -#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT) + * up quickly which will affect read performance significantly. See LU-2816 + */ +#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT) static inline int stride_io_mode(struct ll_readahead_state *ras) { @@ -570,7 +575,7 @@ stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs, if (end_left > st_pgs) end_left = st_pgs; - CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu \n", + CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu\n", start, end, start_left, end_left); if (start == end) @@ -600,7 +605,8 @@ static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria) /* If ria_length == ria_pages, it means non-stride I/O mode, * idx should always inside read-ahead window in this case * For stride I/O mode, just check whether the idx is inside - * the ria_pages. */ + * the ria_pages. 
+ */ return ria->ria_length == 0 || ria->ria_length == ria->ria_pages || (idx >= ria->ria_stoff && (idx - ria->ria_stoff) % ria->ria_length < ria->ria_pages); @@ -616,12 +622,12 @@ static int ll_read_ahead_pages(const struct lu_env *env, int rc, count = 0, stride_ria; unsigned long page_idx; - LASSERT(ria != NULL); + LASSERT(ria); RIA_DEBUG(ria); stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0; - for (page_idx = ria->ria_start; page_idx <= ria->ria_end && - *reserved_pages > 0; page_idx++) { + for (page_idx = ria->ria_start; + page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) { if (ras_inside_ra_window(page_idx, ria)) { /* If the page is inside the read-ahead window*/ rc = ll_read_ahead_page(env, io, queue, @@ -634,11 +640,13 @@ static int ll_read_ahead_pages(const struct lu_env *env, } else if (stride_ria) { /* If it is not in the read-ahead window, and it is * read-ahead mode, then check whether it should skip - * the stride gap */ + * the stride gap + */ pgoff_t offset; /* FIXME: This assertion only is valid when it is for * forward read-ahead, it will be fixed when backward - * read-ahead is implemented */ + * read-ahead is implemented + */ LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n", page_idx, ria->ria_start, ria->ria_end, ria->ria_stoff, @@ -647,7 +655,7 @@ static int ll_read_ahead_pages(const struct lu_env *env, offset = offset % (ria->ria_length); if (offset > ria->ria_pages) { page_idx += ria->ria_length - offset; - CDEBUG(D_READA, "i %lu skip %lu \n", page_idx, + CDEBUG(D_READA, "i %lu skip %lu\n", page_idx, ria->ria_length - offset); continue; } @@ -699,7 +707,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, bead = NULL; /* Enlarge the RA window to encompass the full read */ - if (bead != NULL && ras->ras_window_start + ras->ras_window_len < + if (bead && ras->ras_window_start + ras->ras_window_len < bead->lrr_start + bead->lrr_count) { ras->ras_window_len 
= bead->lrr_start + bead->lrr_count - ras->ras_window_start; @@ -721,7 +729,8 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, */ /* Note: we only trim the RPC, instead of extending the RPC * to the boundary, so to avoid reading too much pages during - * random reading. */ + * random reading. + */ rpc_boundary = (end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1)); if (rpc_boundary > 0) rpc_boundary--; @@ -730,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, end = rpc_boundary; /* Truncate RA window to end of file */ - end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT)); + end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT)); ras->ras_next_readahead = max(end, end + 1); RAS_CDEBUG(ras); @@ -764,19 +773,19 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, ret = ll_read_ahead_pages(env, io, queue, ria, &reserved, mapping, &ra_end); - LASSERTF(reserved >= 0, "reserved %lu\n", reserved); if (reserved != 0) ll_ra_count_put(ll_i2sbi(inode), reserved); - if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT)) + if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT)) ll_ra_stats_inc(mapping, RA_STAT_EOF); /* if we didn't get to the end of the region we reserved from * the ras we need to go back and update the ras so that the * next read-ahead tries from where we left off. 
we only do so * if the region we failed to issue read-ahead on is still ahead - * of the app and behind the next index to start read-ahead from */ - CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n", + * of the app and behind the next index to start read-ahead from + */ + CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n", ra_end, end, ria->ria_end); if (ra_end != end + 1) { @@ -860,7 +869,7 @@ static void ras_update_stride_detector(struct ll_readahead_state *ras, unsigned long stride_gap = index - ras->ras_last_readpage - 1; if (!stride_io_mode(ras) && (stride_gap != 0 || - ras->ras_consecutive_stride_requests == 0)) { + ras->ras_consecutive_stride_requests == 0)) { ras->ras_stride_pages = ras->ras_consecutive_pages; ras->ras_stride_length = stride_gap+ras->ras_consecutive_pages; } @@ -881,7 +890,8 @@ static void ras_update_stride_detector(struct ll_readahead_state *ras, } /* Stride Read-ahead window will be increased inc_len according to - * stride I/O pattern */ + * stride I/O pattern + */ static void ras_stride_increase_window(struct ll_readahead_state *ras, struct ll_ra_info *ra, unsigned long inc_len) @@ -952,7 +962,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, * or reads to some other part of the file. Secondly if we get a * read-ahead miss that we think we've previously issued. This can * be a symptom of there being so many read-ahead pages that the VM is - * reclaiming it before we get to it. */ + * reclaiming it before we get to it. + */ if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) { zero = 1; ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE); @@ -969,12 +980,13 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, * file up to ra_max_pages_per_file. This is simply a best effort * and only occurs once per open file. Normal RA behavior is reverted * to for subsequent IO. The mmap case does not increment - * ras_requests and thus can never trigger this behavior. 
*/ + * ras_requests and thus can never trigger this behavior. + */ if (ras->ras_requests == 2 && !ras->ras_request_index) { __u64 kms_pages; - kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >> + PAGE_SHIFT; CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages, ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file); @@ -1015,14 +1027,16 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, stride_io_mode(ras)) { /*If stride-RA hit cache miss, the stride dector *will not be reset to avoid the overhead of - *redetecting read-ahead mode */ + *redetecting read-ahead mode + */ if (index != ras->ras_last_readpage + 1) ras->ras_consecutive_pages = 0; ras_reset(inode, ras, index); RAS_CDEBUG(ras); } else { /* Reset both stride window and normal RA - * window */ + * window + */ ras_reset(inode, ras, index); ras->ras_consecutive_pages++; ras_stride_reset(ras); @@ -1031,7 +1045,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, } else if (stride_io_mode(ras)) { /* If this is contiguous read but in stride I/O mode * currently, check whether stride step still is valid, - * if invalid, it will reset the stride ra window*/ + * if invalid, it will reset the stride ra window + */ if (!index_in_stride_window(ras, index)) { /* Shrink stride read-ahead window to be zero */ ras_stride_reset(ras); @@ -1047,7 +1062,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, if (stride_io_mode(ras)) /* Since stride readahead is sensitive to the offset * of read-ahead, so we use original offset here, - * instead of ras_window_start, which is RPC aligned */ + * instead of ras_window_start, which is RPC aligned + */ ras->ras_next_readahead = max(index, ras->ras_next_readahead); else ras->ras_next_readahead = max(ras->ras_window_start, @@ -1055,7 +1071,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, RAS_CDEBUG(ras); /* Trigger RA in the mmap case where 
ras_consecutive_requests - * is not incremented and thus can't be used to trigger RA */ + * is not incremented and thus can't be used to trigger RA + */ if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) { ras->ras_window_len = RAS_INCREASE_STEP(inode); goto out_unlock; @@ -1101,7 +1118,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) LASSERT(PageLocked(vmpage)); LASSERT(!PageWriteback(vmpage)); - LASSERT(ll_i2dtexp(inode) != NULL); + LASSERT(ll_i2dtexp(inode)); env = cl_env_nested_get(&nest); if (IS_ERR(env)) { @@ -1110,7 +1127,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) } clob = ll_i2info(inode)->lli_clob; - LASSERT(clob != NULL); + LASSERT(clob); io = ccc_env_thread_io(env); io->ci_obj = clob; @@ -1153,14 +1170,16 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) /* Flush page failed because the extent is being written out. * Wait for the write of extent to be finished to avoid * breaking kernel which assumes ->writepage should mark - * PageWriteback or clean the page. */ + * PageWriteback or clean the page. + */ result = cl_sync_file_range(inode, offset, - offset + PAGE_CACHE_SIZE - 1, + offset + PAGE_SIZE - 1, CL_FSYNC_LOCAL, 1); if (result > 0) { /* actually we may have written more than one page. * decreasing this page because the caller will count - * it. */ + * it. 
+ */ wbc->nr_to_write -= result - 1; result = 0; } @@ -1192,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) int ignore_layout = 0; if (wbc->range_cyclic) { - start = mapping->writeback_index << PAGE_CACHE_SHIFT; + start = mapping->writeback_index << PAGE_SHIFT; end = OBD_OBJECT_EOF; } else { start = wbc->range_start; @@ -1210,7 +1229,8 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) if (sbi->ll_umounting) /* if the mountpoint is being umounted, all pages have to be * evicted to avoid hitting LBUG when truncate_inode_pages() - * is called later on. */ + * is called later on. + */ ignore_layout = 1; result = cl_sync_file_range(inode, start, end, mode, ignore_layout); if (result > 0) { @@ -1221,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) { if (end == OBD_OBJECT_EOF) end = i_size_read(inode); - mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1; + mapping->writeback_index = (end >> PAGE_SHIFT) + 1; } return result; } diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c index 711fda93a..69aa15e8e 100644 --- a/drivers/staging/lustre/lustre/llite/rw26.c +++ b/drivers/staging/lustre/lustre/llite/rw26.c @@ -87,14 +87,14 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset, * below because they are run with page locked and all our io is * happening with locked page too */ - if (offset == 0 && length == PAGE_CACHE_SIZE) { + if (offset == 0 && length == PAGE_SIZE) { env = cl_env_get(&refcheck); if (!IS_ERR(env)) { inode = vmpage->mapping->host; obj = ll_i2info(inode)->lli_clob; - if (obj != NULL) { + if (obj) { page = cl_vmpage_page(vmpage, obj); - if (page != NULL) { + if (page) { lu_ref_add(&page->cp_reference, "delete", vmpage); cl_page_delete(env, page); @@ -109,12 +109,7 @@ static void 
ll_invalidatepage(struct page *vmpage, unsigned int offset, } } -#ifdef HAVE_RELEASEPAGE_WITH_INT -#define RELEASEPAGE_ARG_TYPE int -#else -#define RELEASEPAGE_ARG_TYPE gfp_t -#endif -static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) +static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask) { struct cl_env_nest nest; struct lu_env *env; @@ -128,11 +123,11 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) return 0; mapping = vmpage->mapping; - if (mapping == NULL) + if (!mapping) return 1; obj = ll_i2info(mapping->host)->lli_clob; - if (obj == NULL) + if (!obj) return 1; /* 1 for page allocator, 1 for cl_page and 1 for page cache */ @@ -145,12 +140,13 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) /* If we can't allocate an env we won't call cl_page_put() * later on which further means it's impossible to drop * page refcount by cl_page, so ask kernel to not free - * this page. */ + * this page. 
+ */ return 0; page = cl_vmpage_page(vmpage, obj); - result = page == NULL; - if (page != NULL) { + result = !page; + if (page) { if (!cl_page_in_use(page)) { result = 1; cl_page_delete(env, page); @@ -197,8 +193,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr, return -EFBIG; } - *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - *max_pages -= user_addr >> PAGE_CACHE_SHIFT; + *max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT; + *max_pages -= user_addr >> PAGE_SHIFT; *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS); if (*pages) { @@ -212,7 +208,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr, } /* ll_free_user_pages - tear down page struct array - * @pages: array of page struct pointers underlying target buffer */ + * @pages: array of page struct pointers underlying target buffer + */ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) { int i; @@ -220,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) for (i = 0; i < npages; i++) { if (do_dirty) set_page_dirty_lock(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } kvfree(pages); } @@ -246,7 +243,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, cl_2queue_init(queue); for (i = 0; i < page_count; i++) { if (pv->ldp_offsets) - file_offset = pv->ldp_offsets[i]; + file_offset = pv->ldp_offsets[i]; LASSERT(!(file_offset & (page_size - 1))); clp = cl_page_find(env, obj, cl_index(obj, file_offset), @@ -266,7 +263,8 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, do_io = true; /* check the page type: if the page is a host page, then do - * write directly */ + * write directly + */ if (clp->cp_type == CPT_CACHEABLE) { struct page *vmpage = cl_page_vmpage(env, clp); struct page *src_page; @@ -284,14 +282,16 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, 
kunmap_atomic(src); /* make sure page will be added to the transfer by - * cl_io_submit()->...->vvp_page_prep_write(). */ + * cl_io_submit()->...->vvp_page_prep_write(). + */ if (rw == WRITE) set_page_dirty(vmpage); if (rw == READ) { /* do not issue the page for read, since it * may reread a ra page which has NOT uptodate - * bit set. */ + * bit set. + */ cl_page_disown(env, io, clp); do_io = false; } @@ -339,29 +339,25 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, size_t size, loff_t file_offset, struct page **pages, int page_count) { - struct ll_dio_pages pvec = { .ldp_pages = pages, - .ldp_nr = page_count, - .ldp_size = size, - .ldp_offsets = NULL, - .ldp_start_offset = file_offset - }; - - return ll_direct_rw_pages(env, io, rw, inode, &pvec); + struct ll_dio_pages pvec = { + .ldp_pages = pages, + .ldp_nr = page_count, + .ldp_size = size, + .ldp_offsets = NULL, + .ldp_start_offset = file_offset + }; + + return ll_direct_rw_pages(env, io, rw, inode, &pvec); } -#ifdef KMALLOC_MAX_SIZE -#define MAX_MALLOC KMALLOC_MAX_SIZE -#else -#define MAX_MALLOC (128 * 1024) -#endif - /* This is the maximum size of a single O_DIRECT request, based on the * kmalloc limit. We need to fit all of the brw_page structs, each one * representing PAGE_SIZE worth of user data, into a single buffer, and * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is - * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ -#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \ - ~(DT_MAX_BRW_SIZE - 1)) + * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. 
+ */ +#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \ + PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1)) static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, loff_t file_offset) { @@ -386,8 +382,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n", inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE, - file_offset, file_offset, count >> PAGE_CACHE_SHIFT, - MAX_DIO_SIZE >> PAGE_CACHE_SHIFT); + file_offset, file_offset, count >> PAGE_SHIFT, + MAX_DIO_SIZE >> PAGE_SHIFT); /* Check that all user buffers are aligned as well */ if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK) @@ -396,7 +392,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, env = cl_env_get(&refcheck); LASSERT(!IS_ERR(env)); io = ccc_env_io(env)->cui_cl.cis_io; - LASSERT(io != NULL); + LASSERT(io); /* 0. Need locking between buffered and direct access. and race with * size changing by concurrent truncates and writes. @@ -433,10 +429,11 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, * for the request, shrink it to a smaller * PAGE_SIZE multiple and try again. * We should always be able to kmalloc for a - * page worth of page pointers = 4MB on i386. */ + * page worth of page pointers = 4MB on i386. 
+ */ if (result == -ENOMEM && - size > (PAGE_CACHE_SIZE / sizeof(*pages)) * - PAGE_CACHE_SIZE) { + size > (PAGE_SIZE / sizeof(*pages)) * + PAGE_SIZE) { size = ((((size / 2) - 1) | ~CFS_PAGE_MASK) + 1) & CFS_PAGE_MASK; @@ -461,7 +458,7 @@ out: struct lov_stripe_md *lsm; lsm = ccc_inode_lsm_get(inode); - LASSERT(lsm != NULL); + LASSERT(lsm); lov_stripe_lock(lsm); obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0); lov_stripe_unlock(lsm); @@ -474,13 +471,13 @@ out: } static int ll_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *page; int rc; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); page = grab_cache_page_write_begin(mapping, index, flags); if (!page) @@ -491,7 +488,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping, rc = ll_prepare_write(file, page, from, from + len); if (rc) { unlock_page(page); - page_cache_release(page); + put_page(page); } return rc; } @@ -500,20 +497,20 @@ static int ll_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); int rc; rc = ll_commit_write(file, page, from, from + copied); unlock_page(page); - page_cache_release(page); + put_page(page); return rc ?: copied; } #ifdef CONFIG_MIGRATION static int ll_migratepage(struct address_space *mapping, - struct page *newpage, struct page *page, - enum migrate_mode mode + struct page *newpage, struct page *page, + enum migrate_mode mode ) { /* Always fail page migration until we have a proper implementation */ diff --git a/drivers/staging/lustre/lustre/llite/statahead.c 
b/drivers/staging/lustre/lustre/llite/statahead.c index 88ffd8e3a..99ffd1589 100644 --- a/drivers/staging/lustre/lustre/llite/statahead.c +++ b/drivers/staging/lustre/lustre/llite/statahead.c @@ -49,13 +49,13 @@ #define SA_OMITTED_ENTRY_MAX 8ULL -typedef enum { +enum se_stat { /** negative values are for error cases */ SA_ENTRY_INIT = 0, /** init entry */ SA_ENTRY_SUCC = 1, /** stat succeed */ SA_ENTRY_INVA = 2, /** invalid entry */ SA_ENTRY_DEST = 3, /** entry to be destroyed */ -} se_stat_t; +}; struct ll_sa_entry { /* link into sai->sai_entries */ @@ -71,7 +71,7 @@ struct ll_sa_entry { /* low layer ldlm lock handle */ __u64 se_handle; /* entry status */ - se_stat_t se_stat; + enum se_stat se_stat; /* entry size, contains name */ int se_size; /* pointer to async getattr enqueue info */ @@ -130,7 +130,7 @@ ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry) static inline int agl_should_run(struct ll_statahead_info *sai, struct inode *inode) { - return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid); + return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid); } static inline int sa_sent_full(struct ll_statahead_info *sai) @@ -284,7 +284,7 @@ ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index) } static void ll_sa_entry_cleanup(struct ll_statahead_info *sai, - struct ll_sa_entry *entry) + struct ll_sa_entry *entry) { struct md_enqueue_info *minfo = entry->se_minfo; struct ptlrpc_request *req = entry->se_req; @@ -303,7 +303,7 @@ static void ll_sa_entry_cleanup(struct ll_statahead_info *sai, } static void ll_sa_entry_put(struct ll_statahead_info *sai, - struct ll_sa_entry *entry) + struct ll_sa_entry *entry) { if (atomic_dec_and_test(&entry->se_refcount)) { CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n", @@ -366,7 +366,7 @@ ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry) */ static void do_sa_entry_to_stated(struct ll_statahead_info *sai, - struct ll_sa_entry *entry, 
se_stat_t stat) + struct ll_sa_entry *entry, enum se_stat stat) { struct ll_sa_entry *se; struct list_head *pos = &sai->sai_entries_stated; @@ -392,7 +392,7 @@ do_sa_entry_to_stated(struct ll_statahead_info *sai, */ static int ll_sa_entry_to_stated(struct ll_statahead_info *sai, - struct ll_sa_entry *entry, se_stat_t stat) + struct ll_sa_entry *entry, enum se_stat stat) { struct ll_inode_info *lli = ll_i2info(sai->sai_inode); int ret = 1; @@ -494,12 +494,13 @@ static void ll_sai_put(struct ll_statahead_info *sai) if (unlikely(atomic_read(&sai->sai_refcount) > 0)) { /* It is race case, the interpret callback just hold - * a reference count */ + * a reference count + */ spin_unlock(&lli->lli_sa_lock); return; } - LASSERT(lli->lli_opendir_key == NULL); + LASSERT(!lli->lli_opendir_key); LASSERT(thread_is_stopped(&sai->sai_thread)); LASSERT(thread_is_stopped(&sai->sai_agl_thread)); @@ -513,8 +514,8 @@ static void ll_sai_put(struct ll_statahead_info *sai) PFID(&lli->lli_fid), sai->sai_sent, sai->sai_replied); - list_for_each_entry_safe(entry, next, - &sai->sai_entries, se_link) + list_for_each_entry_safe(entry, next, &sai->sai_entries, + se_link) do_sa_entry_fini(sai, entry); LASSERT(list_empty(&sai->sai_entries)); @@ -618,20 +619,21 @@ static void ll_post_statahead(struct ll_statahead_info *sai) it = &minfo->mi_it; req = entry->se_req; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } child = entry->se_inode; - if (child == NULL) { + if (!child) { /* * lookup. */ LASSERT(fid_is_zero(&minfo->mi_data.op_fid2)); /* XXX: No fid in reply, this is probably cross-ref case. - * SA can't handle it yet. */ + * SA can't handle it yet. + */ if (body->valid & OBD_MD_MDS) { rc = -EAGAIN; goto out; @@ -672,7 +674,8 @@ out: /* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock * reference count by calling "ll_intent_drop_lock()" in spite of the * above operations failed or not. 
Do not worry about calling - * "ll_intent_drop_lock()" more than once. */ + * "ll_intent_drop_lock()" more than once. + */ rc = ll_sa_entry_to_stated(sai, entry, rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC); if (rc == 0 && entry->se_index == sai->sai_index_wait) @@ -698,14 +701,15 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, /* release ibits lock ASAP to avoid deadlock when statahead * thread enqueues lock on parent in readdir and another * process enqueues lock on child with parent lock held, eg. - * unlink. */ + * unlink. + */ handle = it->d.lustre.it_lock_handle; ll_intent_drop_lock(it); } spin_lock(&lli->lli_sa_lock); /* stale entry */ - if (unlikely(lli->lli_sai == NULL || + if (unlikely(!lli->lli_sai || lli->lli_sai->sai_generation != minfo->mi_generation)) { spin_unlock(&lli->lli_sa_lock); rc = -ESTALE; @@ -720,7 +724,7 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, } entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata); - if (entry == NULL) { + if (!entry) { sai->sai_replied++; spin_unlock(&lli->lli_sa_lock); rc = -EIDRM; @@ -736,11 +740,12 @@ static int ll_statahead_interpret(struct ptlrpc_request *req, /* Release the async ibits lock ASAP to avoid deadlock * when statahead thread tries to enqueue lock on parent * for readpage and other tries to enqueue lock on child - * with parent's lock held, for example: unlink. */ + * with parent's lock held, for example: unlink. 
+ */ entry->se_handle = handle; wakeup = list_empty(&sai->sai_entries_received); list_add_tail(&entry->se_list, - &sai->sai_entries_received); + &sai->sai_entries_received); } sai->sai_replied++; spin_unlock(&lli->lli_sa_lock); @@ -756,7 +761,7 @@ out: iput(dir); kfree(minfo); } - if (sai != NULL) + if (sai) ll_sai_put(sai); return rc; } @@ -853,7 +858,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry, struct ldlm_enqueue_info *einfo; int rc; - if (unlikely(inode == NULL)) + if (unlikely(!inode)) return 1; if (d_mountpoint(dentry)) @@ -908,10 +913,9 @@ static void ll_statahead_one(struct dentry *parent, const char *entry_name, rc = do_sa_revalidate(dir, entry, dentry); if (rc == 1 && agl_should_run(sai, d_inode(dentry))) ll_agl_add(sai, d_inode(dentry), entry->se_index); - } - if (dentry != NULL) dput(dentry); + } if (rc) { rc1 = ll_sa_entry_to_stated(sai, entry, @@ -948,7 +952,8 @@ static int ll_agl_thread(void *arg) if (thread_is_init(thread)) /* If someone else has changed the thread state * (e.g. already changed to SVC_STOPPING), we can't just - * blindly overwrite that setting. */ + * blindly overwrite that setting. + */ thread_set_flags(thread, SVC_RUNNING); spin_unlock(&plli->lli_agl_lock); wake_up(&thread->t_ctl_waitq); @@ -964,7 +969,8 @@ static int ll_agl_thread(void *arg) spin_lock(&plli->lli_agl_lock); /* The statahead thread maybe help to process AGL entries, - * so check whether list empty again. */ + * so check whether list empty again. 
+ */ if (!list_empty(&sai->sai_entries_agl)) { clli = list_entry(sai->sai_entries_agl.next, struct ll_inode_info, lli_agl_list); @@ -1007,8 +1013,8 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai) sai, parent); plli = ll_i2info(d_inode(parent)); - task = kthread_run(ll_agl_thread, parent, - "ll_agl_%u", plli->lli_opendir_pid); + task = kthread_run(ll_agl_thread, parent, "ll_agl_%u", + plli->lli_opendir_pid); if (IS_ERR(task)) { CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task)); thread_set_flags(thread, SVC_STOPPED); @@ -1049,7 +1055,8 @@ static int ll_statahead_thread(void *arg) if (thread_is_init(thread)) /* If someone else has changed the thread state * (e.g. already changed to SVC_STOPPING), we can't just - * blindly overwrite that setting. */ + * blindly overwrite that setting. + */ thread_set_flags(thread, SVC_RUNNING); spin_unlock(&plli->lli_sa_lock); wake_up(&thread->t_ctl_waitq); @@ -1070,7 +1077,7 @@ static int ll_statahead_thread(void *arg) } dp = page_address(page); - for (ent = lu_dirent_start(dp); ent != NULL; + for (ent = lu_dirent_start(dp); ent; ent = lu_dirent_next(ent)) { __u64 hash; int namelen; @@ -1137,7 +1144,8 @@ interpret_it: /* If no window for metadata statahead, but there are * some AGL entries to be triggered, then try to help - * to process the AGL entries. */ + * to process the AGL entries. 
+ */ if (sa_sent_full(sai)) { spin_lock(&plli->lli_agl_lock); while (!list_empty(&sai->sai_entries_agl)) { @@ -1274,7 +1282,7 @@ void ll_stop_statahead(struct inode *dir, void *key) { struct ll_inode_info *lli = ll_i2info(dir); - if (unlikely(key == NULL)) + if (unlikely(!key)) return; spin_lock(&lli->lli_sa_lock); @@ -1357,7 +1365,7 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry) } dp = page_address(page); - for (ent = lu_dirent_start(dp); ent != NULL; + for (ent = lu_dirent_start(dp); ent; ent = lu_dirent_next(ent)) { __u64 hash; int namelen; @@ -1365,7 +1373,8 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry) hash = le64_to_cpu(ent->lde_hash); /* The ll_get_dir_page() can return any page containing - * the given hash which may be not the start hash. */ + * the given hash which may be not the start hash. + */ if (unlikely(hash < pos)) continue; @@ -1448,7 +1457,7 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry) struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode); int hit; - if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC) + if (entry && entry->se_stat == SA_ENTRY_SUCC) hit = 1; else hit = 0; @@ -1498,6 +1507,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, struct ll_sa_entry *entry; struct ptlrpc_thread *thread; struct l_wait_info lwi = { 0 }; + struct task_struct *task; int rc = 0; struct ll_inode_info *plli; @@ -1540,7 +1550,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name); - if (entry == NULL || only_unplug) { + if (!entry || only_unplug) { ll_sai_unplug(sai, entry); return entry ? 
1 : -EAGAIN; } @@ -1559,8 +1569,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } } - if (entry->se_stat == SA_ENTRY_SUCC && - entry->se_inode != NULL) { + if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) { struct inode *inode = entry->se_inode; struct lookup_intent it = { .it_op = IT_GETATTR, .d.lustre.it_lock_handle = @@ -1570,11 +1579,11 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode), &bits); if (rc == 1) { - if (d_inode(*dentryp) == NULL) { + if (!d_inode(*dentryp)) { struct dentry *alias; alias = ll_splice_alias(inode, - *dentryp); + *dentryp); if (IS_ERR(alias)) { ll_sai_unplug(sai, entry); return PTR_ERR(alias); @@ -1583,7 +1592,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } else if (d_inode(*dentryp) != inode) { /* revalidate, but inode is recreated */ CDEBUG(D_READA, - "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n", + "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n", *dentryp, d_inode(*dentryp)->i_ino, d_inode(*dentryp)->i_generation, @@ -1616,14 +1625,14 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, } sai = ll_sai_alloc(); - if (sai == NULL) { + if (!sai) { rc = -ENOMEM; goto out; } sai->sai_ls_all = (rc == LS_FIRST_DOT_DE); sai->sai_inode = igrab(dir); - if (unlikely(sai->sai_inode == NULL)) { + if (unlikely(!sai->sai_inode)) { CWARN("Do not start stat ahead on dying inode "DFID"\n", PFID(&lli->lli_fid)); rc = -ESTALE; @@ -1651,25 +1660,28 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp, * but as soon as we expose the sai by attaching it to the lli that * default reference can be dropped by another thread calling * ll_stop_statahead. We need to take a local reference to protect - * the sai buffer while we intend to access it. */ + * the sai buffer while we intend to access it. 
+ */ ll_sai_get(sai); lli->lli_sai = sai; plli = ll_i2info(d_inode(parent)); - rc = PTR_ERR(kthread_run(ll_statahead_thread, parent, - "ll_sa_%u", plli->lli_opendir_pid)); + task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u", + plli->lli_opendir_pid); thread = &sai->sai_thread; - if (IS_ERR_VALUE(rc)) { + if (IS_ERR(task)) { + rc = PTR_ERR(task); CERROR("can't start ll_sa thread, rc: %d\n", rc); dput(parent); lli->lli_opendir_key = NULL; thread_set_flags(thread, SVC_STOPPED); thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED); /* Drop both our own local reference and the default - * reference from allocation time. */ + * reference from allocation time. + */ ll_sai_put(sai); ll_sai_put(sai); - LASSERT(lli->lli_sai == NULL); + LASSERT(!lli->lli_sai); return -EAGAIN; } diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c index 86c371ef7..61856d37a 100644 --- a/drivers/staging/lustre/lustre/llite/super25.c +++ b/drivers/staging/lustre/lustre/llite/super25.c @@ -53,8 +53,8 @@ static struct inode *ll_alloc_inode(struct super_block *sb) struct ll_inode_info *lli; ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1); - lli = kmem_cache_alloc(ll_inode_cachep, GFP_NOFS | __GFP_ZERO); - if (lli == NULL) + lli = kmem_cache_zalloc(ll_inode_cachep, GFP_NOFS); + if (!lli) return NULL; inode_init_once(&lli->lli_vfs_inode); @@ -89,7 +89,7 @@ MODULE_ALIAS_FS("lustre"); void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg)); -static int __init init_lustre_lite(void) +static int __init lustre_init(void) { lnet_process_id_t lnet_id; struct timespec64 ts; @@ -99,7 +99,8 @@ static int __init init_lustre_lite(void) /* print an address of _any_ initialized kernel symbol from this * module, to allow debugging with gdb that doesn't support data - * symbols from modules.*/ + * symbols from modules. 
+ */ CDEBUG(D_INFO, "Lustre client module (%p).\n", &lustre_super_operations); @@ -108,26 +109,26 @@ static int __init init_lustre_lite(void) sizeof(struct ll_inode_info), 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, NULL); - if (ll_inode_cachep == NULL) + if (!ll_inode_cachep) goto out_cache; ll_file_data_slab = kmem_cache_create("ll_file_data", - sizeof(struct ll_file_data), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (ll_file_data_slab == NULL) + sizeof(struct ll_file_data), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!ll_file_data_slab) goto out_cache; ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache", sizeof(struct ll_remote_perm), 0, 0, NULL); - if (ll_remote_perm_cachep == NULL) + if (!ll_remote_perm_cachep) goto out_cache; ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache", REMOTE_PERM_HASHSIZE * sizeof(struct list_head), 0, 0, NULL); - if (ll_rmtperm_hash_cachep == NULL) + if (!ll_rmtperm_hash_cachep) goto out_cache; llite_root = debugfs_create_dir("llite", debugfs_lustre_root); @@ -146,7 +147,8 @@ static int __init init_lustre_lite(void) cfs_get_random_bytes(seed, sizeof(seed)); /* Nodes with small feet have little entropy. The NID for this - * node gives the most entropy in the low bits */ + * node gives the most entropy in the low bits + */ for (i = 0;; i++) { if (LNetGetId(i, &lnet_id) == -ENOENT) break; @@ -186,7 +188,7 @@ out_cache: return rc; } -static void __exit exit_lustre_lite(void) +static void __exit lustre_exit(void) { lustre_register_client_fill_super(NULL); lustre_register_kill_super_cb(NULL); @@ -207,8 +209,9 @@ static void __exit exit_lustre_lite(void) } MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Lite Client File System"); +MODULE_DESCRIPTION("Lustre Client File System"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); -module_init(init_lustre_lite); -module_exit(exit_lustre_lite); +module_init(lustre_init); +module_exit(lustre_exit); diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c index 2610348f6..46d03ea48 100644 --- a/drivers/staging/lustre/lustre/llite/symlink.c +++ b/drivers/staging/lustre/lustre/llite/symlink.c @@ -59,7 +59,8 @@ static int ll_readlink_internal(struct inode *inode, *symname = lli->lli_symlink_name; /* If the total CDEBUG() size is larger than a page, it * will print a warning to the console, avoid this by - * printing just the last part of the symlink. */ + * printing just the last part of the symlink. + */ CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n", print_limit < symlen ? "..." : "", print_limit, (*symname) + symlen - print_limit, symlen); @@ -81,7 +82,6 @@ static int ll_readlink_internal(struct inode *inode, } body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); if ((body->valid & OBD_MD_LINKNAME) == 0) { CERROR("OBD_MD_LINKNAME not set on reply\n"); rc = -EPROTO; @@ -91,13 +91,13 @@ static int ll_readlink_internal(struct inode *inode, LASSERT(symlen != 0); if (body->eadatasize != symlen) { CERROR("inode %lu: symlink length %d not expected %d\n", - inode->i_ino, body->eadatasize - 1, symlen - 1); + inode->i_ino, body->eadatasize - 1, symlen - 1); rc = -EPROTO; goto failed; } *symname = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_MD); - if (*symname == NULL || + if (!*symname || strnlen(*symname, symlen) != symlen - 1) { /* not full/NULL terminated */ CERROR("inode %lu: symlink not NULL terminated string of length %d\n", diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c index 
fdca4ec05..282b70b77 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_dev.c +++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c @@ -79,8 +79,8 @@ static void *vvp_key_init(const struct lu_context *ctx, { struct vvp_thread_info *info; - info = kmem_cache_alloc(vvp_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -98,8 +98,8 @@ static void *vvp_session_key_init(const struct lu_context *ctx, { struct vvp_session *session; - session = kmem_cache_alloc(vvp_session_kmem, GFP_NOFS | __GFP_ZERO); - if (session == NULL) + session = kmem_cache_zalloc(vvp_session_kmem, GFP_NOFS); + if (!session) session = ERR_PTR(-ENOMEM); return session; } @@ -228,7 +228,7 @@ int cl_sb_fini(struct super_block *sb) if (!IS_ERR(env)) { cld = sbi->ll_cl; - if (cld != NULL) { + if (cld) { cl_stack_fini(env, cld); sbi->ll_cl = NULL; sbi->ll_site = NULL; @@ -325,11 +325,11 @@ static struct cl_object *vvp_pgcache_obj(const struct lu_env *env, cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket, vvp_pgcache_obj_get, id); - if (id->vpi_obj != NULL) { + if (id->vpi_obj) { struct lu_object *lu_obj; lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type); - if (lu_obj != NULL) { + if (lu_obj) { lu_object_ref_add(lu_obj, "dump", current); return lu2cl(lu_obj); } @@ -355,7 +355,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env, if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash)) return ~0ULL; clob = vvp_pgcache_obj(env, dev, &id); - if (clob != NULL) { + if (clob) { struct cl_object_header *hdr; int nr; struct cl_page *pg; @@ -443,7 +443,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v) vvp_pgcache_id_unpack(pos, &id); sbi = f->private; clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id); - if (clob != NULL) { + if (clob) { hdr = cl_object_header(clob); spin_lock(&hdr->coh_page_guard); @@ -452,7 +452,7 @@ static int vvp_pgcache_show(struct 
seq_file *f, void *v) seq_printf(f, "%8x@"DFID": ", id.vpi_index, PFID(&hdr->coh_lu.loh_fid)); - if (page != NULL) { + if (page) { vvp_pgcache_page_show(env, f, page); cl_page_put(env, page); } else diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h index 2e39533a4..bb393378c 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_internal.h +++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h @@ -44,14 +44,13 @@ #include "../include/cl_object.h" #include "llite_internal.h" -int vvp_io_init (const struct lu_env *env, - struct cl_object *obj, struct cl_io *io); -int vvp_lock_init (const struct lu_env *env, - struct cl_object *obj, struct cl_lock *lock, +int vvp_io_init(const struct lu_env *env, + struct cl_object *obj, struct cl_io *io); +int vvp_lock_init(const struct lu_env *env, + struct cl_object *obj, struct cl_lock *lock, const struct cl_io *io); -int vvp_page_init (const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, struct page *vmpage); +int vvp_page_init(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); struct lu_object *vvp_object_alloc(const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index 0920ac6b3..85a835976 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c @@ -68,7 +68,7 @@ int cl_is_normalio(const struct lu_env *env, const struct cl_io *io) * have to acquire group lock. 
*/ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, - struct inode *inode) + struct inode *inode) { struct ll_inode_info *lli = ll_i2info(inode); struct ccc_io *cio = ccc_env_io(env); @@ -78,7 +78,8 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io, case CIT_READ: case CIT_WRITE: /* don't need lock here to check lli_layout_gen as we have held - * extent lock and GROUP lock has to hold to swap layout */ + * extent lock and GROUP lock has to hold to swap layout + */ if (ll_layout_version_get(lli) != cio->cui_layout_gen) { io->ci_need_restart = 1; /* this will return application a short read/write */ @@ -134,7 +135,8 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) */ rc = ll_layout_restore(ccc_object_inode(obj)); /* if restore registration failed, no restart, - * we will return -ENODATA */ + * we will return -ENODATA + */ /* The layout will change after restore, so we need to * block on layout lock hold by the MDT * as MDT will not send new layout in lvb (see LU-3124) @@ -164,8 +166,7 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) DFID" layout changed from %d to %d.\n", PFID(lu_object_fid(&obj->co_lu)), cio->cui_layout_gen, gen); - /* today successful restore is the only possible - * case */ + /* today successful restore is the only possible case */ /* restore was done, clear restoring state */ ll_i2info(ccc_object_inode(obj))->lli_flags &= ~LLIF_FILE_RESTORING; @@ -181,7 +182,7 @@ static void vvp_io_fault_fini(const struct lu_env *env, CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj)); - if (page != NULL) { + if (page) { lu_ref_del(&page->cp_reference, "fault", io); cl_page_put(env, page); io->u.ci_fault.ft_page = NULL; @@ -220,11 +221,11 @@ static int vvp_mmap_locks(const struct lu_env *env, if (!cl_is_normalio(env, io)) return 0; - if (vio->cui_iter == NULL) /* nfs or loop back device write */ + if (!vio->cui_iter) /* nfs or loop 
back device write */ return 0; /* No MM (e.g. NFS)? No vmas too. */ - if (mm == NULL) + if (!mm) return 0; iov_for_each(iov, i, *(vio->cui_iter)) { @@ -456,7 +457,8 @@ static void vvp_io_setattr_end(const struct lu_env *env, if (cl_io_is_trunc(io)) /* Truncate in memory pages - they must be clean pages - * because osc has already notified to destroy osc_extents. */ + * because osc has already notified to destroy osc_extents. + */ vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size); inode_unlock(inode); @@ -499,8 +501,8 @@ static int vvp_io_read_start(const struct lu_env *env, goto out; LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, - "Read ino %lu, %lu bytes, offset %lld, size %llu\n", - inode->i_ino, cnt, pos, i_size_read(inode)); + "Read ino %lu, %lu bytes, offset %lld, size %llu\n", + inode->i_ino, cnt, pos, i_size_read(inode)); /* turn off the kernel's read-ahead */ cio->cui_fd->fd_file->f_ra.ra_pages = 0; @@ -510,9 +512,9 @@ static int vvp_io_read_start(const struct lu_env *env, vio->cui_ra_window_set = 1; bead->lrr_start = cl_index(obj, pos); /* - * XXX: explicit PAGE_CACHE_SIZE + * XXX: explicit PAGE_SIZE */ - bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1); + bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1); ll_ra_read_in(file, bead); } @@ -525,11 +527,12 @@ static int vvp_io_read_start(const struct lu_env *env, break; case IO_SPLICE: result = generic_file_splice_read(file, &pos, - vio->u.splice.cui_pipe, cnt, - vio->u.splice.cui_flags); + vio->u.splice.cui_pipe, cnt, + vio->u.splice.cui_flags); /* LU-1109: do splice read stripe by stripe otherwise if it * may make nfsd stuck if this read occupied all internal pipe - * buffers. */ + * buffers. + */ io->ci_continue = 0; break; default: @@ -587,7 +590,7 @@ static int vvp_io_write_start(const struct lu_env *env, CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt); - if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). 
*/ + if (!cio->cui_iter) /* from a temp io in ll_cl_init(). */ result = 0; else result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter); @@ -673,7 +676,7 @@ static int vvp_io_fault_start(const struct lu_env *env, /* must return locked page */ if (fio->ft_mkwrite) { - LASSERT(cfio->ft_vmpage != NULL); + LASSERT(cfio->ft_vmpage); lock_page(cfio->ft_vmpage); } else { result = vvp_io_kernel_fault(cfio); @@ -689,13 +692,15 @@ static int vvp_io_fault_start(const struct lu_env *env, size = i_size_read(inode); /* Though we have already held a cl_lock upon this page, but - * it still can be truncated locally. */ + * it still can be truncated locally. + */ if (unlikely((vmpage->mapping != inode->i_mapping) || (page_offset(vmpage) > size))) { CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n"); /* return +1 to stop cl_io_loop() and ll_fault() will catch - * and retry. */ + * and retry. + */ result = 1; goto out; } @@ -736,7 +741,8 @@ static int vvp_io_fault_start(const struct lu_env *env, } /* if page is going to be written, we should add this page into cache - * earlier. */ + * earlier. + */ if (fio->ft_mkwrite) { wait_on_page_writeback(vmpage); if (set_page_dirty(vmpage)) { @@ -750,7 +756,8 @@ static int vvp_io_fault_start(const struct lu_env *env, /* Do not set Dirty bit here so that in case IO is * started before the page is really made dirty, we - * still have chance to detect it. */ + * still have chance to detect it. 
+ */ result = cl_page_cache_add(env, io, page, CRT_WRITE); LASSERT(cl_page_is_owned(page, io)); @@ -792,7 +799,7 @@ static int vvp_io_fault_start(const struct lu_env *env, out: /* return unlocked vmpage to avoid deadlocking */ - if (vmpage != NULL) + if (vmpage) unlock_page(vmpage); cfio->fault.ft_flags &= ~VM_FAULT_LOCKED; return result; @@ -803,7 +810,8 @@ static int vvp_io_fsync_start(const struct lu_env *env, { /* we should mark TOWRITE bit to each dirty page in radix tree to * verify pages have been written, but this is difficult because of - * race. */ + * race. + */ return 0; } @@ -951,7 +959,7 @@ static int vvp_io_prepare_write(const struct lu_env *env, * We're completely overwriting an existing page, so _don't_ * set it up to date until commit_write */ - if (from == 0 && to == PAGE_CACHE_SIZE) { + if (from == 0 && to == PAGE_SIZE) { CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n"); POISON_PAGE(page, 0x11); } else @@ -1003,7 +1011,7 @@ static int vvp_io_commit_write(const struct lu_env *env, * * (3) IO is batched up to the RPC size and is async until the * client max cache is hit - * (/proc/fs/lustre/osc/OSC.../max_dirty_mb) + * (/sys/fs/lustre/osc/OSC.../max_dirty_mb) * */ if (!PageDirty(vmpage)) { @@ -1014,7 +1022,7 @@ static int vvp_io_commit_write(const struct lu_env *env, set_page_dirty(vmpage); vvp_write_pending(cl2ccc(obj), cp); } else if (result == -EDQUOT) { - pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT; + pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT; bool need_clip = true; /* @@ -1032,7 +1040,7 @@ static int vvp_io_commit_write(const struct lu_env *env, * being. 
*/ if (last_index > pg->cp_index) { - to = PAGE_CACHE_SIZE; + to = PAGE_SIZE; need_clip = false; } else if (last_index == pg->cp_index) { int size_to = i_size_read(inode) & ~CFS_PAGE_MASK; @@ -1153,7 +1161,8 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, count = io->u.ci_rw.crw_count; /* "If nbyte is 0, read() will return 0 and have no other - * results." -- Single Unix Spec */ + * results." -- Single Unix Spec + */ if (count == 0) result = 1; else @@ -1173,25 +1182,28 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj, /* ignore layout change for generic CIT_MISC but not for glimpse. * io context for glimpse must set ci_verify_layout to true, - * see cl_glimpse_size0() for details. */ + * see cl_glimpse_size0() for details. + */ if (io->ci_type == CIT_MISC && !io->ci_verify_layout) io->ci_ignore_layout = 1; /* Enqueue layout lock and get layout version. We need to do this * even for operations requiring to open file, such as read and write, - * because it might not grant layout lock in IT_OPEN. */ + * because it might not grant layout lock in IT_OPEN. + */ if (result == 0 && !io->ci_ignore_layout) { result = ll_layout_refresh(inode, &cio->cui_layout_gen); if (result == -ENOENT) /* If the inode on MDS has been removed, but the objects * on OSTs haven't been destroyed (async unlink), layout * fetch will return -ENOENT, we'd ignore this error - * and continue with dirty flush. LU-3230. */ + * and continue with dirty flush. LU-3230. 
+ */ result = 0; if (result < 0) CERROR("%s: refresh file layout " DFID " error %d.\n", - ll_get_fsname(inode->i_sb, NULL, 0), - PFID(lu_object_fid(&obj->co_lu)), result); + ll_get_fsname(inode->i_sb, NULL, 0), + PFID(lu_object_fid(&obj->co_lu)), result); } return result; diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c index c82714ea8..03c887d8e 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_object.c +++ b/drivers/staging/lustre/lustre/llite/vvp_object.c @@ -137,7 +137,8 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj, * page may be stale due to layout change, and the process * will never be notified. * This operation is expensive but mmap processes have to pay - * a price themselves. */ + * a price themselves. + */ unmap_mapping_range(conf->coc_inode->i_mapping, 0, OBD_OBJECT_EOF, 0); @@ -147,7 +148,7 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj, if (conf->coc_opc != OBJECT_CONF_SET) return 0; - if (conf->u.coc_md != NULL && conf->u.coc_md->lsm != NULL) { + if (conf->u.coc_md && conf->u.coc_md->lsm) { CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n", PFID(&lli->lli_fid), lli->lli_layout_gen, conf->u.coc_md->lsm->lsm_layout_gen); @@ -186,9 +187,8 @@ struct ccc_object *cl_inode2ccc(struct inode *inode) struct cl_object *obj = lli->lli_clob; struct lu_object *lu; - LASSERT(obj != NULL); lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type); - LASSERT(lu != NULL); + LASSERT(lu); return lu2ccc(lu); } diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c index a133475a7..33ca3eb34 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_page.c +++ b/drivers/staging/lustre/lustre/llite/vvp_page.c @@ -56,8 +56,8 @@ static void vvp_page_fini_common(struct ccc_page *cp) { struct page *vmpage = cp->cpg_page; - LASSERT(vmpage != NULL); - page_cache_release(vmpage); + 
LASSERT(vmpage); + put_page(vmpage); } static void vvp_page_fini(const struct lu_env *env, @@ -81,7 +81,7 @@ static int vvp_page_own(const struct lu_env *env, struct ccc_page *vpg = cl2ccc_page(slice); struct page *vmpage = vpg->cpg_page; - LASSERT(vmpage != NULL); + LASSERT(vmpage); if (nonblock) { if (!trylock_page(vmpage)) return -EAGAIN; @@ -105,7 +105,7 @@ static void vvp_page_assume(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); wait_on_page_writeback(vmpage); } @@ -116,7 +116,7 @@ static void vvp_page_unassume(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); } @@ -125,7 +125,7 @@ static void vvp_page_disown(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); unlock_page(cl2vm_page(slice)); @@ -139,7 +139,7 @@ static void vvp_page_discard(const struct lu_env *env, struct address_space *mapping; struct ccc_page *cpg = cl2ccc_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); mapping = vmpage->mapping; @@ -161,15 +161,15 @@ static int vvp_page_unmap(const struct lu_env *env, struct page *vmpage = cl2vm_page(slice); __u64 offset; - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); - offset = vmpage->index << PAGE_CACHE_SHIFT; + offset = vmpage->index << PAGE_SHIFT; /* * XXX is it safe to call this with the page lock held? 
*/ - ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE); + ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE); return 0; } @@ -199,7 +199,7 @@ static void vvp_page_export(const struct lu_env *env, { struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); + LASSERT(vmpage); LASSERT(PageLocked(vmpage)); if (uptodate) SetPageUptodate(vmpage); @@ -232,7 +232,8 @@ static int vvp_page_prep_write(const struct lu_env *env, LASSERT(!PageDirty(vmpage)); /* ll_writepage path is not a sync write, so need to set page writeback - * flag */ + * flag + */ if (!pg->cp_sync_io) set_page_writeback(vmpage); @@ -262,7 +263,7 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret set_bit(AS_EIO, &inode->i_mapping->flags); if ((ioret == -ESHUTDOWN || ioret == -EINTR) && - obj->cob_discard_page_warned == 0) { + obj->cob_discard_page_warned == 0) { obj->cob_discard_page_warned = 1; ll_dirty_page_discard_warn(vmpage, ioret); } @@ -290,7 +291,7 @@ static void vvp_page_completion_read(const struct lu_env *env, } else cp->cpg_defer_uptodate = 0; - if (page->cp_sync_io == NULL) + if (!page->cp_sync_io) unlock_page(vmpage); } @@ -317,7 +318,7 @@ static void vvp_page_completion_write(const struct lu_env *env, cp->cpg_write_queued = 0; vvp_write_complete(cl2ccc(slice->cpl_obj), cp); - if (pg->cp_sync_io != NULL) { + if (pg->cp_sync_io) { LASSERT(PageLocked(vmpage)); LASSERT(!PageWriteback(vmpage)); } else { @@ -356,15 +357,14 @@ static int vvp_page_make_ready(const struct lu_env *env, lock_page(vmpage); if (clear_page_dirty_for_io(vmpage)) { LASSERT(pg->cp_state == CPS_CACHED); - /* This actually clears the dirty bit in the radix - * tree. */ + /* This actually clears the dirty bit in the radix tree. 
*/ set_page_writeback(vmpage); - vvp_write_pending(cl2ccc(slice->cpl_obj), - cl2ccc_page(slice)); + vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice)); CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n"); } else if (pg->cp_state == CPS_PAGEOUT) { /* is it possible for osc_flush_async_page() to already - * make it ready? */ + * make it ready? + */ result = -EALREADY; } else { CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n", @@ -385,7 +385,7 @@ static int vvp_page_print(const struct lu_env *env, (*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ", vp, vp->cpg_defer_uptodate, vp->cpg_ra_used, vp->cpg_write_queued, vmpage); - if (vmpage != NULL) { + if (vmpage) { (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru", (long)vmpage->flags, page_count(vmpage), page_mapcount(vmpage), vmpage->private, @@ -530,27 +530,26 @@ static const struct cl_page_operations vvp_transient_page_ops = { }; int vvp_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { struct ccc_page *cpg = cl_object_page_slice(obj, page); CLOBINVRNT(env, obj, ccc_object_invariant(obj)); cpg->cpg_page = vmpage; - page_cache_get(vmpage); + get_page(vmpage); INIT_LIST_HEAD(&cpg->cpg_pending_linkage); if (page->cp_type == CPT_CACHEABLE) { SetPagePrivate(vmpage); vmpage->private = (unsigned long)page; - cl_page_slice_add(page, &cpg->cpg_cl, obj, - &vvp_page_ops); + cl_page_slice_add(page, &cpg->cpg_cl, obj, &vvp_page_ops); } else { struct ccc_object *clobj = cl2ccc(obj); LASSERT(!inode_trylock(clobj->cob_inode)); cl_page_slice_add(page, &cpg->cpg_cl, obj, - &vvp_transient_page_ops); + &vvp_transient_page_ops); clobj->cob_transient_pages++; } return 0; diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c index 8eb43f192..b68dcc921 100644 --- a/drivers/staging/lustre/lustre/llite/xattr.c +++ b/drivers/staging/lustre/lustre/llite/xattr.c 
@@ -135,7 +135,7 @@ int ll_setxattr_common(struct inode *inode, const char *name, /* b15587: ignore security.capability xattr for now */ if ((xattr_type == XATTR_SECURITY_T && - strcmp(name, "security.capability") == 0)) + strcmp(name, "security.capability") == 0)) return 0; /* LU-549: Disable security.selinux when selinux is disabled */ @@ -148,7 +148,7 @@ int ll_setxattr_common(struct inode *inode, const char *name, (xattr_type == XATTR_ACL_ACCESS_T || xattr_type == XATTR_ACL_DEFAULT_T)) { rce = rct_search(&sbi->ll_rct, current_pid()); - if (rce == NULL || + if (!rce || (rce->rce_ops != RMT_LSETFACL && rce->rce_ops != RMT_RSETFACL)) return -EOPNOTSUPP; @@ -158,7 +158,6 @@ int ll_setxattr_common(struct inode *inode, const char *name, ee = et_search_del(&sbi->ll_et, current_pid(), ll_inode2fid(inode), xattr_type); - LASSERT(ee != NULL); if (valid & OBD_MD_FLXATTR) { acl = lustre_acl_xattr_merge2ext( (posix_acl_xattr_header *)value, @@ -192,12 +191,11 @@ int ll_setxattr_common(struct inode *inode, const char *name, valid, name, pv, size, 0, flags, ll_i2suppgid(inode), &req); #ifdef CONFIG_FS_POSIX_ACL - if (new_value != NULL) - /* - * Release the posix ACL space. - */ - kfree(new_value); - if (acl != NULL) + /* + * Release the posix ACL space. + */ + kfree(new_value); + if (acl) lustre_ext_acl_xattr_free(acl); #endif if (rc) { @@ -239,11 +237,12 @@ int ll_setxattr(struct dentry *dentry, const char *name, /* Attributes that are saved via getxattr will always have * the stripe_offset as 0. Instead, the MDS should be - * allowed to pick the starting OST index. b=17846 */ - if (lump != NULL && lump->lmm_stripe_offset == 0) + * allowed to pick the starting OST index. b=17846 + */ + if (lump && lump->lmm_stripe_offset == 0) lump->lmm_stripe_offset = -1; - if (lump != NULL && S_ISREG(inode->i_mode)) { + if (lump && S_ISREG(inode->i_mode)) { int flags = FMODE_WRITE; int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ? 
sizeof(*lump) : sizeof(struct lov_user_md_v3); @@ -312,7 +311,7 @@ int ll_getxattr_common(struct inode *inode, const char *name, /* b15587: ignore security.capability xattr for now */ if ((xattr_type == XATTR_SECURITY_T && - strcmp(name, "security.capability") == 0)) + strcmp(name, "security.capability") == 0)) return -ENODATA; /* LU-549: Disable security.selinux when selinux is disabled */ @@ -325,7 +324,7 @@ int ll_getxattr_common(struct inode *inode, const char *name, (xattr_type == XATTR_ACL_ACCESS_T || xattr_type == XATTR_ACL_DEFAULT_T)) { rce = rct_search(&sbi->ll_rct, current_pid()); - if (rce == NULL || + if (!rce || (rce->rce_ops != RMT_LSETFACL && rce->rce_ops != RMT_LGETFACL && rce->rce_ops != RMT_RSETFACL && @@ -366,7 +365,7 @@ do_getxattr: goto out_xattr; /* Add "system.posix_acl_access" to the list */ - if (lli->lli_posix_acl != NULL && valid & OBD_MD_FLXATTRLS) { + if (lli->lli_posix_acl && valid & OBD_MD_FLXATTRLS) { if (size == 0) { rc += sizeof(XATTR_NAME_ACL_ACCESS); } else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) { @@ -398,7 +397,7 @@ getxattr_nocache: if (size < body->eadatasize) { CERROR("server bug: replied size %u > %u\n", - body->eadatasize, (int)size); + body->eadatasize, (int)size); rc = -ERANGE; goto out; } @@ -410,7 +409,7 @@ getxattr_nocache: /* do not need swab xattr data */ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, - body->eadatasize); + body->eadatasize); if (!xdata) { rc = -EFAULT; goto out; @@ -482,13 +481,14 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, if (size == 0 && S_ISDIR(inode->i_mode)) { /* XXX directory EA is fix for now, optimize to save - * RPC transfer */ + * RPC transfer + */ rc = sizeof(struct lov_user_md); goto out; } lsm = ccc_inode_lsm_get(inode); - if (lsm == NULL) { + if (!lsm) { if (S_ISDIR(inode->i_mode)) { rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request); @@ -497,7 +497,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, } } else { /* LSM is 
present already after lookup/getattr call. - * we need to grab layout lock once it is implemented */ + * we need to grab layout lock once it is implemented + */ rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm); lmmsize = rc; } @@ -510,7 +511,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, /* used to call ll_get_max_mdsize() forward to get * the maximum buffer size, while some apps (such as * rsync 3.0.x) care much about the exact xattr value - * size */ + * size + */ rc = lmmsize; goto out; } @@ -526,7 +528,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name, memcpy(lump, lmm, lmmsize); /* do not return layout gen for getxattr otherwise it would * confuse tar --xattr by recognizing layout gen as stripe - * offset when the file is restored. See LU-2809. */ + * offset when the file is restored. See LU-2809. + */ lump->lmm_layout_gen = 0; rc = lmmsize; @@ -560,7 +563,7 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) if (rc < 0) goto out; - if (buffer != NULL) { + if (buffer) { struct ll_sb_info *sbi = ll_i2sbi(inode); char *xattr_name = buffer; int xlen, rem = rc; @@ -598,12 +601,12 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size) const size_t name_len = sizeof("lov") - 1; const size_t total_len = prefix_len + name_len + 1; - if (((rc + total_len) > size) && (buffer != NULL)) { + if (((rc + total_len) > size) && buffer) { ptlrpc_req_finished(request); return -ERANGE; } - if (buffer != NULL) { + if (buffer) { buffer += rc; memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len); memcpy(buffer + prefix_len, "lov", name_len); diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c index d1402762a..3480ce2bb 100644 --- a/drivers/staging/lustre/lustre/llite/xattr_cache.c +++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c @@ -23,7 +23,8 @@ */ struct ll_xattr_entry { struct list_head xe_list; /* protected with - * lli_xattrs_list_rwsem */ + * 
lli_xattrs_list_rwsem + */ char *xe_name; /* xattr name, \0-terminated */ char *xe_value; /* xattr value */ unsigned xe_namelen; /* strlen(xe_name) + 1 */ @@ -59,9 +60,6 @@ void ll_xattr_fini(void) */ static void ll_xattr_cache_init(struct ll_inode_info *lli) { - - LASSERT(lli != NULL); - INIT_LIST_HEAD(&lli->lli_xattrs); lli->lli_flags |= LLIF_XATTR_CACHE; } @@ -83,8 +81,7 @@ static int ll_xattr_cache_find(struct list_head *cache, list_for_each_entry(entry, cache, xe_list) { /* xattr_name == NULL means look for any entry */ - if (xattr_name == NULL || - strcmp(xattr_name, entry->xe_name) == 0) { + if (!xattr_name || strcmp(xattr_name, entry->xe_name) == 0) { *xattr = entry; CDEBUG(D_CACHE, "find: [%s]=%.*s\n", entry->xe_name, entry->xe_vallen, @@ -117,8 +114,8 @@ static int ll_xattr_cache_add(struct list_head *cache, return -EPROTO; } - xattr = kmem_cache_alloc(xattr_kmem, GFP_NOFS | __GFP_ZERO); - if (xattr == NULL) { + xattr = kmem_cache_zalloc(xattr_kmem, GFP_NOFS); + if (!xattr) { CDEBUG(D_CACHE, "failed to allocate xattr\n"); return -ENOMEM; } @@ -136,8 +133,8 @@ static int ll_xattr_cache_add(struct list_head *cache, xattr->xe_vallen = xattr_val_len; list_add(&xattr->xe_list, cache); - CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, - xattr_val_len, xattr_val); + CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, xattr_val_len, + xattr_val); return 0; err_value: @@ -194,7 +191,7 @@ static int ll_xattr_cache_list(struct list_head *cache, list_for_each_entry_safe(xattr, tmp, cache, xe_list) { CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n", - xld_buffer, xld_tail, xattr->xe_name); + xld_buffer, xld_tail, xattr->xe_name); if (xld_buffer) { xld_size -= xattr->xe_namelen; @@ -270,7 +267,7 @@ static int ll_xattr_find_get_lock(struct inode *inode, struct lookup_intent *oit, struct ptlrpc_request **req) { - ldlm_mode_t mode; + enum ldlm_mode mode; struct lustre_handle lockh = { 0 }; struct md_op_data *op_data; struct ll_inode_info *lli = ll_i2info(inode); @@ -284,7 
+281,8 @@ static int ll_xattr_find_get_lock(struct inode *inode, mutex_lock(&lli->lli_xattrs_enq_lock); /* inode may have been shrunk and recreated, so data is gone, match lock - * only when data exists. */ + * only when data exists. + */ if (ll_xattr_cache_valid(lli)) { /* Try matching first. */ mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0, @@ -359,7 +357,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit) } /* Matched but no cache? Cancelled on error by a parallel refill. */ - if (unlikely(req == NULL)) { + if (unlikely(!req)) { CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n"); rc = -EIO; goto out_maybe_drop; @@ -376,19 +374,19 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit) } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { CERROR("no MDT BODY in the refill xattr reply\n"); rc = -EPROTO; goto out_destroy; } /* do not need swab xattr data */ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, - body->eadatasize); + body->eadatasize); xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS, - body->aclsize); + body->aclsize); xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS, body->max_mdsize * sizeof(__u32)); - if (xdata == NULL || xval == NULL || xsizes == NULL) { + if (!xdata || !xval || !xsizes) { CERROR("wrong setxattr reply\n"); rc = -EPROTO; goto out_destroy; @@ -404,7 +402,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit) for (i = 0; i < body->max_mdsize; i++) { CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval); /* Perform consistency checks: attr names and vals in pill */ - if (memchr(xdata, 0, xtail - xdata) == NULL) { + if (!memchr(xdata, 0, xtail - xdata)) { CERROR("xattr protocol violation (names are broken)\n"); rc = -EPROTO; } else if (xval + *xsizes > xvtail) { @@ -471,11 +469,8 @@ out_destroy: * \retval -ERANGE the buffer is 
not large enough * \retval -ENODATA no such attr or the list is empty */ -int ll_xattr_cache_get(struct inode *inode, - const char *name, - char *buffer, - size_t size, - __u64 valid) +int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer, + size_t size, __u64 valid) { struct lookup_intent oit = { .it_op = IT_GETXATTR }; struct ll_inode_info *lli = ll_i2info(inode); @@ -504,7 +499,7 @@ int ll_xattr_cache_get(struct inode *inode, if (size != 0) { if (size >= xattr->xe_vallen) memcpy(buffer, xattr->xe_value, - xattr->xe_vallen); + xattr->xe_vallen); else rc = -ERANGE; } diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c index ee235926f..378691b2a 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c @@ -58,7 +58,8 @@ int lmv_fld_lookup(struct lmv_obd *lmv, int rc; /* FIXME: Currently ZFS still use local seq for ROOT unfortunately, and - * this fid_is_local check should be removed once LU-2240 is fixed */ + * this fid_is_local check should be removed once LU-2240 is fixed + */ LASSERTF((fid_seq_in_fldb(fid_seq(fid)) || fid_seq_is_local_file(fid_seq(fid))) && fid_is_sane(fid), DFID" is insane!\n", PFID(fid)); diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c index 66de27f1d..e0958eaed 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c @@ -69,7 +69,7 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm, int rc = 0; body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; LASSERT((body->valid & OBD_MD_MDS)); @@ -107,14 +107,16 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm, op_data->op_fid1 = body->fid1; /* Sent the parent FID to the remote MDT */ - if (parent_fid != NULL) { + if (parent_fid) { /* The parent fid is only for remote open to * 
check whether the open is from OBF, - * see mdt_cross_open */ + * see mdt_cross_open + */ LASSERT(it->it_op & IT_OPEN); op_data->op_fid2 = *parent_fid; /* Add object FID to op_fid3, in case it needs to check stale - * (M_CHECK_STALE), see mdc_finish_intent_lock */ + * (M_CHECK_STALE), see mdc_finish_intent_lock + */ op_data->op_fid3 = body->fid1; } @@ -173,7 +175,8 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data, return PTR_ERR(tgt); /* If it is ready to open the file by FID, do not need - * allocate FID at all, otherwise it will confuse MDT */ + * allocate FID at all, otherwise it will confuse MDT + */ if ((it->it_op & IT_CREAT) && !(it->it_flags & MDS_OPEN_BY_FID)) { /* @@ -204,7 +207,7 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data, return rc; body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; /* * Not cross-ref case, just get out of here. @@ -268,9 +271,9 @@ static int lmv_intent_lookup(struct obd_export *exp, op_data->op_bias &= ~MDS_CROSS_REF; rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it, - flags, reqp, cb_blocking, extra_lock_flags); + flags, reqp, cb_blocking, extra_lock_flags); - if (rc < 0 || *reqp == NULL) + if (rc < 0 || !*reqp) return rc; /* @@ -278,7 +281,7 @@ static int lmv_intent_lookup(struct obd_export *exp, * remote inode. Let's check this. */ body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; /* Not cross-ref case, just get out of here. 
*/ if (likely(!(body->valid & OBD_MD_MDS))) @@ -299,7 +302,6 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data, struct obd_device *obd = exp->exp_obd; int rc; - LASSERT(it != NULL); LASSERT(fid_is_sane(&op_data->op_fid1)); CDEBUG(D_INODE, "INTENT LOCK '%s' for '%*s' on "DFID"\n", diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h index eb8e673cb..8a0087190 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h +++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h @@ -66,7 +66,7 @@ static inline struct lmv_stripe_md *lmv_get_mea(struct ptlrpc_request *req) struct mdt_body *body; struct lmv_stripe_md *mea; - LASSERT(req != NULL); + LASSERT(req); body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); @@ -75,13 +75,11 @@ static inline struct lmv_stripe_md *lmv_get_mea(struct ptlrpc_request *req) mea = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, body->eadatasize); - LASSERT(mea != NULL); - if (mea->mea_count == 0) return NULL; if (mea->mea_magic != MEA_MAGIC_LAST_CHAR && - mea->mea_magic != MEA_MAGIC_ALL_CHARS && - mea->mea_magic != MEA_MAGIC_HASH_SEGMENT) + mea->mea_magic != MEA_MAGIC_ALL_CHARS && + mea->mea_magic != MEA_MAGIC_HASH_SEGMENT) return NULL; return mea; @@ -101,7 +99,7 @@ lmv_get_target(struct lmv_obd *lmv, u32 mds) int i; for (i = 0; i < count; i++) { - if (lmv->tgts[i] == NULL) + if (!lmv->tgts[i]) continue; if (lmv->tgts[i]->ltd_idx == mds) diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c index bbafe0a71..9abb7c2b9 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c @@ -53,6 +53,7 @@ #include "../include/lprocfs_status.h" #include "../include/lustre_lite.h" #include "../include/lustre_fid.h" +#include "../include/lustre_kernelcomm.h" #include "lmv_internal.h" static void lmv_activate_target(struct lmv_obd *lmv, @@ -87,7 +88,7 @@ static 
int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid, spin_lock(&lmv->lmv_lock); for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) continue; CDEBUG(D_INFO, "Target idx %d is %s conn %#llx\n", i, @@ -103,7 +104,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid, } obd = class_exp2obd(tgt->ltd_exp); - if (obd == NULL) { + if (!obd) { rc = -ENOTCONN; goto out_lmv_lock; } @@ -237,7 +238,7 @@ static int lmv_connect(const struct lu_env *env, * and MDC stuff will be called directly, for instance while reading * ../mdc/../kbytesfree procfs file, etc. */ - if (data->ocd_connect_flags & OBD_CONNECT_REAL) + if (data && data->ocd_connect_flags & OBD_CONNECT_REAL) rc = lmv_check_connect(obd); if (rc && lmv->lmv_tgts_kobj) @@ -261,7 +262,7 @@ static void lmv_set_timeouts(struct obd_device *obd) for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0) + if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0) continue; obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS), @@ -301,8 +302,7 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize, return 0; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || - lmv->tgts[i]->ltd_exp == NULL || + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || lmv->tgts[i]->ltd_active == 0) { CWARN("%s: NULL export for %d\n", obd->obd_name, i); continue; @@ -311,7 +311,7 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize, rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize, cookiesize, def_cookiesize); if (rc) { - CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d.\n", + CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n", obd->obd_name, i, rc); break; } @@ -339,9 +339,8 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) } 
CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n", - mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, - tgt->ltd_uuid.uuid, obd->obd_uuid.uuid, - cluuid->uuid); + mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, + tgt->ltd_uuid.uuid, obd->obd_uuid.uuid, cluuid->uuid); if (!mdc_obd->obd_set_up) { CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid); @@ -397,8 +396,8 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) lmv->max_cookiesize, lmv->max_def_cookiesize); CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n", - mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, - atomic_read(&obd->obd_refcount)); + mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, + atomic_read(&obd->obd_refcount)); if (lmv->lmv_tgts_kobj) /* Even if we failed to create the link, that's fine */ @@ -409,7 +408,7 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) static void lmv_del_target(struct lmv_obd *lmv, int index) { - if (lmv->tgts[index] == NULL) + if (!lmv->tgts[index]) return; kfree(lmv->tgts[index]); @@ -418,7 +417,7 @@ static void lmv_del_target(struct lmv_obd *lmv, int index) } static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, - __u32 index, int gen) + __u32 index, int gen) { struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; @@ -441,7 +440,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, } } - if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) { + if ((index < lmv->tgts_size) && lmv->tgts[index]) { tgt = lmv->tgts[index]; CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n", obd->obd_name, @@ -459,7 +458,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, while (newsize < index + 1) newsize <<= 1; newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS); - if (newtgts == NULL) { + if (!newtgts) { lmv_init_unlock(lmv); return -ENOMEM; } @@ -538,11 +537,9 @@ int lmv_check_connect(struct obd_device *obd) CDEBUG(D_CONFIG, 
"Time to connect %s to %s\n", lmv->cluuid.uuid, obd->obd_name); - LASSERT(lmv->tgts != NULL); - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL) + if (!tgt) continue; rc = lmv_connect_mdc(obd, tgt); if (rc) @@ -562,7 +559,7 @@ int lmv_check_connect(struct obd_device *obd) int rc2; tgt = lmv->tgts[i]; - if (tgt == NULL) + if (!tgt) continue; tgt->ltd_active = 0; if (tgt->ltd_exp) { @@ -585,9 +582,6 @@ static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) struct obd_device *mdc_obd; int rc; - LASSERT(tgt != NULL); - LASSERT(obd != NULL); - mdc_obd = class_exp2obd(tgt->ltd_exp); if (mdc_obd) { @@ -640,7 +634,7 @@ static int lmv_disconnect(struct obd_export *exp) goto out_local; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; lmv_disconnect_mdc(obd, lmv->tgts[i]); @@ -662,7 +656,8 @@ out_local: return rc; } -static int lmv_fid2path(struct obd_export *exp, int len, void *karg, void *uarg) +static int lmv_fid2path(struct obd_export *exp, int len, void *karg, + void __user *uarg) { struct obd_device *obddev = class_exp2obd(exp); struct lmv_obd *lmv = &obddev->u.lmv; @@ -683,8 +678,9 @@ repeat_fid2path: goto out_fid2path; /* If remote_gf != NULL, it means just building the - * path on the remote MDT, copy this path segment to gf */ - if (remote_gf != NULL) { + * path on the remote MDT, copy this path segment to gf + */ + if (remote_gf) { struct getinfo_fid2path *ori_gf; char *ptr; @@ -714,7 +710,7 @@ repeat_fid2path: goto out_fid2path; /* sigh, has to go to another MDT to do path building further */ - if (remote_gf == NULL) { + if (!remote_gf) { remote_gf_size = sizeof(*remote_gf) + PATH_MAX; remote_gf = kzalloc(remote_gf_size, GFP_NOFS); if (!remote_gf) { @@ -779,7 +775,7 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv, nr_out = 0; for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) { 
curr_tgt = lmv_find_target(lmv, - &hur_in->hur_user_item[i].hui_fid); + &hur_in->hur_user_item[i].hui_fid); if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) { hur_out->hur_user_item[nr_out] = hur_in->hur_user_item[i]; @@ -792,14 +788,17 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv, } static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len, - struct lustre_kernelcomm *lk, void *uarg) + struct lustre_kernelcomm *lk, + void __user *uarg) { - int i, rc = 0; + int rc = 0; + __u32 i; /* unregister request (call from llapi_hsm_copytool_fini) */ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { /* best effort: try to clean as much as possible - * (continue on error) */ + * (continue on error) + */ obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg); } @@ -808,23 +807,25 @@ static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len, * and will unregister automatically. */ rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group); + return rc; } static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len, - struct lustre_kernelcomm *lk, void *uarg) + struct lustre_kernelcomm *lk, void __user *uarg) { - struct file *filp; - int i, j, err; - int rc = 0; - bool any_set = false; + struct file *filp; + __u32 i, j; + int err, rc = 0; + bool any_set = false; + struct kkuc_ct_data kcd = { 0 }; /* All or nothing: try to register to all MDS. * In case of failure, unregister from previous MDS, - * except if it because of inactive target. */ + * except if it because of inactive target. 
+ */ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, - len, lk, uarg); + err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg); if (err) { if (lmv->tgts[i]->ltd_active) { /* permanent error */ @@ -836,13 +837,13 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len, /* unregister from previous MDS */ for (j = 0; j < i; j++) obd_iocontrol(cmd, - lmv->tgts[j]->ltd_exp, - len, lk, uarg); + lmv->tgts[j]->ltd_exp, + len, lk, uarg); return rc; } /* else: transient error. - * kuc will register to the missing MDT - * when it is back */ + * kuc will register to the missing MDT when it is back + */ } else { any_set = true; } @@ -854,17 +855,25 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len, /* at least one registration done, with no failure */ filp = fget(lk->lk_wfd); - if (filp == NULL) { + if (!filp) return -EBADF; + + kcd.kcd_magic = KKUC_CT_DATA_MAGIC; + kcd.kcd_uuid = lmv->cluuid; + kcd.kcd_archive = lk->lk_data; + + rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, + &kcd, sizeof(kcd)); + if (rc) { + if (filp) + fput(filp); } - rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, lk->lk_data); - if (rc != 0 && filp != NULL) - fput(filp); + return rc; } static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, - int len, void *karg, void *uarg) + int len, void *karg, void __user *uarg) { struct obd_device *obddev = class_exp2obd(exp); struct lmv_obd *lmv = &obddev->u.lmv; @@ -887,8 +896,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (index >= count) return -ENODEV; - if (lmv->tgts[index] == NULL || - lmv->tgts[index]->ltd_active == 0) + if (!lmv->tgts[index] || lmv->tgts[index]->ltd_active == 0) return -ENODATA; mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp); @@ -897,8 +905,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, /* copy UUID */ if (copy_to_user(data->ioc_pbuf2, 
obd2cli_tgt(mdc_obd), - min((int) data->ioc_plen2, - (int) sizeof(struct obd_uuid)))) + min((int)data->ioc_plen2, + (int)sizeof(struct obd_uuid)))) return -EFAULT; rc = obd_statfs(NULL, lmv->tgts[index]->ltd_exp, &stat_buf, @@ -907,8 +915,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (rc) return rc; if (copy_to_user(data->ioc_pbuf1, &stat_buf, - min((int) data->ioc_plen1, - (int) sizeof(stat_buf)))) + min((int)data->ioc_plen1, + (int)sizeof(stat_buf)))) return -EFAULT; break; } @@ -922,18 +930,18 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, return -EINVAL; tgt = lmv->tgts[qctl->qc_idx]; - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) return -EINVAL; } else if (qctl->qc_valid == QC_UUID) { for (i = 0; i < count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL) + if (!tgt) continue; if (!obd_uuid_equals(&tgt->ltd_uuid, &qctl->obd_uuid)) continue; - if (tgt->ltd_exp == NULL) + if (!tgt->ltd_exp) return -EINVAL; break; @@ -967,8 +975,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (icc->icc_mdtindex >= count) return -ENODEV; - if (lmv->tgts[icc->icc_mdtindex] == NULL || - lmv->tgts[icc->icc_mdtindex]->ltd_exp == NULL || + if (!lmv->tgts[icc->icc_mdtindex] || + !lmv->tgts[icc->icc_mdtindex]->ltd_exp || lmv->tgts[icc->icc_mdtindex]->ltd_active == 0) return -ENODEV; rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp, @@ -976,7 +984,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, break; } case LL_IOC_GET_CONNECT_FLAGS: { - if (lmv->tgts[0] == NULL) + if (!lmv->tgts[0]) return -ENODATA; rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg); break; @@ -993,10 +1001,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, tgt = lmv_find_target(lmv, &op_data->op_fid1); if (IS_ERR(tgt)) - return PTR_ERR(tgt); + return PTR_ERR(tgt); - if (tgt->ltd_exp == NULL) - return -EINVAL; + if (!tgt->ltd_exp) + return -EINVAL; rc 
= obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); break; @@ -1021,7 +1029,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, /* if the request is about a single fid * or if there is a single MDS, no need to split - * the request. */ + * the request. + */ if (reqcount == 1 || count == 1) { tgt = lmv_find_target(lmv, &hur->hur_user_item[0].hui_fid); @@ -1044,7 +1053,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, hur_user_item[nr]) + hur->hur_request.hr_data_len; req = libcfs_kvzalloc(reqlen, GFP_NOFS); - if (req == NULL) + if (!req) return -ENOMEM; lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req); @@ -1070,7 +1079,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (IS_ERR(tgt2)) return PTR_ERR(tgt2); - if ((tgt1->ltd_exp == NULL) || (tgt2->ltd_exp == NULL)) + if (!tgt1->ltd_exp || !tgt2->ltd_exp) return -EINVAL; /* only files on same MDT can have their layouts swapped */ @@ -1094,11 +1103,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, struct obd_device *mdc_obd; int err; - if (lmv->tgts[i] == NULL || - lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; /* ll_umount_begin() sets force flag but for lmv, not - * mdc. Let's pass it through */ + * mdc. Let's pass it through + */ mdc_obd = class_exp2obd(lmv->tgts[i]->ltd_exp); mdc_obd->obd_force = obddev->obd_force; err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, @@ -1122,51 +1131,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, return rc; } -#if 0 -static int lmv_all_chars_policy(int count, const char *name, - int len) -{ - unsigned int c = 0; - - while (len > 0) - c += name[--len]; - c = c % count; - return c; -} - -static int lmv_nid_policy(struct lmv_obd *lmv) -{ - struct obd_import *imp; - __u32 id; - - /* - * XXX: To get nid we assume that underlying obd device is mdc. 
- */ - imp = class_exp2cliimp(lmv->tgts[0].ltd_exp); - id = imp->imp_connection->c_self ^ (imp->imp_connection->c_self >> 32); - return id % lmv->desc.ld_tgt_count; -} - -static int lmv_choose_mds(struct lmv_obd *lmv, struct md_op_data *op_data, - enum placement_policy placement) -{ - switch (placement) { - case PLACEMENT_CHAR_POLICY: - return lmv_all_chars_policy(lmv->desc.ld_tgt_count, - op_data->op_name, - op_data->op_namelen); - case PLACEMENT_NID_POLICY: - return lmv_nid_policy(lmv); - - default: - break; - } - - CERROR("Unsupported placement policy %x\n", placement); - return -EINVAL; -} -#endif - /** * This is _inode_ placement policy function (not name). */ @@ -1175,7 +1139,7 @@ static int lmv_placement_policy(struct obd_device *obd, { struct lmv_obd *lmv = &obd->u.lmv; - LASSERT(mds != NULL); + LASSERT(mds); if (lmv->desc.ld_tgt_count == 1) { *mds = 0; @@ -1205,7 +1169,8 @@ static int lmv_placement_policy(struct obd_device *obd, } /* Allocate new fid on target according to operation type and parent - * home mds. */ + * home mds. 
+ */ *mds = op_data->op_mds; return 0; } @@ -1225,7 +1190,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds) */ mutex_lock(&tgt->ltd_fid_mutex); - if (tgt->ltd_active == 0 || tgt->ltd_exp == NULL) { + if (tgt->ltd_active == 0 || !tgt->ltd_exp) { rc = -ENODEV; goto out; } @@ -1252,8 +1217,8 @@ int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid, u32 mds = 0; int rc; - LASSERT(op_data != NULL); - LASSERT(fid != NULL); + LASSERT(op_data); + LASSERT(fid); rc = lmv_placement_policy(obd, op_data, &mds); if (rc) { @@ -1291,7 +1256,7 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg) } lmv->tgts = kcalloc(32, sizeof(*lmv->tgts), GFP_NOFS); - if (lmv->tgts == NULL) + if (!lmv->tgts) return -ENOMEM; lmv->tgts_size = 32; @@ -1332,11 +1297,11 @@ static int lmv_cleanup(struct obd_device *obd) struct lmv_obd *lmv = &obd->u.lmv; fld_client_fini(&lmv->lmv_fld); - if (lmv->tgts != NULL) { + if (lmv->tgts) { int i; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL) + if (!lmv->tgts[i]) continue; lmv_del_target(lmv, i); } @@ -1357,7 +1322,8 @@ static int lmv_process_config(struct obd_device *obd, u32 len, void *buf) switch (lcfg->lcfg_command) { case LCFG_ADD_MDC: /* modify_mdc_tgts add 0:lustre-clilmv 1:lustre-MDT0000_UUID - * 2:0 3:1 4:lustre-MDT0000-mdc_UUID */ + * 2:0 3:1 4:lustre-MDT0000-mdc_UUID + */ if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) { rc = -EINVAL; goto out; @@ -1402,7 +1368,7 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp, return -ENOMEM; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; rc = obd_statfs(env, lmv->tgts[i]->ltd_exp, temp, @@ -1421,7 +1387,8 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp, * i.e. mount does not need the merged osfs * from all of MDT. 
* And also clients can be mounted as long as - * MDT0 is in service*/ + * MDT0 is in service + */ if (flags & OBD_STATFS_FOR_MDT0) goto out_free_temp; } else { @@ -1547,7 +1514,7 @@ static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid) * space of MDT storing inode. */ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; md_null_inode(lmv->tgts[i]->ltd_exp, fid); } @@ -1575,7 +1542,7 @@ static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid, * space of MDT storing inode. */ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp) continue; rc = md_find_cbdata(lmv->tgts[i]->ltd_exp, fid, it, data); if (rc) @@ -1655,7 +1622,7 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data, cap_effective, rdev, request); if (rc == 0) { - if (*request == NULL) + if (!*request) return rc; CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2)); } @@ -1701,7 +1668,6 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo, int pmode; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); if (!(body->valid & OBD_MD_MDS)) return 0; @@ -1808,7 +1774,6 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data, body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); if (body->valid & OBD_MD_MDS) { struct lu_fid rid = body->fid1; @@ -1842,7 +1807,8 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data, NULL) static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data, - int op_tgt, ldlm_mode_t mode, int bits, int flag) + int op_tgt, enum ldlm_mode mode, int bits, + int flag) { struct lu_fid *fid = md_op_data_fid(op_data, flag); struct obd_device *obd = exp->exp_obd; @@ 
-2051,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, * |s|e|f|p|ent| 0 | ... | 0 | * '----------------- -----' * - * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is + * However, on hosts where the native VM page size (PAGE_SIZE) is * larger than LU_PAGE_SIZE, a single host page may contain multiple * lu_dirpages. After reading the lu_dirpages from the MDS, the * ldp_hash_end of the first lu_dirpage refers to the one immediately @@ -2082,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span * to the first entry of the next lu_dirpage. */ -#if PAGE_CACHE_SIZE > LU_PAGE_SIZE +#if PAGE_SIZE > LU_PAGE_SIZE static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) { int i; @@ -2097,7 +2063,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) while (--nlupgs > 0) { ent = lu_dirent_start(dp); - for (end_dirent = ent; ent != NULL; + for (end_dirent = ent; ent; end_dirent = ent, ent = lu_dirent_next(ent)) ; @@ -2117,7 +2083,8 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) break; /* Enlarge the end entry lde_reclen from 0 to - * first entry of next lu_dirpage. */ + * first entry of next lu_dirpage. 
+ */ LASSERT(le16_to_cpu(end_dirent->lde_reclen) == 0); end_dirent->lde_reclen = cpu_to_le16((char *)(dp->ldp_entries) - @@ -2134,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) } #else #define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0) -#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */ +#endif /* PAGE_SIZE > LU_PAGE_SIZE */ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, struct page **pages, struct ptlrpc_request **request) @@ -2143,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, struct lmv_obd *lmv = &obd->u.lmv; __u64 offset = op_data->op_offset; int rc; - int ncfspgs; /* pages read in PAGE_CACHE_SIZE */ + int ncfspgs; /* pages read in PAGE_SIZE */ int nlupgs; /* pages read in LU_PAGE_SIZE */ struct lmv_tgt_desc *tgt; @@ -2162,8 +2129,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, if (rc != 0) return rc; - ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1) - >> PAGE_CACHE_SHIFT; + ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1) + >> PAGE_SHIFT; nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT; LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK)); LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages); @@ -2227,7 +2194,7 @@ retry: return rc; body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; /* Not cross-ref case, just get out of here. */ @@ -2255,7 +2222,8 @@ retry: * 4. Then A will resend unlink RPC to MDT0. (retry 2nd times). * * In theory, it might try unlimited time here, but it should - * be very rare case. */ + * be very rare case. 
+ */ op_data->op_fid2 = body->fid1; ptlrpc_req_finished(*request); *request = NULL; @@ -2270,7 +2238,8 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) switch (stage) { case OBD_CLEANUP_EARLY: /* XXX: here should be calling obd_precleanup() down to - * stack. */ + * stack. + */ break; case OBD_CLEANUP_EXPORTS: fld_client_debugfs_fini(&lmv->lmv_fld); @@ -2291,7 +2260,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, int rc = 0; obd = class_exp2obd(exp); - if (obd == NULL) { + if (!obd) { CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n", exp->exp_handle.h_cookie); return -EINVAL; @@ -2312,7 +2281,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, /* * All tgts should be connected when this gets called. */ - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) continue; if (!obd_get_info(env, tgt->ltd_exp, keylen, key, @@ -2355,7 +2324,7 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp, int rc = 0; obd = class_exp2obd(exp); - if (obd == NULL) { + if (!obd) { CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n", exp->exp_handle.h_cookie); return -EINVAL; @@ -2368,7 +2337,7 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp, for (i = 0; i < lmv->desc.ld_tgt_count; i++) { tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL) + if (!tgt || !tgt->ltd_exp) continue; err = obd_set_info_async(env, tgt->ltd_exp, @@ -2403,9 +2372,9 @@ static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, return 0; } - if (*lmmp == NULL) { + if (!*lmmp) { *lmmp = libcfs_kvzalloc(mea_size, GFP_NOFS); - if (*lmmp == NULL) + if (!*lmmp) return -ENOMEM; } @@ -2443,10 +2412,10 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, __u32 magic; mea_size = lmv_get_easize(lmv); - if (lsmp == NULL) + if (!lsmp) return mea_size; - if (*lsmp != NULL && lmm == NULL) { + if (*lsmp && !lmm) { 
kvfree(*tmea); *lsmp = NULL; return 0; @@ -2455,7 +2424,7 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, LASSERT(mea_size == lmm_size); *tmea = libcfs_kvzalloc(mea_size, GFP_NOFS); - if (*tmea == NULL) + if (!*tmea) return -ENOMEM; if (!lmm) @@ -2485,8 +2454,8 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, } static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - ldlm_cancel_flags_t flags, void *opaque) + ldlm_policy_data_t *policy, enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; @@ -2494,10 +2463,10 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, int err; int i; - LASSERT(fid != NULL); + LASSERT(fid); for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL || + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || lmv->tgts[i]->ltd_active == 0) continue; @@ -2519,14 +2488,16 @@ static int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, return rc; } -static ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh) +static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, + enum ldlm_type type, + ldlm_policy_data_t *policy, + enum ldlm_mode mode, + struct lustre_handle *lockh) { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; - ldlm_mode_t rc; + enum ldlm_mode rc; int i; CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid)); @@ -2538,8 +2509,7 @@ static ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags, * one fid was created in. 
*/ for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i] == NULL || - lmv->tgts[i]->ltd_exp == NULL || + if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp || lmv->tgts[i]->ltd_active == 0) continue; @@ -2695,7 +2665,7 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp, tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0) + if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0) continue; if (!tgt->ltd_active) { CDEBUG(D_HA, "mdt %d is inactive.\n", i); @@ -2730,7 +2700,7 @@ static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp, int err; tgt = lmv->tgts[i]; - if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) { + if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) { CERROR("lmv idx %d inactive\n", i); return -EIO; } @@ -2813,7 +2783,8 @@ static void lmv_exit(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Logical Metadata Volume OBD driver"); +MODULE_DESCRIPTION("Lustre Logical Metadata Volume"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); module_init(lmv_init); diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c index 40cf4d9f0..b39e364a2 100644 --- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c +++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c @@ -138,7 +138,7 @@ static int lmv_desc_uuid_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lmv_obd *lmv; - LASSERT(dev != NULL); + LASSERT(dev); lmv = &dev->u.lmv; seq_printf(m, "%s\n", lmv->desc.ld_uuid.uuid); return 0; @@ -171,7 +171,7 @@ static int lmv_tgt_seq_show(struct seq_file *p, void *v) { struct lmv_tgt_desc *tgt = v; - if (tgt == NULL) + if (!tgt) return 0; seq_printf(p, "%d: %s %sACTIVE\n", tgt->ltd_idx, tgt->ltd_uuid.uuid, diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h index 
66a2492c1..7dd3162b5 100644 --- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h @@ -579,51 +579,49 @@ extern struct kmem_cache *lovsub_req_kmem; extern struct kmem_cache *lov_lock_link_kmem; -int lov_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf); -int lovsub_object_init(const struct lu_env *env, struct lu_object *obj, - const struct lu_object_conf *conf); -int lov_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_io_init(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); - -int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io); -int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io); -void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link, - struct lovsub_lock *sub); +int lov_object_init(const struct lu_env *env, struct lu_object *obj, + const struct lu_object_conf *conf); +int lovsub_object_init(const struct lu_env *env, struct lu_object *obj, + const struct lu_object_conf *conf); +int lov_lock_init(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); +int lov_io_init(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); + +int 
lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); +int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, + struct cl_lock *lock, const struct cl_io *io); +int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, + struct cl_io *io); +void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link, + struct lovsub_lock *sub); struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio, int stripe); -void lov_sub_put(struct lov_io_sub *sub); -int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov, - struct lovsub_lock *sublock, - const struct cl_lock_descr *d, int idx); - -int lov_page_init(const struct lu_env *env, struct cl_object *ob, - struct cl_page *page, struct page *vmpage); -int lovsub_page_init(const struct lu_env *env, struct cl_object *ob, - struct cl_page *page, struct page *vmpage); - -int lov_page_init_empty(const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, struct page *vmpage); -int lov_page_init_raid0(const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, struct page *vmpage); +void lov_sub_put(struct lov_io_sub *sub); +int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov, + struct lovsub_lock *sublock, + const struct cl_lock_descr *d, int idx); + +int lov_page_init(const struct lu_env *env, struct cl_object *ob, + struct cl_page *page, struct page *vmpage); +int lovsub_page_init(const struct lu_env *env, struct cl_object *ob, + struct cl_page *page, struct page *vmpage); + +int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj, + struct cl_page *page, struct page *vmpage); +int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, 
+ struct cl_page *page, struct page *vmpage); struct lu_object *lov_object_alloc(const struct lu_env *env, - const struct lu_object_header *hdr, - struct lu_device *dev); + const struct lu_object_header *hdr, + struct lu_device *dev); struct lu_object *lovsub_object_alloc(const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); @@ -631,9 +629,8 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env, struct lov_lock_link *lov_lock_link_find(const struct lu_env *env, struct lov_lock *lck, struct lovsub_lock *sub); -struct lov_io_sub *lov_page_subio(const struct lu_env *env, - struct lov_io *lio, - const struct cl_page_slice *slice); +struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio, + const struct cl_page_slice *slice); #define lov_foreach_target(lov, var) \ for (var = 0; var < lov_targets_nr(lov); ++var) @@ -651,7 +648,7 @@ static inline struct lov_session *lov_env_session(const struct lu_env *env) struct lov_session *ses; ses = lu_context_key_get(env->le_ses, &lov_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -759,7 +756,7 @@ static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock) const struct cl_lock_slice *slice; slice = cl_lock_at(lock, &lovsub_device_type); - LASSERT(slice != NULL); + LASSERT(slice); return cl2lovsub_lock(slice); } @@ -798,7 +795,7 @@ static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice) } static inline struct lov_io *cl2lov_io(const struct lu_env *env, - const struct cl_io_slice *ios) + const struct cl_io_slice *ios) { struct lov_io *lio; @@ -817,7 +814,7 @@ static inline struct lov_thread_info *lov_env_info(const struct lu_env *env) struct lov_thread_info *info; info = lu_context_key_get(&env->le_ctx, &lov_key); - LASSERT(info != NULL); + LASSERT(info); return info; } diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c index 3733fdc88..532ef87df 100644 --- 
a/drivers/staging/lustre/lustre/lov/lov_dev.c +++ b/drivers/staging/lustre/lustre/lov/lov_dev.c @@ -142,8 +142,8 @@ static void *lov_key_init(const struct lu_context *ctx, { struct lov_thread_info *info; - info = kmem_cache_alloc(lov_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info != NULL) + info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS); + if (info) INIT_LIST_HEAD(&info->lti_closure.clc_list); else info = ERR_PTR(-ENOMEM); @@ -170,8 +170,8 @@ static void *lov_session_key_init(const struct lu_context *ctx, { struct lov_session *info; - info = kmem_cache_alloc(lov_session_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(lov_session_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -199,15 +199,15 @@ static struct lu_device *lov_device_fini(const struct lu_env *env, int i; struct lov_device *ld = lu2lov_dev(d); - LASSERT(ld->ld_lov != NULL); - if (ld->ld_target == NULL) + LASSERT(ld->ld_lov); + if (!ld->ld_target) return NULL; lov_foreach_target(ld, i) { struct lovsub_device *lsd; lsd = ld->ld_target[i]; - if (lsd != NULL) { + if (lsd) { cl_stack_fini(env, lovsub2cl_dev(lsd)); ld->ld_target[i] = NULL; } @@ -222,8 +222,8 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d, int i; int rc = 0; - LASSERT(d->ld_site != NULL); - if (ld->ld_target == NULL) + LASSERT(d->ld_site); + if (!ld->ld_target) return rc; lov_foreach_target(ld, i) { @@ -232,7 +232,7 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d, struct lov_tgt_desc *desc; desc = ld->ld_lov->lov_tgts[i]; - if (desc == NULL) + if (!desc) continue; cl = cl_type_setup(env, d->ld_site, &lovsub_device_type, @@ -261,8 +261,8 @@ static int lov_req_init(const struct lu_env *env, struct cl_device *dev, struct lov_req *lr; int result; - lr = kmem_cache_alloc(lov_req_kmem, GFP_NOFS | __GFP_ZERO); - if (lr != NULL) { + lr = kmem_cache_zalloc(lov_req_kmem, GFP_NOFS); + if (lr) { cl_req_slice_add(req, &lr->lr_cl, dev, 
&lov_req_ops); result = 0; } else @@ -282,9 +282,9 @@ static void lov_emerg_free(struct lov_device_emerg **emrg, int nr) struct lov_device_emerg *em; em = emrg[i]; - if (em != NULL) { + if (em) { LASSERT(em->emrg_page_list.pl_nr == 0); - if (em->emrg_env != NULL) + if (em->emrg_env) cl_env_put(em->emrg_env, &em->emrg_refcheck); kfree(em); } @@ -300,7 +300,7 @@ static struct lu_device *lov_device_free(const struct lu_env *env, cl_device_fini(lu2cl_dev(d)); kfree(ld->ld_target); - if (ld->ld_emrg != NULL) + if (ld->ld_emrg) lov_emerg_free(ld->ld_emrg, nr); kfree(ld); return NULL; @@ -311,7 +311,7 @@ static void lov_cl_del_target(const struct lu_env *env, struct lu_device *dev, { struct lov_device *ld = lu2lov_dev(dev); - if (ld->ld_target[index] != NULL) { + if (ld->ld_target[index]) { cl_stack_fini(env, lovsub2cl_dev(ld->ld_target[index])); ld->ld_target[index] = NULL; } @@ -324,17 +324,17 @@ static struct lov_device_emerg **lov_emerg_alloc(int nr) int result; emerg = kcalloc(nr, sizeof(emerg[0]), GFP_NOFS); - if (emerg == NULL) + if (!emerg) return ERR_PTR(-ENOMEM); for (result = i = 0; i < nr && result == 0; i++) { struct lov_device_emerg *em; em = kzalloc(sizeof(*em), GFP_NOFS); - if (em != NULL) { + if (em) { emerg[i] = em; cl_page_list_init(&em->emrg_page_list); em->emrg_env = cl_env_alloc(&em->emrg_refcheck, - LCT_REMEMBER|LCT_NOREF); + LCT_REMEMBER | LCT_NOREF); if (!IS_ERR(em->emrg_env)) em->emrg_env->le_ctx.lc_cookie = 0x2; else { @@ -370,7 +370,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev) return PTR_ERR(emerg); newd = kcalloc(tgt_size, sz, GFP_NOFS); - if (newd != NULL) { + if (newd) { mutex_lock(&dev->ld_mutex); if (sub_size > 0) { memcpy(newd, dev->ld_target, sub_size * sz); @@ -379,7 +379,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev) dev->ld_target = newd; dev->ld_target_nr = tgt_size; - if (dev->ld_emrg != NULL) + if (dev->ld_emrg) lov_emerg_free(dev->ld_emrg, sub_size); 
dev->ld_emrg = emerg; mutex_unlock(&dev->ld_mutex); @@ -404,8 +404,6 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev, obd_getref(obd); tgt = obd->u.lov.lov_tgts[index]; - LASSERT(tgt != NULL); - LASSERT(tgt->ltd_obd != NULL); if (!tgt->ltd_obd->obd_set_up) { CERROR("Target %s not set up\n", obd_uuid2str(&tgt->ltd_uuid)); @@ -414,7 +412,7 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev, rc = lov_expand_targets(env, ld); if (rc == 0 && ld->ld_flags & LOV_DEV_INITIALIZED) { - LASSERT(dev->ld_site != NULL); + LASSERT(dev->ld_site); cl = cl_type_setup(env, dev->ld_site, &lovsub_device_type, tgt->ltd_obd->obd_lu_dev); @@ -492,7 +490,7 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env, /* setup the LOV OBD */ obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd != NULL); + LASSERT(obd); rc = lov_setup(obd, cfg); if (rc) { lov_device_free(env, d); diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c index b3c9c85aa..b6529401c 100644 --- a/drivers/staging/lustre/lustre/lov/lov_ea.c +++ b/drivers/staging/lustre/lustre/lov/lov_ea.c @@ -100,8 +100,8 @@ struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size) return NULL; for (i = 0; i < stripe_count; i++) { - loi = kmem_cache_alloc(lov_oinfo_slab, GFP_NOFS | __GFP_ZERO); - if (loi == NULL) + loi = kmem_cache_zalloc(lov_oinfo_slab, GFP_NOFS); + if (!loi) goto err; lsm->lsm_oinfo[i] = loi; } @@ -141,7 +141,7 @@ static void lsm_unpackmd_common(struct lov_stripe_md *lsm, static void lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno, - u64 *lov_off, u64 *swidth) + u64 *lov_off, u64 *swidth) { if (swidth) *swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count; @@ -162,12 +162,13 @@ static int lsm_destroy_plain(struct lov_stripe_md *lsm, struct obdo *oa, } /* Find minimum stripe maxbytes value. 
For inactive or - * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. */ + * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. + */ static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes) { struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import; - if (imp == NULL || !tgt->ltd_active) { + if (!imp || !tgt->ltd_active) { *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES; return; } diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h index 2d00bad58..590f9326a 100644 --- a/drivers/staging/lustre/lustre/lov/lov_internal.h +++ b/drivers/staging/lustre/lustre/lov/lov_internal.h @@ -43,7 +43,8 @@ /* lov_do_div64(a, b) returns a % b, and a = a / b. * The 32-bit code is LOV-specific due to knowing about stripe limits in * order to reduce the divisor to a 32-bit number. If the divisor is - * already a 32-bit value the compiler handles this directly. */ + * already a 32-bit value the compiler handles this directly. + */ #if BITS_PER_LONG == 64 # define lov_do_div64(n, base) ({ \ uint64_t __base = (base); \ @@ -92,7 +93,8 @@ struct lov_request_set { atomic_t set_refcount; struct obd_export *set_exp; /* XXX: There is @set_exp already, however obd_statfs gets obd_device - only. */ + * only. 
+ */ struct obd_device *set_obd; int set_count; atomic_t set_completes; @@ -114,7 +116,6 @@ void lov_finish_set(struct lov_request_set *set); static inline void lov_get_reqset(struct lov_request_set *set) { - LASSERT(set != NULL); LASSERT(atomic_read(&set->set_refcount) > 0); atomic_inc(&set->set_refcount); } @@ -137,12 +138,10 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm, struct ost_lvb *lvb, __u64 *kms_place); /* lov_offset.c */ -u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, - int stripeno); +u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno); int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off, int stripeno, u64 *u64); -u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, - int stripeno); +u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, int stripeno); int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, u64 start, u64 end, u64 *obd_start, u64 *obd_end); @@ -197,7 +196,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm, int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, struct lov_mds_md *lmm, int lmm_bytes); int lov_getstripe(struct obd_export *exp, - struct lov_stripe_md *lsm, struct lov_user_md *lump); + struct lov_stripe_md *lsm, struct lov_user_md __user *lump); int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count, int pattern, int magic); int lov_free_memmd(struct lov_stripe_md **lsmp); diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c index 93fe69eb2..4296aacd8 100644 --- a/drivers/staging/lustre/lustre/lov/lov_io.c +++ b/drivers/staging/lustre/lustre/lov/lov_io.c @@ -60,7 +60,7 @@ static inline void lov_sub_exit(struct lov_io_sub *sub) static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio, struct lov_io_sub *sub) { - if (sub->sub_io != NULL) { + if (sub->sub_io) { if (sub->sub_io_initialized) { lov_sub_enter(sub); cl_io_fini(sub->sub_env, 
sub->sub_io); @@ -74,7 +74,7 @@ static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio, kfree(sub->sub_io); sub->sub_io = NULL; } - if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) { + if (!IS_ERR_OR_NULL(sub->sub_env)) { if (!sub->sub_borrowed) cl_env_put(sub->sub_env, &sub->sub_refcheck); sub->sub_env = NULL; @@ -143,11 +143,11 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio, int stripe = sub->sub_stripe; int result; - LASSERT(sub->sub_io == NULL); - LASSERT(sub->sub_env == NULL); + LASSERT(!sub->sub_io); + LASSERT(!sub->sub_env); LASSERT(sub->sub_stripe < lio->lis_stripe_count); - if (unlikely(lov_r0(lov)->lo_sub[stripe] == NULL)) + if (unlikely(!lov_r0(lov)->lo_sub[stripe])) return -EIO; result = 0; @@ -252,7 +252,6 @@ static int lov_page_stripe(const struct cl_page *page) subobj = lu2lovsub( lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header, &lovsub_device_type)); - LASSERT(subobj != NULL); return subobj->lso_index; } @@ -263,9 +262,9 @@ struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio, struct cl_page *page = slice->cpl_page; int stripe; - LASSERT(lio->lis_cl.cis_io != NULL); + LASSERT(lio->lis_cl.cis_io); LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object); - LASSERT(lsm != NULL); + LASSERT(lsm); LASSERT(lio->lis_nr_subios > 0); stripe = lov_page_stripe(page); @@ -278,7 +277,7 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio, struct lov_stripe_md *lsm = lio->lis_object->lo_lsm; int result; - LASSERT(lio->lis_object != NULL); + LASSERT(lio->lis_object); /* * Need to be optimized, we can't afford to allocate a piece of memory @@ -288,7 +287,7 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio, libcfs_kvzalloc(lsm->lsm_stripe_count * sizeof(lio->lis_subs[0]), GFP_NOFS); - if (lio->lis_subs != NULL) { + if (lio->lis_subs) { lio->lis_nr_subios = lio->lis_stripe_count; lio->lis_single_subio_index = -1; lio->lis_active_subios = 0; 
@@ -304,7 +303,6 @@ static void lov_io_slice_init(struct lov_io *lio, io->ci_result = 0; lio->lis_object = obj; - LASSERT(obj->lo_lsm != NULL); lio->lis_stripe_count = obj->lo_lsm->lsm_stripe_count; switch (io->ci_type) { @@ -358,7 +356,7 @@ static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios) struct lov_object *lov = cl2lov(ios->cis_obj); int i; - if (lio->lis_subs != NULL) { + if (lio->lis_subs) { for (i = 0; i < lio->lis_nr_subios; i++) lov_io_sub_fini(env, lio, &lio->lis_subs[i]); kvfree(lio->lis_subs); @@ -395,7 +393,7 @@ static int lov_io_iter_init(const struct lu_env *env, endpos, &start, &end)) continue; - if (unlikely(lov_r0(lio->lis_object)->lo_sub[stripe] == NULL)) { + if (unlikely(!lov_r0(lio->lis_object)->lo_sub[stripe])) { if (ios->cis_io->ci_type == CIT_READ || ios->cis_io->ci_type == CIT_WRITE || ios->cis_io->ci_type == CIT_FAULT) @@ -601,13 +599,13 @@ static int lov_io_submit(const struct lu_env *env, return rc; } - LASSERT(lio->lis_subs != NULL); + LASSERT(lio->lis_subs); if (alloc) { stripes_qin = libcfs_kvzalloc(sizeof(*stripes_qin) * lio->lis_nr_subios, GFP_NOFS); - if (stripes_qin == NULL) + if (!stripes_qin) return -ENOMEM; for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) @@ -949,13 +947,13 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj, } int lov_io_init_released(const struct lu_env *env, struct cl_object *obj, - struct cl_io *io) + struct cl_io *io) { struct lov_object *lov = cl2lov(obj); struct lov_io *lio = lov_env_io(env); int result; - LASSERT(lov->lo_lsm != NULL); + LASSERT(lov->lo_lsm); lio->lis_object = lov; switch (io->ci_type) { diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c index d866791d7..ae854bc25 100644 --- a/drivers/staging/lustre/lustre/lov/lov_lock.c +++ b/drivers/staging/lustre/lustre/lov/lov_lock.c @@ -115,7 +115,7 @@ static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck, /* * check 
that sub-lock doesn't have lock link to this top-lock. */ - LASSERT(lov_lock_link_find(env, lck, lsl) == NULL); + LASSERT(!lov_lock_link_find(env, lck, lsl)); LASSERT(idx < lck->lls_nr); lck->lls_sub[idx].sub_lock = lsl; @@ -144,8 +144,8 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env, LASSERT(idx < lck->lls_nr); - link = kmem_cache_alloc(lov_lock_link_kmem, GFP_NOFS | __GFP_ZERO); - if (link != NULL) { + link = kmem_cache_zalloc(lov_lock_link_kmem, GFP_NOFS); + if (link) { struct lov_sublock_env *subenv; struct lov_lock_sub *lls; struct cl_lock_descr *descr; @@ -160,7 +160,8 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env, * to remember the subio. This is because lock is able * to be cached, but this is not true for IO. This * further means a sublock might be referenced in - * different io context. -jay */ + * different io context. -jay + */ sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io, descr, "lov-parent", parent); @@ -220,7 +221,7 @@ static int lov_sublock_lock(const struct lu_env *env, LASSERT(!(lls->sub_flags & LSF_HELD)); link = lov_lock_link_find(env, lck, sublock); - LASSERT(link != NULL); + LASSERT(link); lov_lock_unlink(env, link, sublock); lov_sublock_unlock(env, sublock, closure, NULL); lck->lls_cancel_race = 1; @@ -263,7 +264,7 @@ static int lov_subresult(int result, int rc) int rc_rank; LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT, - "result = %d", result); + "result = %d\n", result); LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT, "rc = %d\n", rc); CLASSERT(CLO_WAIT < CLO_REPEAT); @@ -309,14 +310,14 @@ static int lov_lock_sub_init(const struct lu_env *env, * XXX for wide striping smarter algorithm is desirable, * breaking out of the loop, early. 
*/ - if (likely(r0->lo_sub[i] != NULL) && + if (likely(r0->lo_sub[i]) && lov_stripe_intersects(loo->lo_lsm, i, file_start, file_end, &start, &end)) nr++; } LASSERT(nr > 0); lck->lls_sub = libcfs_kvzalloc(nr * sizeof(lck->lls_sub[0]), GFP_NOFS); - if (lck->lls_sub == NULL) + if (!lck->lls_sub) return -ENOMEM; lck->lls_nr = nr; @@ -328,14 +329,14 @@ static int lov_lock_sub_init(const struct lu_env *env, * top-lock. */ for (i = 0, nr = 0; i < r0->lo_nr; ++i) { - if (likely(r0->lo_sub[i] != NULL) && + if (likely(r0->lo_sub[i]) && lov_stripe_intersects(loo->lo_lsm, i, file_start, file_end, &start, &end)) { struct cl_lock_descr *descr; descr = &lck->lls_sub[nr].sub_descr; - LASSERT(descr->cld_obj == NULL); + LASSERT(!descr->cld_obj); descr->cld_obj = lovsub2cl(r0->lo_sub[i]); descr->cld_start = cl_index(descr->cld_obj, start); descr->cld_end = cl_index(descr->cld_obj, end); @@ -369,7 +370,6 @@ static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck, struct cl_lock *sublock; int dying; - LASSERT(lck->lls_sub[i].sub_lock != NULL); sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock; LASSERT(cl_lock_is_mutexed(sublock)); @@ -413,7 +413,6 @@ static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck, if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) { struct cl_lock *sublock; - LASSERT(lck->lls_sub[i].sub_lock != NULL); sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock; LASSERT(cl_lock_is_mutexed(sublock)); LASSERT(sublock->cll_state != CLS_FREEING); @@ -435,13 +434,13 @@ static void lov_lock_fini(const struct lu_env *env, lck = cl2lov_lock(slice); LASSERT(lck->lls_nr_filled == 0); - if (lck->lls_sub != NULL) { + if (lck->lls_sub) { for (i = 0; i < lck->lls_nr; ++i) /* * No sub-locks exists at this point, as sub-lock has * a reference on its parent. 
*/ - LASSERT(lck->lls_sub[i].sub_lock == NULL); + LASSERT(!lck->lls_sub[i].sub_lock); kvfree(lck->lls_sub); } kmem_cache_free(lov_lock_kmem, lck); @@ -479,7 +478,8 @@ static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck, result = cl_enqueue_try(env, sublock, io, enqflags); if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) { /* if it is enqueued, try to `wait' on it---maybe it's already - * granted */ + * granted + */ result = cl_wait_try(env, sublock); if (result == CLO_REENQUEUED) result = CLO_WAIT; @@ -515,12 +515,13 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent, if (!IS_ERR(sublock)) { cl_lock_get_trust(sublock); if (parent->cll_state == CLS_QUEUING && - lck->lls_sub[idx].sub_lock == NULL) { + !lck->lls_sub[idx].sub_lock) { lov_sublock_adopt(env, lck, sublock, idx, link); } else { kmem_cache_free(lov_lock_link_kmem, link); /* other thread allocated sub-lock, or enqueue is no - * longer going on */ + * longer going on + */ cl_lock_mutex_put(env, parent); cl_lock_unhold(env, sublock, "lov-parent", parent); cl_lock_mutex_get(env, parent); @@ -574,10 +575,11 @@ static int lov_lock_enqueue(const struct lu_env *env, * Sub-lock might have been canceled, while top-lock was * cached. */ - if (sub == NULL) { + if (!sub) { result = lov_sublock_fill(env, lock, io, lck, i); /* lov_sublock_fill() released @lock mutex, - * restart. */ + * restart. + */ break; } sublock = sub->lss_cl.cls_lock; @@ -605,7 +607,8 @@ static int lov_lock_enqueue(const struct lu_env *env, /* take recursive mutex of sublock */ cl_lock_mutex_get(env, sublock); /* need to release all locks in closure - * otherwise it may deadlock. LU-2683.*/ + * otherwise it may deadlock. LU-2683. + */ lov_sublock_unlock(env, sub, closure, subenv); /* sublock and parent are held. 
*/ @@ -620,7 +623,7 @@ static int lov_lock_enqueue(const struct lu_env *env, break; } } else { - LASSERT(sublock->cll_conflict == NULL); + LASSERT(!sublock->cll_conflict); lov_sublock_unlock(env, sub, closure, subenv); } } @@ -649,11 +652,12 @@ static int lov_lock_unuse(const struct lu_env *env, /* top-lock state cannot change concurrently, because single * thread (one that released the last hold) carries unlocking - * to the completion. */ + * to the completion. + */ LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT); lls = &lck->lls_sub[i]; sub = lls->sub_lock; - if (sub == NULL) + if (!sub) continue; sublock = sub->lss_cl.cls_lock; @@ -679,7 +683,7 @@ static int lov_lock_unuse(const struct lu_env *env, } static void lov_lock_cancel(const struct lu_env *env, - const struct cl_lock_slice *slice) + const struct cl_lock_slice *slice) { struct lov_lock *lck = cl2lov_lock(slice); struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock); @@ -695,10 +699,11 @@ static void lov_lock_cancel(const struct lu_env *env, /* top-lock state cannot change concurrently, because single * thread (one that released the last hold) carries unlocking - * to the completion. */ + * to the completion. + */ lls = &lck->lls_sub[i]; sub = lls->sub_lock; - if (sub == NULL) + if (!sub) continue; sublock = sub->lss_cl.cls_lock; @@ -757,7 +762,6 @@ again: lls = &lck->lls_sub[i]; sub = lls->sub_lock; - LASSERT(sub != NULL); sublock = sub->lss_cl.cls_lock; rc = lov_sublock_lock(env, lck, lls, closure, &subenv); if (rc == 0) { @@ -776,8 +780,9 @@ again: if (result != 0) break; } - /* Each sublock only can be reenqueued once, so will not loop for - * ever. */ + /* Each sublock only can be reenqueued once, so will not loop + * forever. 
+ */ if (result == 0 && reenqueued != 0) goto again; cl_lock_closure_fini(closure); @@ -805,7 +810,7 @@ static int lov_lock_use(const struct lu_env *env, lls = &lck->lls_sub[i]; sub = lls->sub_lock; - if (sub == NULL) { + if (!sub) { /* * Sub-lock might have been canceled, while top-lock was * cached. @@ -826,7 +831,8 @@ static int lov_lock_use(const struct lu_env *env, i, 1, rc); } else if (sublock->cll_state == CLS_NEW) { /* Sub-lock might have been canceled, while - * top-lock was cached. */ + * top-lock was cached. + */ result = -ESTALE; lov_sublock_release(env, lck, i, 1, result); } @@ -852,45 +858,6 @@ static int lov_lock_use(const struct lu_env *env, return result; } -#if 0 -static int lock_lock_multi_match() -{ - struct cl_lock *lock = slice->cls_lock; - struct cl_lock_descr *subneed = &lov_env_info(env)->lti_ldescr; - struct lov_object *loo = cl2lov(lov->lls_cl.cls_obj); - struct lov_layout_raid0 *r0 = lov_r0(loo); - struct lov_lock_sub *sub; - struct cl_object *subobj; - u64 fstart; - u64 fend; - u64 start; - u64 end; - int i; - - fstart = cl_offset(need->cld_obj, need->cld_start); - fend = cl_offset(need->cld_obj, need->cld_end + 1) - 1; - subneed->cld_mode = need->cld_mode; - cl_lock_mutex_get(env, lock); - for (i = 0; i < lov->lls_nr; ++i) { - sub = &lov->lls_sub[i]; - if (sub->sub_lock == NULL) - continue; - subobj = sub->sub_descr.cld_obj; - if (!lov_stripe_intersects(loo->lo_lsm, sub->sub_stripe, - fstart, fend, &start, &end)) - continue; - subneed->cld_start = cl_index(subobj, start); - subneed->cld_end = cl_index(subobj, end); - subneed->cld_obj = subobj; - if (!cl_lock_ext_match(&sub->sub_got, subneed)) { - result = 0; - break; - } - } - cl_lock_mutex_put(env, lock); -} -#endif - /** * Check if the extent region \a descr is covered by \a child against the * specific \a stripe. 
@@ -922,10 +889,10 @@ static int lov_lock_stripe_is_matching(const struct lu_env *env, idx = lov_stripe_number(lsm, start); if (idx == stripe || - unlikely(lov_r0(lov)->lo_sub[idx] == NULL)) { + unlikely(!lov_r0(lov)->lo_sub[idx])) { idx = lov_stripe_number(lsm, end); if (idx == stripe || - unlikely(lov_r0(lov)->lo_sub[idx] == NULL)) + unlikely(!lov_r0(lov)->lo_sub[idx])) result = 1; } } @@ -970,7 +937,8 @@ static int lov_lock_fits_into(const struct lu_env *env, LASSERT(lov->lls_nr > 0); /* for top lock, it's necessary to match enq flags otherwise it will - * run into problem if a sublock is missing and reenqueue. */ + * run into problem if a sublock is missing and reenqueue. + */ if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags) return 0; @@ -1074,7 +1042,7 @@ static void lov_lock_delete(const struct lu_env *env, struct lov_lock_sub *lls = &lck->lls_sub[i]; struct lovsub_lock *lsl = lls->sub_lock; - if (lsl == NULL) /* already removed */ + if (!lsl) /* already removed */ continue; rc = lov_sublock_lock(env, lck, lls, closure, NULL); @@ -1090,9 +1058,9 @@ static void lov_lock_delete(const struct lu_env *env, lov_sublock_release(env, lck, i, 1, 0); link = lov_lock_link_find(env, lck, lsl); - LASSERT(link != NULL); + LASSERT(link); lov_lock_unlink(env, link, lsl); - LASSERT(lck->lls_sub[i].sub_lock == NULL); + LASSERT(!lck->lls_sub[i].sub_lock); lov_sublock_unlock(env, lsl, closure, NULL); } @@ -1112,7 +1080,7 @@ static int lov_lock_print(const struct lu_env *env, void *cookie, sub = &lck->lls_sub[i]; (*p)(env, cookie, " %d %x: ", i, sub->sub_flags); - if (sub->sub_lock != NULL) + if (sub->sub_lock) cl_lock_print(env, cookie, p, sub->sub_lock->lss_cl.cls_lock); else @@ -1139,8 +1107,8 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, struct lov_lock *lck; int result; - lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lck != NULL) { + lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS); + if (lck) { 
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops); result = lov_lock_sub_init(env, lck, io); } else @@ -1157,7 +1125,8 @@ static void lov_empty_lock_fini(const struct lu_env *env, } static int lov_empty_lock_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct cl_lock_slice *slice) + lu_printer_t p, + const struct cl_lock_slice *slice) { (*p)(env, cookie, "empty\n"); return 0; @@ -1170,13 +1139,13 @@ static const struct cl_lock_operations lov_empty_lock_ops = { }; int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_lock *lock, const struct cl_io *io) + struct cl_lock *lock, const struct cl_io *io) { struct lov_lock *lck; int result = -ENOMEM; - lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lck != NULL) { + lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS); + if (lck) { cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops); lck->lls_orig = lock->cll_descr; result = 0; diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c index 97115bec7..029cd4d62 100644 --- a/drivers/staging/lustre/lustre/lov/lov_merge.c +++ b/drivers/staging/lustre/lustre/lov/lov_merge.c @@ -129,7 +129,8 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm, "stripe %d KMS %sing %llu->%llu\n", stripe, kms > loi->loi_kms ? "increase":"shrink", loi->loi_kms, kms); - loi_kms_set(loi, loi->loi_lvb.lvb_size = kms); + loi->loi_lvb.lvb_size = kms; + loi_kms_set(loi, loi->loi_lvb.lvb_size); } return 0; } diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c index 6c2bdfe9c..5daa7faf4 100644 --- a/drivers/staging/lustre/lustre/lov/lov_obd.c +++ b/drivers/staging/lustre/lustre/lov/lov_obd.c @@ -61,7 +61,8 @@ #include "lov_internal.h" /* Keep a refcount of lov->tgt usage to prevent racing with addition/deletion. - Any function that expects lov_tgts to remain stationary must take a ref. 
*/ + * Any function that expects lov_tgts to remain stationary must take a ref. + */ static void lov_getref(struct obd_device *obd) { struct lov_obd *lov = &obd->u.lov; @@ -96,7 +97,8 @@ static void lov_putref(struct obd_device *obd) list_add(&tgt->ltd_kill, &kill); /* XXX - right now there is a dependency on ld_tgt_count * being the maximum tgt index for computing the - * mds_max_easize. So we can't shrink it. */ + * mds_max_easize. So we can't shrink it. + */ lov_ost_pool_remove(&lov->lov_packed, i); lov->lov_tgts[i] = NULL; lov->lov_death_row--; @@ -158,7 +160,8 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate, if (activate) { tgt_obd->obd_no_recov = 0; /* FIXME this is probably supposed to be - ptlrpc_set_import_active. Horrible naming. */ + * ptlrpc_set_import_active. Horrible naming. + */ ptlrpc_activate_import(imp); } @@ -262,7 +265,7 @@ static int lov_disconnect_obd(struct obd_device *obd, struct lov_tgt_desc *tgt) osc_obd = class_exp2obd(tgt->ltd_exp); CDEBUG(D_CONFIG, "%s: disconnecting target %s\n", - obd->obd_name, osc_obd ? osc_obd->obd_name : "NULL"); + obd->obd_name, osc_obd ? osc_obd->obd_name : "NULL"); if (tgt->ltd_active) { tgt->ltd_active = 0; @@ -315,7 +318,8 @@ static int lov_disconnect(struct obd_export *exp) } /* Let's hold another reference so lov_del_obd doesn't spin through - putref every time */ + * putref every time + */ obd_getref(obd); for (i = 0; i < lov->desc.ld_tgt_count; i++) { @@ -358,7 +362,7 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid, * LU-642, initially inactive OSC could miss the obd_connect, * we make up for it here. 
*/ - if (ev == OBD_NOTIFY_ACTIVATE && tgt->ltd_exp == NULL && + if (ev == OBD_NOTIFY_ACTIVATE && !tgt->ltd_exp && obd_uuid_equals(uuid, &tgt->ltd_uuid)) { struct obd_uuid lov_osc_uuid = {"LOV_OSC_UUID"}; @@ -399,10 +403,9 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid, CDEBUG(D_INFO, "OSC %s already %sactive!\n", uuid->uuid, active ? "" : "in"); goto out; - } else { - CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n", - obd_uuid2str(uuid), active ? "" : "in"); } + CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n", + obd_uuid2str(uuid), active ? "" : "in"); lov->lov_tgts[index]->ltd_active = active; if (active) { @@ -481,7 +484,8 @@ static int lov_notify(struct obd_device *obd, struct obd_device *watched, continue; /* don't send sync event if target not - * connected/activated */ + * connected/activated + */ if (is_sync && !lov->lov_tgts[i]->ltd_active) continue; @@ -521,12 +525,12 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, tgt_obd = class_find_client_obd(uuidp, LUSTRE_OSC_NAME, &obd->obd_uuid); - if (tgt_obd == NULL) + if (!tgt_obd) return -EINVAL; mutex_lock(&lov->lov_lock); - if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) { + if ((index < lov->lov_tgt_size) && lov->lov_tgts[index]) { tgt = lov->lov_tgts[index]; CERROR("UUID %s already assigned at LOV target index %d\n", obd_uuid2str(&tgt->ltd_uuid), index); @@ -543,7 +547,7 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, while (newsize < index + 1) newsize <<= 1; newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS); - if (newtgts == NULL) { + if (!newtgts) { mutex_unlock(&lov->lov_lock); return -ENOMEM; } @@ -590,14 +594,15 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, mutex_unlock(&lov->lov_lock); CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n", - index, tgt->ltd_gen, lov->desc.ld_tgt_count); + index, tgt->ltd_gen, lov->desc.ld_tgt_count); rc = obd_notify(obd, tgt_obd, 
OBD_NOTIFY_CREATE, &index); if (lov->lov_connects == 0) { /* lov_connect hasn't been called yet. We'll do the - lov_connect_obd on this target when that fn first runs, - because we don't know the connect flags yet. */ + * lov_connect_obd on this target when that fn first runs, + * because we don't know the connect flags yet. + */ return 0; } @@ -613,11 +618,11 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp, goto out; } - if (lov->lov_cache != NULL) { + if (lov->lov_cache) { rc = obd_set_info_async(NULL, tgt->ltd_exp, - sizeof(KEY_CACHE_SET), KEY_CACHE_SET, - sizeof(struct cl_client_cache), lov->lov_cache, - NULL); + sizeof(KEY_CACHE_SET), KEY_CACHE_SET, + sizeof(struct cl_client_cache), + lov->lov_cache, NULL); if (rc < 0) goto out; } @@ -702,8 +707,9 @@ static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt) kfree(tgt); /* Manual cleanup - no cleanup logs to clean up the osc's. We must - do it ourselves. And we can't do it from lov_cleanup, - because we just lost our only reference to it. */ + * do it ourselves. And we can't do it from lov_cleanup, + * because we just lost our only reference to it. 
+ */ if (osc_obd) class_manual_cleanup(osc_obd); } @@ -773,9 +779,9 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg) if (desc->ld_magic != LOV_DESC_MAGIC) { if (desc->ld_magic == __swab32(LOV_DESC_MAGIC)) { - CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n", - obd->obd_name, desc); - lustre_swab_lov_desc(desc); + CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n", + obd->obd_name, desc); + lustre_swab_lov_desc(desc); } else { CERROR("%s: Bad lov desc magic: %#x\n", obd->obd_name, desc->ld_magic); @@ -859,7 +865,8 @@ static int lov_cleanup(struct obd_device *obd) /* free pool structs */ CDEBUG(D_INFO, "delete pool %p\n", pool); /* In the function below, .hs_keycmp resolves to - * pool_hashkey_keycmp() */ + * pool_hashkey_keycmp() + */ /* coverity[overrun-buffer-val] */ lov_pool_del(obd, pool->pool_name); } @@ -879,8 +886,9 @@ static int lov_cleanup(struct obd_device *obd) if (lov->lov_tgts[i]->ltd_active || atomic_read(&lov->lov_refcount)) /* We should never get here - these - should have been removed in the - disconnect. */ + * should have been removed in the + * disconnect. + */ CERROR("lov tgt %d not cleaned! 
deathrow=%d, lovrc=%d\n", i, lov->lov_death_row, atomic_read(&lov->lov_refcount)); @@ -981,7 +989,7 @@ static int lov_recreate(struct obd_export *exp, struct obdo *src_oa, ost_idx = src_oa->o_nlink; lsm = *ea; - if (lsm == NULL) { + if (!lsm) { rc = -EINVAL; goto out; } @@ -1025,8 +1033,8 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp, struct lov_obd *lov; int rc = 0; - LASSERT(ea != NULL); - if (exp == NULL) + LASSERT(ea); + if (!exp) return -EINVAL; if ((src_oa->o_valid & OBD_MD_FLFLAGS) && @@ -1043,7 +1051,7 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp, /* Recreate a specific object id at the given OST index */ if ((src_oa->o_valid & OBD_MD_FLFLAGS) && (src_oa->o_flags & OBD_FL_RECREATE_OBJS)) { - rc = lov_recreate(exp, src_oa, ea, oti); + rc = lov_recreate(exp, src_oa, ea, oti); } obd_putref(exp->exp_obd); @@ -1052,7 +1060,7 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp, #define ASSERT_LSM_MAGIC(lsmp) \ do { \ - LASSERT((lsmp) != NULL); \ + LASSERT((lsmp)); \ LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 || \ (lsmp)->lsm_magic == LOV_MAGIC_V3), \ "%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic); \ @@ -1065,7 +1073,6 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp, struct lov_request_set *set; struct obd_info oinfo; struct lov_request *req; - struct list_head *pos; struct lov_obd *lov; int rc = 0, err = 0; @@ -1085,9 +1092,7 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp, if (rc) goto out; - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &set->set_list, rq_link) { if (oa->o_valid & OBD_MD_FLCOOKIE) oti->oti_logcookies = set->set_cookies + req->rq_stripe; @@ -1105,10 +1110,9 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp, } } - if (rc == 0) { - LASSERT(lsm_op_find(lsm->lsm_magic) != NULL); + if (rc == 0) rc = 
lsm_op_find(lsm->lsm_magic)->lsm_destroy(lsm, oa, md_exp); - } + err = lov_fini_destroy_set(set); out: obd_putref(exp->exp_obd); @@ -1129,11 +1133,10 @@ static int lov_getattr_interpret(struct ptlrpc_request_set *rqset, } static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo, - struct ptlrpc_request_set *rqset) + struct ptlrpc_request_set *rqset) { struct lov_request_set *lovset; struct lov_obd *lov; - struct list_head *pos; struct lov_request *req; int rc = 0, err; @@ -1153,9 +1156,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo, POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count, oinfo->oi_md->lsm_stripe_size); - list_for_each(pos, &lovset->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &lovset->set_list, rq_link) { CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe, POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx); @@ -1174,7 +1175,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo, if (!list_empty(&rqset->set_requests)) { LASSERT(rc == 0); - LASSERT(rqset->set_interpret == NULL); + LASSERT(!rqset->set_interpret); rqset->set_interpret = lov_getattr_interpret; rqset->set_arg = (void *)lovset; return rc; @@ -1199,14 +1200,14 @@ static int lov_setattr_interpret(struct ptlrpc_request_set *rqset, } /* If @oti is given, the request goes from MDS and responses from OSTs are not - needed. Otherwise, a client is waiting for responses. */ + * needed. Otherwise, a client is waiting for responses. 
+ */ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, struct obd_trans_info *oti, struct ptlrpc_request_set *rqset) { struct lov_request_set *set; struct lov_request *req; - struct list_head *pos; struct lov_obd *lov; int rc = 0; @@ -1230,9 +1231,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, oinfo->oi_md->lsm_stripe_count, oinfo->oi_md->lsm_stripe_size); - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &set->set_list, rq_link) { if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) oti->oti_logcookies = set->set_cookies + req->rq_stripe; @@ -1262,7 +1261,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, return rc ? rc : err; } - LASSERT(rqset->set_interpret == NULL); + LASSERT(!rqset->set_interpret); rqset->set_interpret = lov_setattr_interpret; rqset->set_arg = (void *)set; @@ -1272,7 +1271,8 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo, /* find any ldlm lock of the inode in lov * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ static int lov_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm, ldlm_iterator_t it, void *data) @@ -1326,20 +1326,17 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo, struct obd_device *obd = class_exp2obd(exp); struct lov_request_set *set; struct lov_request *req; - struct list_head *pos; struct lov_obd *lov; int rc = 0; - LASSERT(oinfo != NULL); - LASSERT(oinfo->oi_osfs != NULL); + LASSERT(oinfo->oi_osfs); lov = &obd->u.lov; rc = lov_prep_statfs_set(obd, oinfo, &set); if (rc) return rc; - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); + list_for_each_entry(req, &set->set_list, rq_link) { rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp, &req->rq_oi, max_age, rqset); if (rc) @@ -1355,7 +1352,7 @@ static int lov_statfs_async(struct 
obd_export *exp, struct obd_info *oinfo, return rc ? rc : err; } - LASSERT(rqset->set_interpret == NULL); + LASSERT(!rqset->set_interpret); rqset->set_interpret = lov_statfs_interpret; rqset->set_arg = (void *)set; return 0; @@ -1369,9 +1366,10 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp, int rc = 0; /* for obdclass we forbid using obd_statfs_rqset, but prefer using async - * statfs requests */ + * statfs requests + */ set = ptlrpc_prep_set(); - if (set == NULL) + if (!set) return -ENOMEM; oinfo.oi_osfs = osfs; @@ -1385,7 +1383,7 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp, } static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg) + void *karg, void __user *uarg) { struct obd_device *obddev = class_exp2obd(exp); struct lov_obd *lov = &obddev->u.lov; @@ -1416,11 +1414,13 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, /* copy UUID */ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd), - min((int) data->ioc_plen2, - (int) sizeof(struct obd_uuid)))) + min((int)data->ioc_plen2, + (int)sizeof(struct obd_uuid)))) return -EFAULT; - flags = uarg ? *(__u32 *)uarg : 0; + memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32)); + flags = flags & LL_STATFS_NODELAY ? 
OBD_STATFS_NODELAY : 0; + /* got statfs data */ rc = obd_statfs(NULL, lov->lov_tgts[index]->ltd_exp, &stat_buf, cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), @@ -1428,8 +1428,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, if (rc) return rc; if (copy_to_user(data->ioc_pbuf1, &stat_buf, - min((int) data->ioc_plen1, - (int) sizeof(stat_buf)))) + min((int)data->ioc_plen1, + (int)sizeof(stat_buf)))) return -EFAULT; break; } @@ -1501,7 +1501,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, &qctl->obd_uuid)) continue; - if (tgt->ltd_exp == NULL) + if (!tgt->ltd_exp) return -EINVAL; break; @@ -1543,14 +1543,15 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, continue; /* ll_umount_begin() sets force flag but for lov, not - * osc. Let's pass it through */ + * osc. Let's pass it through + */ osc_obd = class_exp2obd(lov->lov_tgts[i]->ltd_exp); osc_obd->obd_force = obddev->obd_force; err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp, len, karg, uarg); - if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) { + if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) return err; - } else if (err) { + if (err) { if (lov->lov_tgts[i]->ltd_active) { CDEBUG(err == -ENOTTY ? D_IOCTL : D_WARNING, @@ -1620,7 +1621,8 @@ static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap, return -EINVAL; /* If we have finished mapping on previous device, shift logical - * offset to start of next device */ + * offset to start of next device + */ if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end, &lun_start, &lun_end)) != 0 && local_end < lun_end) { @@ -1628,7 +1630,8 @@ static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap, *start_stripe = stripe_no; } else { /* This is a special value to indicate that caller should - * calculate offset in next stripe. */ + * calculate offset in next stripe. 
+ */ fm_end_offset = 0; *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count; } @@ -1739,7 +1742,7 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count); fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS); - if (fm_local == NULL) { + if (!fm_local) { rc = -ENOMEM; goto out; } @@ -1759,7 +1762,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, fm_end = fm_key->oa.o_size; last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end, - actual_start_stripe, &stripe_count); + actual_start_stripe, + &stripe_count); fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start, fm_end, &start_stripe); @@ -1796,7 +1800,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, /* If this is a continuation FIEMAP call and we are on * starting stripe then lun_start needs to be set to - * fm_end_offset */ + * fm_end_offset + */ if (fm_end_offset != 0 && cur_stripe == start_stripe) lun_start = fm_end_offset; @@ -1818,7 +1823,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key, len_mapped_single_call = 0; /* If the output buffer is very large and the objects have many - * extents we may need to loop on a single OST repeatedly */ + * extents we may need to loop on a single OST repeatedly + */ ost_eof = 0; ost_done = 0; do { @@ -1874,7 +1880,8 @@ inactive_tgt: if (ext_count == 0) { ost_done = 1; /* If last stripe has hole at the end, - * then we need to return */ + * then we need to return + */ if (cur_stripe_wrap == last_stripe) { fiemap->fm_mapped_extents = 0; goto finish; @@ -1896,7 +1903,8 @@ inactive_tgt: ost_done = 1; /* Clear the EXTENT_LAST flag which can be present on - * last extent */ + * last extent + */ if (lcl_fm_ext[ext_count-1].fe_flags & FIEMAP_EXTENT_LAST) lcl_fm_ext[ext_count - 1].fe_flags &= ~FIEMAP_EXTENT_LAST; @@ -1925,7 +1933,8 @@ inactive_tgt: finish: /* Indicate that we are returning device offsets unless file just 
has - * single stripe */ + * single stripe + */ if (lsm->lsm_stripe_count > 1) fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER; @@ -1933,7 +1942,8 @@ finish: goto skip_last_device_calc; /* Check if we have reached the last stripe and whether mapping for that - * stripe is done. */ + * stripe is done. + */ if (cur_stripe_wrap == last_stripe) { if (ost_done || ost_eof) fiemap->fm_extents[current_extent - 1].fe_flags |= @@ -1978,10 +1988,12 @@ static int lov_get_info(const struct lu_env *env, struct obd_export *exp, /* XXX This is another one of those bits that will need to * change if we ever actually support nested LOVs. It uses - * the lock's export to find out which stripe it is. */ + * the lock's export to find out which stripe it is. + */ /* XXX - it's assumed all the locks for deleted OSTs have * been cancelled. Also, the export for deleted OSTs will - * be NULL and won't match the lock's export. */ + * be NULL and won't match the lock's export. + */ for (i = 0; i < lsm->lsm_stripe_count; i++) { loi = lsm->lsm_oinfo[i]; if (lov_oinfo_is_dummy(loi)) @@ -2070,7 +2082,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, unsigned next_id = 0, mds_con = 0; incr = check_uuid = do_inactive = no_set = 0; - if (set == NULL) { + if (!set) { no_set = 1; set = ptlrpc_prep_set(); if (!set) @@ -2093,7 +2105,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, } else if (KEY_IS(KEY_MDS_CONN)) { mds_con = 1; } else if (KEY_IS(KEY_CACHE_SET)) { - LASSERT(lov->lov_cache == NULL); + LASSERT(!lov->lov_cache); lov->lov_cache = val; do_inactive = 1; } @@ -2119,12 +2131,12 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, /* Only want a specific OSC */ if (mgi->uuid && !obd_uuid_equals(mgi->uuid, - &tgt->ltd_uuid)) + &tgt->ltd_uuid)) continue; err = obd_set_info_async(env, tgt->ltd_exp, - keylen, key, sizeof(int), - &mgi->group, set); + keylen, key, sizeof(int), + &mgi->group, set); } else 
if (next_id) { err = obd_set_info_async(env, tgt->ltd_exp, keylen, key, vallen, @@ -2136,7 +2148,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp, continue; err = obd_set_info_async(env, tgt->ltd_exp, - keylen, key, vallen, val, set); + keylen, key, vallen, val, set); } if (!rc) @@ -2187,7 +2199,7 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp, oqctl->qc_cmd != Q_INITQUOTA && oqctl->qc_cmd != LUSTRE_Q_SETQUOTA && oqctl->qc_cmd != Q_FINVALIDATE) { - CERROR("bad quota opc %x for lov obd", oqctl->qc_cmd); + CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd); return -EFAULT; } @@ -2317,7 +2329,8 @@ static int __init lov_init(void) /* print an address of _any_ initialized kernel symbol from this * module, to allow debugging with gdb that doesn't support data - * symbols from modules.*/ + * symbols from modules. + */ CDEBUG(D_INFO, "Lustre LOV module (%p).\n", &lov_caches); rc = lu_kmem_init(lov_caches); @@ -2325,9 +2338,9 @@ static int __init lov_init(void) return rc; lov_oinfo_slab = kmem_cache_create("lov_oinfo", - sizeof(struct lov_oinfo), - 0, SLAB_HWCACHE_ALIGN, NULL); - if (lov_oinfo_slab == NULL) { + sizeof(struct lov_oinfo), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!lov_oinfo_slab) { lu_kmem_fini(lov_caches); return -ENOMEM; } @@ -2353,7 +2366,7 @@ static void /*__exit*/ lov_exit(void) } MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Logical Object Volume OBD driver"); +MODULE_DESCRIPTION("Lustre Logical Object Volume"); MODULE_LICENSE("GPL"); MODULE_VERSION(LUSTRE_VERSION_STRING); diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c index 3b79ebc8e..1f8ed95a6 100644 --- a/drivers/staging/lustre/lustre/lov/lov_object.c +++ b/drivers/staging/lustre/lustre/lov/lov_object.c @@ -59,7 +59,7 @@ struct lov_layout_operations { const struct cl_object_conf *conf, union lov_layout_state *state); int (*llo_delete)(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state); + union lov_layout_state *state); void (*llo_fini)(const struct lu_env *env, struct lov_object *lov, union lov_layout_state *state); void (*llo_install)(const struct lu_env *env, struct lov_object *lov, @@ -67,7 +67,7 @@ struct lov_layout_operations { int (*llo_print)(const struct lu_env *env, void *cookie, lu_printer_t p, const struct lu_object *o); int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage); + struct cl_page *page, struct page *vmpage); int (*llo_lock_init)(const struct lu_env *env, struct cl_object *obj, struct cl_lock *lock, const struct cl_io *io); @@ -135,7 +135,8 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov, * Do not leave the object in cache to avoid accessing * freed memory. This is because osc_object is referring to * lov_oinfo of lsm_stripe_data which will be freed due to - * this failure. */ + * this failure. 
+ */ cl_object_kill(env, stripe); cl_object_put(env, stripe); return -EIO; @@ -154,7 +155,7 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov, /* reuse ->coh_attr_guard to protect coh_parent change */ spin_lock(&subhdr->coh_attr_guard); parent = subhdr->coh_parent; - if (parent == NULL) { + if (!parent) { subhdr->coh_parent = hdr; spin_unlock(&subhdr->coh_attr_guard); subhdr->coh_nesting = hdr->coh_nesting + 1; @@ -170,11 +171,12 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov, spin_unlock(&subhdr->coh_attr_guard); old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type); - LASSERT(old_obj != NULL); + LASSERT(old_obj); old_lov = cl2lov(lu2cl(old_obj)); if (old_lov->lo_layout_invalid) { /* the object's layout has already changed but isn't - * refreshed */ + * refreshed + */ lu_object_unhash(env, &stripe->co_lu); result = -EAGAIN; } else { @@ -212,14 +214,14 @@ static int lov_init_raid0(const struct lu_env *env, LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic); } - LASSERT(lov->lo_lsm == NULL); + LASSERT(!lov->lo_lsm); lov->lo_lsm = lsm_addref(lsm); r0->lo_nr = lsm->lsm_stripe_count; LASSERT(r0->lo_nr <= lov_targets_nr(dev)); r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]), GFP_NOFS); - if (r0->lo_sub != NULL) { + if (r0->lo_sub) { result = 0; subconf->coc_inode = conf->coc_inode; spin_lock_init(&r0->lo_sub_lock); @@ -241,9 +243,10 @@ static int lov_init_raid0(const struct lu_env *env, subdev = lovsub2cl_dev(dev->ld_target[ost_idx]); subconf->u.coc_oinfo = oinfo; - LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx); + LASSERTF(subdev, "not init ost %d\n", ost_idx); /* In the function below, .hs_keycmp resolves to - * lu_obj_hop_keycmp() */ + * lu_obj_hop_keycmp() + */ /* coverity[overrun-buffer-val] */ stripe = lov_sub_find(env, subdev, ofid, subconf); if (!IS_ERR(stripe)) { @@ -263,15 +266,15 @@ out: } static int lov_init_released(const struct lu_env *env, - struct lov_device *dev, struct 
lov_object *lov, - const struct cl_object_conf *conf, - union lov_layout_state *state) + struct lov_device *dev, struct lov_object *lov, + const struct cl_object_conf *conf, + union lov_layout_state *state) { struct lov_stripe_md *lsm = conf->u.coc_md->lsm; - LASSERT(lsm != NULL); + LASSERT(lsm); LASSERT(lsm_is_released(lsm)); - LASSERT(lov->lo_lsm == NULL); + LASSERT(!lov->lo_lsm); lov->lo_lsm = lsm_addref(lsm); return 0; @@ -310,7 +313,8 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov, cl_object_put(env, sub); /* ... wait until it is actually destroyed---sub-object clears its - * ->lo_sub[] slot in lovsub_object_fini() */ + * ->lo_sub[] slot in lovsub_object_fini() + */ if (r0->lo_sub[idx] == los) { waiter = &lov_env_info(env)->lti_waiter; init_waitqueue_entry(waiter, current); @@ -318,7 +322,8 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov, set_current_state(TASK_UNINTERRUPTIBLE); while (1) { /* this wait-queue is signaled at the end of - * lu_object_free(). */ + * lu_object_free(). 
+ */ set_current_state(TASK_UNINTERRUPTIBLE); spin_lock(&r0->lo_sub_lock); if (r0->lo_sub[idx] == los) { @@ -332,7 +337,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov, } remove_wait_queue(&bkt->lsb_marche_funebre, waiter); } - LASSERT(r0->lo_sub[idx] == NULL); + LASSERT(!r0->lo_sub[idx]); } static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov, @@ -345,11 +350,11 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov, dump_lsm(D_INODE, lsm); lov_layout_wait(env, lov); - if (r0->lo_sub != NULL) { + if (r0->lo_sub) { for (i = 0; i < r0->lo_nr; ++i) { struct lovsub_object *los = r0->lo_sub[i]; - if (los != NULL) { + if (los) { cl_locks_prune(env, &los->lso_cl, 1); /* * If top-level object is to be evicted from @@ -374,7 +379,7 @@ static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov, { struct lov_layout_raid0 *r0 = &state->raid0; - if (r0->lo_sub != NULL) { + if (r0->lo_sub) { kvfree(r0->lo_sub); r0->lo_sub = NULL; } @@ -384,7 +389,7 @@ static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov, } static void lov_fini_released(const struct lu_env *env, struct lov_object *lov, - union lov_layout_state *state) + union lov_layout_state *state) { dump_lsm(D_INODE, lov->lo_lsm); lov_free_memmd(&lov->lo_lsm); @@ -406,13 +411,13 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie, int i; (*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n", - r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm, - lsm->lsm_magic, atomic_read(&lsm->lsm_refc), - lsm->lsm_stripe_count, lsm->lsm_layout_gen); + r0->lo_nr, lov->lo_layout_invalid ? 
"invalid" : "valid", lsm, + lsm->lsm_magic, atomic_read(&lsm->lsm_refc), + lsm->lsm_stripe_count, lsm->lsm_layout_gen); for (i = 0; i < r0->lo_nr; ++i) { struct lu_object *sub; - if (r0->lo_sub[i] != NULL) { + if (r0->lo_sub[i]) { sub = lovsub2lu(r0->lo_sub[i]); lu_object_print(env, cookie, p, sub); } else { @@ -423,16 +428,16 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie, } static int lov_print_released(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) + lu_printer_t p, const struct lu_object *o) { struct lov_object *lov = lu2lov(o); struct lov_stripe_md *lsm = lov->lo_lsm; (*p)(env, cookie, - "released: %s, lsm{%p 0x%08X %d %u %u}:\n", - lov->lo_layout_invalid ? "invalid" : "valid", lsm, - lsm->lsm_magic, atomic_read(&lsm->lsm_refc), - lsm->lsm_stripe_count, lsm->lsm_layout_gen); + "released: %s, lsm{%p 0x%08X %d %u %u}:\n", + lov->lo_layout_invalid ? "invalid" : "valid", lsm, + lsm->lsm_magic, atomic_read(&lsm->lsm_refc), + lsm->lsm_stripe_count, lsm->lsm_layout_gen); return 0; } @@ -465,7 +470,8 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj, * context, and this function is called in ccc_lock_state(), it will * hit this assertion. * Anyway, it's still okay to call attr_get w/o type guard as layout - * can't go if locks exist. */ + * can't go if locks exist. + */ /* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */ if (!r0->lo_attr_valid) { @@ -475,7 +481,8 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj, memset(lvb, 0, sizeof(*lvb)); /* XXX: timestamps can be negative by sanity:test_39m, - * how can it be? */ + * how can it be? 
+ */ lvb->lvb_atime = LLONG_MIN; lvb->lvb_ctime = LLONG_MIN; lvb->lvb_mtime = LLONG_MIN; @@ -569,7 +576,7 @@ static const struct lov_layout_operations lov_dispatch[] = { */ static enum lov_layout_type lov_type(struct lov_stripe_md *lsm) { - if (lsm == NULL) + if (!lsm) return LLT_EMPTY; if (lsm_is_released(lsm)) return LLT_RELEASED; @@ -624,7 +631,7 @@ static void lov_conf_lock(struct lov_object *lov) { LASSERT(lov->lo_owner != current); down_write(&lov->lo_type_guard); - LASSERT(lov->lo_owner == NULL); + LASSERT(!lov->lo_owner); lov->lo_owner = current; } @@ -639,9 +646,9 @@ static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov) struct l_wait_info lwi = { 0 }; while (atomic_read(&lov->lo_active_ios) > 0) { - CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n", - PFID(lu_object_fid(lov2lu(lov))), - atomic_read(&lov->lo_active_ios)); + CDEBUG(D_INODE, "file:" DFID " wait for active IO, now: %d.\n", + PFID(lu_object_fid(lov2lu(lov))), + atomic_read(&lov->lo_active_ios)); l_wait_event(lov->lo_waitq, atomic_read(&lov->lo_active_ios) == 0, &lwi); @@ -666,7 +673,7 @@ static int lov_layout_change(const struct lu_env *unused, LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch)); - if (conf->u.coc_md != NULL) + if (conf->u.coc_md) llt = lov_type(conf->u.coc_md->lsm); LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch)); @@ -689,7 +696,7 @@ static int lov_layout_change(const struct lu_env *unused, old_ops->llo_fini(env, lov, &lov->u); LASSERT(atomic_read(&lov->lo_active_ios) == 0); - LASSERT(hdr->coh_tree.rnode == NULL); + LASSERT(!hdr->coh_tree.rnode); LASSERT(hdr->coh_pages == 0); lov->lo_type = LLT_EMPTY; @@ -767,10 +774,10 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj, LASSERT(conf->coc_opc == OBJECT_CONF_SET); - if (conf->u.coc_md != NULL) + if (conf->u.coc_md) lsm = conf->u.coc_md->lsm; - if ((lsm == NULL && lov->lo_lsm == NULL) || - ((lsm != NULL && lov->lo_lsm != NULL) && + if ((!lsm && 
!lov->lo_lsm) || + ((lsm && lov->lo_lsm) && (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) && (lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) { /* same version of layout */ @@ -818,7 +825,7 @@ static int lov_object_print(const struct lu_env *env, void *cookie, } int lov_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page, vmpage); @@ -845,7 +852,8 @@ static int lov_attr_get(const struct lu_env *env, struct cl_object *obj, struct cl_attr *attr) { /* do not take lock, as this function is called under a - * spin-lock. Layout is protected from changing by ongoing IO. */ + * spin-lock. Layout is protected from changing by ongoing IO. + */ return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr); } @@ -891,8 +899,8 @@ struct lu_object *lov_object_alloc(const struct lu_env *env, struct lov_object *lov; struct lu_object *obj; - lov = kmem_cache_alloc(lov_object_kmem, GFP_NOFS | __GFP_ZERO); - if (lov != NULL) { + lov = kmem_cache_zalloc(lov_object_kmem, GFP_NOFS); + if (lov) { obj = lov2lu(lov); lu_object_init(obj, NULL, dev); lov->lo_cl.co_ops = &lov_ops; @@ -913,11 +921,11 @@ static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov) struct lov_stripe_md *lsm = NULL; lov_conf_freeze(lov); - if (lov->lo_lsm != NULL) { + if (lov->lo_lsm) { lsm = lsm_addref(lov->lo_lsm); CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n", - lsm, atomic_read(&lsm->lsm_refc), - lov->lo_layout_invalid, current); + lsm, atomic_read(&lsm->lsm_refc), + lov->lo_layout_invalid, current); } lov_conf_thaw(lov); return lsm; @@ -928,12 +936,12 @@ struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj) struct lu_object *luobj; struct lov_stripe_md *lsm = NULL; - if (clobj == NULL) + if (!clobj) return NULL; luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu, &lov_device_type); - if (luobj != NULL) + if 
(luobj) lsm = lov_lsm_addref(lu2lov(luobj)); return lsm; } @@ -941,7 +949,7 @@ EXPORT_SYMBOL(lov_lsm_get); void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm) { - if (lsm != NULL) + if (lsm) lov_free_memmd(&lsm); } EXPORT_SYMBOL(lov_lsm_put); @@ -953,7 +961,7 @@ int lov_read_and_clear_async_rc(struct cl_object *clob) luobj = lu_object_locate(&cl_object_header(clob)->coh_lu, &lov_device_type); - if (luobj != NULL) { + if (luobj) { struct lov_object *lov = lu2lov(luobj); lov_conf_freeze(lov); @@ -963,7 +971,6 @@ int lov_read_and_clear_async_rc(struct cl_object *clob) int i; lsm = lov->lo_lsm; - LASSERT(lsm != NULL); for (i = 0; i < lsm->lsm_stripe_count; i++) { struct lov_oinfo *loi = lsm->lsm_oinfo[i]; diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c index aa520aa76..ae83eb0f6 100644 --- a/drivers/staging/lustre/lustre/lov/lov_offset.c +++ b/drivers/staging/lustre/lustre/lov/lov_offset.c @@ -43,8 +43,7 @@ #include "lov_internal.h" /* compute object size given "stripeno" and the ost size */ -u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, - int stripeno) +u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno) { unsigned long ssize = lsm->lsm_stripe_size; unsigned long stripe_size; @@ -55,7 +54,6 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, if (ost_size == 0) return 0; - LASSERT(lsm_op_find(magic) != NULL); lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth); /* lov_do_div64(a, b) returns a % b, and a = a / b */ @@ -115,7 +113,8 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, * this function returns < 0 when the offset was "before" the stripe and * was moved forward to the start of the stripe in question; 0 when it * falls in the stripe and no shifting was done; > 0 when the offset - * was outside the stripe and was pulled back to its final byte. 
*/ + * was outside the stripe and was pulled back to its final byte. + */ int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off, int stripeno, u64 *obdoff) { @@ -129,8 +128,6 @@ int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off, return 0; } - LASSERT(lsm_op_find(magic) != NULL); - lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &lov_off, &swidth); @@ -183,7 +180,6 @@ u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, if (file_size == OBD_OBJECT_EOF) return OBD_OBJECT_EOF; - LASSERT(lsm_op_find(magic) != NULL); lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &file_size, &swidth); @@ -213,7 +209,8 @@ u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, /* given an extent in an lov and a stripe, calculate the extent of the stripe * that is contained within the lov extent. this returns true if the given - * stripe does intersect with the lov extent. */ + * stripe does intersect with the lov extent. + */ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, u64 start, u64 end, u64 *obd_start, u64 *obd_end) { @@ -227,7 +224,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, /* this stripe doesn't intersect the file extent when neither * start or the end intersected the stripe and obd_start and - * obd_end got rounded up to the save value. */ + * obd_end got rounded up to the save value. + */ if (start_side != 0 && end_side != 0 && *obd_start == *obd_end) return 0; @@ -238,7 +236,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno, * in the wrong direction and touch it up. * interestingly, this can't underflow since end must be > start * if we passed through the previous check. - * (should we assert for that somewhere?) */ + * (should we assert for that somewhere?) 
+ */ if (end_side != 0) (*obd_end)--; @@ -252,7 +251,6 @@ int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off) u64 stripe_off, swidth; int magic = lsm->lsm_magic; - LASSERT(lsm_op_find(magic) != NULL); lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth); stripe_off = lov_do_div64(lov_off, swidth); diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c index 6b2d10071..3925633a9 100644 --- a/drivers/staging/lustre/lustre/lov/lov_pack.c +++ b/drivers/staging/lustre/lustre/lov/lov_pack.c @@ -134,17 +134,18 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, if ((lmm_magic != LOV_MAGIC_V1) && (lmm_magic != LOV_MAGIC_V3)) { CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", - lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); + lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); return -EINVAL; } if (lsm) { /* If we are just sizing the EA, limit the stripe count - * to the actual number of OSTs in this filesystem. */ + * to the actual number of OSTs in this filesystem. + */ if (!lmmp) { stripe_count = lov_get_stripecnt(lov, lmm_magic, - lsm->lsm_stripe_count); + lsm->lsm_stripe_count); lsm->lsm_stripe_count = stripe_count; } else if (!lsm_is_released(lsm)) { stripe_count = lsm->lsm_stripe_count; @@ -155,7 +156,8 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, /* No need to allocate more than maximum supported stripes. 
* Anyway, this is pretty inaccurate since ld_tgt_count now * represents max index and we should rely on the actual number - * of OSTs instead */ + * of OSTs instead + */ stripe_count = lov_mds_md_max_stripe_count( lov->lov_ocd.ocd_max_easize, lmm_magic); @@ -183,7 +185,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, return -ENOMEM; } - CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d \n", + CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n", lmm_magic, lmm_size); lmmv1 = *lmmp; @@ -241,7 +243,8 @@ __u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count) stripe_count = 1; /* stripe count is based on whether ldiskfs can handle - * larger EA sizes */ + * larger EA sizes + */ if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE && lov->lov_ocd.ocd_max_easize) max_stripes = lov_mds_md_max_stripe_count( @@ -257,14 +260,15 @@ static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count) { int rc; - if (lsm_op_find(le32_to_cpu(*(__u32 *)lmm)) == NULL) { + if (!lsm_op_find(le32_to_cpu(*(__u32 *)lmm))) { CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n", le32_to_cpu(*(__u32 *)lmm), lmm_bytes); CERROR("%*phN\n", lmm_bytes, lmm); return -EINVAL; } rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm, - lmm_bytes, stripe_count); + lmm_bytes, + stripe_count); return rc; } @@ -306,10 +310,9 @@ int lov_free_memmd(struct lov_stripe_md **lsmp) *lsmp = NULL; LASSERT(atomic_read(&lsm->lsm_refc) > 0); refc = atomic_dec_return(&lsm->lsm_refc); - if (refc == 0) { - LASSERT(lsm_op_find(lsm->lsm_magic) != NULL); + if (refc == 0) lsm_op_find(lsm->lsm_magic)->lsm_free(lsm); - } + return refc; } @@ -359,7 +362,6 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, if (!lmm) return lsm_size; - LASSERT(lsm_op_find(magic) != NULL); rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm); if (rc) { lov_free_memmd(lsmp); @@ -376,7 +378,7 @@ int lov_unpackmd(struct 
obd_export *exp, struct lov_stripe_md **lsmp, * lmm_magic must be LOV_USER_MAGIC. */ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm, - struct lov_user_md *lump) + struct lov_user_md __user *lump) { /* * XXX huge struct allocated on stack. @@ -399,13 +401,15 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm, set_fs(KERNEL_DS); /* we only need the header part from user space to get lmm_magic and - * lmm_stripe_count, (the header part is common to v1 and v3) */ + * lmm_stripe_count, (the header part is common to v1 and v3) + */ lum_size = sizeof(struct lov_user_md_v1); if (copy_from_user(&lum, lump, lum_size)) { rc = -EFAULT; goto out_set; - } else if ((lum.lmm_magic != LOV_USER_MAGIC) && - (lum.lmm_magic != LOV_USER_MAGIC_V3)) { + } + if ((lum.lmm_magic != LOV_USER_MAGIC) && + (lum.lmm_magic != LOV_USER_MAGIC_V3)) { rc = -EINVAL; goto out_set; } diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c index 037ae91b7..fdcaf8047 100644 --- a/drivers/staging/lustre/lustre/lov/lov_page.c +++ b/drivers/staging/lustre/lustre/lov/lov_page.c @@ -57,7 +57,7 @@ static int lov_page_invariant(const struct cl_page_slice *slice) const struct cl_page *page = slice->cpl_page; const struct cl_page *sub = lov_sub_page(slice); - return ergo(sub != NULL, + return ergo(sub, page->cp_child == sub && sub->cp_parent == page && page->cp_state == sub->cp_state); @@ -70,7 +70,7 @@ static void lov_page_fini(const struct lu_env *env, LINVRNT(lov_page_invariant(slice)); - if (sub != NULL) { + if (sub) { LASSERT(sub->cp_state == CPS_FREEING); lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent); sub->cp_parent = NULL; @@ -151,7 +151,7 @@ static const struct cl_page_operations lov_page_ops = { static void lov_empty_page_fini(const struct lu_env *env, struct cl_page_slice *slice) { - LASSERT(slice->cpl_page->cp_child == NULL); + LASSERT(!slice->cpl_page->cp_child); } int lov_page_init_raid0(const struct 
lu_env *env, struct cl_object *obj, @@ -172,8 +172,7 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, offset = cl_offset(obj, page->cp_index); stripe = lov_stripe_number(loo->lo_lsm, offset); LASSERT(stripe < r0->lo_nr); - rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, - &suboff); + rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff); LASSERT(rc == 0); lpg->lps_invalid = 1; diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c index b43ce6cd6..9ae1d6f42 100644 --- a/drivers/staging/lustre/lustre/lov/lov_pool.c +++ b/drivers/staging/lustre/lustre/lov/lov_pool.c @@ -64,7 +64,7 @@ void lov_pool_putref(struct pool_desc *pool) if (atomic_dec_and_test(&pool->pool_refcount)) { LASSERT(hlist_unhashed(&pool->pool_hash)); LASSERT(list_empty(&pool->pool_list)); - LASSERT(pool->pool_debugfs_entry == NULL); + LASSERT(!pool->pool_debugfs_entry); lov_ost_pool_free(&(pool->pool_rr.lqr_pool)); lov_ost_pool_free(&(pool->pool_obds)); kfree(pool); @@ -152,9 +152,8 @@ struct cfs_hash_ops pool_hash_operations = { }; -/* ifdef needed for liblustre support */ /* - * pool /proc seq_file methods + * pool debugfs seq_file methods */ /* * iterator is used to go through the target pool entries @@ -174,7 +173,7 @@ static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos) struct pool_iterator *iter = (struct pool_iterator *)s->private; int prev_idx; - LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic); + LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic); /* test if end of file */ if (*pos >= pool_tgt_count(iter->pool)) @@ -204,7 +203,8 @@ static void *pool_proc_start(struct seq_file *s, loff_t *pos) if ((pool_tgt_count(pool) == 0) || (*pos >= pool_tgt_count(pool))) { /* iter is not created, so stop() has no way to - * find pool to dec ref */ + * find pool to dec ref + */ lov_pool_putref(pool); return NULL; } @@ -217,7 +217,8 @@ static void *pool_proc_start(struct seq_file 
*s, loff_t *pos) iter->idx = 0; /* we use seq_file private field to memorized iterator so - * we can free it at stop() */ + * we can free it at stop() + */ /* /!\ do not forget to restore it to pool before freeing it */ s->private = iter; if (*pos > 0) { @@ -226,8 +227,8 @@ static void *pool_proc_start(struct seq_file *s, loff_t *pos) i = 0; do { - ptr = pool_proc_next(s, &iter, &i); - } while ((i < *pos) && (ptr != NULL)); + ptr = pool_proc_next(s, &iter, &i); + } while ((i < *pos) && ptr); return ptr; } return iter; @@ -239,15 +240,16 @@ static void pool_proc_stop(struct seq_file *s, void *v) /* in some cases stop() method is called 2 times, without * calling start() method (see seq_read() from fs/seq_file.c) - * we have to free only if s->private is an iterator */ + * we have to free only if s->private is an iterator + */ if ((iter) && (iter->magic == POOL_IT_MAGIC)) { /* we restore s->private so next call to pool_proc_start() - * will work */ + * will work + */ s->private = iter->pool; lov_pool_putref(iter->pool); kfree(iter); } - return; } static int pool_proc_show(struct seq_file *s, void *v) @@ -255,8 +257,8 @@ static int pool_proc_show(struct seq_file *s, void *v) struct pool_iterator *iter = (struct pool_iterator *)v; struct lov_tgt_desc *tgt; - LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic); - LASSERT(iter->pool != NULL); + LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic); + LASSERT(iter->pool); LASSERT(iter->idx <= pool_tgt_count(iter->pool)); down_read(&pool_tgt_rw_sem(iter->pool)); @@ -305,7 +307,7 @@ int lov_ost_pool_init(struct ost_pool *op, unsigned int count) init_rwsem(&op->op_rw_sem); op->op_size = count; op->op_array = kcalloc(op->op_size, sizeof(op->op_array[0]), GFP_NOFS); - if (op->op_array == NULL) { + if (!op->op_array) { op->op_size = 0; return -ENOMEM; } @@ -325,7 +327,7 @@ int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count) new_size = max(min_count, 2 * op->op_size); new = kcalloc(new_size, 
sizeof(op->op_array[0]), GFP_NOFS); - if (new == NULL) + if (!new) return -ENOMEM; /* copy old array to new one */ @@ -429,8 +431,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname) INIT_HLIST_NODE(&new_pool->pool_hash); - /* we need this assert seq_file is not implemented for liblustre */ - /* get ref for /proc file */ + /* get ref for debugfs file */ lov_pool_getref(new_pool); new_pool->pool_debugfs_entry = ldebugfs_add_simple( lov->lov_pool_debugfs_entry, @@ -443,7 +444,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname) lov_pool_putref(new_pool); } CDEBUG(D_INFO, "pool %p - proc %p\n", - new_pool, new_pool->pool_debugfs_entry); + new_pool, new_pool->pool_debugfs_entry); spin_lock(&obd->obd_dev_lock); list_add_tail(&new_pool->pool_list, &lov->lov_pool_list); @@ -487,7 +488,7 @@ int lov_pool_del(struct obd_device *obd, char *poolname) /* lookup and kill hash reference */ pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) return -ENOENT; if (!IS_ERR_OR_NULL(pool->pool_debugfs_entry)) { @@ -518,7 +519,7 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname) lov = &(obd->u.lov); pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) return -ENOENT; obd_str2uuid(&ost_uuid, ostname); @@ -564,7 +565,7 @@ int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname) lov = &(obd->u.lov); pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) return -ENOENT; obd_str2uuid(&ost_uuid, ostname); @@ -632,12 +633,12 @@ struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname) pool = NULL; if (poolname[0] != '\0') { pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname); - if (pool == NULL) + if (!pool) CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n", poolname); - if ((pool != NULL) && (pool_tgt_count(pool) == 0)) { + if (pool && (pool_tgt_count(pool) == 0)) { CWARN("Request for an empty 
pool ("LOV_POOLNAMEF")\n", - poolname); + poolname); /* pool is ignored, so we remove ref on it */ lov_pool_putref(pool); pool = NULL; diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c index 42deda71f..7178a02d6 100644 --- a/drivers/staging/lustre/lustre/lov/lov_request.c +++ b/drivers/staging/lustre/lustre/lov/lov_request.c @@ -156,7 +156,7 @@ static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx) tgt = lov->lov_tgts[ost_idx]; - if (unlikely(tgt == NULL)) { + if (unlikely(!tgt)) { rc = 0; goto out; } @@ -178,7 +178,7 @@ static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx) cfs_time_seconds(1), NULL, NULL); rc = l_wait_event(waitq, lov_check_set(lov, ost_idx), &lwi); - if (tgt != NULL && tgt->ltd_active) + if (tgt->ltd_active) return 1; return 0; @@ -190,28 +190,23 @@ out: static int common_attr_done(struct lov_request_set *set) { - struct list_head *pos; struct lov_request *req; struct obdo *tmp_oa; int rc = 0, attrset = 0; - LASSERT(set->set_oi != NULL); - - if (set->set_oi->oi_oa == NULL) + if (!set->set_oi->oi_oa) return 0; if (!atomic_read(&set->set_success)) return -EIO; - tmp_oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (tmp_oa == NULL) { + tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!tmp_oa) { rc = -ENOMEM; goto out; } - list_for_each(pos, &set->set_list) { - req = list_entry(pos, struct lov_request, rq_link); - + list_for_each_entry(req, &set->set_list, rq_link) { if (!req->rq_complete || req->rq_rc) continue; if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */ @@ -227,7 +222,8 @@ static int common_attr_done(struct lov_request_set *set) if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) && (set->set_oi->oi_md->lsm_stripe_count != attrset)) { /* When we take attributes of some epoch, we require all the - * ost to be active. */ + * ost to be active. 
+ */ CERROR("Not all the stripes had valid attrs\n"); rc = -EIO; goto out; @@ -246,7 +242,7 @@ int lov_fini_getattr_set(struct lov_request_set *set) { int rc = 0; - if (set == NULL) + if (!set) return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) @@ -258,7 +254,8 @@ int lov_fini_getattr_set(struct lov_request_set *set) } /* The callback for osc_getattr_async that finalizes a request info when a - * response is received. */ + * response is received. + */ static int cb_getattr_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; @@ -310,9 +307,8 @@ int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo, req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; - req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); - if (req->rq_oi.oi_oa == NULL) { + req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!req->rq_oi.oi_oa) { kfree(req); rc = -ENOMEM; goto out_set; @@ -337,7 +333,7 @@ out_set: int lov_fini_destroy_set(struct lov_request_set *set) { - if (set == NULL) + if (!set) return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) { @@ -368,7 +364,7 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo, set->set_oi->oi_md = lsm; set->set_oi->oi_oa = src_oa; set->set_oti = oti; - if (oti != NULL && src_oa->o_valid & OBD_MD_FLCOOKIE) + if (oti && src_oa->o_valid & OBD_MD_FLCOOKIE) set->set_cookies = oti->oti_logcookies; for (i = 0; i < lsm->lsm_stripe_count; i++) { @@ -393,9 +389,8 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo, req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; - req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); - if (req->rq_oi.oi_oa == NULL) { + req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!req->rq_oi.oi_oa) { kfree(req); rc = -ENOMEM; goto out_set; @@ -419,7 +414,7 @@ int lov_fini_setattr_set(struct lov_request_set *set) { int rc = 0; - if (set == NULL) + if (!set) 
return 0; LASSERT(set->set_exp); if (atomic_read(&set->set_completes)) { @@ -460,7 +455,8 @@ int lov_update_setattr_set(struct lov_request_set *set, } /* The callback for osc_setattr_async that finalizes a request info when a - * response is received. */ + * response is received. + */ static int cb_setattr_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; @@ -486,7 +482,7 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo, set->set_exp = exp; set->set_oti = oti; set->set_oi = oinfo; - if (oti != NULL && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) + if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) set->set_cookies = oti->oti_logcookies; for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) { @@ -509,9 +505,8 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo, req->rq_stripe = i; req->rq_idx = loi->loi_ost_idx; - req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep, - GFP_NOFS | __GFP_ZERO); - if (req->rq_oi.oi_oa == NULL) { + req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!req->rq_oi.oi_oa) { kfree(req); rc = -ENOMEM; goto out_set; @@ -581,7 +576,7 @@ int lov_fini_statfs_set(struct lov_request_set *set) { int rc = 0; - if (set == NULL) + if (!set) return 0; if (atomic_read(&set->set_completes)) { @@ -648,7 +643,8 @@ static void lov_update_statfs(struct obd_statfs *osfs, } /* The callback for osc_statfs_async that finalizes a request info when a - * response is received. */ + * response is received. + */ static int cb_statfs_update(void *cookie, int rc) { struct obd_info *oinfo = cookie; @@ -668,7 +664,8 @@ static int cb_statfs_update(void *cookie, int rc) lov_sfs = oinfo->oi_osfs; success = atomic_read(&set->set_success); /* XXX: the same is done in lov_update_common_set, however - lovset->set_exp is not initialized. */ + * lovset->set_exp is not initialized. 
+ */ lov_update_set(set, lovreq, rc); if (rc) goto out; @@ -718,7 +715,7 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, for (i = 0; i < lov->desc.ld_tgt_count; i++) { struct lov_request *req; - if (lov->lov_tgts[i] == NULL || + if (!lov->lov_tgts[i] || (!lov_check_and_wait_active(lov, i) && (oinfo->oi_flags & OBD_STATFS_NODELAY))) { CDEBUG(D_HA, "lov idx %d inactive\n", i); @@ -726,7 +723,8 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo, } /* skip targets that have been explicitly disabled by the - * administrator */ + * administrator + */ if (!lov->lov_tgts[i]->ltd_exp) { CDEBUG(D_HA, "lov idx %d administratively disabled\n", i); continue; diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c index f1795c3e2..c335c020f 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c @@ -101,7 +101,6 @@ static int lovsub_device_init(const struct lu_env *env, struct lu_device *d, next->ld_site = d->ld_site; ldt = next->ld_type; - LASSERT(ldt != NULL); rc = ldt->ldt_ops->ldto_device_init(env, next, ldt->ldt_name, NULL); if (rc) { next->ld_site = NULL; @@ -148,8 +147,8 @@ static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev, struct lovsub_req *lsr; int result; - lsr = kmem_cache_alloc(lovsub_req_kmem, GFP_NOFS | __GFP_ZERO); - if (lsr != NULL) { + lsr = kmem_cache_zalloc(lovsub_req_kmem, GFP_NOFS); + if (lsr) { cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops); result = 0; } else @@ -175,7 +174,7 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env, struct lovsub_device *lsd; lsd = kzalloc(sizeof(*lsd), GFP_NOFS); - if (lsd != NULL) { + if (lsd) { int result; result = cl_device_init(&lsd->acid_cl, t); diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c index 1a3e30a14..3bb0c9068 100644 --- 
a/drivers/staging/lustre/lustre/lov/lovsub_lock.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c @@ -148,7 +148,8 @@ static void lovsub_lock_descr_map(const struct cl_lock_descr *in, { pgoff_t size; /* stripe size in pages */ pgoff_t skip; /* how many pages in every stripe are occupied by - * "other" stripes */ + * "other" stripes + */ pgoff_t start; pgoff_t end; @@ -284,7 +285,8 @@ static int lovsub_lock_delete_one(const struct lu_env *env, switch (parent->cll_state) { case CLS_ENQUEUED: /* See LU-1355 for the case that a glimpse lock is - * interrupted by signal */ + * interrupted by signal + */ LASSERT(parent->cll_flags & CLF_CANCELLED); break; case CLS_QUEUING: @@ -402,7 +404,7 @@ static void lovsub_lock_delete(const struct lu_env *env, restart = 0; list_for_each_entry_safe(scan, temp, - &sub->lss_parents, lll_list) { + &sub->lss_parents, lll_list) { lov = scan->lll_super; subdata = &lov->lls_sub[scan->lll_idx]; lovsub_parent_lock(env, lov); @@ -429,7 +431,7 @@ static int lovsub_lock_print(const struct lu_env *env, void *cookie, list_for_each_entry(scan, &sub->lss_parents, lll_list) { lov = scan->lll_super; (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov); - if (lov != NULL) + if (lov) cl_lock_descr_print(env, cookie, p, &lov->lls_cl.cls_lock->cll_descr); (*p)(env, cookie, "] "); @@ -453,8 +455,8 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, struct lovsub_lock *lsk; int result; - lsk = kmem_cache_alloc(lovsub_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lsk != NULL) { + lsk = kmem_cache_zalloc(lovsub_lock_kmem, GFP_NOFS); + if (lsk) { INIT_LIST_HEAD(&lsk->lss_parents); cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops); result = 0; diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c index 5ba5ee1b8..6c5430d93 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_object.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c @@ -63,7 +63,7 @@ int 
lovsub_object_init(const struct lu_env *env, struct lu_object *obj, under = &dev->acid_next->cd_lu_dev; below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); - if (below != NULL) { + if (below) { lu_object_add(obj, below); cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page)); result = 0; @@ -143,8 +143,8 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env, struct lovsub_object *los; struct lu_object *obj; - los = kmem_cache_alloc(lovsub_object_kmem, GFP_NOFS | __GFP_ZERO); - if (los != NULL) { + los = kmem_cache_zalloc(lovsub_object_kmem, GFP_NOFS); + if (los) { struct cl_object_header *hdr; obj = lovsub2lu(los); diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c index 3f00ce967..2d945532b 100644 --- a/drivers/staging/lustre/lustre/lov/lovsub_page.c +++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c @@ -60,7 +60,7 @@ static const struct cl_page_operations lovsub_page_ops = { }; int lovsub_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *unused) + struct cl_page *page, struct page *unused) { struct lovsub_page *lsb = cl_object_page_slice(obj, page); diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c index 337241d84..0dcb6b6a7 100644 --- a/drivers/staging/lustre/lustre/lov/lproc_lov.c +++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c @@ -46,22 +46,22 @@ static int lov_stripesize_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; seq_printf(m, "%llu\n", desc->ld_default_stripe_size); return 0; } static ssize_t lov_stripesize_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file 
*)file->private_data)->private; struct lov_desc *desc; __u64 val; int rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_u64_helper(buffer, count, &val); if (rc) @@ -79,22 +79,22 @@ static int lov_stripeoffset_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; seq_printf(m, "%llu\n", desc->ld_default_stripe_offset); return 0; } static ssize_t lov_stripeoffset_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file *)file->private_data)->private; struct lov_desc *desc; __u64 val; int rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_u64_helper(buffer, count, &val); if (rc) @@ -111,21 +111,21 @@ static int lov_stripetype_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; seq_printf(m, "%u\n", desc->ld_pattern); return 0; } static ssize_t lov_stripetype_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file *)file->private_data)->private; struct lov_desc *desc; int val, rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_helper(buffer, count, &val); if (rc) @@ -143,21 +143,21 @@ static int lov_stripecount_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_desc *desc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; seq_printf(m, "%d\n", (__s16)(desc->ld_default_stripe_count + 1) - 1); return 0; } static ssize_t lov_stripecount_seq_write(struct file *file, - const 
char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *dev = ((struct seq_file *)file->private_data)->private; struct lov_desc *desc; int val, rc; - LASSERT(dev != NULL); + LASSERT(dev); desc = &dev->u.lov.desc; rc = lprocfs_write_helper(buffer, count, &val); if (rc) @@ -199,7 +199,7 @@ static int lov_desc_uuid_seq_show(struct seq_file *m, void *v) struct obd_device *dev = (struct obd_device *)m->private; struct lov_obd *lov; - LASSERT(dev != NULL); + LASSERT(dev); lov = &dev->u.lov; seq_printf(m, "%s\n", lov->desc.ld_uuid.uuid); return 0; diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h index 3d2997a16..c5519aeb0 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h +++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h @@ -53,7 +53,7 @@ void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, __u32 size, void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags, struct md_op_data *data, int ea_size); void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, - void *ea, int ealen, void *ea2, int ea2len); + void *ea, int ealen, void *ea2, int ea2len); void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data, const void *data, int datalen, __u32 mode, __u32 uid, __u32 gid, cfs_cap_t capability, __u64 rdev); @@ -90,7 +90,7 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, struct ptlrpc_request **req, __u64 extra_lock_flags); int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, - struct list_head *cancels, ldlm_mode_t mode, + struct list_head *cancels, enum ldlm_mode mode, __u64 bits); /* mdc/mdc_request.c */ int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid, @@ -119,8 +119,8 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, int mdc_unlink(struct obd_export *exp, struct md_op_data 
*op_data, struct ptlrpc_request **request); int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - ldlm_cancel_flags_t flags, void *opaque); + ldlm_policy_data_t *policy, enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque); int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, struct lu_fid *fid, __u64 *bits); @@ -129,10 +129,10 @@ int mdc_intent_getattr_async(struct obd_export *exp, struct md_enqueue_info *minfo, struct ldlm_enqueue_info *einfo); -ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh); +enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, enum ldlm_type type, + ldlm_policy_data_t *policy, enum ldlm_mode mode, + struct lustre_handle *lockh); static inline int mdc_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, int opc, diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c index 7218532ff..b3bfdcb73 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c @@ -41,8 +41,6 @@ static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid) { - LASSERT(b != NULL); - b->suppgid = suppgid; b->uid = from_kuid(&init_user_ns, current_uid()); b->gid = from_kgid(&init_user_ns, current_gid()); @@ -83,7 +81,6 @@ void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid, { struct mdt_body *b = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(b != NULL); b->valid = valid; b->eadatasize = ea_size; b->flags = flags; @@ -323,7 +320,7 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, return; lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA); - if (ea == NULL) { /* Remove LOV EA */ + if (!ea) { /* Remove LOV EA */ 
lum->lmm_magic = LOV_USER_MAGIC_V1; lum->lmm_stripe_size = 0; lum->lmm_stripe_count = 0; @@ -346,7 +343,6 @@ void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data) CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_unlink)); rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - LASSERT(rec != NULL); rec->ul_opcode = op_data->op_cli_flags & CLI_RM_ENTRY ? REINT_RMENTRY : REINT_UNLINK; @@ -362,7 +358,7 @@ void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data) rec->ul_bias = op_data->op_bias; tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); - LASSERT(tmp != NULL); + LASSERT(tmp); LOGL0(op_data->op_name, op_data->op_namelen, tmp); } @@ -373,7 +369,6 @@ void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data) CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_link)); rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); - LASSERT(rec != NULL); rec->lk_opcode = REINT_LINK; rec->lk_fsuid = op_data->op_fsuid; /* current->fsuid; */ @@ -456,10 +451,9 @@ static void mdc_hsm_release_pack(struct ptlrpc_request *req, struct ldlm_lock *lock; data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA); - LASSERT(data != NULL); lock = ldlm_handle2lock(&op_data->op_lease_handle); - if (lock != NULL) { + if (lock) { data->cd_handle = lock->l_remote_handle; ldlm_lock_put(lock); } @@ -495,7 +489,8 @@ static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw) /* We record requests in flight in cli->cl_r_in_flight here. * There is only one write rpc possible in mdc anyway. If this to change - * in the future - the code may need to be revisited. */ + * in the future - the code may need to be revisited. 
+ */ int mdc_enter_request(struct client_obd *cli) { int rc = 0; diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c index ef9a1e124..958a164f6 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c @@ -129,7 +129,7 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, lock = ldlm_handle2lock((struct lustre_handle *)lockh); - LASSERT(lock != NULL); + LASSERT(lock); lock_res_and_lock(lock); if (lock->l_resource->lr_lvb_inode && lock->l_resource->lr_lvb_inode != data) { @@ -151,13 +151,13 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, return 0; } -ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags, - const struct lu_fid *fid, ldlm_type_t type, - ldlm_policy_data_t *policy, ldlm_mode_t mode, - struct lustre_handle *lockh) +enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags, + const struct lu_fid *fid, enum ldlm_type type, + ldlm_policy_data_t *policy, enum ldlm_mode mode, + struct lustre_handle *lockh) { struct ldlm_res_id res_id; - ldlm_mode_t rc; + enum ldlm_mode rc; fid_build_reg_res_name(fid, &res_id); /* LU-4405: Clear bits not supported by server */ @@ -170,8 +170,8 @@ ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags, int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, ldlm_policy_data_t *policy, - ldlm_mode_t mode, - ldlm_cancel_flags_t flags, + enum ldlm_mode mode, + enum ldlm_cancel_flags flags, void *opaque) { struct ldlm_res_id res_id; @@ -191,12 +191,12 @@ int mdc_null_inode(struct obd_export *exp, struct ldlm_resource *res; struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace; - LASSERTF(ns != NULL, "no namespace passed\n"); + LASSERTF(ns, "no namespace passed\n"); fid_build_reg_res_name(fid, &res_id); res = ldlm_resource_get(ns, NULL, &res_id, 0, 0); - if (res == NULL) + if (!res) return 0; lock_res(res); @@ -210,7 +210,8 @@ int 
mdc_null_inode(struct obd_export *exp, /* find any ldlm lock of the inode in mdc * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ int mdc_find_cbdata(struct obd_export *exp, const struct lu_fid *fid, ldlm_iterator_t it, void *data) @@ -252,7 +253,8 @@ static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc) * OOM here may cause recovery failure if lmm is needed (only for the * original open if the MDS crashed just when this client also OOM'd) * but this is incredibly unlikely, and questionable whether the client - * could do MDS recovery under OOM anyways... */ + * could do MDS recovery under OOM anyways... + */ static void mdc_realloc_openmsg(struct ptlrpc_request *req, struct mdt_body *body) { @@ -317,7 +319,7 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT_OPEN); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return ERR_PTR(-ENOMEM); } @@ -364,8 +366,8 @@ mdc_intent_getxattr_pack(struct obd_export *exp, LIST_HEAD(cancels); req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_INTENT_GETXATTR); - if (req == NULL) + &RQF_LDLM_INTENT_GETXATTR); + if (!req) return ERR_PTR(-ENOMEM); rc = ldlm_prep_enqueue_req(exp, req, &cancels, count); @@ -384,14 +386,12 @@ mdc_intent_getxattr_pack(struct obd_export *exp, mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid, maxdata, -1, 0); - req_capsule_set_size(&req->rq_pill, &RMF_EADATA, - RCL_SERVER, maxdata); + req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, maxdata); - req_capsule_set_size(&req->rq_pill, &RMF_EAVALS, - RCL_SERVER, maxdata); + req_capsule_set_size(&req->rq_pill, &RMF_EAVALS, RCL_SERVER, maxdata); req_capsule_set_size(&req->rq_pill, &RMF_EAVALS_LENS, - RCL_SERVER, maxdata); + RCL_SERVER, maxdata); ptlrpc_request_set_replen(req); @@ -409,7 +409,7 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export 
*exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT_UNLINK); - if (req == NULL) + if (!req) return ERR_PTR(-ENOMEM); req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, @@ -437,8 +437,8 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp, } static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp, - struct lookup_intent *it, - struct md_op_data *op_data) + struct lookup_intent *it, + struct md_op_data *op_data) { struct ptlrpc_request *req; struct obd_device *obddev = class_exp2obd(exp); @@ -453,7 +453,7 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_INTENT_GETATTR); - if (req == NULL) + if (!req) return ERR_PTR(-ENOMEM); req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, @@ -496,8 +496,8 @@ static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), - &RQF_LDLM_INTENT_LAYOUT); - if (req == NULL) + &RQF_LDLM_INTENT_LAYOUT); + if (!req) return ERR_PTR(-ENOMEM); req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0); @@ -514,7 +514,8 @@ static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp, /* pack the layout intent request */ layout = req_capsule_client_get(&req->rq_pill, &RMF_LAYOUT_INTENT); /* LAYOUT_INTENT_ACCESS is generic, specific operation will be - * set for replication */ + * set for replication + */ layout->li_opc = LAYOUT_INTENT_ACCESS; req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, @@ -530,7 +531,7 @@ mdc_enqueue_pack(struct obd_export *exp, int lvb_len) int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE); - if (req == NULL) + if (!req) return ERR_PTR(-ENOMEM); rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); @@ -561,7 +562,8 @@ static int mdc_finish_enqueue(struct obd_export *exp, LASSERT(rc >= 0); /* Similarly, if we're going to replay 
this request, we don't want to - * actually get a lock, just perform the intent. */ + * actually get a lock, just perform the intent. + */ if (req->rq_transno || req->rq_replay) { lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ); lockreq->lock_flags |= ldlm_flags_to_wire(LDLM_FL_INTENT_ONLY); @@ -573,10 +575,10 @@ static int mdc_finish_enqueue(struct obd_export *exp, rc = 0; } else { /* rc = 0 */ lock = ldlm_handle2lock(lockh); - LASSERT(lock != NULL); /* If the server gave us back a different lock mode, we should - * fix up our variables. */ + * fix up our variables. + */ if (lock->l_req_mode != einfo->ei_mode) { ldlm_lock_addref(lockh, lock->l_req_mode); ldlm_lock_decref(lockh, einfo->ei_mode); @@ -586,7 +588,6 @@ static int mdc_finish_enqueue(struct obd_export *exp, } lockrep = req_capsule_server_get(pill, &RMF_DLM_REP); - LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */ intent->it_disposition = (int)lockrep->lock_policy_res1; intent->it_status = (int)lockrep->lock_policy_res2; @@ -595,7 +596,8 @@ static int mdc_finish_enqueue(struct obd_export *exp, intent->it_data = req; /* Technically speaking rq_transno must already be zero if - * it_status is in error, so the check is a bit redundant */ + * it_status is in error, so the check is a bit redundant + */ if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay) mdc_clear_replay_flag(req, intent->it_status); @@ -605,7 +607,8 @@ static int mdc_finish_enqueue(struct obd_export *exp, * * It's important that we do this first! 
Otherwise we might exit the * function without doing so, and try to replay a failed create - * (bug 3440) */ + * (bug 3440) + */ if (it->it_op & IT_OPEN && req->rq_replay && (!it_disposition(it, DISP_OPEN_OPEN) || intent->it_status != 0)) mdc_clear_replay_flag(req, intent->it_status); @@ -618,7 +621,7 @@ static int mdc_finish_enqueue(struct obd_export *exp, struct mdt_body *body; body = req_capsule_server_get(pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { CERROR("Can't swab mdt_body\n"); return -EPROTO; } @@ -645,11 +648,12 @@ static int mdc_finish_enqueue(struct obd_export *exp, */ eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD, body->eadatasize); - if (eadata == NULL) + if (!eadata) return -EPROTO; /* save lvb data and length in case this is for layout - * lock */ + * lock + */ lvb_data = eadata; lvb_len = body->eadatasize; @@ -690,31 +694,32 @@ static int mdc_finish_enqueue(struct obd_export *exp, LASSERT(client_is_remote(exp)); perm = req_capsule_server_swab_get(pill, &RMF_ACL, lustre_swab_mdt_remote_perm); - if (perm == NULL) + if (!perm) return -EPROTO; } } else if (it->it_op & IT_LAYOUT) { /* maybe the lock was granted right away and layout - * is packed into RMF_DLM_LVB of req */ + * is packed into RMF_DLM_LVB of req + */ lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER); if (lvb_len > 0) { lvb_data = req_capsule_server_sized_get(pill, &RMF_DLM_LVB, lvb_len); - if (lvb_data == NULL) + if (!lvb_data) return -EPROTO; } } /* fill in stripe data for layout lock */ lock = ldlm_handle2lock(lockh); - if (lock != NULL && ldlm_has_layout(lock) && lvb_data != NULL) { + if (lock && ldlm_has_layout(lock) && lvb_data) { void *lmm; LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d\n", - ldlm_it2str(it->it_op), lvb_len); + ldlm_it2str(it->it_op), lvb_len); lmm = libcfs_kvzalloc(lvb_len, GFP_NOFS); - if (lmm == NULL) { + if (!lmm) { LDLM_LOCK_PUT(lock); return -ENOMEM; } @@ -722,24 +727,25 @@ static int mdc_finish_enqueue(struct 
obd_export *exp, /* install lvb_data */ lock_res_and_lock(lock); - if (lock->l_lvb_data == NULL) { + if (!lock->l_lvb_data) { lock->l_lvb_type = LVB_T_LAYOUT; lock->l_lvb_data = lmm; lock->l_lvb_len = lvb_len; lmm = NULL; } unlock_res_and_lock(lock); - if (lmm != NULL) + if (lmm) kvfree(lmm); } - if (lock != NULL) + if (lock) LDLM_LOCK_PUT(lock); return rc; } /* We always reserve enough space in the reply packet for a stripe MD, because - * we don't know in advance the file type. */ + * we don't know in advance the file type. + */ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, struct lookup_intent *it, struct md_op_data *op_data, struct lustre_handle *lockh, void *lmm, int lmmsize, @@ -782,14 +788,15 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, policy = &getxattr_policy; } - LASSERT(reqp == NULL); + LASSERT(!reqp); generation = obddev->u.cli.cl_import->imp_generation; resend: flags = saved_flags; if (!it) { /* The only way right now is FLOCK, in this case we hide flock - policy as lmm, but lmmsize is 0 */ + * policy as lmm, but lmmsize is 0 + */ LASSERT(lmm && lmmsize == 0); LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n", einfo->ei_type); @@ -823,9 +830,10 @@ resend: if (IS_ERR(req)) return PTR_ERR(req); - if (req != NULL && it && it->it_op & IT_CREAT) + if (req && it && it->it_op & IT_CREAT) /* ask ptlrpc not to resend on EINPROGRESS since we have our own - * retry logic */ + * retry logic + */ req->rq_no_retry_einprogress = 1; if (resends) { @@ -836,7 +844,8 @@ resend: /* It is important to obtain rpc_lock first (if applicable), so that * threads that are serialised with rpc_lock are not polluting our - * rpcs in flight counter. We do not do flock request limiting, though*/ + * rpcs in flight counter. 
We do not do flock request limiting, though + */ if (it) { mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it); rc = mdc_enter_request(&obddev->u.cli); @@ -852,13 +861,14 @@ resend: 0, lvb_type, lockh, 0); if (!it) { /* For flock requests we immediately return without further - delay and let caller deal with the rest, since rest of - this function metadata processing makes no sense for flock - requests anyway. But in case of problem during comms with - Server (ETIMEDOUT) or any signal/kill attempt (EINTR), we - can not rely on caller and this mainly for F_UNLCKs - (explicits or automatically generated by Kernel to clean - current FLocks upon exit) that can't be trashed */ + * delay and let caller deal with the rest, since rest of + * this function metadata processing makes no sense for flock + * requests anyway. But in case of problem during comms with + * Server (ETIMEDOUT) or any signal/kill attempt (EINTR), we + * can not rely on caller and this mainly for F_UNLCKs + * (explicits or automatically generated by Kernel to clean + * current FLocks upon exit) that can't be trashed + */ if ((rc == -EINTR) || (rc == -ETIMEDOUT)) goto resend; return rc; @@ -878,13 +888,13 @@ resend: } lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - LASSERT(lockrep != NULL); lockrep->lock_policy_res2 = ptlrpc_status_ntoh(lockrep->lock_policy_res2); /* Retry the create infinitely when we get -EINPROGRESS from - * server. This is required by the new quota design. */ + * server. This is required by the new quota design. + */ if (it->it_op & IT_CREAT && (int)lockrep->lock_policy_res2 == -EINPROGRESS) { mdc_clear_replay_flag(req, rc); @@ -930,13 +940,13 @@ static int mdc_finish_intent_lock(struct obd_export *exp, struct ldlm_lock *lock; int rc; - LASSERT(request != NULL); LASSERT(request != LP_POISON); LASSERT(request->rq_repmsg != LP_POISON); if (!it_disposition(it, DISP_IT_EXECD)) { /* The server failed before it even started executing the - * intent, i.e. 
because it couldn't unpack the request. */ + * intent, i.e. because it couldn't unpack the request. + */ LASSERT(it->d.lustre.it_status != 0); return it->d.lustre.it_status; } @@ -945,10 +955,11 @@ static int mdc_finish_intent_lock(struct obd_export *exp, return rc; mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY); - LASSERT(mdt_body != NULL); /* mdc_enqueue checked */ + LASSERT(mdt_body); /* mdc_enqueue checked */ /* If we were revalidating a fid/name pair, mark the intent in - * case we fail and get called again from lookup */ + * case we fail and get called again from lookup + */ if (fid_is_sane(&op_data->op_fid2) && it->it_create_mode & M_CHECK_STALE && it->it_op != IT_GETATTR) { @@ -957,7 +968,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp, /* sever can return one of two fids: * op_fid2 - new allocated fid - if file is created. * op_fid3 - existent fid - if file only open. - * op_fid3 is saved in lmv_intent_open */ + * op_fid3 is saved in lmv_intent_open + */ if ((!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1)) && (!lu_fid_eq(&op_data->op_fid3, &mdt_body->fid1))) { CDEBUG(D_DENTRY, "Found stale data "DFID"("DFID")/"DFID @@ -1001,7 +1013,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp, * one. We have to set the data here instead of in * mdc_enqueue, because we need to use the child's inode as * the l_ast_data to match, and that's not available until - * intent_finish has performed the iget().) */ + * intent_finish has performed the iget().) + */ lock = ldlm_handle2lock(lockh); if (lock) { ldlm_policy_data_t policy = lock->l_policy_data; @@ -1036,11 +1049,12 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, { /* We could just return 1 immediately, but since we should only * be called in revalidate_it if we already have a lock, let's - * verify that. */ + * verify that. 
+ */ struct ldlm_res_id res_id; struct lustre_handle lockh; ldlm_policy_data_t policy; - ldlm_mode_t mode; + enum ldlm_mode mode; if (it->d.lustre.it_lock_handle) { lockh.cookie = it->d.lustre.it_lock_handle; @@ -1059,10 +1073,12 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, * Unfortunately, if the bits are split across multiple * locks, there's no easy way to match all of them here, * so an extra RPC would be performed to fetch all - * of those bits at once for now. */ + * of those bits at once for now. + */ /* For new MDTs(> 2.4), UPDATE|PERM should be enough, * but for old MDTs (< 2.4), permission is covered - * by LOOKUP lock, so it needs to match all bits here.*/ + * by LOOKUP lock, so it needs to match all bits here. + */ policy.l_inodebits.bits = MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM; @@ -1076,7 +1092,7 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, } mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, fid, - LDLM_IBITS, &policy, + LDLM_IBITS, &policy, LCK_CR | LCK_CW | LCK_PR | LCK_PW, &lockh); } @@ -1147,11 +1163,13 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data, (it->it_op & (IT_LOOKUP | IT_GETATTR))) { /* We could just return 1 immediately, but since we should only * be called in revalidate_it if we already have a lock, let's - * verify that. */ + * verify that. 
+ */ it->d.lustre.it_lock_handle = 0; rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL); /* Only return failure if it was not GETATTR by cfid - (from inode_revalidate) */ + * (from inode_revalidate) + */ if (rc || op_data->op_namelen != 0) return rc; } @@ -1206,7 +1224,6 @@ static int mdc_intent_getattr_async_interpret(const struct lu_env *env, } lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - LASSERT(lockrep != NULL); lockrep->lock_policy_res2 = ptlrpc_status_ntoh(lockrep->lock_policy_res2); @@ -1235,7 +1252,8 @@ int mdc_intent_getattr_async(struct obd_export *exp, struct ldlm_res_id res_id; /*XXX: Both MDS_INODELOCK_LOOKUP and MDS_INODELOCK_UPDATE are needed * for statahead currently. Consider CMD in future, such two bits - * maybe managed by different MDS, should be adjusted then. */ + * maybe managed by different MDS, should be adjusted then. + */ ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE } @@ -1244,9 +1262,9 @@ int mdc_intent_getattr_async(struct obd_export *exp, __u64 flags = LDLM_FL_HAS_INTENT; CDEBUG(D_DLMTRACE, - "name: %.*s in inode "DFID", intent: %s flags %#Lo\n", - op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), - ldlm_it2str(it->it_op), it->it_flags); + "name: %.*s in inode " DFID ", intent: %s flags %#Lo\n", + op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), + ldlm_it2str(it->it_op), it->it_flags); fid_build_reg_res_name(&op_data->op_fid1, &res_id); req = mdc_intent_getattr_pack(exp, it, op_data); diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c index ac7695a10..4ef3db147 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c @@ -65,9 +65,10 @@ static int mdc_reint(struct ptlrpc_request *request, /* Find and cancel locally locks matched by inode @bits & @mode in the resource * found by @fid. Found locks are added into @cancel list. 
Returns the amount of - * locks added to @cancels list. */ + * locks added to @cancels list. + */ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, - struct list_head *cancels, ldlm_mode_t mode, + struct list_head *cancels, enum ldlm_mode mode, __u64 bits) { struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; @@ -81,14 +82,15 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid, * * This distinguishes from a case when ELC is not supported originally, * when we still want to cancel locks in advance and just cancel them - * locally, without sending any RPC. */ + * locally, without sending any RPC. + */ if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns)) return 0; fid_build_reg_res_name(fid, &res_id); res = ldlm_resource_get(exp->exp_obd->obd_namespace, NULL, &res_id, 0, 0); - if (res == NULL) + if (!res) return 0; LDLM_RESOURCE_ADDREF(res); /* Initialize ibits lock policy. */ @@ -111,8 +113,6 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, int count = 0, rc; __u64 bits; - LASSERT(op_data != NULL); - bits = MDS_INODELOCK_UPDATE; if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) bits |= MDS_INODELOCK_LOOKUP; @@ -123,7 +123,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, &cancels, LCK_EX, bits); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_SETATTR); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -151,10 +151,10 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, ptlrpc_request_set_replen(req); if (mod && (op_data->op_flags & MF_EPOCH_OPEN) && req->rq_import->imp_replayable) { - LASSERT(*mod == NULL); + LASSERT(!*mod); *mod = obd_mod_alloc(); - if (*mod == NULL) { + if (!*mod) { DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data"); } else { req->rq_replay = 1; @@ -181,8 +181,6 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, 
epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(epoch != NULL); - LASSERT(body != NULL); epoch->handle = body->handle; epoch->ioepoch = body->ioepoch; req->rq_replay_cb = mdc_replay_open; @@ -195,7 +193,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data, *request = req; if (rc && req->rq_commit_cb) { /* Put an extra reference on \var mod on error case. */ - if (mod != NULL && *mod != NULL) + if (mod && *mod) obd_mod_put(*mod); req->rq_commit_cb(req); } @@ -237,7 +235,7 @@ rebuild: req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_CREATE_RMT_ACL); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -262,7 +260,8 @@ rebuild: ptlrpc_request_set_replen(req); /* ask ptlrpc not to resend on EINPROGRESS since we have our own retry - * logic here */ + * logic here + */ req->rq_no_retry_einprogress = 1; if (resends) { @@ -280,7 +279,8 @@ rebuild: goto resend; } else if (rc == -EINPROGRESS) { /* Retry create infinitely until succeed or get other - * error code. */ + * error code. 
+ */ ptlrpc_req_finished(req); resends++; @@ -308,7 +308,7 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, struct ptlrpc_request *req = *request; int count = 0, rc; - LASSERT(req == NULL); + LASSERT(!req); if ((op_data->op_flags & MF_MDC_CANCEL_FID1) && (fid_is_sane(&op_data->op_fid1)) && @@ -324,7 +324,7 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data, MDS_INODELOCK_FULL); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_UNLINK); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -373,7 +373,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data, MDS_INODELOCK_UPDATE); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -422,14 +422,14 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data, &cancels, LCK_EX, MDS_INODELOCK_LOOKUP); if ((op_data->op_flags & MF_MDC_CANCEL_FID4) && - (fid_is_sane(&op_data->op_fid4))) + (fid_is_sane(&op_data->op_fid4))) count += mdc_resource_get_unused(exp, &op_data->op_fid4, &cancels, LCK_EX, MDS_INODELOCK_FULL); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_RENAME); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c index 57e0fc1e8..b91d3ff18 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c @@ -48,6 +48,7 @@ #include "../include/lprocfs_status.h" #include "../include/lustre_param.h" #include "../include/lustre_log.h" +#include "../include/lustre_kernelcomm.h" #include "mdc_internal.h" @@ -62,7 +63,8 @@ static inline int mdc_queue_wait(struct ptlrpc_request *req) /* mdc_enter_request() ensures that this client has no more * than 
cl_max_rpcs_in_flight RPCs simultaneously inf light - * against an MDT. */ + * against an MDT. + */ rc = mdc_enter_request(cli); if (rc != 0) return rc; @@ -82,7 +84,7 @@ static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid) req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_GETSTATUS, LUSTRE_MDS_VERSION, MDS_GETSTATUS); - if (req == NULL) + if (!req) return -ENOMEM; mdc_pack_body(req, NULL, 0, 0, -1, 0); @@ -95,7 +97,7 @@ static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid) goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -135,7 +137,7 @@ static int mdc_getattr_common(struct obd_export *exp, /* sanity check for the reply */ body = req_capsule_server_get(pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) return -EPROTO; CDEBUG(D_NET, "mode: %o\n", body->mode); @@ -145,7 +147,7 @@ static int mdc_getattr_common(struct obd_export *exp, eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD, body->eadatasize); - if (eadata == NULL) + if (!eadata) return -EPROTO; } @@ -155,7 +157,7 @@ static int mdc_getattr_common(struct obd_export *exp, LASSERT(client_is_remote(exp)); perm = req_capsule_server_swab_get(pill, &RMF_ACL, lustre_swab_mdt_remote_perm); - if (perm == NULL) + if (!perm) return -EPROTO; } @@ -163,7 +165,7 @@ static int mdc_getattr_common(struct obd_export *exp, } static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) + struct ptlrpc_request **request) { struct ptlrpc_request *req; int rc; @@ -175,7 +177,7 @@ static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data, } *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR); @@ -205,7 +207,7 @@ static int mdc_getattr(struct obd_export *exp, struct md_op_data 
*op_data, } static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data, - struct ptlrpc_request **request) + struct ptlrpc_request **request) { struct ptlrpc_request *req; int rc; @@ -213,7 +215,7 @@ static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR_NAME); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, @@ -260,7 +262,7 @@ static int mdc_is_subdir(struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION, MDS_IS_SUBDIR); - if (req == NULL) + if (!req) return -ENOMEM; mdc_is_subdir_pack(req, pfid, cfid, 0); @@ -289,7 +291,7 @@ static int mdc_xattr_common(struct obd_export *exp, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt); - if (req == NULL) + if (!req) return -ENOMEM; if (xattr_name) { @@ -424,7 +426,7 @@ static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md) return -EPROTO; acl = posix_acl_from_xattr(&init_user_ns, buf, body->aclsize); - if (acl == NULL) + if (!acl) return 0; if (IS_ERR(acl)) { @@ -460,7 +462,6 @@ static int mdc_get_lustre_md(struct obd_export *exp, memset(md, 0, sizeof(*md)); md->body = req_capsule_server_get(pill, &RMF_MDT_BODY); - LASSERT(md->body != NULL); if (md->body->valid & OBD_MD_FLEASIZE) { int lmmsize; @@ -592,17 +593,16 @@ void mdc_replay_open(struct ptlrpc_request *req) struct lustre_handle old; struct mdt_body *body; - if (mod == NULL) { + if (!mod) { DEBUG_REQ(D_ERROR, req, "Can't properly replay without open data."); return; } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); och = mod->mod_och; - if (och != NULL) { + if (och) { struct lustre_handle *file_fh; LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC); @@ -614,7 +614,7 @@ void mdc_replay_open(struct ptlrpc_request *req) *file_fh = body->handle; 
} close_req = mod->mod_close_req; - if (close_req != NULL) { + if (close_req) { __u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg); struct mdt_ioepoch *epoch; @@ -623,7 +623,7 @@ void mdc_replay_open(struct ptlrpc_request *req) &RMF_MDT_EPOCH); LASSERT(epoch); - if (och != NULL) + if (och) LASSERT(!memcmp(&old, &epoch->handle, sizeof(old))); DEBUG_REQ(D_HA, close_req, "updating close body with new fh"); epoch->handle = body->handle; @@ -634,7 +634,7 @@ void mdc_commit_open(struct ptlrpc_request *req) { struct md_open_data *mod = req->rq_cb_data; - if (mod == NULL) + if (!mod) return; /** @@ -674,15 +674,15 @@ int mdc_set_open_replay_data(struct obd_export *exp, rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT); body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY); - LASSERT(rec != NULL); + LASSERT(rec); /* Incoming message in my byte order (it's been swabbed). */ /* Outgoing messages always in my byte order. */ - LASSERT(body != NULL); + LASSERT(body); /* Only if the import is replayable, we set replay_open data */ if (och && imp->imp_replayable) { mod = obd_mod_alloc(); - if (mod == NULL) { + if (!mod) { DEBUG_REQ(D_ERROR, open_req, "Can't allocate md_open_data"); return 0; @@ -748,11 +748,11 @@ static int mdc_clear_open_replay_data(struct obd_export *exp, * It is possible to not have \var mod in a case of eviction between * lookup and ll_file_open(). 
**/ - if (mod == NULL) + if (!mod) return 0; LASSERT(mod != LP_POISON); - LASSERT(mod->mod_open_req != NULL); + LASSERT(mod->mod_open_req); mdc_free_open(mod); mod->mod_och = NULL; @@ -803,7 +803,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE); @@ -814,13 +814,14 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, /* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a * portal whose threads are not taking any DLM locks and are therefore - * always progressing */ + * always progressing + */ req->rq_request_portal = MDS_READPAGE_PORTAL; ptlrpc_at_set_req_timeout(req); /* Ensure that this close's handle is fixed up during replay. */ - if (likely(mod != NULL)) { - LASSERTF(mod->mod_open_req != NULL && + if (likely(mod)) { + LASSERTF(mod->mod_open_req && mod->mod_open_req->rq_type != LI_POISON, "POISONED open %p!\n", mod->mod_open_req); @@ -828,7 +829,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, DEBUG_REQ(D_HA, mod->mod_open_req, "matched open"); /* We no longer want to preserve this open for replay even - * though the open was committed. b=3632, b=3633 */ + * though the open was committed. 
b=3632, b=3633 + */ spin_lock(&mod->mod_open_req->rq_lock); mod->mod_open_req->rq_replay = 0; spin_unlock(&mod->mod_open_req->rq_lock); @@ -850,7 +852,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, rc = ptlrpc_queue_wait(req); mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL); - if (req->rq_repmsg == NULL) { + if (!req->rq_repmsg) { CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req, req->rq_status); if (rc == 0) @@ -866,7 +868,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, rc = -rc; } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - if (body == NULL) + if (!body) rc = -EPROTO; } else if (rc == -ESTALE) { /** @@ -876,7 +878,6 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, */ if (mod) { DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc); - LASSERT(mod->mod_open_req != NULL); if (mod->mod_open_req->rq_committed) rc = 0; } @@ -886,7 +887,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data, if (rc != 0) mod->mod_close_req = NULL; /* Since now, mod is accessed through open_req only, - * thus close req does not keep a reference on mod anymore. */ + * thus close req does not keep a reference on mod anymore. 
+ */ obd_mod_put(mod); } *request = req; @@ -903,7 +905,7 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_DONE_WRITING); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING); @@ -912,15 +914,16 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, return rc; } - if (mod != NULL) { - LASSERTF(mod->mod_open_req != NULL && + if (mod) { + LASSERTF(mod->mod_open_req && mod->mod_open_req->rq_type != LI_POISON, "POISONED setattr %p!\n", mod->mod_open_req); mod->mod_close_req = req; DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr"); /* We no longer want to preserve this setattr for replay even - * though the open was committed. b=3632, b=3633 */ + * though the open was committed. b=3632, b=3633 + */ spin_lock(&mod->mod_open_req->rq_lock); mod->mod_open_req->rq_replay = 0; spin_unlock(&mod->mod_open_req->rq_lock); @@ -940,7 +943,6 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, * Let's check if mod exists and return no error in that case */ if (mod) { - LASSERT(mod->mod_open_req != NULL); if (mod->mod_open_req->rq_committed) rc = 0; } @@ -949,11 +951,12 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data, if (mod) { if (rc != 0) mod->mod_close_req = NULL; - LASSERT(mod->mod_open_req != NULL); + LASSERT(mod->mod_open_req); mdc_free_open(mod); /* Since now, mod is accessed through setattr req only, - * thus DW req does not keep a reference on mod anymore. */ + * thus DW req does not keep a reference on mod anymore. 
+ */ obd_mod_put(mod); } @@ -978,7 +981,7 @@ static int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data, restart_bulk: req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE); @@ -992,17 +995,17 @@ restart_bulk: desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, 1, BULK_PUT_SINK, MDS_BULK_PORTAL); - if (desc == NULL) { + if (!desc) { ptlrpc_request_free(req); return -ENOMEM; } /* NB req now owns desc and will free it when it gets freed */ for (i = 0; i < op_data->op_npages; i++) - ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); + ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE); mdc_readdir_pack(req, op_data->op_offset, - PAGE_CACHE_SIZE * op_data->op_npages, + PAGE_SIZE * op_data->op_npages, &op_data->op_fid1); ptlrpc_request_set_replen(req); @@ -1033,8 +1036,8 @@ restart_bulk: if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) { CERROR("Unexpected # bytes transferred: %d (%ld expected)\n", - req->rq_bulk->bd_nob_transferred, - PAGE_CACHE_SIZE * op_data->op_npages); + req->rq_bulk->bd_nob_transferred, + PAGE_SIZE * op_data->op_npages); ptlrpc_req_finished(req); return -EPROTO; } @@ -1066,7 +1069,7 @@ static int mdc_statfs(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS, LUSTRE_MDS_VERSION, MDS_STATFS); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto output; } @@ -1088,7 +1091,7 @@ static int mdc_statfs(const struct lu_env *env, } msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (msfs == NULL) { + if (!msfs) { rc = -EPROTO; goto out; } @@ -1161,7 +1164,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp, req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS, LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1170,7 +1173,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp, 
/* Copy hsm_progress struct */ req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS); - if (req_hpk == NULL) { + if (!req_hpk) { rc = -EPROTO; goto out; } @@ -1195,7 +1198,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives) req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER, LUSTRE_MDS_VERSION, MDS_HSM_CT_REGISTER); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1205,7 +1208,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives) /* Copy hsm_progress struct */ archive_mask = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_ARCHIVE); - if (archive_mask == NULL) { + if (!archive_mask) { rc = -EPROTO; goto out; } @@ -1230,7 +1233,7 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_HSM_ACTION); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION); @@ -1250,7 +1253,7 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp, req_hca = req_capsule_server_get(&req->rq_pill, &RMF_MDS_HSM_CURRENT_ACTION); - if (req_hca == NULL) { + if (!req_hca) { rc = -EPROTO; goto out; } @@ -1270,7 +1273,7 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp) req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER, LUSTRE_MDS_VERSION, MDS_HSM_CT_UNREGISTER); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1295,7 +1298,7 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_HSM_STATE_GET); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET); @@ -1314,7 +1317,7 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp, goto out; req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE); - if (req_hus == NULL) { + if (!req_hus) { rc = -EPROTO; goto out; } @@ -1336,7 +1339,7 @@ static 
int mdc_ioc_hsm_state_set(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_HSM_STATE_SET); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET); @@ -1350,7 +1353,7 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp, /* Copy states */ req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET); - if (req_hss == NULL) { + if (!req_hss) { rc = -EPROTO; goto out; } @@ -1375,7 +1378,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, int rc; req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1396,7 +1399,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, /* Copy hsm_request struct */ req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST); - if (req_hr == NULL) { + if (!req_hr) { rc = -EPROTO; goto out; } @@ -1404,7 +1407,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, /* Copy hsm_user_item structs */ req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM); - if (req_hui == NULL) { + if (!req_hui) { rc = -EPROTO; goto out; } @@ -1413,7 +1416,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp, /* Copy opaque field */ req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA); - if (req_opaque == NULL) { + if (!req_opaque) { rc = -EPROTO; goto out; } @@ -1512,7 +1515,7 @@ static int mdc_changelog_send_thread(void *csdata) /* Set up the remote catalog handle */ ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT); - if (ctxt == NULL) { + if (!ctxt) { rc = -ENOENT; goto out; } @@ -1553,6 +1556,7 @@ static int mdc_ioc_changelog_send(struct obd_device *obd, struct ioc_changelog *icc) { struct changelog_show *cs; + struct task_struct *task; int rc; /* Freed in mdc_changelog_send_thread */ @@ -1570,15 +1574,20 @@ static int mdc_ioc_changelog_send(struct obd_device *obd, * New thread because we should return to user 
app before * writing into our pipe */ - rc = PTR_ERR(kthread_run(mdc_changelog_send_thread, cs, - "mdc_clg_send_thread")); - if (!IS_ERR_VALUE(rc)) { - CDEBUG(D_CHANGELOG, "start changelog thread\n"); - return 0; + task = kthread_run(mdc_changelog_send_thread, cs, + "mdc_clg_send_thread"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("%s: can't start changelog thread: rc = %d\n", + obd->obd_name, rc); + kfree(cs); + } else { + rc = 0; + CDEBUG(D_CHANGELOG, "%s: started changelog thread\n", + obd->obd_name); } CERROR("Failed to start changelog thread: %d\n", rc); - kfree(cs); return rc; } @@ -1596,7 +1605,7 @@ static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION, MDS_QUOTACHECK); - if (req == NULL) + if (!req) return -ENOMEM; body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -1605,7 +1614,8 @@ static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp, ptlrpc_request_set_replen(req); /* the next poll will find -ENODATA, that means quotacheck is - * going on */ + * going on + */ cli->cl_qchk_stat = -ENODATA; rc = ptlrpc_queue_wait(req); if (rc) @@ -1640,7 +1650,7 @@ static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION, MDS_QUOTACTL); - if (req == NULL) + if (!req) return -ENOMEM; oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -1694,7 +1704,7 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SWAP_LAYOUTS); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -1721,7 +1731,7 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp, } static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg) + void *karg, void __user 
*uarg) { struct obd_device *obd = exp->exp_obd; struct obd_ioctl_data *data = karg; @@ -1729,7 +1739,8 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, int rc; if (!try_module_get(THIS_MODULE)) { - CERROR("Can't get module. Is it alive?"); + CERROR("%s: cannot get module '%s'\n", obd->obd_name, + module_name(THIS_MODULE)); return -EINVAL; } switch (cmd) { @@ -1805,7 +1816,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, /* copy UUID */ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd), min_t(size_t, data->ioc_plen2, - sizeof(struct obd_uuid)))) { + sizeof(struct obd_uuid)))) { rc = -EFAULT; goto out; } @@ -1818,7 +1829,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, if (copy_to_user(data->ioc_pbuf1, &stat_buf, min_t(size_t, data->ioc_plen1, - sizeof(stat_buf)))) { + sizeof(stat_buf)))) { rc = -EFAULT; goto out; } @@ -1880,7 +1891,7 @@ static int mdc_get_info_rpc(struct obd_export *exp, int rc = -EINVAL; req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, @@ -1905,7 +1916,8 @@ static int mdc_get_info_rpc(struct obd_export *exp, rc = ptlrpc_queue_wait(req); /* -EREMOTE means the get_info result is partial, and it needs to - * continue on another MDT, see fid2path part in lmv_iocontrol */ + * continue on another MDT, see fid2path part in lmv_iocontrol + */ if (rc == 0 || rc == -EREMOTE) { tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL); memcpy(val, tmp, vallen); @@ -2013,21 +2025,27 @@ static int mdc_hsm_copytool_send(int len, void *val) /** * callback function passed to kuc for re-registering each HSM copytool * running on MDC, after MDT shutdown/recovery. 
- * @param data archive id served by the copytool + * @param data copytool registration data * @param cb_arg callback argument (obd_import) */ -static int mdc_hsm_ct_reregister(__u32 data, void *cb_arg) +static int mdc_hsm_ct_reregister(void *data, void *cb_arg) { + struct kkuc_ct_data *kcd = data; struct obd_import *imp = (struct obd_import *)cb_arg; - __u32 archive = data; int rc; - CDEBUG(D_HA, "recover copytool registration to MDT (archive=%#x)\n", - archive); - rc = mdc_ioc_hsm_ct_register(imp, archive); + if (!kcd || kcd->kcd_magic != KKUC_CT_DATA_MAGIC) + return -EPROTO; + + if (!obd_uuid_equals(&kcd->kcd_uuid, &imp->imp_obd->obd_uuid)) + return 0; + + CDEBUG(D_HA, "%s: recover copytool registration to MDT (archive=%#x)\n", + imp->imp_obd->obd_name, kcd->kcd_archive); + rc = mdc_ioc_hsm_ct_register(imp, kcd->kcd_archive); /* ignore error if the copytool is already registered */ - return ((rc != 0) && (rc != -EEXIST)) ? rc : 0; + return (rc == -EEXIST) ? 0 : rc; } static int mdc_set_info_async(const struct lu_env *env, @@ -2133,7 +2151,7 @@ static int mdc_sync(struct obd_export *exp, const struct lu_fid *fid, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC); @@ -2175,7 +2193,7 @@ static int mdc_import_event(struct obd_device *obd, struct obd_import *imp, * Flush current sequence to make client obtain new one * from server in case of disconnect/reconnect. 
*/ - if (cli->cl_seq != NULL) + if (cli->cl_seq) seq_client_flush(cli->cl_seq); rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL); @@ -2238,7 +2256,8 @@ static int mdc_cancel_for_recovery(struct ldlm_lock *lock) /* FIXME: if we ever get into a situation where there are too many * opened files with open locks on a single node, then we really - * should replay these open locks to reget it */ + * should replay these open locks to reget it + */ if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN) return 0; @@ -2422,7 +2441,7 @@ static int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid, *request = NULL; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR); @@ -2519,6 +2538,7 @@ static void /*__exit*/ mdc_exit(void) MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); MODULE_DESCRIPTION("Lustre Metadata Client"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); module_init(mdc_init); diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c index ab4800c20..3924b095b 100644 --- a/drivers/staging/lustre/lustre/mgc/mgc_request.c +++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c @@ -90,7 +90,8 @@ static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id, int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type) { /* fsname is at most 8 chars long, maybe contain "-". - * e.g. "lustre", "SUN-000" */ + * e.g. "lustre", "SUN-000" + */ return mgc_name2resid(fsname, strlen(fsname), res_id, type); } EXPORT_SYMBOL(mgc_fsname2resid); @@ -102,7 +103,8 @@ static int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type /* logname consists of "fsname-nodetype". * e.g. 
"lustre-MDT0001", "SUN-000-client" - * there is an exception: llog "params" */ + * there is an exception: llog "params" + */ name_end = strrchr(logname, '-'); if (!name_end) len = strlen(logname); @@ -125,7 +127,8 @@ static int config_log_get(struct config_llog_data *cld) } /* Drop a reference to a config log. When no longer referenced, - we can free the config log data */ + * we can free the config log data + */ static void config_log_put(struct config_llog_data *cld) { CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname, @@ -162,7 +165,7 @@ struct config_llog_data *config_log_find(char *logname, struct config_llog_data *found = NULL; void *instance; - LASSERT(logname != NULL); + LASSERT(logname); instance = cfg ? cfg->cfg_instance : NULL; spin_lock(&config_list_lock); @@ -242,17 +245,18 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd, return cld; } -static struct config_llog_data *config_recover_log_add(struct obd_device *obd, - char *fsname, - struct config_llog_instance *cfg, - struct super_block *sb) +static struct config_llog_data * +config_recover_log_add(struct obd_device *obd, char *fsname, + struct config_llog_instance *cfg, + struct super_block *sb) { struct config_llog_instance lcfg = *cfg; struct config_llog_data *cld; char logname[32]; /* we have to use different llog for clients and mdts for cmd - * where only clients are notified if one of cmd server restarts */ + * where only clients are notified if one of cmd server restarts + */ LASSERT(strlen(fsname) < sizeof(logname) / 2); strcpy(logname, fsname); LASSERT(lcfg.cfg_instance); @@ -262,8 +266,9 @@ static struct config_llog_data *config_recover_log_add(struct obd_device *obd, return cld; } -static struct config_llog_data *config_params_log_add(struct obd_device *obd, - struct config_llog_instance *cfg, struct super_block *sb) +static struct config_llog_data * +config_params_log_add(struct obd_device *obd, + struct config_llog_instance *cfg, struct super_block *sb) { struct 
config_llog_instance lcfg = *cfg; struct config_llog_data *cld; @@ -300,7 +305,7 @@ static int config_log_add(struct obd_device *obd, char *logname, * <fsname>-sptlrpc. multiple regular logs may share one sptlrpc log. */ ptr = strrchr(logname, '-'); - if (ptr == NULL || ptr - logname > 8) { + if (!ptr || ptr - logname > 8) { CERROR("logname %s is too long\n", logname); return -EINVAL; } @@ -309,7 +314,7 @@ static int config_log_add(struct obd_device *obd, char *logname, strcpy(seclogname + (ptr - logname), "-sptlrpc"); sptlrpc_cld = config_log_find(seclogname, NULL); - if (sptlrpc_cld == NULL) { + if (!sptlrpc_cld) { sptlrpc_cld = do_config_log_add(obd, seclogname, CONFIG_T_SPTLRPC, NULL, NULL); if (IS_ERR(sptlrpc_cld)) { @@ -339,7 +344,16 @@ static int config_log_add(struct obd_device *obd, char *logname, LASSERT(lsi->lsi_lmd); if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) { struct config_llog_data *recover_cld; - *strrchr(seclogname, '-') = 0; + + ptr = strrchr(seclogname, '-'); + if (ptr) { + *ptr = 0; + } else { + CERROR("%s: sptlrpc log name not correct, %s: rc = %d\n", + obd->obd_name, seclogname, -EINVAL); + config_log_put(cld); + return -EINVAL; + } recover_cld = config_recover_log_add(obd, seclogname, cfg, sb); if (IS_ERR(recover_cld)) { rc = PTR_ERR(recover_cld); @@ -376,7 +390,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg) int rc = 0; cld = config_log_find(logname, cfg); - if (cld == NULL) + if (!cld) return -ENOENT; mutex_lock(&cld->cld_lock); @@ -450,16 +464,16 @@ int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data) ocd = &imp->imp_connect_data; seq_printf(m, "imperative_recovery: %s\n", - OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED"); + OCD_HAS_FLAG(ocd, IMP_RECOV) ? 
"ENABLED" : "DISABLED"); seq_printf(m, "client_state:\n"); spin_lock(&config_list_lock); list_for_each_entry(cld, &config_llog_list, cld_list_chain) { - if (cld->cld_recover == NULL) + if (!cld->cld_recover) continue; - seq_printf(m, " - { client: %s, nidtbl_version: %u }\n", - cld->cld_logname, - cld->cld_recover->cld_cfg.cfg_last_idx); + seq_printf(m, " - { client: %s, nidtbl_version: %u }\n", + cld->cld_logname, + cld->cld_recover->cld_cfg.cfg_last_idx); } spin_unlock(&config_list_lock); @@ -483,8 +497,9 @@ static void do_requeue(struct config_llog_data *cld) LASSERT(atomic_read(&cld->cld_refcount) > 0); /* Do not run mgc_process_log on a disconnected export or an - export which is being disconnected. Take the client - semaphore to make the check non-racy. */ + * export which is being disconnected. Take the client + * semaphore to make the check non-racy. + */ down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem); if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) { CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname); @@ -529,8 +544,9 @@ static int mgc_requeue_thread(void *data) } /* Always wait a few seconds to allow the server who - caused the lock revocation to finish its setup, plus some - random so everyone doesn't try to reconnect at once. */ + * caused the lock revocation to finish its setup, plus some + * random so everyone doesn't try to reconnect at once. 
+ */ to = MGC_TIMEOUT_MIN_SECONDS * HZ; to += rand * HZ / 100; /* rand is centi-seconds */ lwi = LWI_TIMEOUT(to, NULL, NULL); @@ -549,8 +565,7 @@ static int mgc_requeue_thread(void *data) spin_lock(&config_list_lock); rq_state &= ~RQ_PRECLEANUP; - list_for_each_entry(cld, &config_llog_list, - cld_list_chain) { + list_for_each_entry(cld, &config_llog_list, cld_list_chain) { if (!cld->cld_lostlock) continue; @@ -559,7 +574,8 @@ static int mgc_requeue_thread(void *data) LASSERT(atomic_read(&cld->cld_refcount) > 0); /* Whether we enqueued again or not in mgc_process_log, - * we're done with the ref from the old enqueue */ + * we're done with the ref from the old enqueue + */ if (cld_prev) config_log_put(cld_prev); cld_prev = cld; @@ -575,7 +591,8 @@ static int mgc_requeue_thread(void *data) config_log_put(cld_prev); /* break after scanning the list so that we can drop - * refcount to losing lock clds */ + * refcount to losing lock clds + */ if (unlikely(stopped)) { spin_lock(&config_list_lock); break; @@ -598,7 +615,8 @@ static int mgc_requeue_thread(void *data) } /* Add a cld to the list to requeue. Start the requeue thread if needed. - We are responsible for dropping the config log reference from here on out. */ + * We are responsible for dropping the config log reference from here on out. 
+ */ static void mgc_requeue_add(struct config_llog_data *cld) { CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n", @@ -635,7 +653,8 @@ static int mgc_llog_init(const struct lu_env *env, struct obd_device *obd) int rc; /* setup only remote ctxt, the local disk context is switched per each - * filesystem during mgc_fs_setup() */ + * filesystem during mgc_fs_setup() + */ rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_REPL_CTXT, obd, &llog_client_ops); if (rc) @@ -697,7 +716,8 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) static int mgc_cleanup(struct obd_device *obd) { /* COMPAT_146 - old config logs may have added profiles we don't - know about */ + * know about + */ if (obd->obd_type->typ_refcnt <= 1) /* Only for the last mgc */ class_del_profiles(); @@ -711,6 +731,7 @@ static int mgc_cleanup(struct obd_device *obd) static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) { struct lprocfs_static_vars lvars = { NULL }; + struct task_struct *task; int rc; ptlrpcd_addref(); @@ -734,10 +755,10 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg) init_waitqueue_head(&rq_waitq); /* start requeue thread */ - rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL, - "ll_cfg_requeue")); - if (IS_ERR_VALUE(rc)) { - CERROR("%s: Cannot start requeue thread (%d),no more log updates!\n", + task = kthread_run(mgc_requeue_thread, NULL, "ll_cfg_requeue"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("%s: cannot start requeue thread: rc = %d; no more log updates\n", obd->obd_name, rc); goto err_cleanup; } @@ -793,7 +814,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, break; } /* Make sure not to re-enqueue when the mgc is stopping - (we get called from client_disconnect_export) */ + * (we get called from client_disconnect_export) + */ if (!lock->l_conn_export || !lock->l_conn_export->exp_obd->u.cli.cl_conn_count) { CDEBUG(D_MGC, "log %.8s: disconnecting, won't 
requeue\n", @@ -815,7 +837,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, /* Not sure where this should go... */ /* This is the timeout value for MGS_CONNECT request plus a ping interval, such - * that we can have a chance to try the secondary MGS if any. */ + * that we can have a chance to try the secondary MGS if any. + */ #define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \ + PING_INTERVAL) #define MGC_TARGET_REG_LIMIT 10 @@ -879,11 +902,12 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm, cld->cld_resid.name[0]); /* We need a callback for every lockholder, so don't try to - ldlm_lock_match (see rev 1.1.2.11.2.47) */ + * ldlm_lock_match (see rev 1.1.2.11.2.47) + */ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION, LDLM_ENQUEUE); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0); @@ -894,7 +918,8 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm, rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags, NULL, 0, LVB_T_NONE, lockh, 0); /* A failed enqueue should still call the mgc_blocking_ast, - where it will be requeued if needed ("grant failed"). */ + * where it will be requeued if needed ("grant failed"). 
+ */ ptlrpc_req_finished(req); return rc; } @@ -921,7 +946,7 @@ static int mgc_target_register(struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION, MGS_TARGET_REG); - if (req == NULL) + if (!req) return -ENOMEM; req_mti = req_capsule_client_get(&req->rq_pill, &RMF_MGS_TARGET_INFO); @@ -950,8 +975,8 @@ static int mgc_target_register(struct obd_export *exp, } static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp, - u32 keylen, void *key, u32 vallen, - void *val, struct ptlrpc_request_set *set) + u32 keylen, void *key, u32 vallen, + void *val, struct ptlrpc_request_set *set) { int rc = -EINVAL; @@ -1088,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd, } enum { - CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT), + CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT), CONFIG_READ_NRPAGES = 4 }; @@ -1109,22 +1134,22 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, int rc = 0; int off = 0; - LASSERT(cfg->cfg_instance != NULL); + LASSERT(cfg->cfg_instance); LASSERT(cfg->cfg_sb == cfg->cfg_instance); - inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + inst = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!inst) return -ENOMEM; - pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance); - if (pos >= PAGE_CACHE_SIZE) { + pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance); + if (pos >= PAGE_SIZE) { kfree(inst); return -E2BIG; } ++pos; buf = inst + pos; - bufsz = PAGE_CACHE_SIZE - pos; + bufsz = PAGE_SIZE - pos; while (datalen > 0) { int entry_len = sizeof(*entry); @@ -1156,7 +1181,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* Keep this swab for normal mixed endian handling. 
LU-1644 */ if (mne_swab) lustre_swab_mgs_nidtbl_entry(entry); - if (entry->mne_length > PAGE_CACHE_SIZE) { + if (entry->mne_length > PAGE_SIZE) { CERROR("MNE too large (%u)\n", entry->mne_length); break; } @@ -1195,7 +1220,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* lustre-OST0001-osc-<instance #> */ strcpy(obdname, cld->cld_logname); cname = strrchr(obdname, '-'); - if (cname == NULL) { + if (!cname) { CERROR("mgc %s: invalid logname %s\n", mgc->obd_name, obdname); break; @@ -1212,7 +1237,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* find the obd by obdname */ obd = class_name2obd(obdname); - if (obd == NULL) { + if (!obd) { CDEBUG(D_INFO, "mgc %s: cannot find obdname %s\n", mgc->obd_name, obdname); rc = 0; @@ -1227,7 +1252,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, uuid = buf + pos; down_read(&obd->u.cli.cl_sem); - if (obd->u.cli.cl_import == NULL) { + if (!obd->u.cli.cl_import) { /* client does not connect to the OST yet */ up_read(&obd->u.cli.cl_sem); rc = 0; @@ -1257,7 +1282,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, rc = -ENOMEM; lcfg = lustre_cfg_new(LCFG_PARAM, &bufs); - if (lcfg == NULL) { + if (IS_ERR(lcfg)) { CERROR("mgc: cannot allocate memory\n"); break; } @@ -1309,14 +1334,14 @@ static int mgc_process_recover_log(struct obd_device *obd, nrpages = CONFIG_READ_NRPAGES_INIT; pages = kcalloc(nrpages, sizeof(*pages), GFP_KERNEL); - if (pages == NULL) { + if (!pages) { rc = -ENOMEM; goto out; } for (i = 0; i < nrpages; i++) { pages[i] = alloc_page(GFP_KERNEL); - if (pages[i] == NULL) { + if (!pages[i]) { rc = -ENOMEM; goto out; } @@ -1327,7 +1352,7 @@ again: LASSERT(mutex_is_locked(&cld->cld_lock)); req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp), &RQF_MGS_CONFIG_READ); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -1338,7 +1363,6 @@ again: /* pack request */ body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY); - LASSERT(body != 
NULL); LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname)); if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name)) >= sizeof(body->mcb_name)) { @@ -1347,19 +1371,19 @@ again: } body->mcb_offset = cfg->cfg_last_idx + 1; body->mcb_type = cld->cld_type; - body->mcb_bits = PAGE_CACHE_SHIFT; + body->mcb_bits = PAGE_SHIFT; body->mcb_units = nrpages; /* allocate bulk transfer descriptor */ desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK, MGS_BULK_PORTAL); - if (desc == NULL) { + if (!desc) { rc = -ENOMEM; goto out; } for (i = 0; i < nrpages; i++) - ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); + ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE); ptlrpc_request_set_replen(req); rc = ptlrpc_queue_wait(req); @@ -1373,7 +1397,8 @@ again: } /* always update the index even though it might have errors with - * handling the recover logs */ + * handling the recover logs + */ cfg->cfg_last_idx = res->mcr_offset; eof = res->mcr_offset == res->mcr_size; @@ -1386,7 +1411,7 @@ again: goto out; } - if (ealen > nrpages << PAGE_CACHE_SHIFT) { + if (ealen > nrpages << PAGE_SHIFT) { rc = -EINVAL; goto out; } @@ -1400,7 +1425,8 @@ again: mne_swab = !!ptlrpc_rep_need_swab(req); #if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0) /* This import flag means the server did an extra swab of IR MNE - * records (fixed in LU-1252), reverse it here if needed. LU-1644 */ + * records (fixed in LU-1252), reverse it here if needed. 
LU-1644 + */ if (unlikely(req->rq_import->imp_need_mne_swab)) mne_swab = !mne_swab; #else @@ -1413,7 +1439,7 @@ again: ptr = kmap(pages[i]); rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr, - min_t(int, ealen, PAGE_CACHE_SIZE), + min_t(int, ealen, PAGE_SIZE), mne_swab); kunmap(pages[i]); if (rc2 < 0) { @@ -1422,7 +1448,7 @@ again: break; } - ealen -= PAGE_CACHE_SIZE; + ealen -= PAGE_SIZE; } out: @@ -1434,7 +1460,7 @@ out: if (pages) { for (i = 0; i < nrpages; i++) { - if (pages[i] == NULL) + if (!pages[i]) break; __free_page(pages[i]); } @@ -1489,7 +1515,8 @@ static int mgc_process_cfg_log(struct obd_device *mgc, /* logname and instance info should be the same, so use our * copy of the instance for the update. The cfg_last_idx will - * be updated here. */ + * be updated here. + */ rc = class_config_parse_llog(env, ctxt, cld->cld_logname, &cld->cld_cfg); @@ -1529,9 +1556,10 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) LASSERT(cld); /* I don't want multiple processes running process_log at once -- - sounds like badness. It actually might be fine, as long as - we're not trying to update from the same log - simultaneously (in which case we should use a per-log sem.) */ + * sounds like badness. It actually might be fine, as long as + * we're not trying to update from the same log + * simultaneously (in which case we should use a per-log sem.) + */ mutex_lock(&cld->cld_lock); if (cld->cld_stopping) { mutex_unlock(&cld->cld_lock); @@ -1556,7 +1584,8 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld) CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl); /* mark cld_lostlock so that it will requeue - * after MGC becomes available. */ + * after MGC becomes available. 
+ */ cld->cld_lostlock = 1; /* Get extra reference, it will be put in requeue thread */ config_log_get(cld); @@ -1635,18 +1664,19 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf) if (rc) break; cld = config_log_find(logname, cfg); - if (cld == NULL) { + if (!cld) { rc = -ENOENT; break; } /* COMPAT_146 */ /* FIXME only set this for old logs! Right now this forces - us to always skip the "inside markers" check */ + * us to always skip the "inside markers" check + */ cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146; rc = mgc_process_log(obd, cld); - if (rc == 0 && cld->cld_recover != NULL) { + if (rc == 0 && cld->cld_recover) { if (OCD_HAS_FLAG(&obd->u.cli.cl_import-> imp_connect_data, IMP_RECOV)) { rc = mgc_process_log(obd, cld->cld_recover); @@ -1660,7 +1690,7 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf) CERROR("Cannot process recover llog %d\n", rc); } - if (rc == 0 && cld->cld_params != NULL) { + if (rc == 0 && cld->cld_params) { rc = mgc_process_log(obd, cld->cld_params); if (rc == -ENOENT) { CDEBUG(D_MGC, @@ -1727,6 +1757,7 @@ static void /*__exit*/ mgc_exit(void) MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); MODULE_DESCRIPTION("Lustre Management Client"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); module_init(mgc_init); diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile index acc685712..c404eb386 100644 --- a/drivers/staging/lustre/lustre/obdclass/Makefile +++ b/drivers/staging/lustre/lustre/obdclass/Makefile @@ -2,8 +2,8 @@ obj-$(CONFIG_LUSTRE_FS) += obdclass.o obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \ llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \ - genops.o uuid.o lprocfs_status.o \ - lustre_handles.o lustre_peer.o \ - statfs_pack.o obdo.o obd_config.o obd_mount.o \ - lu_object.o cl_object.o \ - cl_page.o cl_lock.o cl_io.o lu_ref.o acl.o lprocfs_counters.o + genops.o uuid.o lprocfs_status.o lprocfs_counters.o \ + lustre_handles.o lustre_peer.o statfs_pack.o \ + obdo.o obd_config.o obd_mount.o lu_object.o lu_ref.o \ + cl_object.o cl_page.o cl_lock.o cl_io.o \ + acl.o kernelcomm.o diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c index 49ba8851c..0e02ae97b 100644 --- a/drivers/staging/lustre/lustre/obdclass/acl.c +++ b/drivers/staging/lustre/lustre/obdclass/acl.c @@ -104,7 +104,7 @@ static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header, return old_size; new = kmemdup(*header, new_size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return -ENOMEM; kfree(*header); @@ -124,7 +124,7 @@ static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header, return 0; new = kmemdup(*header, ext_size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return -ENOMEM; kfree(*header); @@ -149,7 +149,7 @@ lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size) count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr); esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr); new = kzalloc(esize, GFP_NOFS); - if 
(unlikely(new == NULL)) + if (unlikely(!new)) return ERR_PTR(-ENOMEM); new->a_count = cpu_to_le32(count); @@ -180,7 +180,7 @@ int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size, return -EINVAL; new = kzalloc(size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return -ENOMEM; new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION); @@ -300,7 +300,7 @@ lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size, ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr); new = kzalloc(ext_size, GFP_NOFS); - if (unlikely(new == NULL)) + if (unlikely(!new)) return ERR_PTR(-ENOMEM); for (i = 0, j = 0; i < posix_count; i++) { diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c index 63246ba36..f5128b4f1 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_io.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c @@ -44,6 +44,7 @@ #include "../include/obd_support.h" #include "../include/lustre_fid.h" #include <linux/list.h> +#include <linux/sched.h> #include "../include/cl_object.h" #include "cl_internal.h" @@ -93,7 +94,7 @@ static int cl_io_invariant(const struct cl_io *io) * CIS_IO_GOING. */ ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING || - (io->ci_state == CIS_LOCKED && up != NULL)); + (io->ci_state == CIS_LOCKED && up)); } /** @@ -111,7 +112,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io) slice = container_of(io->ci_layers.prev, struct cl_io_slice, cis_linkage); list_del_init(&slice->cis_linkage); - if (slice->cis_iop->op[io->ci_type].cio_fini != NULL) + if (slice->cis_iop->op[io->ci_type].cio_fini) slice->cis_iop->op[io->ci_type].cio_fini(env, slice); /* * Invalidate slice to catch use after free. 
This assumes that @@ -138,7 +139,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io) case CIT_MISC: /* Check ignore layout change conf */ LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout, - !io->ci_need_restart)); + !io->ci_need_restart)); break; default: LBUG(); @@ -164,7 +165,7 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io, result = 0; cl_object_for_each(scan, obj) { - if (scan->co_ops->coo_io_init != NULL) { + if (scan->co_ops->coo_io_init) { result = scan->co_ops->coo_io_init(env, scan, io); if (result != 0) break; @@ -186,7 +187,7 @@ int cl_io_sub_init(const struct lu_env *env, struct cl_io *io, struct cl_thread_info *info = cl_env_info(env); LASSERT(obj != cl_object_top(obj)); - if (info->clt_current_io == NULL) + if (!info->clt_current_io) info->clt_current_io = io; return cl_io_init0(env, io, iot, obj); } @@ -208,7 +209,7 @@ int cl_io_init(const struct lu_env *env, struct cl_io *io, struct cl_thread_info *info = cl_env_info(env); LASSERT(obj == cl_object_top(obj)); - LASSERT(info->clt_current_io == NULL); + LASSERT(!info->clt_current_io); info->clt_current_io = io; return cl_io_init0(env, io, iot, obj); @@ -224,7 +225,7 @@ int cl_io_rw_init(const struct lu_env *env, struct cl_io *io, enum cl_io_type iot, loff_t pos, size_t count) { LINVRNT(iot == CIT_READ || iot == CIT_WRITE); - LINVRNT(io->ci_obj != NULL); + LINVRNT(io->ci_obj); LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu, "io range: %u [%llu, %llu) %u %u\n", @@ -290,11 +291,11 @@ static void cl_io_locks_sort(struct cl_io *io) prev = NULL; list_for_each_entry_safe(curr, temp, - &io->ci_lockset.cls_todo, - cill_linkage) { - if (prev != NULL) { + &io->ci_lockset.cls_todo, + cill_linkage) { + if (prev) { switch (cl_lock_descr_sort(&prev->cill_descr, - &curr->cill_descr)) { + &curr->cill_descr)) { case 0: /* * IMPOSSIBLE: Identical locks are @@ -305,10 +306,11 @@ static void cl_io_locks_sort(struct cl_io *io) LBUG(); case 1: 
list_move_tail(&curr->cill_linkage, - &prev->cill_linkage); + &prev->cill_linkage); done = 0; continue; /* don't change prev: it's - * still "previous" */ + * still "previous" + */ case -1: /* already in order */ break; } @@ -327,32 +329,31 @@ static void cl_io_locks_sort(struct cl_io *io) int cl_queue_match(const struct list_head *queue, const struct cl_lock_descr *need) { - struct cl_io_lock_link *scan; + struct cl_io_lock_link *scan; - list_for_each_entry(scan, queue, cill_linkage) { - if (cl_lock_descr_match(&scan->cill_descr, need)) - return 1; - } - return 0; + list_for_each_entry(scan, queue, cill_linkage) { + if (cl_lock_descr_match(&scan->cill_descr, need)) + return 1; + } + return 0; } EXPORT_SYMBOL(cl_queue_match); static int cl_queue_merge(const struct list_head *queue, const struct cl_lock_descr *need) { - struct cl_io_lock_link *scan; - - list_for_each_entry(scan, queue, cill_linkage) { - if (cl_lock_descr_cmp(&scan->cill_descr, need)) - continue; - cl_lock_descr_merge(&scan->cill_descr, need); - CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", - scan->cill_descr.cld_mode, scan->cill_descr.cld_start, - scan->cill_descr.cld_end); - return 1; - } - return 0; + struct cl_io_lock_link *scan; + list_for_each_entry(scan, queue, cill_linkage) { + if (cl_lock_descr_cmp(&scan->cill_descr, need)) + continue; + cl_lock_descr_merge(&scan->cill_descr, need); + CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n", + scan->cill_descr.cld_mode, scan->cill_descr.cld_start, + scan->cill_descr.cld_end); + return 1; + } + return 0; } static int cl_lockset_match(const struct cl_lockset *set, @@ -384,8 +385,7 @@ static int cl_lockset_lock_one(const struct lu_env *env, if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) { result = cl_wait(env, lock); if (result == 0) - list_move(&link->cill_linkage, - &set->cls_done); + list_move(&link->cill_linkage, &set->cls_done); } else result = 0; } else @@ -399,11 +399,11 @@ static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io, 
struct cl_lock *lock = link->cill_lock; list_del_init(&link->cill_linkage); - if (lock != NULL) { + if (lock) { cl_lock_release(env, lock, "io", io); link->cill_lock = NULL; } - if (link->cill_fini != NULL) + if (link->cill_fini) link->cill_fini(env, link); } @@ -419,7 +419,8 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io, list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) { if (!cl_lockset_match(set, &link->cill_descr)) { /* XXX some locking to guarantee that locks aren't - * expanded in between. */ + * expanded in between. + */ result = cl_lockset_lock_one(env, io, set, link); if (result != 0) break; @@ -428,12 +429,11 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io, } if (result == 0) { list_for_each_entry_safe(link, temp, - &set->cls_curr, cill_linkage) { + &set->cls_curr, cill_linkage) { lock = link->cill_lock; result = cl_wait(env, lock); if (result == 0) - list_move(&link->cill_linkage, - &set->cls_done); + list_move(&link->cill_linkage, &set->cls_done); else break; } @@ -458,7 +458,7 @@ int cl_io_lock(const struct lu_env *env, struct cl_io *io) LINVRNT(cl_io_invariant(io)); cl_io_for_each(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_lock == NULL) + if (!scan->cis_iop->op[io->ci_type].cio_lock) continue; result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan); if (result != 0) @@ -503,7 +503,7 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io) cl_lock_link_fini(env, io, link); } cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL) + if (scan->cis_iop->op[io->ci_type].cio_unlock) scan->cis_iop->op[io->ci_type].cio_unlock(env, scan); } io->ci_state = CIS_UNLOCKED; @@ -529,7 +529,7 @@ int cl_io_iter_init(const struct lu_env *env, struct cl_io *io) result = 0; cl_io_for_each(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL) + if (!scan->cis_iop->op[io->ci_type].cio_iter_init) continue; result = 
scan->cis_iop->op[io->ci_type].cio_iter_init(env, scan); @@ -556,7 +556,7 @@ void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io) LINVRNT(cl_io_invariant(io)); cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL) + if (scan->cis_iop->op[io->ci_type].cio_iter_fini) scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan); } io->ci_state = CIS_IT_ENDED; @@ -581,7 +581,7 @@ static void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, /* layers have to be notified. */ cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_advance != NULL) + if (scan->cis_iop->op[io->ci_type].cio_advance) scan->cis_iop->op[io->ci_type].cio_advance(env, scan, nob); } @@ -621,7 +621,7 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io, int result; link = kzalloc(sizeof(*link), GFP_NOFS); - if (link != NULL) { + if (link) { link->cill_descr = *descr; link->cill_fini = cl_free_io_lock_link; result = cl_io_lock_add(env, io, link); @@ -648,7 +648,7 @@ int cl_io_start(const struct lu_env *env, struct cl_io *io) io->ci_state = CIS_IO_GOING; cl_io_for_each(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_start == NULL) + if (!scan->cis_iop->op[io->ci_type].cio_start) continue; result = scan->cis_iop->op[io->ci_type].cio_start(env, scan); if (result != 0) @@ -673,7 +673,7 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io) LINVRNT(cl_io_invariant(io)); cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->op[io->ci_type].cio_end != NULL) + if (scan->cis_iop->op[io->ci_type].cio_end) scan->cis_iop->op[io->ci_type].cio_end(env, scan); /* TODO: error handling. 
*/ } @@ -687,7 +687,7 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page) const struct cl_page_slice *slice; slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type); - LINVRNT(slice != NULL); + LINVRNT(slice); return slice; } @@ -759,11 +759,11 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io, * "parallel io" (see CLO_REPEAT loops in cl_lock.c). */ cl_io_for_each(scan, io) { - if (scan->cis_iop->cio_read_page != NULL) { + if (scan->cis_iop->cio_read_page) { const struct cl_page_slice *slice; slice = cl_io_slice_page(scan, page); - LINVRNT(slice != NULL); + LINVRNT(slice); result = scan->cis_iop->cio_read_page(env, scan, slice); if (result != 0) break; @@ -798,7 +798,7 @@ int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io, LASSERT(cl_page_in_io(page, io)); cl_io_for_each_reverse(scan, io) { - if (scan->cis_iop->cio_prepare_write != NULL) { + if (scan->cis_iop->cio_prepare_write) { const struct cl_page_slice *slice; slice = cl_io_slice_page(scan, page); @@ -833,11 +833,11 @@ int cl_io_commit_write(const struct lu_env *env, struct cl_io *io, * state. Better (and more general) way of dealing with such situation * is needed. 
*/ - LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL); + LASSERT(cl_page_is_owned(page, io) || page->cp_parent); LASSERT(cl_page_in_io(page, io)); cl_io_for_each(scan, io) { - if (scan->cis_iop->cio_commit_write != NULL) { + if (scan->cis_iop->cio_commit_write) { const struct cl_page_slice *slice; slice = cl_io_slice_page(scan, page); @@ -872,7 +872,7 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op)); cl_io_for_each(scan, io) { - if (scan->cis_iop->req_op[crt].cio_submit == NULL) + if (!scan->cis_iop->req_op[crt].cio_submit) continue; result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt, queue); @@ -900,7 +900,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, int rc; cl_page_list_for_each(pg, &queue->c2_qin) { - LASSERT(pg->cp_sync_io == NULL); + LASSERT(!pg->cp_sync_io); pg->cp_sync_io = anchor; } @@ -913,14 +913,14 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, * clean pages), count them as completed to avoid infinite * wait. */ - cl_page_list_for_each(pg, &queue->c2_qin) { + cl_page_list_for_each(pg, &queue->c2_qin) { pg->cp_sync_io = NULL; cl_sync_io_note(anchor, 1); - } + } - /* wait for the IO to be finished. */ - rc = cl_sync_io_wait(env, io, &queue->c2_qout, - anchor, timeout); + /* wait for the IO to be finished. 
*/ + rc = cl_sync_io_wait(env, io, &queue->c2_qout, + anchor, timeout); } else { LASSERT(list_empty(&queue->c2_qout.pl_pages)); cl_page_list_for_each(pg, &queue->c2_qin) @@ -1026,7 +1026,7 @@ void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice, { struct list_head *linkage = &slice->cis_linkage; - LASSERT((linkage->prev == NULL && linkage->next == NULL) || + LASSERT((!linkage->prev && !linkage->next) || list_empty(linkage)); list_add_tail(linkage, &io->ci_layers); @@ -1053,8 +1053,9 @@ EXPORT_SYMBOL(cl_page_list_init); void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page) { /* it would be better to check that page is owned by "current" io, but - * it is not passed here. */ - LASSERT(page->cp_owner != NULL); + * it is not passed here. + */ + LASSERT(page->cp_owner); LINVRNT(plist->pl_owner == current); lockdep_off(); @@ -1263,7 +1264,7 @@ EXPORT_SYMBOL(cl_2queue_init_page); */ struct cl_io *cl_io_top(struct cl_io *io) { - while (io->ci_parent != NULL) + while (io->ci_parent) io = io->ci_parent; return io; } @@ -1296,13 +1297,13 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req) LASSERT(list_empty(&req->crq_pages)); LASSERT(req->crq_nrpages == 0); LINVRNT(list_empty(&req->crq_layers)); - LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL)); + LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o)); - if (req->crq_o != NULL) { + if (req->crq_o) { for (i = 0; i < req->crq_nrobjs; ++i) { struct cl_object *obj = req->crq_o[i].ro_obj; - if (obj != NULL) { + if (obj) { lu_object_ref_del_at(&obj->co_lu, &req->crq_o[i].ro_obj_ref, "cl_req", req); @@ -1326,7 +1327,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req, do { list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev); - if (dev->cd_ops->cdo_req_init != NULL) { + if (dev->cd_ops->cdo_req_init) { result = dev->cd_ops->cdo_req_init(env, dev, req); if (result != 0) @@ -1334,7 +1335,7 @@ static int 
cl_req_init(const struct lu_env *env, struct cl_req *req, } } page = page->cp_child; - } while (page != NULL && result == 0); + } while (page && result == 0); return result; } @@ -1351,9 +1352,9 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc) */ while (!list_empty(&req->crq_layers)) { slice = list_entry(req->crq_layers.prev, - struct cl_req_slice, crs_linkage); + struct cl_req_slice, crs_linkage); list_del_init(&slice->crs_linkage); - if (slice->crs_ops->cro_completion != NULL) + if (slice->crs_ops->cro_completion) slice->crs_ops->cro_completion(env, slice, rc); } cl_req_free(env, req); @@ -1371,7 +1372,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, LINVRNT(nr_objects > 0); req = kzalloc(sizeof(*req), GFP_NOFS); - if (req != NULL) { + if (req) { int result; req->crq_type = crt; @@ -1380,7 +1381,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page, req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]), GFP_NOFS); - if (req->crq_o != NULL) { + if (req->crq_o) { req->crq_nrobjs = nr_objects; result = cl_req_init(env, req, page); } else @@ -1408,7 +1409,7 @@ void cl_req_page_add(const struct lu_env *env, page = cl_page_top(page); LASSERT(list_empty(&page->cp_flight)); - LASSERT(page->cp_req == NULL); + LASSERT(!page->cp_req); CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n", req, req->crq_type, req->crq_nrpages); @@ -1418,7 +1419,7 @@ void cl_req_page_add(const struct lu_env *env, page->cp_req = req; obj = cl_object_top(page->cp_obj); for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) { - if (rqo->ro_obj == NULL) { + if (!rqo->ro_obj) { rqo->ro_obj = obj; cl_object_get(obj); lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref, @@ -1463,11 +1464,11 @@ int cl_req_prep(const struct lu_env *env, struct cl_req *req) * of objects. 
*/ for (i = 0; i < req->crq_nrobjs; ++i) - LASSERT(req->crq_o[i].ro_obj != NULL); + LASSERT(req->crq_o[i].ro_obj); result = 0; list_for_each_entry(slice, &req->crq_layers, crs_linkage) { - if (slice->crs_ops->cro_prep != NULL) { + if (slice->crs_ops->cro_prep) { result = slice->crs_ops->cro_prep(env, slice); if (result != 0) break; @@ -1501,9 +1502,8 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, scan = cl_page_at(page, slice->crs_dev->cd_lu_dev.ld_type); - LASSERT(scan != NULL); obj = scan->cpl_obj; - if (slice->crs_ops->cro_attr_set != NULL) + if (slice->crs_ops->cro_attr_set) slice->crs_ops->cro_attr_set(env, slice, obj, attr + i, flags); } @@ -1511,9 +1511,6 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req, } EXPORT_SYMBOL(cl_req_attr_set); -/* XXX complete(), init_completion(), and wait_for_completion(), until they are - * implemented in libcfs. */ -# include <linux/sched.h> /** * Initialize synchronous io wait anchor, for transfer of \a nrpages pages. 
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c index 1836dc014..aec644eb4 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c @@ -96,8 +96,8 @@ static int cl_lock_invariant(const struct lu_env *env, result = atomic_read(&lock->cll_ref) > 0 && cl_lock_invariant_trusted(env, lock); - if (!result && env != NULL) - CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken"); + if (!result && env) + CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n"); return result; } @@ -259,7 +259,7 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock) struct cl_lock_slice *slice; slice = list_entry(lock->cll_layers.next, - struct cl_lock_slice, cls_linkage); + struct cl_lock_slice, cls_linkage); list_del_init(lock->cll_layers.next); slice->cls_ops->clo_fini(env, slice); } @@ -288,7 +288,7 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock) LINVRNT(cl_lock_invariant(env, lock)); obj = lock->cll_descr.cld_obj; - LINVRNT(obj != NULL); + LINVRNT(obj); CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n", atomic_read(&lock->cll_ref), lock, RETIP); @@ -361,8 +361,8 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env, struct cl_lock *lock; struct lu_object_header *head; - lock = kmem_cache_alloc(cl_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (lock != NULL) { + lock = kmem_cache_zalloc(cl_lock_kmem, GFP_NOFS); + if (lock) { atomic_set(&lock->cll_ref, 1); lock->cll_descr = *descr; lock->cll_state = CLS_NEW; @@ -382,8 +382,7 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env, CS_LOCK_INC(obj, total); CS_LOCK_INC(obj, create); cl_lock_lockdep_init(lock); - list_for_each_entry(obj, &head->loh_layers, - co_lu.lo_linkage) { + list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) { int err; err = obj->co_ops->coo_lock_init(env, obj, lock, io); @@ -461,7 +460,7 @@ static int cl_lock_fits_into(const struct 
lu_env *env, LINVRNT(cl_lock_invariant_trusted(env, lock)); list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_fits_into != NULL && + if (slice->cls_ops->clo_fits_into && !slice->cls_ops->clo_fits_into(env, slice, need, io)) return 0; } @@ -524,17 +523,17 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env, lock = cl_lock_lookup(env, obj, io, need); spin_unlock(&head->coh_lock_guard); - if (lock == NULL) { + if (!lock) { lock = cl_lock_alloc(env, obj, io, need); if (!IS_ERR(lock)) { struct cl_lock *ghost; spin_lock(&head->coh_lock_guard); ghost = cl_lock_lookup(env, obj, io, need); - if (ghost == NULL) { + if (!ghost) { cl_lock_get_trust(lock); list_add_tail(&lock->cll_linkage, - &head->coh_locks); + &head->coh_locks); spin_unlock(&head->coh_lock_guard); CS_LOCK_INC(obj, busy); } else { @@ -572,7 +571,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, spin_lock(&head->coh_lock_guard); lock = cl_lock_lookup(env, obj, io, need); spin_unlock(&head->coh_lock_guard); - if (lock == NULL) + if (!lock) return NULL; cl_lock_mutex_get(env, lock); @@ -584,7 +583,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io, cl_lock_put(env, lock); lock = NULL; } - } while (lock == NULL); + } while (!lock); cl_lock_hold_add(env, lock, scope, source); cl_lock_user_add(env, lock); @@ -774,8 +773,8 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock) lock->cll_flags |= CLF_CANCELLED; list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { - if (slice->cls_ops->clo_cancel != NULL) + cls_linkage) { + if (slice->cls_ops->clo_cancel) slice->cls_ops->clo_cancel(env, slice); } } @@ -811,8 +810,8 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock) * by cl_lock_lookup(). 
*/ list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { - if (slice->cls_ops->clo_delete != NULL) + cls_linkage) { + if (slice->cls_ops->clo_delete) slice->cls_ops->clo_delete(env, slice); } /* @@ -935,7 +934,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock) if (result == 0) { /* To avoid being interrupted by the 'non-fatal' signals * (SIGCHLD, for instance), we'd block them temporarily. - * LU-305 */ + * LU-305 + */ blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); init_waitqueue_entry(&waiter, current); @@ -946,7 +946,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock) LASSERT(cl_lock_nr_mutexed(env) == 0); /* Returning ERESTARTSYS instead of EINTR so syscalls - * can be restarted if signals are pending here */ + * can be restarted if signals are pending here + */ result = -ERESTARTSYS; if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) { schedule(); @@ -974,7 +975,7 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock, LINVRNT(cl_lock_invariant(env, lock)); list_for_each_entry(slice, &lock->cll_layers, cls_linkage) - if (slice->cls_ops->clo_state != NULL) + if (slice->cls_ops->clo_state) slice->cls_ops->clo_state(env, slice, state); wake_up_all(&lock->cll_wq); } @@ -1038,8 +1039,8 @@ static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock) result = -ENOSYS; list_for_each_entry_reverse(slice, &lock->cll_layers, - cls_linkage) { - if (slice->cls_ops->clo_unuse != NULL) { + cls_linkage) { + if (slice->cls_ops->clo_unuse) { result = slice->cls_ops->clo_unuse(env, slice); if (result != 0) break; @@ -1072,7 +1073,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic) result = -ENOSYS; state = cl_lock_intransit(env, lock); list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_use != NULL) { + if (slice->cls_ops->clo_use) { result = slice->cls_ops->clo_use(env, slice); if (result != 0) 
break; @@ -1125,7 +1126,7 @@ static int cl_enqueue_kick(const struct lu_env *env, result = -ENOSYS; list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_enqueue != NULL) { + if (slice->cls_ops->clo_enqueue) { result = slice->cls_ops->clo_enqueue(env, slice, io, flags); if (result != 0) @@ -1170,7 +1171,8 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock, /* kick layers. */ result = cl_enqueue_kick(env, lock, io, flags); /* For AGL case, the cl_lock::cll_state may - * become CLS_HELD already. */ + * become CLS_HELD already. + */ if (result == 0 && lock->cll_state == CLS_QUEUING) cl_lock_state_set(env, lock, CLS_ENQUEUED); break; @@ -1215,7 +1217,7 @@ int cl_lock_enqueue_wait(const struct lu_env *env, LASSERT(cl_lock_is_mutexed(lock)); LASSERT(lock->cll_state == CLS_QUEUING); - LASSERT(lock->cll_conflict != NULL); + LASSERT(lock->cll_conflict); conflict = lock->cll_conflict; lock->cll_conflict = NULL; @@ -1258,7 +1260,7 @@ static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock, do { result = cl_enqueue_try(env, lock, io, enqflags); if (result == CLO_WAIT) { - if (lock->cll_conflict != NULL) + if (lock->cll_conflict) result = cl_lock_enqueue_wait(env, lock, 1); else result = cl_lock_state_wait(env, lock); @@ -1300,7 +1302,8 @@ int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock) } /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold - * underlying resources. */ + * underlying resources. 
+ */ if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) { cl_lock_user_del(env, lock); return 0; @@ -1416,7 +1419,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock) result = -ENOSYS; list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_wait != NULL) { + if (slice->cls_ops->clo_wait) { result = slice->cls_ops->clo_wait(env, slice); if (result != 0) break; @@ -1449,7 +1452,7 @@ int cl_wait(const struct lu_env *env, struct cl_lock *lock) LINVRNT(cl_lock_invariant(env, lock)); LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD, - "Wrong state %d \n", lock->cll_state); + "Wrong state %d\n", lock->cll_state); LASSERT(lock->cll_holds > 0); do { @@ -1487,7 +1490,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock) pound = 0; list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_weigh != NULL) { + if (slice->cls_ops->clo_weigh) { ounce = slice->cls_ops->clo_weigh(env, slice); pound += ounce; if (pound < ounce) /* over-weight^Wflow */ @@ -1523,7 +1526,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock, LINVRNT(cl_lock_invariant(env, lock)); list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_modify != NULL) { + if (slice->cls_ops->clo_modify) { result = slice->cls_ops->clo_modify(env, slice, desc); if (result != 0) return result; @@ -1584,7 +1587,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock, result = cl_lock_enclosure(env, lock, closure); if (result == 0) { list_for_each_entry(slice, &lock->cll_layers, cls_linkage) { - if (slice->cls_ops->clo_closure != NULL) { + if (slice->cls_ops->clo_closure) { result = slice->cls_ops->clo_closure(env, slice, closure); if (result != 0) @@ -1654,7 +1657,7 @@ void cl_lock_disclosure(const struct lu_env *env, cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin); 
list_for_each_entry_safe(scan, temp, &closure->clc_list, - cll_inclosure){ + cll_inclosure) { list_del_init(&scan->cll_inclosure); cl_lock_mutex_put(env, scan); lu_ref_del(&scan->cll_reference, "closure", closure); @@ -1777,13 +1780,15 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env, lock = NULL; need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but - * not PHANTOM */ + * not PHANTOM + */ need->cld_start = need->cld_end = index; need->cld_enq_flags = 0; spin_lock(&head->coh_lock_guard); /* It is fine to match any group lock since there could be only one - * with a uniq gid and it conflicts with all other lock modes too */ + * with a uniq gid and it conflicts with all other lock modes too + */ list_for_each_entry(scan, &head->coh_locks, cll_linkage) { if (scan != except && (scan->cll_descr.cld_mode == CLM_GROUP || @@ -1798,7 +1803,8 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env, (canceld || !(scan->cll_flags & CLF_CANCELLED)) && (pending || !(scan->cll_flags & CLF_CANCELPEND))) { /* Don't increase cs_hit here since this - * is just a helper function. */ + * is just a helper function. + */ cl_lock_get_trust(scan); lock = scan; break; @@ -1820,7 +1826,6 @@ static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock) dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type; slice = cl_page_at(page, dtype); - LASSERT(slice != NULL); return slice->cpl_page->cp_index; } @@ -1839,12 +1844,13 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io, /* refresh non-overlapped index */ tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index, - lock, 1, 0); - if (tmp != NULL) { + lock, 1, 0); + if (tmp) { /* Cache the first-non-overlapped index so as to skip * all pages within [index, clt_fn_index). This * is safe because if tmp lock is canceled, it will - * discard these pages. */ + * discard these pages. 
+ */ info->clt_fn_index = tmp->cll_descr.cld_end + 1; if (tmp->cll_descr.cld_end == CL_PAGE_EOF) info->clt_fn_index = CL_PAGE_EOF; @@ -1950,7 +1956,7 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel) * already destroyed (as otherwise they will be left unprotected). */ LASSERT(ergo(!cancel, - head->coh_tree.rnode == NULL && head->coh_pages == 0)); + !head->coh_tree.rnode && head->coh_pages == 0)); spin_lock(&head->coh_lock_guard); while (!list_empty(&head->coh_locks)) { @@ -2166,8 +2172,8 @@ EXPORT_SYMBOL(cl_lock_mode_name); * Prints human readable representation of a lock description. */ void cl_lock_descr_print(const struct lu_env *env, void *cookie, - lu_printer_t printer, - const struct cl_lock_descr *descr) + lu_printer_t printer, + const struct cl_lock_descr *descr) { const struct lu_fid *fid; @@ -2194,7 +2200,7 @@ void cl_lock_print(const struct lu_env *env, void *cookie, (*printer)(env, cookie, " %s@%p: ", slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name, slice); - if (slice->cls_ops->clo_print != NULL) + if (slice->cls_ops->clo_print) slice->cls_ops->clo_print(env, cookie, printer, slice); (*printer)(env, cookie, "\n"); } diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c index 57c8d5412..43e299d4d 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_object.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c @@ -152,7 +152,7 @@ struct cl_object *cl_object_top(struct cl_object *o) struct cl_object_header *hdr = cl_object_header(o); struct cl_object *top; - while (hdr->coh_parent != NULL) + while (hdr->coh_parent) hdr = hdr->coh_parent; top = lu2cl(lu_object_top(&hdr->coh_lu)); @@ -217,7 +217,7 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_attr_get != NULL) { + if (obj->co_ops->coo_attr_get) { result 
= obj->co_ops->coo_attr_get(env, obj, attr); if (result != 0) { if (result > 0) @@ -247,9 +247,8 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; - list_for_each_entry_reverse(obj, &top->loh_layers, - co_lu.lo_linkage) { - if (obj->co_ops->coo_attr_set != NULL) { + list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { + if (obj->co_ops->coo_attr_set) { result = obj->co_ops->coo_attr_set(env, obj, attr, v); if (result != 0) { if (result > 0) @@ -278,9 +277,8 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; - list_for_each_entry_reverse(obj, &top->loh_layers, - co_lu.lo_linkage) { - if (obj->co_ops->coo_glimpse != NULL) { + list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) { + if (obj->co_ops->coo_glimpse) { result = obj->co_ops->coo_glimpse(env, obj, lvb); if (result != 0) break; @@ -306,7 +304,7 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj, top = obj->co_lu.lo_header; result = 0; list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) { - if (obj->co_ops->coo_conf_set != NULL) { + if (obj->co_ops->coo_conf_set) { result = obj->co_ops->coo_conf_set(env, obj, conf); if (result != 0) break; @@ -328,7 +326,7 @@ void cl_object_kill(const struct lu_env *env, struct cl_object *obj) struct cl_object_header *hdr; hdr = cl_object_header(obj); - LASSERT(hdr->coh_tree.rnode == NULL); + LASSERT(!hdr->coh_tree.rnode); LASSERT(hdr->coh_pages == 0); set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags); @@ -362,7 +360,8 @@ void cache_stats_init(struct cache_stats *cs, const char *name) atomic_set(&cs->cs_stats[i], 0); } -int cache_stats_print(const struct cache_stats *cs, struct seq_file *m, int h) +static int cache_stats_print(const struct cache_stats *cs, + struct seq_file *m, int h) { int i; /* @@ -456,13 +455,13 @@ locks: ...... ...... ...... ...... ...... [...... ...... ...... 
...... ......] seq_printf(m, " ["); for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i) seq_printf(m, "%s: %u ", pstate[i], - atomic_read(&site->cs_pages_state[i])); + atomic_read(&site->cs_pages_state[i])); seq_printf(m, "]\n"); cache_stats_print(&site->cs_locks, m, 0); seq_printf(m, " ["); for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i) seq_printf(m, "%s: %u ", lstate[i], - atomic_read(&site->cs_locks_state[i])); + atomic_read(&site->cs_locks_state[i])); seq_printf(m, "]\n"); cache_stats_print(&cl_env_stats, m, 0); seq_printf(m, "\n"); @@ -482,7 +481,6 @@ EXPORT_SYMBOL(cl_site_stats_print); * because Lustre code may call into other fs which has certain assumptions * about journal_info. Currently following fields in task_struct are identified * can be used for this purpose: - * - cl_env: for liblustre. * - tux_info: only on RedHat kernel. * - ... * \note As long as we use task_struct to store cl_env, we assume that once @@ -540,7 +538,7 @@ static void cl_env_init0(struct cl_env *cle, void *debug) { LASSERT(cle->ce_ref == 0); LASSERT(cle->ce_magic == &cl_env_init0); - LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL); + LASSERT(!cle->ce_debug && !cle->ce_owner); cle->ce_ref = 1; cle->ce_debug = debug; @@ -575,7 +573,7 @@ static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn) { struct cl_env *cle = cl_env_hops_obj(hn); - LASSERT(cle->ce_owner != NULL); + LASSERT(cle->ce_owner); return (key == cle->ce_owner); } @@ -609,7 +607,7 @@ static inline void cl_env_attach(struct cl_env *cle) if (cle) { int rc; - LASSERT(cle->ce_owner == NULL); + LASSERT(!cle->ce_owner); cle->ce_owner = (void *) (long) current->pid; rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner, &cle->ce_node); @@ -637,7 +635,7 @@ static int cl_env_store_init(void) CFS_HASH_MAX_THETA, &cl_env_hops, CFS_HASH_RW_BKTLOCK); - return cl_env_hash != NULL ? 0 : -ENOMEM; + return cl_env_hash ? 
0 : -ENOMEM; } static void cl_env_store_fini(void) @@ -647,7 +645,7 @@ static void cl_env_store_fini(void) static inline struct cl_env *cl_env_detach(struct cl_env *cle) { - if (cle == NULL) + if (!cle) cle = cl_env_fetch(); if (cle && cle->ce_owner) @@ -661,8 +659,8 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug) struct lu_env *env; struct cl_env *cle; - cle = kmem_cache_alloc(cl_env_kmem, GFP_NOFS | __GFP_ZERO); - if (cle != NULL) { + cle = kmem_cache_zalloc(cl_env_kmem, GFP_NOFS); + if (cle) { int rc; INIT_LIST_HEAD(&cle->ce_linkage); @@ -716,7 +714,7 @@ static struct lu_env *cl_env_peek(int *refcheck) env = NULL; cle = cl_env_fetch(); - if (cle != NULL) { + if (cle) { CL_ENV_INC(hit); env = &cle->ce_lu; *refcheck = ++cle->ce_ref; @@ -741,7 +739,7 @@ struct lu_env *cl_env_get(int *refcheck) struct lu_env *env; env = cl_env_peek(refcheck); - if (env == NULL) { + if (!env) { env = cl_env_new(lu_context_tags_default, lu_session_tags_default, __builtin_return_address(0)); @@ -768,7 +766,7 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags) { struct lu_env *env; - LASSERT(cl_env_peek(refcheck) == NULL); + LASSERT(!cl_env_peek(refcheck)); env = cl_env_new(tags, tags, __builtin_return_address(0)); if (!IS_ERR(env)) { struct cl_env *cle; @@ -783,7 +781,7 @@ EXPORT_SYMBOL(cl_env_alloc); static void cl_env_exit(struct cl_env *cle) { - LASSERT(cle->ce_owner == NULL); + LASSERT(!cle->ce_owner); lu_context_exit(&cle->ce_lu.le_ctx); lu_context_exit(&cle->ce_ses); } @@ -802,7 +800,7 @@ void cl_env_put(struct lu_env *env, int *refcheck) cle = cl_env_container(env); LASSERT(cle->ce_ref > 0); - LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck)); + LASSERT(ergo(refcheck, cle->ce_ref == *refcheck)); CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle); if (--cle->ce_ref == 0) { @@ -877,7 +875,7 @@ struct lu_env *cl_env_nested_get(struct cl_env_nest *nest) nest->cen_cookie = NULL; env = cl_env_peek(&nest->cen_refcheck); - if (env != NULL) { + if 
(env) { if (!cl_io_is_going(env)) return env; cl_env_put(env, &nest->cen_refcheck); @@ -929,14 +927,12 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site, const char *typename; struct lu_device *d; - LASSERT(ldt != NULL); - typename = ldt->ldt_name; d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL); if (!IS_ERR(d)) { int rc; - if (site != NULL) + if (site) d->ld_site = site; rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next); if (rc == 0) { diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c index 61f28ebfc..394580016 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_page.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c @@ -69,7 +69,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg, */ static struct cl_page *cl_page_top_trusted(struct cl_page *page) { - while (page->cp_parent != NULL) + while (page->cp_parent) page = page->cp_parent; return page; } @@ -110,7 +110,7 @@ cl_page_at_trusted(const struct cl_page *page, return slice; } page = page->cp_child; - } while (page != NULL); + } while (page); return NULL; } @@ -127,7 +127,7 @@ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index) assert_spin_locked(&hdr->coh_page_guard); page = radix_tree_lookup(&hdr->coh_tree, index); - if (page != NULL) + if (page) cl_page_get_trust(page); return page; } @@ -188,7 +188,7 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj, * Pages for lsm-less file has no underneath sub-page * for osc, in case of ... 
*/ - PASSERT(env, page, slice != NULL); + PASSERT(env, page, slice); page = slice->cpl_page; /* @@ -245,9 +245,9 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page) struct cl_object *obj = page->cp_obj; PASSERT(env, page, list_empty(&page->cp_batch)); - PASSERT(env, page, page->cp_owner == NULL); - PASSERT(env, page, page->cp_req == NULL); - PASSERT(env, page, page->cp_parent == NULL); + PASSERT(env, page, !page->cp_owner); + PASSERT(env, page, !page->cp_req); + PASSERT(env, page, !page->cp_parent); PASSERT(env, page, page->cp_state == CPS_FREEING); might_sleep(); @@ -255,7 +255,7 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page) struct cl_page_slice *slice; slice = list_entry(page->cp_layers.next, - struct cl_page_slice, cpl_linkage); + struct cl_page_slice, cpl_linkage); list_del_init(page->cp_layers.next); slice->cpl_ops->cpo_fini(env, slice); } @@ -277,14 +277,15 @@ static inline void cl_page_state_set_trust(struct cl_page *page, } static struct cl_page *cl_page_alloc(const struct lu_env *env, - struct cl_object *o, pgoff_t ind, struct page *vmpage, - enum cl_page_type type) + struct cl_object *o, pgoff_t ind, + struct page *vmpage, + enum cl_page_type type) { struct cl_page *page; struct lu_object_header *head; page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS); - if (page != NULL) { + if (page) { int result = 0; atomic_set(&page->cp_ref, 1); @@ -303,9 +304,8 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env, mutex_init(&page->cp_mutex); lu_ref_init(&page->cp_reference); head = o->co_lu.lo_header; - list_for_each_entry(o, &head->loh_layers, - co_lu.lo_linkage) { - if (o->co_ops->coo_page_init != NULL) { + list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) { + if (o->co_ops->coo_page_init) { result = o->co_ops->coo_page_init(env, o, page, vmpage); if (result != 0) { @@ -369,13 +369,13 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, */ page = 
cl_vmpage_page(vmpage, o); PINVRNT(env, page, - ergo(page != NULL, + ergo(page, cl_page_vmpage(env, page) == vmpage && (void *)radix_tree_lookup(&hdr->coh_tree, idx) == page)); } - if (page != NULL) + if (page) return page; /* allocate and initialize cl_page */ @@ -385,7 +385,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, if (type == CPT_TRANSIENT) { if (parent) { - LASSERT(page->cp_parent == NULL); + LASSERT(!page->cp_parent); page->cp_parent = parent; parent->cp_child = page; } @@ -418,7 +418,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, "fail to insert into radix tree: %d\n", err); } else { if (parent) { - LASSERT(page->cp_parent == NULL); + LASSERT(!page->cp_parent); page->cp_parent = parent; parent->cp_child = page; } @@ -426,7 +426,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env, } spin_unlock(&hdr->coh_page_guard); - if (unlikely(ghost != NULL)) { + if (unlikely(ghost)) { cl_page_delete0(env, ghost, 0); cl_page_free(env, ghost); } @@ -467,14 +467,13 @@ static inline int cl_page_invariant(const struct cl_page *pg) owner = pg->cp_owner; return cl_page_in_use(pg) && - ergo(parent != NULL, parent->cp_child == pg) && - ergo(child != NULL, child->cp_parent == pg) && - ergo(child != NULL, pg->cp_obj != child->cp_obj) && - ergo(parent != NULL, pg->cp_obj != parent->cp_obj) && - ergo(owner != NULL && parent != NULL, + ergo(parent, parent->cp_child == pg) && + ergo(child, child->cp_parent == pg) && + ergo(child, pg->cp_obj != child->cp_obj) && + ergo(parent, pg->cp_obj != parent->cp_obj) && + ergo(owner && parent, parent->cp_owner == pg->cp_owner->ci_parent) && - ergo(owner != NULL && child != NULL, - child->cp_owner->ci_parent == owner) && + ergo(owner && child, child->cp_owner->ci_parent == owner) && /* * Either page is early in initialization (has neither child * nor parent yet), or it is in the object radix tree. 
@@ -482,7 +481,7 @@ static inline int cl_page_invariant(const struct cl_page *pg) ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE, (void *)radix_tree_lookup(&header->coh_tree, pg->cp_index) == pg || - (child == NULL && parent == NULL)); + (!child && !parent)); } static void cl_page_state_set0(const struct lu_env *env, @@ -535,10 +534,10 @@ static void cl_page_state_set0(const struct lu_env *env, old = page->cp_state; PASSERT(env, page, allowed_transitions[old][state]); CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state); - for (; page != NULL; page = page->cp_child) { + for (; page; page = page->cp_child) { PASSERT(env, page, page->cp_state == old); PASSERT(env, page, - equi(state == CPS_OWNED, page->cp_owner != NULL)); + equi(state == CPS_OWNED, page->cp_owner)); cl_page_state_set_trust(page, state); } @@ -584,7 +583,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page) LASSERT(page->cp_state == CPS_FREEING); LASSERT(atomic_read(&page->cp_ref) == 0); - PASSERT(env, page, page->cp_owner == NULL); + PASSERT(env, page, !page->cp_owner); PASSERT(env, page, list_empty(&page->cp_batch)); /* * Page is no longer reachable by other threads. Tear @@ -609,11 +608,11 @@ struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page) page = cl_page_top(page); do { list_for_each_entry(slice, &page->cp_layers, cpl_linkage) { - if (slice->cpl_ops->cpo_vmpage != NULL) + if (slice->cpl_ops->cpo_vmpage) return slice->cpl_ops->cpo_vmpage(env, slice); } page = page->cp_child; - } while (page != NULL); + } while (page); LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */ } EXPORT_SYMBOL(cl_page_vmpage); @@ -639,10 +638,10 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj) * can be rectified easily. 
*/ top = (struct cl_page *)vmpage->private; - if (top == NULL) + if (!top) return NULL; - for (page = top; page != NULL; page = page->cp_child) { + for (page = top; page; page = page->cp_child) { if (cl_object_same(page->cp_obj, obj)) { cl_page_get_trust(page); break; @@ -689,7 +688,7 @@ EXPORT_SYMBOL(cl_page_at); cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + \ __op); \ - if (__method != NULL) { \ + if (__method) { \ __result = (*__method)(__env, __scan, \ ## __VA_ARGS__); \ if (__result != 0) \ @@ -697,7 +696,7 @@ EXPORT_SYMBOL(cl_page_at); } \ } \ __page = __page->cp_child; \ - } while (__page != NULL && __result == 0); \ + } while (__page && __result == 0); \ if (__result > 0) \ __result = 0; \ __result; \ @@ -717,12 +716,12 @@ do { \ cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + \ __op); \ - if (__method != NULL) \ + if (__method) \ (*__method)(__env, __scan, \ ## __VA_ARGS__); \ } \ __page = __page->cp_child; \ - } while (__page != NULL); \ + } while (__page); \ } while (0) #define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \ @@ -734,19 +733,19 @@ do { \ void (*__method)_proto; \ \ /* get to the bottom page. 
*/ \ - while (__page->cp_child != NULL) \ + while (__page->cp_child) \ __page = __page->cp_child; \ do { \ list_for_each_entry_reverse(__scan, &__page->cp_layers, \ cpl_linkage) { \ __method = *(void **)((char *)__scan->cpl_ops + \ __op); \ - if (__method != NULL) \ + if (__method) \ (*__method)(__env, __scan, \ ## __VA_ARGS__); \ } \ __page = __page->cp_parent; \ - } while (__page != NULL); \ + } while (__page); \ } while (0) static int cl_page_invoke(const struct lu_env *env, @@ -772,8 +771,8 @@ static void cl_page_invoid(const struct lu_env *env, static void cl_page_owner_clear(struct cl_page *page) { - for (page = cl_page_top(page); page != NULL; page = page->cp_child) { - if (page->cp_owner != NULL) { + for (page = cl_page_top(page); page; page = page->cp_child) { + if (page->cp_owner) { LASSERT(page->cp_owner->ci_owned_nr > 0); page->cp_owner->ci_owned_nr--; page->cp_owner = NULL; @@ -784,10 +783,8 @@ static void cl_page_owner_clear(struct cl_page *page) static void cl_page_owner_set(struct cl_page *page) { - for (page = cl_page_top(page); page != NULL; page = page->cp_child) { - LASSERT(page->cp_owner != NULL); + for (page = cl_page_top(page); page; page = page->cp_child) page->cp_owner->ci_owned_nr++; - } } void cl_page_disown0(const struct lu_env *env, @@ -862,8 +859,8 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io, struct cl_io *, int), io, nonblock); if (result == 0) { - PASSERT(env, pg, pg->cp_owner == NULL); - PASSERT(env, pg, pg->cp_req == NULL); + PASSERT(env, pg, !pg->cp_owner); + PASSERT(env, pg, !pg->cp_req); pg->cp_owner = io; pg->cp_task = current; cl_page_owner_set(pg); @@ -921,7 +918,7 @@ void cl_page_assume(const struct lu_env *env, io = cl_io_top(io); cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume)); - PASSERT(env, pg, pg->cp_owner == NULL); + PASSERT(env, pg, !pg->cp_owner); pg->cp_owner = io; pg->cp_task = current; cl_page_owner_set(pg); @@ -1037,7 +1034,7 @@ static void cl_page_delete0(const struct lu_env *env, 
struct cl_page *pg, * skip removing it. */ tmp = pg->cp_child; - for (; tmp != NULL; tmp = tmp->cp_child) { + for (; tmp; tmp = tmp->cp_child) { void *value; struct cl_object_header *hdr; @@ -1135,7 +1132,7 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg) pg = cl_page_top_trusted((struct cl_page *)pg); slice = container_of(pg->cp_layers.next, const struct cl_page_slice, cpl_linkage); - PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL); + PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked); /* * Call ->cpo_is_vmlocked() directly instead of going through * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by @@ -1216,7 +1213,7 @@ void cl_page_completion(const struct lu_env *env, PASSERT(env, pg, crt < CRT_NR); /* cl_page::cp_req already cleared by the caller (osc_completion()) */ - PASSERT(env, pg, pg->cp_req == NULL); + PASSERT(env, pg, !pg->cp_req); PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt)); CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret); @@ -1304,7 +1301,7 @@ int cl_page_cache_add(const struct lu_env *env, struct cl_io *io, return -EINVAL; list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) { - if (scan->cpl_ops->io[crt].cpo_cache_add == NULL) + if (!scan->cpl_ops->io[crt].cpo_cache_add) continue; result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io); @@ -1450,8 +1447,8 @@ void cl_page_print(const struct lu_env *env, void *cookie, { struct cl_page *scan; - for (scan = cl_page_top((struct cl_page *)pg); - scan != NULL; scan = scan->cp_child) + for (scan = cl_page_top((struct cl_page *)pg); scan; + scan = scan->cp_child) cl_page_header_print(env, cookie, printer, scan); CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print), (const struct lu_env *env, @@ -1480,7 +1477,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx) /* * XXX for now. 
*/ - return (loff_t)idx << PAGE_CACHE_SHIFT; + return (loff_t)idx << PAGE_SHIFT; } EXPORT_SYMBOL(cl_offset); @@ -1492,13 +1489,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset) /* * XXX for now. */ - return offset >> PAGE_CACHE_SHIFT; + return offset >> PAGE_SHIFT; } EXPORT_SYMBOL(cl_index); int cl_page_size(const struct cl_object *obj) { - return 1 << PAGE_CACHE_SHIFT; + return 1 << PAGE_SHIFT; } EXPORT_SYMBOL(cl_page_size); diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c index 0975e4430..c2cf01596 100644 --- a/drivers/staging/lustre/lustre/obdclass/class_obd.c +++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c @@ -42,7 +42,6 @@ #include "../../include/linux/lnet/lnetctl.h" #include "../include/lustre_debug.h" #include "../include/lprocfs_status.h" -#include "../include/lustre/lustre_build_version.h" #include <linux/list.h> #include "../include/cl_object.h" #include "llog_internal.h" @@ -52,7 +51,7 @@ EXPORT_SYMBOL(obd_devs); struct list_head obd_types; DEFINE_RWLOCK(obd_dev_lock); -/* The following are visible and mutable through /proc/sys/lustre/. */ +/* The following are visible and mutable through /sys/fs/lustre. 
*/ unsigned int obd_debug_peer_on_timeout; EXPORT_SYMBOL(obd_debug_peer_on_timeout); unsigned int obd_dump_on_timeout; @@ -67,7 +66,7 @@ unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */ EXPORT_SYMBOL(obd_timeout); unsigned int obd_timeout_set; EXPORT_SYMBOL(obd_timeout_set); -/* Adaptive timeout defs here instead of ptlrpc module for /proc/sys/ access */ +/* Adaptive timeout defs here instead of ptlrpc module for /sys/fs/ access */ unsigned int at_min; EXPORT_SYMBOL(at_min); unsigned int at_max = 600; @@ -180,7 +179,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) } CDEBUG(D_IOCTL, "cmd = %x\n", cmd); - if (obd_ioctl_getdata(&buf, &len, (void *)arg)) { + if (obd_ioctl_getdata(&buf, &len, (void __user *)arg)) { CERROR("OBD ioctl: data error\n"); return -EINVAL; } @@ -200,8 +199,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) err = -ENOMEM; goto out; } - err = copy_from_user(lcfg, data->ioc_pbuf1, - data->ioc_plen1); + err = copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1); if (!err) err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1); if (!err) @@ -218,16 +216,16 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) goto out; } - if (strlen(BUILD_VERSION) + 1 > data->ioc_inllen1) { + if (strlen(LUSTRE_VERSION_STRING) + 1 > data->ioc_inllen1) { CERROR("ioctl buffer too small to hold version\n"); err = -EINVAL; goto out; } - memcpy(data->ioc_bulk, BUILD_VERSION, - strlen(BUILD_VERSION) + 1); + memcpy(data->ioc_bulk, LUSTRE_VERSION_STRING, + strlen(LUSTRE_VERSION_STRING) + 1); - err = obd_ioctl_popdata((void *)arg, data, len); + err = obd_ioctl_popdata((void __user *)arg, data, len); if (err) err = -EFAULT; goto out; @@ -246,7 +244,8 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) goto out; } - err = obd_ioctl_popdata((void *)arg, data, sizeof(*data)); + err = obd_ioctl_popdata((void __user *)arg, data, + sizeof(*data)); if (err) err = -EFAULT; goto out; @@ -283,7 +282,8 @@ int 
class_handle_ioctl(unsigned int cmd, unsigned long arg) CDEBUG(D_IOCTL, "device name %s, dev %d\n", data->ioc_inlbuf1, dev); - err = obd_ioctl_popdata((void *)arg, data, sizeof(*data)); + err = obd_ioctl_popdata((void __user *)arg, data, + sizeof(*data)); if (err) err = -EFAULT; goto out; @@ -330,7 +330,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) (int)index, status, obd->obd_type->typ_name, obd->obd_name, obd->obd_uuid.uuid, atomic_read(&obd->obd_refcount)); - err = obd_ioctl_popdata((void *)arg, data, len); + err = obd_ioctl_popdata((void __user *)arg, data, len); err = 0; goto out; @@ -339,7 +339,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) } if (data->ioc_dev == OBD_DEV_BY_DEVNAME) { - if (data->ioc_inllen4 <= 0 || data->ioc_inlbuf4 == NULL) { + if (data->ioc_inllen4 <= 0 || !data->ioc_inlbuf4) { err = -EINVAL; goto out; } @@ -356,7 +356,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) goto out; } - if (obd == NULL) { + if (!obd) { CERROR("OBD ioctl : No Device %d\n", data->ioc_dev); err = -EINVAL; goto out; @@ -388,7 +388,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) if (err) goto out; - err = obd_ioctl_popdata((void *)arg, data, len); + err = obd_ioctl_popdata((void __user *)arg, data, len); if (err) err = -EFAULT; goto out; @@ -461,9 +461,9 @@ static int obd_init_checks(void) CWARN("LPD64 wrong length! 
strlen(%s)=%d != 2\n", buf, len); ret = -EINVAL; } - if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) { + if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) { CWARN("mask failed: u64val %llu >= %llu\n", u64val, - (__u64)PAGE_CACHE_SIZE); + (__u64)PAGE_SIZE); ret = -EINVAL; } @@ -473,13 +473,13 @@ static int obd_init_checks(void) extern int class_procfs_init(void); extern int class_procfs_clean(void); -static int __init init_obdclass(void) +static int __init obdclass_init(void) { int i, err; int lustre_register_fs(void); - LCONSOLE_INFO("Lustre: Build Version: "BUILD_VERSION"\n"); + LCONSOLE_INFO("Lustre: Build Version: " LUSTRE_VERSION_STRING "\n"); spin_lock_init(&obd_types_lock); obd_zombie_impexp_init(); @@ -507,8 +507,9 @@ static int __init init_obdclass(void) /* Default the dirty page cache cap to 1/2 of system memory. * For clients with less memory, a larger fraction is needed - * for other purposes (mostly for BGL). */ - if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT)) + * for other purposes (mostly for BGL). + */ + if (totalram_pages <= 512 << (20 - PAGE_SHIFT)) obd_max_dirty_pages = totalram_pages / 4; else obd_max_dirty_pages = totalram_pages / 2; @@ -542,9 +543,7 @@ static int __init init_obdclass(void) return err; } -/* liblustre doesn't call cleanup_obdclass, apparently. we carry on in this - * ifdef to the end of the file to cover module and versioning goo.*/ -static void cleanup_obdclass(void) +static void obdclass_exit(void) { int i; @@ -577,9 +576,9 @@ static void cleanup_obdclass(void) } MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Class Driver Build Version: " BUILD_VERSION); -MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Lustre Class Driver"); MODULE_VERSION(LUSTRE_VERSION_STRING); +MODULE_LICENSE("GPL"); -module_init(init_obdclass); -module_exit(cleanup_obdclass); +module_init(obdclass_init); +module_exit(obdclass_exit); diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c index 228c44c37..cf97b8f06 100644 --- a/drivers/staging/lustre/lustre/obdclass/genops.c +++ b/drivers/staging/lustre/lustre/obdclass/genops.c @@ -42,6 +42,7 @@ #define DEBUG_SUBSYSTEM S_CLASS #include "../include/obd_class.h" #include "../include/lprocfs_status.h" +#include "../include/lustre_kernelcomm.h" spinlock_t obd_types_lock; @@ -68,18 +69,17 @@ static struct obd_device *obd_device_alloc(void) { struct obd_device *obd; - obd = kmem_cache_alloc(obd_device_cachep, GFP_NOFS | __GFP_ZERO); - if (obd != NULL) + obd = kmem_cache_zalloc(obd_device_cachep, GFP_NOFS); + if (obd) obd->obd_magic = OBD_DEVICE_MAGIC; return obd; } static void obd_device_free(struct obd_device *obd) { - LASSERT(obd != NULL); LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08x != %08x\n", obd, obd->obd_magic, OBD_DEVICE_MAGIC); - if (obd->obd_namespace != NULL) { + if (obd->obd_namespace) { CERROR("obd %p: namespace %p was not properly cleaned up (obd_force=%d)!\n", obd, obd->obd_namespace, obd->obd_force); LBUG(); @@ -112,15 +112,6 @@ static struct obd_type *class_get_type(const char *name) if (!type) { const char *modname = name; - if (strcmp(modname, "obdfilter") == 0) - modname = "ofd"; - - if (strcmp(modname, LUSTRE_LWP_NAME) == 0) - modname = LUSTRE_OSP_NAME; - - if (!strncmp(modname, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME))) - modname = LUSTRE_MDT_NAME; - if (!request_module("%s", modname)) { CDEBUG(D_INFO, "Loaded module '%s'\n", modname); type = class_search_type(name); @@ -202,7 +193,7 @@ int 
class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops, goto failed; } - if (ldt != NULL) { + if (ldt) { type->typ_lu = ldt; rc = lu_device_type_init(ldt); if (rc != 0) @@ -364,7 +355,7 @@ void class_release_dev(struct obd_device *obd) obd, obd->obd_magic, OBD_DEVICE_MAGIC); LASSERTF(obd == obd_devs[obd->obd_minor], "obd %p != obd_devs[%d] %p\n", obd, obd->obd_minor, obd_devs[obd->obd_minor]); - LASSERT(obd_type != NULL); + LASSERT(obd_type); CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n", obd->obd_name, obd->obd_minor, obd->obd_type->typ_name); @@ -390,7 +381,8 @@ int class_name2dev(const char *name) if (obd && strcmp(name, obd->obd_name) == 0) { /* Make sure we finished attaching before we give - out any references */ + * out any references + */ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); if (obd->obd_attached) { read_unlock(&obd_dev_lock); @@ -465,11 +457,12 @@ struct obd_device *class_num2obd(int num) EXPORT_SYMBOL(class_num2obd); /* Search for a client OBD connected to tgt_uuid. If grp_uuid is - specified, then only the client with that uuid is returned, - otherwise any client connected to the tgt is returned. */ + * specified, then only the client with that uuid is returned, + * otherwise any client connected to the tgt is returned. + */ struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, - const char *typ_name, - struct obd_uuid *grp_uuid) + const char *typ_name, + struct obd_uuid *grp_uuid) { int i; @@ -497,9 +490,10 @@ struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid, EXPORT_SYMBOL(class_find_client_obd); /* Iterate the obd_device list looking devices have grp_uuid. Start - searching at *next, and if a device is found, the next index to look - at is saved in *next. If next is NULL, then the first matching device - will always be returned. */ + * searching at *next, and if a device is found, the next index to look + * at is saved in *next. 
If next is NULL, then the first matching device + * will always be returned. + */ struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next) { int i; @@ -588,21 +582,21 @@ int obd_init_caches(void) { LASSERT(!obd_device_cachep); obd_device_cachep = kmem_cache_create("ll_obd_dev_cache", - sizeof(struct obd_device), - 0, 0, NULL); + sizeof(struct obd_device), + 0, 0, NULL); if (!obd_device_cachep) goto out; LASSERT(!obdo_cachep); obdo_cachep = kmem_cache_create("ll_obdo_cache", sizeof(struct obdo), - 0, 0, NULL); + 0, 0, NULL); if (!obdo_cachep) goto out; LASSERT(!import_cachep); import_cachep = kmem_cache_create("ll_import_cache", - sizeof(struct obd_import), - 0, 0, NULL); + sizeof(struct obd_import), + 0, 0, NULL); if (!import_cachep) goto out; @@ -658,7 +652,7 @@ static void class_export_destroy(struct obd_export *exp) struct obd_device *obd = exp->exp_obd; LASSERT_ATOMIC_ZERO(&exp->exp_refcount); - LASSERT(obd != NULL); + LASSERT(obd); CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp, exp->exp_client_uuid.uuid, obd->obd_name); @@ -698,7 +692,6 @@ EXPORT_SYMBOL(class_export_get); void class_export_put(struct obd_export *exp) { - LASSERT(exp != NULL); LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON); CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp, atomic_read(&exp->exp_refcount) - 1); @@ -718,7 +711,8 @@ EXPORT_SYMBOL(class_export_put); /* Creates a new export, adds it to the hash table, and returns a * pointer to it. The refcount is 2: one for the hash reference, and - * one for the pointer returned by this function. */ + * one for the pointer returned by this function. 
+ */ struct obd_export *class_new_export(struct obd_device *obd, struct obd_uuid *cluuid) { @@ -834,7 +828,7 @@ EXPORT_SYMBOL(class_unlink_export); static void class_import_destroy(struct obd_import *imp) { CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp, - imp->imp_obd->obd_name); + imp->imp_obd->obd_name); LASSERT_ATOMIC_ZERO(&imp->imp_refcount); @@ -844,7 +838,7 @@ static void class_import_destroy(struct obd_import *imp) struct obd_import_conn *imp_conn; imp_conn = list_entry(imp->imp_conn_list.next, - struct obd_import_conn, oic_item); + struct obd_import_conn, oic_item); list_del_init(&imp_conn->oic_item); ptlrpc_put_connection_superhack(imp_conn->oic_conn); kfree(imp_conn); @@ -901,8 +895,9 @@ static void init_imp_at(struct imp_at *at) at_init(&at->iat_net_latency, 0, 0); for (i = 0; i < IMP_AT_MAX_PORTALS; i++) { /* max service estimates are tracked on the server side, so - don't use the AT history here, just use the last reported - val. (But keep hist for proc histogram, worst_ever) */ + * don't use the AT history here, just use the last reported + * val. (But keep hist for proc histogram, worst_ever) + */ at_init(&at->iat_service_estimate[i], INITIAL_CONNECT_TIMEOUT, AT_FLG_NOHIST); } @@ -941,7 +936,8 @@ struct obd_import *class_new_import(struct obd_device *obd) init_imp_at(&imp->imp_at); /* the default magic is V2, will be used in connect RPC, and - * then adjusted according to the flags in request/reply. */ + * then adjusted according to the flags in request/reply. 
+ */ imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2; return imp; @@ -950,7 +946,7 @@ EXPORT_SYMBOL(class_new_import); void class_destroy_import(struct obd_import *import) { - LASSERT(import != NULL); + LASSERT(import); LASSERT(import != LP_POISON); class_handle_unhash(&import->imp_handle); @@ -970,8 +966,7 @@ void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock) LASSERT(lock->l_exp_refs_nr >= 0); - if (lock->l_exp_refs_target != NULL && - lock->l_exp_refs_target != exp) { + if (lock->l_exp_refs_target && lock->l_exp_refs_target != exp) { LCONSOLE_WARN("setting export %p for lock %p which already has export %p\n", exp, lock, lock->l_exp_refs_target); } @@ -1005,17 +1000,18 @@ EXPORT_SYMBOL(__class_export_del_lock_ref); #endif /* A connection defines an export context in which preallocation can - be managed. This releases the export pointer reference, and returns - the export handle, so the export refcount is 1 when this function - returns. */ + * be managed. This releases the export pointer reference, and returns + * the export handle, so the export refcount is 1 when this function + * returns. 
+ */ int class_connect(struct lustre_handle *conn, struct obd_device *obd, struct obd_uuid *cluuid) { struct obd_export *export; - LASSERT(conn != NULL); - LASSERT(obd != NULL); - LASSERT(cluuid != NULL); + LASSERT(conn); + LASSERT(obd); + LASSERT(cluuid); export = class_new_export(obd, cluuid); if (IS_ERR(export)) @@ -1035,7 +1031,8 @@ EXPORT_SYMBOL(class_connect); * and if disconnect really need * 2 - removing from hash * 3 - in client_unlink_export - * The export pointer passed to this function can destroyed */ + * The export pointer passed to this function can destroyed + */ int class_disconnect(struct obd_export *export) { int already_disconnected; @@ -1052,7 +1049,8 @@ int class_disconnect(struct obd_export *export) /* class_cleanup(), abort_recovery(), and class_fail_export() * all end up in here, and if any of them race we shouldn't - * call extra class_export_puts(). */ + * call extra class_export_puts(). + */ if (already_disconnected) goto no_disconn; @@ -1092,7 +1090,8 @@ void class_fail_export(struct obd_export *exp) /* Most callers into obd_disconnect are removing their own reference * (request, for example) in addition to the one from the hash table. - * We don't have such a reference here, so make one. */ + * We don't have such a reference here, so make one. 
+ */ class_export_get(exp); rc = obd_disconnect(exp); if (rc) @@ -1126,29 +1125,29 @@ static void obd_zombie_impexp_cull(void) import = NULL; if (!list_empty(&obd_zombie_imports)) { import = list_entry(obd_zombie_imports.next, - struct obd_import, - imp_zombie_chain); + struct obd_import, + imp_zombie_chain); list_del_init(&import->imp_zombie_chain); } export = NULL; if (!list_empty(&obd_zombie_exports)) { export = list_entry(obd_zombie_exports.next, - struct obd_export, - exp_obd_chain); + struct obd_export, + exp_obd_chain); list_del_init(&export->exp_obd_chain); } spin_unlock(&obd_zombie_impexp_lock); - if (import != NULL) { + if (import) { class_import_destroy(import); spin_lock(&obd_zombie_impexp_lock); zombies_count--; spin_unlock(&obd_zombie_impexp_lock); } - if (export != NULL) { + if (export) { class_export_destroy(export); spin_lock(&obd_zombie_impexp_lock); zombies_count--; @@ -1156,7 +1155,7 @@ static void obd_zombie_impexp_cull(void) } cond_resched(); - } while (import != NULL || export != NULL); + } while (import || export); } static struct completion obd_zombie_start; diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c index d8230aec9..8405eccda 100644 --- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c +++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c @@ -42,9 +42,8 @@ #define DEBUG_SUBSYSTEM S_CLASS #define D_KUC D_OTHER -#include "../../include/linux/libcfs/libcfs.h" - -/* This is the kernel side (liblustre as well). 
*/ +#include "../include/obd_support.h" +#include "../include/lustre_kernelcomm.h" /** * libcfs_kkuc_msg_put - send an message from kernel to userspace @@ -58,14 +57,14 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload) ssize_t count = kuch->kuc_msglen; loff_t offset = 0; mm_segment_t fs; - int rc = -ENOSYS; + int rc = -ENXIO; - if (filp == NULL || IS_ERR(filp)) + if (IS_ERR_OR_NULL(filp)) return -EBADF; if (kuch->kuc_magic != KUC_MAGIC) { CERROR("KernelComm: bad magic %x\n", kuch->kuc_magic); - return -ENOSYS; + return rc; } fs = get_fs(); @@ -90,18 +89,20 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload) } EXPORT_SYMBOL(libcfs_kkuc_msg_put); -/* Broadcast groups are global across all mounted filesystems; +/* + * Broadcast groups are global across all mounted filesystems; * i.e. registering for a group on 1 fs will get messages for that - * group from any fs */ + * group from any fs + */ /** A single group registration has a uid and a file pointer */ struct kkuc_reg { - struct list_head kr_chain; - int kr_uid; + struct list_head kr_chain; + int kr_uid; struct file *kr_fp; - __u32 kr_data; + char kr_data[0]; }; -static struct list_head kkuc_groups[KUC_GRP_MAX+1] = {}; +static struct list_head kkuc_groups[KUC_GRP_MAX + 1] = {}; /* Protect message sending against remove and adds */ static DECLARE_RWSEM(kg_sem); @@ -109,9 +110,10 @@ static DECLARE_RWSEM(kg_sem); * @param filp pipe to write into * @param uid identifier for this receiver * @param group group number + * @param data user data */ int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group, - __u32 data) + void *data, size_t data_len) { struct kkuc_reg *reg; @@ -121,20 +123,20 @@ int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group, } /* fput in group_rem */ - if (filp == NULL) + if (!filp) return -EBADF; /* freed in group_rem */ - reg = kmalloc(sizeof(*reg), 0); - if (reg == NULL) + reg = kmalloc(sizeof(*reg) + data_len, 0); + if (!reg) return -ENOMEM; 
reg->kr_fp = filp; reg->kr_uid = uid; - reg->kr_data = data; + memcpy(reg->kr_data, data, data_len); down_write(&kg_sem); - if (kkuc_groups[group].next == NULL) + if (!kkuc_groups[group].next) INIT_LIST_HEAD(&kkuc_groups[group]); list_add(®->kr_chain, &kkuc_groups[group]); up_write(&kg_sem); @@ -145,14 +147,14 @@ int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group, } EXPORT_SYMBOL(libcfs_kkuc_group_add); -int libcfs_kkuc_group_rem(int uid, int group) +int libcfs_kkuc_group_rem(int uid, unsigned int group) { struct kkuc_reg *reg, *next; - if (kkuc_groups[group].next == NULL) + if (!kkuc_groups[group].next) return 0; - if (uid == 0) { + if (!uid) { /* Broadcast a shutdown message */ struct kuc_hdr lh; @@ -165,11 +167,11 @@ int libcfs_kkuc_group_rem(int uid, int group) down_write(&kg_sem); list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) { - if ((uid == 0) || (uid == reg->kr_uid)) { + if (!uid || (uid == reg->kr_uid)) { list_del(®->kr_chain); CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n", reg->kr_uid, reg->kr_fp, group); - if (reg->kr_fp != NULL) + if (reg->kr_fp) fput(reg->kr_fp); kfree(reg); } @@ -180,28 +182,30 @@ int libcfs_kkuc_group_rem(int uid, int group) } EXPORT_SYMBOL(libcfs_kkuc_group_rem); -int libcfs_kkuc_group_put(int group, void *payload) +int libcfs_kkuc_group_put(unsigned int group, void *payload) { struct kkuc_reg *reg; - int rc = 0; + int rc = 0; int one_success = 0; - down_read(&kg_sem); + down_write(&kg_sem); list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { - if (reg->kr_fp != NULL) { + if (reg->kr_fp) { rc = libcfs_kkuc_msg_put(reg->kr_fp, payload); - if (rc == 0) + if (!rc) { one_success = 1; - else if (rc == -EPIPE) { + } else if (rc == -EPIPE) { fput(reg->kr_fp); reg->kr_fp = NULL; } } } - up_read(&kg_sem); + up_write(&kg_sem); - /* don't return an error if the message has been delivered - * at least to one agent */ + /* + * don't return an error if the message has been delivered + * at 
least to one agent + */ if (one_success) rc = 0; @@ -213,9 +217,9 @@ EXPORT_SYMBOL(libcfs_kkuc_group_put); * Calls a callback function for each link of the given kuc group. * @param group the group to call the function on. * @param cb_func the function to be called. - * @param cb_arg iextra argument to be passed to the callback function. + * @param cb_arg extra argument to be passed to the callback function. */ -int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func, +int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func, void *cb_arg) { struct kkuc_reg *reg; @@ -227,15 +231,15 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func, } /* no link for this group */ - if (kkuc_groups[group].next == NULL) + if (!kkuc_groups[group].next) return 0; - down_write(&kg_sem); + down_read(&kg_sem); list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { - if (reg->kr_fp != NULL) + if (reg->kr_fp) rc = cb_func(reg->kr_data, cb_arg); } - up_write(&kg_sem); + up_read(&kg_sem); return rc; } diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c index a055cbb4f..8eddf206f 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c @@ -59,7 +59,6 @@ #include <linux/highmem.h> #include <linux/io.h> #include <asm/ioctls.h> -#include <linux/poll.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/seq_file.h> @@ -71,17 +70,16 @@ #include "../../include/obd_class.h" #include "../../include/lprocfs_status.h" #include "../../include/lustre_ver.h" -#include "../../include/lustre/lustre_build_version.h" /* buffer MUST be at least the size of obd_ioctl_hdr */ -int obd_ioctl_getdata(char **buf, int *len, void *arg) +int obd_ioctl_getdata(char **buf, int *len, void __user *arg) { struct obd_ioctl_hdr hdr; struct obd_ioctl_data *data; int err; int offset = 0; - if 
(copy_from_user(&hdr, (void *)arg, sizeof(hdr))) + if (copy_from_user(&hdr, arg, sizeof(hdr))) return -EFAULT; if (hdr.ioc_version != OBD_IOCTL_VERSION) { @@ -104,9 +102,10 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg) /* When there are lots of processes calling vmalloc on multi-core * system, the high lock contention will hurt performance badly, * obdfilter-survey is an example, which relies on ioctl. So we'd - * better avoid vmalloc on ioctl path. LU-66 */ + * better avoid vmalloc on ioctl path. LU-66 + */ *buf = libcfs_kvzalloc(hdr.ioc_len, GFP_NOFS); - if (*buf == NULL) { + if (!*buf) { CERROR("Cannot allocate control buffer of len %d\n", hdr.ioc_len); return -EINVAL; @@ -114,7 +113,7 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg) *len = hdr.ioc_len; data = (struct obd_ioctl_data *)*buf; - if (copy_from_user(*buf, (void *)arg, hdr.ioc_len)) { + if (copy_from_user(*buf, arg, hdr.ioc_len)) { err = -EFAULT; goto free_buf; } @@ -144,9 +143,8 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg) offset += cfs_size_round(data->ioc_inllen3); } - if (data->ioc_inllen4) { + if (data->ioc_inllen4) data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset; - } return 0; @@ -156,7 +154,7 @@ free_buf: } EXPORT_SYMBOL(obd_ioctl_getdata); -int obd_ioctl_popdata(void *arg, void *data, int len) +int obd_ioctl_popdata(void __user *arg, void *data, int len) { int err; @@ -240,7 +238,7 @@ static ssize_t health_show(struct kobject *kobj, struct attribute *attr, struct obd_device *obd; obd = class_num2obd(i); - if (obd == NULL || !obd->obd_attached || !obd->obd_set_up) + if (!obd || !obd->obd_attached || !obd->obd_set_up) continue; LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); @@ -250,9 +248,8 @@ static ssize_t health_show(struct kobject *kobj, struct attribute *attr, class_incref(obd, __func__, current); read_unlock(&obd_dev_lock); - if (obd_health_check(NULL, obd)) { + if (obd_health_check(NULL, obd)) healthy = false; - } class_decref(obd, __func__, current); 
read_lock(&obd_dev_lock); } @@ -360,7 +357,7 @@ static int obd_device_list_seq_show(struct seq_file *p, void *v) struct obd_device *obd = class_num2obd((int)index); char *status; - if (obd == NULL) + if (!obd) return 0; LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC); @@ -424,7 +421,7 @@ int class_procfs_init(void) struct dentry *file; lustre_kobj = kobject_create_and_add("lustre", fs_kobj); - if (lustre_kobj == NULL) + if (!lustre_kobj) goto out; /* Create the files associated with this kobject */ @@ -456,8 +453,7 @@ out: int class_procfs_clean(void) { - if (debugfs_lustre_root != NULL) - debugfs_remove_recursive(debugfs_lustre_root); + debugfs_remove_recursive(debugfs_lustre_root); debugfs_lustre_root = NULL; diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c index 9496c09b2..b41b65e2f 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c @@ -47,7 +47,6 @@ #include "../../include/lustre/lustre_idl.h" #include <linux/fs.h> -#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid) { @@ -71,8 +70,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid) if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits)) dst->i_blkbits = ffs(src->o_blksize) - 1; - if (dst->i_blkbits < PAGE_CACHE_SHIFT) - dst->i_blkbits = PAGE_CACHE_SHIFT; + if (dst->i_blkbits < PAGE_SHIFT) + dst->i_blkbits = PAGE_SHIFT; /* allocation of space */ if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks) diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c index 42fc26f4a..e6bf414a4 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c @@ -62,8 +62,8 @@ struct 
static_lustre_uintvalue_attr { }; static ssize_t static_uintvalue_show(struct kobject *kobj, - struct attribute *attr, - char *buf) + struct attribute *attr, + char *buf) { struct static_lustre_uintvalue_attr *lattr = (void *)attr; @@ -71,8 +71,8 @@ static ssize_t static_uintvalue_show(struct kobject *kobj, } static ssize_t static_uintvalue_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, size_t count) + struct attribute *attr, + const char *buffer, size_t count) { struct static_lustre_uintvalue_attr *lattr = (void *)attr; int rc; @@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%ul\n", - obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT))); + obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT))); } static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr, @@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr, if (rc) return rc; - val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */ + val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */ if (val > ((totalram_pages / 10) * 9)) { /* Somebody wants to assign too much memory to dirty pages */ return -EINVAL; } - if (val < 4 << (20 - PAGE_CACHE_SHIFT)) { + if (val < 4 << (20 - PAGE_SHIFT)) { /* Less than 4 Mb for dirty cache is also bad */ return -EINVAL; } diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c index f956d7ed6..992573eae 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog.c +++ b/drivers/staging/lustre/lustre/obdclass/llog.c @@ -76,8 +76,6 @@ static struct llog_handle *llog_alloc_handle(void) */ static void llog_free_handle(struct llog_handle *loghandle) { - LASSERT(loghandle != NULL); - /* failed llog_init_handle */ if (!loghandle->lgh_hdr) goto out; @@ -115,7 +113,7 @@ static int llog_read_header(const struct lu_env *env, if (rc) return rc; - if (lop->lop_read_header 
== NULL) + if (!lop->lop_read_header) return -EOPNOTSUPP; rc = lop->lop_read_header(env, handle); @@ -144,7 +142,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle, struct llog_log_hdr *llh; int rc; - LASSERT(handle->lgh_hdr == NULL); + LASSERT(!handle->lgh_hdr); llh = kzalloc(sizeof(*llh), GFP_NOFS); if (!llh) @@ -228,11 +226,11 @@ static int llog_process_thread(void *arg) return 0; } - if (cd != NULL) { + if (cd) { last_called_index = cd->lpcd_first_idx; index = cd->lpcd_first_idx + 1; } - if (cd != NULL && cd->lpcd_last_idx) + if (cd && cd->lpcd_last_idx) last_index = cd->lpcd_last_idx; else last_index = LLOG_BITMAP_BYTES * 8 - 1; @@ -262,7 +260,8 @@ repeat: /* NB: when rec->lrh_len is accessed it is already swabbed * since it is used at the "end" of the loop and the rec - * swabbing is done at the beginning of the loop. */ + * swabbing is done at the beginning of the loop. + */ for (rec = (struct llog_rec_hdr *)buf; (char *)rec < buf + LLOG_CHUNK_SIZE; rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) { @@ -328,7 +327,7 @@ repeat: } out: - if (cd != NULL) + if (cd) cd->lpcd_last_idx = last_called_index; kfree(buf); @@ -366,27 +365,28 @@ int llog_process_or_fork(const struct lu_env *env, int rc; lpi = kzalloc(sizeof(*lpi), GFP_NOFS); - if (!lpi) { - CERROR("cannot alloc pointer\n"); + if (!lpi) return -ENOMEM; - } lpi->lpi_loghandle = loghandle; lpi->lpi_cb = cb; lpi->lpi_cbdata = data; lpi->lpi_catdata = catdata; if (fork) { + struct task_struct *task; + /* The new thread can't use parent env, - * init the new one in llog_process_thread_daemonize. */ + * init the new one in llog_process_thread_daemonize. 
+ */ lpi->lpi_env = NULL; init_completion(&lpi->lpi_completion); - rc = PTR_ERR(kthread_run(llog_process_thread_daemonize, lpi, - "llog_process_thread")); - if (IS_ERR_VALUE(rc)) { + task = kthread_run(llog_process_thread_daemonize, lpi, + "llog_process_thread"); + if (IS_ERR(task)) { + rc = PTR_ERR(task); CERROR("%s: cannot start thread: rc = %d\n", loghandle->lgh_ctxt->loc_obd->obd_name, rc); - kfree(lpi); - return rc; + goto out_lpi; } wait_for_completion(&lpi->lpi_completion); } else { @@ -394,6 +394,7 @@ int llog_process_or_fork(const struct lu_env *env, llog_process_thread(lpi); } rc = lpi->lpi_rc; +out_lpi: kfree(lpi); return rc; } @@ -416,13 +417,13 @@ int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt, LASSERT(ctxt); LASSERT(ctxt->loc_logops); - if (ctxt->loc_logops->lop_open == NULL) { + if (!ctxt->loc_logops->lop_open) { *lgh = NULL; return -EOPNOTSUPP; } *lgh = llog_alloc_handle(); - if (*lgh == NULL) + if (!*lgh) return -ENOMEM; (*lgh)->lgh_ctxt = ctxt; (*lgh)->lgh_logops = ctxt->loc_logops; @@ -449,7 +450,7 @@ int llog_close(const struct lu_env *env, struct llog_handle *loghandle) rc = llog_handle2ops(loghandle, &lop); if (rc) goto out; - if (lop->lop_close == NULL) { + if (!lop->lop_close) { rc = -EOPNOTSUPP; goto out; } diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c index 0f05e9c4a..c27d4ec1d 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c @@ -69,12 +69,12 @@ static int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *loghandle; int rc = 0; - if (cathandle == NULL) + if (!cathandle) return -EBADF; down_write(&cathandle->lgh_lock); list_for_each_entry(loghandle, &cathandle->u.chd.chd_head, - u.phd.phd_entry) { + u.phd.phd_entry) { struct llog_logid *cgl = &loghandle->lgh_id; if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) && @@ -130,7 +130,7 @@ int llog_cat_close(const struct 
lu_env *env, struct llog_handle *cathandle) int rc; list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head, - u.phd.phd_entry) { + u.phd.phd_entry) { /* unlink open-not-created llogs */ list_del_init(&loghandle->u.phd.phd_entry); llog_close(env, loghandle); diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c index 9bc51998c..826623f52 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c @@ -88,7 +88,8 @@ int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt) spin_unlock(&obd->obd_dev_lock); /* obd->obd_starting is needed for the case of cleanup - * in error case while obd is starting up. */ + * in error case while obd is starting up. + */ LASSERTF(obd->obd_starting == 1 || obd->obd_stopping == 1 || obd->obd_set_up == 0, "wrong obd state: %d/%d/%d\n", !!obd->obd_starting, @@ -110,11 +111,8 @@ int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt) struct obd_llog_group *olg; int rc, idx; - LASSERT(ctxt != NULL); - LASSERT(ctxt != LP_POISON); - olg = ctxt->loc_olg; - LASSERT(olg != NULL); + LASSERT(olg); LASSERT(olg != LP_POISON); idx = ctxt->loc_idx; @@ -151,7 +149,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd, if (index < 0 || index >= LLOG_MAX_CTXTS) return -EINVAL; - LASSERT(olg != NULL); + LASSERT(olg); ctxt = llog_new_ctxt(obd); if (!ctxt) diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c index 3aa7393b2..967ba2e1b 100644 --- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c +++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c @@ -346,7 +346,6 @@ void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg) __swab32s(&lcfg->lcfg_buflens[i]); print_lustre_cfg(lcfg); - return; } EXPORT_SYMBOL(lustre_swab_lustre_cfg); @@ -387,7 +386,8 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size) * * 
Overwrite fields from the end first, so they are not * clobbered, and use memmove() instead of memcpy() because - * the source and target buffers overlap. bug 16771 */ + * the source and target buffers overlap. bug 16771 + */ createtime = cm32->cm_createtime; canceltime = cm32->cm_canceltime; memmove(marker->cm_comment, cm32->cm_comment, MTI_NAMELEN32); @@ -406,7 +406,5 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size) __swab64s(&marker->cm_createtime); __swab64s(&marker->cm_canceltime); } - - return; } EXPORT_SYMBOL(lustre_swab_cfg_marker); diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c index 6acc4a10f..13aca5b93 100644 --- a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c +++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c @@ -48,14 +48,15 @@ void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount) int smp_id; unsigned long flags = 0; - if (stats == NULL) + if (!stats) return; LASSERTF(0 <= idx && idx < stats->ls_num, "idx %d, ls_num %hu\n", idx, stats->ls_num); /* With per-client stats, statistics are allocated only for - * single CPU area, so the smp_id should be 0 always. */ + * single CPU area, so the smp_id should be 0 always. + */ smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags); if (smp_id < 0) return; @@ -96,14 +97,15 @@ void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount) int smp_id; unsigned long flags = 0; - if (stats == NULL) + if (!stats) return; LASSERTF(0 <= idx && idx < stats->ls_num, "idx %d, ls_num %hu\n", idx, stats->ls_num); /* With per-client stats, statistics are allocated only for - * single CPU area, so the smp_id should be 0 always. */ + * single CPU area, so the smp_id should be 0 always. 
+ */ smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags); if (smp_id < 0) return; diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c index 51fe15f5d..d93f42fee 100644 --- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c +++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c @@ -109,7 +109,7 @@ int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep) __u64 mask = 1; int i, ret = 0; - for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) { + for (i = 0; obd_connect_names[i]; i++, mask <<= 1) { if (flags & mask) ret += snprintf(page + ret, count - ret, "%s%s", ret ? sep : "", obd_connect_names[i]); @@ -149,10 +149,10 @@ int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val, } /* * Need to think these cases : - * 1. #echo x.00 > /proc/xxx output result : x - * 2. #echo x.0x > /proc/xxx output result : x.0x - * 3. #echo x.x0 > /proc/xxx output result : x.x - * 4. #echo x.xx > /proc/xxx output result : x.xx + * 1. #echo x.00 > /sys/xxx output result : x + * 2. #echo x.0x > /sys/xxx output result : x.0x + * 3. #echo x.x0 > /sys/xxx output result : x.x + * 4. #echo x.xx > /sys/xxx output result : x.xx * Only reserved 2 bits fraction. 
*/ for (i = 0; i < (5 - prtn); i++) @@ -199,7 +199,7 @@ int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count, if (pbuf == end) return -EINVAL; - if (end != NULL && *end == '.') { + if (end && *end == '.') { int temp_val, pow = 1; int i; @@ -247,7 +247,7 @@ struct dentry *ldebugfs_add_simple(struct dentry *root, struct dentry *entry; umode_t mode = 0; - if (root == NULL || name == NULL || fops == NULL) + if (!root || !name || !fops) return ERR_PTR(-EINVAL); if (fops->read) @@ -256,12 +256,12 @@ struct dentry *ldebugfs_add_simple(struct dentry *root, mode |= 0200; entry = debugfs_create_file(name, mode, root, data, fops); if (IS_ERR_OR_NULL(entry)) { - CERROR("LprocFS: No memory to create <debugfs> entry %s", name); + CERROR("LprocFS: No memory to create <debugfs> entry %s\n", name); return entry ?: ERR_PTR(-ENOMEM); } return entry; } -EXPORT_SYMBOL(ldebugfs_add_simple); +EXPORT_SYMBOL_GPL(ldebugfs_add_simple); static struct file_operations lprocfs_generic_fops = { }; @@ -272,7 +272,7 @@ int ldebugfs_add_vars(struct dentry *parent, if (IS_ERR_OR_NULL(parent) || IS_ERR_OR_NULL(list)) return -EINVAL; - while (list->name != NULL) { + while (list->name) { struct dentry *entry; umode_t mode = 0; @@ -294,14 +294,14 @@ int ldebugfs_add_vars(struct dentry *parent, } return 0; } -EXPORT_SYMBOL(ldebugfs_add_vars); +EXPORT_SYMBOL_GPL(ldebugfs_add_vars); void ldebugfs_remove(struct dentry **entryp) { debugfs_remove_recursive(*entryp); *entryp = NULL; } -EXPORT_SYMBOL(ldebugfs_remove); +EXPORT_SYMBOL_GPL(ldebugfs_remove); struct dentry *ldebugfs_register(const char *name, struct dentry *parent, @@ -327,7 +327,7 @@ struct dentry *ldebugfs_register(const char *name, out: return entry; } -EXPORT_SYMBOL(ldebugfs_register); +EXPORT_SYMBOL_GPL(ldebugfs_register); /* Generic callbacks */ int lprocfs_rd_uint(struct seq_file *m, void *data) @@ -491,7 +491,7 @@ int lprocfs_rd_server_uuid(struct seq_file *m, void *data) char *imp_state_name = NULL; int rc; - 
LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -514,7 +514,7 @@ int lprocfs_rd_conn_uuid(struct seq_file *m, void *data) struct ptlrpc_connection *conn; int rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) @@ -543,7 +543,7 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx, memset(cnt, 0, sizeof(*cnt)); - if (stats == NULL) { + if (!stats) { /* set count to 1 to avoid divide-by-zero errs in callers */ cnt->lc_count = 1; return; @@ -554,7 +554,7 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx, num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_entry; i++) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; percpu_cntr = lprocfs_stats_counter_get(stats, i, idx); @@ -577,7 +577,7 @@ EXPORT_SYMBOL(lprocfs_stats_collect); #define flag2str(flag, first) \ do { \ if (imp->imp_##flag) \ - seq_printf(m, "%s" #flag, first ? "" : ", "); \ + seq_printf(m, "%s" #flag, first ? "" : ", "); \ } while (0) static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m) { @@ -604,16 +604,16 @@ static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep int i; bool first = true; - for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) { + for (i = 0; obd_connect_names[i]; i++, mask <<= 1) { if (flags & mask) { seq_printf(m, "%s%s", - first ? sep : "", obd_connect_names[i]); + first ? sep : "", obd_connect_names[i]); first = false; } } if (flags & ~(mask - 1)) seq_printf(m, "%sunknown flags %#llx", - first ? sep : "", flags & ~(mask - 1)); + first ? 
sep : "", flags & ~(mask - 1)); } int lprocfs_rd_import(struct seq_file *m, void *data) @@ -629,7 +629,7 @@ int lprocfs_rd_import(struct seq_file *m, void *data) int rw = 0; int rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -637,26 +637,27 @@ int lprocfs_rd_import(struct seq_file *m, void *data) imp = obd->u.cli.cl_import; seq_printf(m, - "import:\n" - " name: %s\n" - " target: %s\n" - " state: %s\n" - " instance: %u\n" - " connect_flags: [", - obd->obd_name, - obd2cli_tgt(obd), - ptlrpc_import_state_name(imp->imp_state), - imp->imp_connect_data.ocd_instance); - obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, ", "); + "import:\n" + " name: %s\n" + " target: %s\n" + " state: %s\n" + " instance: %u\n" + " connect_flags: [ ", + obd->obd_name, + obd2cli_tgt(obd), + ptlrpc_import_state_name(imp->imp_state), + imp->imp_connect_data.ocd_instance); + obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, + ", "); seq_printf(m, - "]\n" - " import_flags: ["); + " ]\n" + " import_flags: [ "); obd_import_flags2str(imp, m); seq_printf(m, - "]\n" - " connection:\n" - " failover_nids: ["); + " ]\n" + " connection:\n" + " failover_nids: [ "); spin_lock(&imp->imp_lock); j = 0; list_for_each_entry(conn, &imp->imp_conn_list, oic_item) { @@ -665,24 +666,24 @@ int lprocfs_rd_import(struct seq_file *m, void *data) seq_printf(m, "%s%s", j ? 
", " : "", nidstr); j++; } - if (imp->imp_connection != NULL) + if (imp->imp_connection) libcfs_nid2str_r(imp->imp_connection->c_peer.nid, nidstr, sizeof(nidstr)); else strncpy(nidstr, "<none>", sizeof(nidstr)); seq_printf(m, - "]\n" - " current_connection: %s\n" - " connection_attempts: %u\n" - " generation: %u\n" - " in-progress_invalidations: %u\n", - nidstr, - imp->imp_conn_cnt, - imp->imp_generation, - atomic_read(&imp->imp_inval_count)); + " ]\n" + " current_connection: %s\n" + " connection_attempts: %u\n" + " generation: %u\n" + " in-progress_invalidations: %u\n", + nidstr, + imp->imp_conn_cnt, + imp->imp_generation, + atomic_read(&imp->imp_inval_count)); spin_unlock(&imp->imp_lock); - if (obd->obd_svc_stats == NULL) + if (!obd->obd_svc_stats) goto out_climp; header = &obd->obd_svc_stats->ls_cnt_header[PTLRPC_REQWAIT_CNTR]; @@ -696,15 +697,15 @@ int lprocfs_rd_import(struct seq_file *m, void *data) } else ret.lc_sum = 0; seq_printf(m, - " rpcs:\n" - " inflight: %u\n" - " unregistering: %u\n" - " timeouts: %u\n" - " avg_waittime: %llu %s\n", - atomic_read(&imp->imp_inflight), - atomic_read(&imp->imp_unregistering), - atomic_read(&imp->imp_timeouts), - ret.lc_sum, header->lc_units); + " rpcs:\n" + " inflight: %u\n" + " unregistering: %u\n" + " timeouts: %u\n" + " avg_waittime: %llu %s\n", + atomic_read(&imp->imp_inflight), + atomic_read(&imp->imp_unregistering), + atomic_read(&imp->imp_timeouts), + ret.lc_sum, header->lc_units); k = 0; for (j = 0; j < IMP_AT_MAX_PORTALS; j++) { @@ -714,20 +715,20 @@ int lprocfs_rd_import(struct seq_file *m, void *data) at_get(&imp->imp_at.iat_service_estimate[j])); } seq_printf(m, - " service_estimates:\n" - " services: %u sec\n" - " network: %u sec\n", - k, - at_get(&imp->imp_at.iat_net_latency)); + " service_estimates:\n" + " services: %u sec\n" + " network: %u sec\n", + k, + at_get(&imp->imp_at.iat_net_latency)); seq_printf(m, - " transactions:\n" - " last_replay: %llu\n" - " peer_committed: %llu\n" - " last_checked: 
%llu\n", - imp->imp_last_replay_transno, - imp->imp_peer_committed_transno, - imp->imp_last_transno_checked); + " transactions:\n" + " last_replay: %llu\n" + " peer_committed: %llu\n" + " last_checked: %llu\n", + imp->imp_last_replay_transno, + imp->imp_peer_committed_transno, + imp->imp_last_transno_checked); /* avg data rates */ for (rw = 0; rw <= 1; rw++) { @@ -741,10 +742,10 @@ int lprocfs_rd_import(struct seq_file *m, void *data) do_div(sum, ret.lc_count); ret.lc_sum = sum; seq_printf(m, - " %s_data_averages:\n" - " bytes_per_rpc: %llu\n", - rw ? "write" : "read", - ret.lc_sum); + " %s_data_averages:\n" + " bytes_per_rpc: %llu\n", + rw ? "write" : "read", + ret.lc_sum); } k = (int)ret.lc_sum; j = opcode_offset(OST_READ + rw) + EXTRA_MAX_OPCODES; @@ -757,13 +758,13 @@ int lprocfs_rd_import(struct seq_file *m, void *data) do_div(sum, ret.lc_count); ret.lc_sum = sum; seq_printf(m, - " %s_per_rpc: %llu\n", - header->lc_units, ret.lc_sum); + " %s_per_rpc: %llu\n", + header->lc_units, ret.lc_sum); j = (int)ret.lc_sum; if (j > 0) seq_printf(m, - " MB_per_sec: %u.%.02u\n", - k / j, (100 * k / j) % 100); + " MB_per_sec: %u.%.02u\n", + k / j, (100 * k / j) % 100); } } @@ -779,7 +780,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data) struct obd_import *imp; int j, k, rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -787,7 +788,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data) imp = obd->u.cli.cl_import; seq_printf(m, "current_state: %s\n", - ptlrpc_import_state_name(imp->imp_state)); + ptlrpc_import_state_name(imp->imp_state)); seq_printf(m, "state_history:\n"); k = imp->imp_state_hist_idx; for (j = 0; j < IMP_STATE_HIST_LEN; j++) { @@ -795,7 +796,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data) &imp->imp_state_hist[(k + j) % IMP_STATE_HIST_LEN]; if (ish->ish_state == 0) continue; - seq_printf(m, " - [%lld, %s]\n", (s64)ish->ish_time, + seq_printf(m, " - [ %lld, %s ]\n", (s64)ish->ish_time, 
ptlrpc_import_state_name(ish->ish_state)); } @@ -825,7 +826,7 @@ int lprocfs_rd_timeouts(struct seq_file *m, void *data) struct dhms ts; int i, rc; - LASSERT(obd != NULL); + LASSERT(obd); rc = lprocfs_climp_check(obd); if (rc) return rc; @@ -942,7 +943,7 @@ int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list, return rc; } -EXPORT_SYMBOL(lprocfs_obd_setup); +EXPORT_SYMBOL_GPL(lprocfs_obd_setup); int lprocfs_obd_cleanup(struct obd_device *obd) { @@ -957,7 +958,7 @@ int lprocfs_obd_cleanup(struct obd_device *obd) return 0; } -EXPORT_SYMBOL(lprocfs_obd_cleanup); +EXPORT_SYMBOL_GPL(lprocfs_obd_cleanup); int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid) { @@ -967,12 +968,12 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid) unsigned long flags = 0; int i; - LASSERT(stats->ls_percpu[cpuid] == NULL); + LASSERT(!stats->ls_percpu[cpuid]); LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0); percpusize = lprocfs_stats_counter_size(stats); LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize); - if (stats->ls_percpu[cpuid] != NULL) { + if (stats->ls_percpu[cpuid]) { rc = 0; if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) { if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) @@ -1017,7 +1018,7 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num, /* alloc percpu pointers for all possible cpu slots */ LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry])); - if (stats == NULL) + if (!stats) return NULL; stats->ls_num = num; @@ -1027,14 +1028,14 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num, /* alloc num of counter headers */ LIBCFS_ALLOC(stats->ls_cnt_header, stats->ls_num * sizeof(struct lprocfs_counter_header)); - if (stats->ls_cnt_header == NULL) + if (!stats->ls_cnt_header) goto fail; if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) { /* contains only one set counters */ percpusize = lprocfs_stats_counter_size(stats); 
LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[0], percpusize); - if (stats->ls_percpu[0] == NULL) + if (!stats->ls_percpu[0]) goto fail; stats->ls_biggest_alloc_num = 1; } else if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) { @@ -1059,7 +1060,7 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh) unsigned int percpusize; unsigned int i; - if (stats == NULL || stats->ls_num == 0) + if (!stats || stats->ls_num == 0) return; *statsh = NULL; @@ -1070,9 +1071,9 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh) percpusize = lprocfs_stats_counter_size(stats); for (i = 0; i < num_entry; i++) - if (stats->ls_percpu[i] != NULL) + if (stats->ls_percpu[i]) LIBCFS_FREE(stats->ls_percpu[i], percpusize); - if (stats->ls_cnt_header != NULL) + if (stats->ls_cnt_header) LIBCFS_FREE(stats->ls_cnt_header, stats->ls_num * sizeof(struct lprocfs_counter_header)); LIBCFS_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_entry])); @@ -1090,7 +1091,7 @@ void lprocfs_clear_stats(struct lprocfs_stats *stats) num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_entry; i++) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; for (j = 0; j < stats->ls_num; j++) { percpu_cntr = lprocfs_stats_counter_get(stats, i, j); @@ -1196,7 +1197,7 @@ static int lprocfs_stats_seq_open(struct inode *inode, struct file *file) return 0; } -struct file_operations lprocfs_stats_seq_fops = { +static const struct file_operations lprocfs_stats_seq_fops = { .owner = THIS_MODULE, .open = lprocfs_stats_seq_open, .read = seq_read, @@ -1206,7 +1207,7 @@ struct file_operations lprocfs_stats_seq_fops = { }; int ldebugfs_register_stats(struct dentry *parent, const char *name, - struct lprocfs_stats *stats) + struct lprocfs_stats *stats) { struct dentry *entry; @@ -1219,7 +1220,7 @@ int ldebugfs_register_stats(struct dentry *parent, const char *name, return 0; } -EXPORT_SYMBOL(ldebugfs_register_stats); +EXPORT_SYMBOL_GPL(ldebugfs_register_stats); void 
lprocfs_counter_init(struct lprocfs_stats *stats, int index, unsigned conf, const char *name, const char *units) @@ -1230,10 +1231,8 @@ void lprocfs_counter_init(struct lprocfs_stats *stats, int index, unsigned int i; unsigned int num_cpu; - LASSERT(stats != NULL); - header = &stats->ls_cnt_header[index]; - LASSERTF(header != NULL, "Failed to allocate stats header:[%d]%s/%s\n", + LASSERTF(header, "Failed to allocate stats header:[%d]%s/%s\n", index, name, units); header->lc_config = conf; @@ -1242,7 +1241,7 @@ void lprocfs_counter_init(struct lprocfs_stats *stats, int index, num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags); for (i = 0; i < num_cpu; ++i) { - if (stats->ls_percpu[i] == NULL) + if (!stats->ls_percpu[i]) continue; percpu_cntr = lprocfs_stats_counter_get(stats, i, index); percpu_cntr->lc_count = 0; @@ -1270,7 +1269,7 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc, { __s64 ret = 0; - if (lc == NULL || header == NULL) + if (!lc || !header) return 0; switch (field) { @@ -1319,8 +1318,8 @@ int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count, } EXPORT_SYMBOL(lprocfs_write_u64_helper); -int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count, - __u64 *val, int mult) +int lprocfs_write_frac_u64_helper(const char __user *buffer, + unsigned long count, __u64 *val, int mult) { char kernbuf[22], *end, *pbuf; __u64 whole, frac = 0, units; @@ -1360,17 +1359,19 @@ int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count, } units = 1; - switch (tolower(*end)) { - case 'p': - units <<= 10; - case 't': - units <<= 10; - case 'g': - units <<= 10; - case 'm': - units <<= 10; - case 'k': - units <<= 10; + if (end) { + switch (tolower(*end)) { + case 'p': + units <<= 10; + case 't': + units <<= 10; + case 'g': + units <<= 10; + case 'm': + units <<= 10; + case 'k': + units <<= 10; + } } /* Specified units override the multiplier */ if (units > 1) @@ -1412,7 +1413,7 @@ char 
*lprocfs_find_named_value(const char *buffer, const char *name, /* there is no strnstr() in rhel5 and ubuntu kernels */ val = lprocfs_strnstr(buffer, name, buflen); - if (val == NULL) + if (!val) return (char *)buffer; val += strlen(name); /* skip prefix */ @@ -1429,11 +1430,9 @@ char *lprocfs_find_named_value(const char *buffer, const char *name, } EXPORT_SYMBOL(lprocfs_find_named_value); -int ldebugfs_seq_create(struct dentry *parent, - const char *name, - umode_t mode, - const struct file_operations *seq_fops, - void *data) +int ldebugfs_seq_create(struct dentry *parent, const char *name, + umode_t mode, const struct file_operations *seq_fops, + void *data) { struct dentry *entry; @@ -1446,7 +1445,7 @@ int ldebugfs_seq_create(struct dentry *parent, return 0; } -EXPORT_SYMBOL(ldebugfs_seq_create); +EXPORT_SYMBOL_GPL(ldebugfs_seq_create); int ldebugfs_obd_seq_create(struct obd_device *dev, const char *name, @@ -1457,7 +1456,7 @@ int ldebugfs_obd_seq_create(struct obd_device *dev, return ldebugfs_seq_create(dev->obd_debugfs_entry, name, mode, seq_fops, data); } -EXPORT_SYMBOL(ldebugfs_obd_seq_create); +EXPORT_SYMBOL_GPL(ldebugfs_obd_seq_create); void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value) { diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c index ce248f407..978568ada 100644 --- a/drivers/staging/lustre/lustre/obdclass/lu_object.c +++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c @@ -86,13 +86,12 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) */ fid = lu_object_fid(o); if (fid_is_zero(fid)) { - LASSERT(top->loh_hash.next == NULL - && top->loh_hash.pprev == NULL); + LASSERT(!top->loh_hash.next && !top->loh_hash.pprev); LASSERT(list_empty(&top->loh_lru)); if (!atomic_dec_and_test(&top->loh_ref)) return; list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { - if (o->lo_ops->loo_object_release != NULL) + if (o->lo_ops->loo_object_release) 
o->lo_ops->loo_object_release(env, o); } lu_object_free(env, orig); @@ -119,7 +118,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) * layers, and notify them that object is no longer busy. */ list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) { - if (o->lo_ops->loo_object_release != NULL) + if (o->lo_ops->loo_object_release) o->lo_ops->loo_object_release(env, o); } @@ -135,7 +134,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o) } /* - * If object is dying (will not be cached), removed it + * If object is dying (will not be cached), then removed it * from hash table and LRU. * * This is done with hash table and LRU lists locked. As the only @@ -210,7 +209,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env, * lu_object_header. */ top = dev->ld_ops->ldo_object_alloc(env, NULL, dev); - if (top == NULL) + if (!top) return ERR_PTR(-ENOMEM); if (IS_ERR(top)) return top; @@ -245,7 +244,7 @@ next: } while (!clean); list_for_each_entry_reverse(scan, layers, lo_linkage) { - if (scan->lo_ops->loo_object_start != NULL) { + if (scan->lo_ops->loo_object_start) { result = scan->lo_ops->loo_object_start(env, scan); if (result != 0) { lu_object_free(env, top); @@ -276,7 +275,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) * First call ->loo_object_delete() method to release all resources. 
*/ list_for_each_entry_reverse(scan, layers, lo_linkage) { - if (scan->lo_ops->loo_object_delete != NULL) + if (scan->lo_ops->loo_object_delete) scan->lo_ops->loo_object_delete(env, scan); } @@ -296,7 +295,6 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o) */ o = container_of0(splice.prev, struct lu_object, lo_linkage); list_del_init(&o->lo_linkage); - LASSERT(o->lo_ops->loo_object_free != NULL); o->lo_ops->loo_object_free(env, o); } @@ -451,7 +449,6 @@ int lu_cdebug_printer(const struct lu_env *env, va_start(args, format); key = lu_context_key_get(&env->le_ctx, &lu_global_key); - LASSERT(key != NULL); used = strlen(key->lck_area); complete = format[strlen(format) - 1] == '\n'; @@ -462,7 +459,7 @@ int lu_cdebug_printer(const struct lu_env *env, ARRAY_SIZE(key->lck_area) - used, format, args); if (complete) { if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys)) - libcfs_debug_msg(msgdata, "%s", key->lck_area); + libcfs_debug_msg(msgdata, "%s\n", key->lck_area); key->lck_area[0] = 0; } va_end(args); @@ -508,7 +505,7 @@ void lu_object_print(const struct lu_env *env, void *cookie, (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler, o->lo_dev->ld_type->ldt_name, o); - if (o->lo_ops->loo_object_print != NULL) + if (o->lo_ops->loo_object_print) (*o->lo_ops->loo_object_print)(env, cookie, printer, o); (*printer)(env, cookie, "\n"); @@ -535,9 +532,10 @@ static struct lu_object *htable_lookup(struct lu_site *s, *version = ver; bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd); /* cfs_hash_bd_peek_locked is a somehow "internal" function - * of cfs_hash, it doesn't add refcount on object. */ + * of cfs_hash, it doesn't add refcount on object. 
+ */ hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f); - if (hnode == NULL) { + if (!hnode) { lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS); return ERR_PTR(-ENOENT); } @@ -636,7 +634,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env, * If dying object is found during index search, add @waiter to the * site wait-queue and return ERR_PTR(-EAGAIN). */ - if (conf != NULL && conf->loc_flags & LOC_F_NEW) + if (conf && conf->loc_flags & LOC_F_NEW) return lu_object_new(env, dev, f, conf); s = dev->ld_site; @@ -715,7 +713,7 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env, top = lu_object_find(env, dev, f, conf); if (!IS_ERR(top)) { obj = lu_object_locate(top->lo_header, dev->ld_type); - if (obj == NULL) + if (!obj) lu_object_put(env, top); } else obj = top; @@ -842,8 +840,8 @@ static int lu_htable_order(void) #if BITS_PER_LONG == 32 /* limit hashtable size for lowmem systems to low RAM */ - if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) - cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; + if (cache_size > 1 << (30 - PAGE_SHIFT)) + cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4; #endif /* clear off unreasonable cache setting. 
*/ @@ -855,7 +853,7 @@ static int lu_htable_order(void) lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; } cache_size = cache_size / 100 * lu_cache_percent * - (PAGE_CACHE_SIZE / 1024); + (PAGE_SIZE / 1024); for (bits = 1; (1 << bits) < cache_size; ++bits) { ; @@ -966,11 +964,11 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) CFS_HASH_NO_ITEMREF | CFS_HASH_DEPTH | CFS_HASH_ASSERT_EMPTY); - if (s->ls_obj_hash != NULL) + if (s->ls_obj_hash) break; } - if (s->ls_obj_hash == NULL) { + if (!s->ls_obj_hash) { CERROR("failed to create lu_site hash with bits: %d\n", bits); return -ENOMEM; } @@ -982,7 +980,7 @@ int lu_site_init(struct lu_site *s, struct lu_device *top) } s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0); - if (s->ls_stats == NULL) { + if (!s->ls_stats) { cfs_hash_putref(s->ls_obj_hash); s->ls_obj_hash = NULL; return -ENOMEM; @@ -1031,19 +1029,19 @@ void lu_site_fini(struct lu_site *s) list_del_init(&s->ls_linkage); mutex_unlock(&lu_sites_guard); - if (s->ls_obj_hash != NULL) { + if (s->ls_obj_hash) { cfs_hash_putref(s->ls_obj_hash); s->ls_obj_hash = NULL; } - if (s->ls_top_dev != NULL) { + if (s->ls_top_dev) { s->ls_top_dev->ld_site = NULL; lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s); lu_device_put(s->ls_top_dev); s->ls_top_dev = NULL; } - if (s->ls_stats != NULL) + if (s->ls_stats) lprocfs_free_stats(&s->ls_stats); } EXPORT_SYMBOL(lu_site_fini); @@ -1088,7 +1086,7 @@ EXPORT_SYMBOL(lu_device_put); */ int lu_device_init(struct lu_device *d, struct lu_device_type *t) { - if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL) + if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start) t->ldt_ops->ldto_start(t); memset(d, 0, sizeof(*d)); atomic_set(&d->ld_ref, 0); @@ -1107,7 +1105,7 @@ void lu_device_fini(struct lu_device *d) struct lu_device_type *t; t = d->ld_type; - if (d->ld_obd != NULL) { + if (d->ld_obd) { d->ld_obd->obd_lu_dev = NULL; d->ld_obd = NULL; } @@ -1116,7 +1114,7 @@ void lu_device_fini(struct lu_device *d) 
LASSERTF(atomic_read(&d->ld_ref) == 0, "Refcount is %u\n", atomic_read(&d->ld_ref)); LASSERT(t->ldt_device_nr > 0); - if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL) + if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop) t->ldt_ops->ldto_stop(t); } EXPORT_SYMBOL(lu_device_fini); @@ -1148,7 +1146,7 @@ void lu_object_fini(struct lu_object *o) LASSERT(list_empty(&o->lo_linkage)); - if (dev != NULL) { + if (dev) { lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref, "lu_object", o); lu_device_put(dev); @@ -1239,7 +1237,7 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top) struct lu_device *next; lu_site_purge(env, site, ~0); - for (scan = top; scan != NULL; scan = next) { + for (scan = top; scan; scan = next) { next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan); lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init); lu_device_put(scan); @@ -1248,13 +1246,13 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top) /* purge again. */ lu_site_purge(env, site, ~0); - for (scan = top; scan != NULL; scan = next) { + for (scan = top; scan; scan = next) { const struct lu_device_type *ldt = scan->ld_type; struct obd_type *type; next = ldt->ldt_ops->ldto_device_free(env, scan); type = ldt->ldt_obd_type; - if (type != NULL) { + if (type) { type->typ_refcnt--; class_put_type(type); } @@ -1289,14 +1287,14 @@ int lu_context_key_register(struct lu_context_key *key) int result; int i; - LASSERT(key->lct_init != NULL); - LASSERT(key->lct_fini != NULL); + LASSERT(key->lct_init); + LASSERT(key->lct_fini); LASSERT(key->lct_tags != 0); result = -ENFILE; spin_lock(&lu_keys_guard); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { - if (lu_keys[i] == NULL) { + if (!lu_keys[i]) { key->lct_index = i; atomic_set(&key->lct_used, 1); lu_keys[i] = key; @@ -1313,12 +1311,10 @@ EXPORT_SYMBOL(lu_context_key_register); static void key_fini(struct lu_context *ctx, int index) { - if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) { + if 
(ctx->lc_value && ctx->lc_value[index]) { struct lu_context_key *key; key = lu_keys[index]; - LASSERT(key != NULL); - LASSERT(key->lct_fini != NULL); LASSERT(atomic_read(&key->lct_used) > 1); key->lct_fini(ctx, key, ctx->lc_value[index]); @@ -1376,7 +1372,7 @@ int lu_context_key_register_many(struct lu_context_key *k, ...) if (result) break; key = va_arg(args, struct lu_context_key *); - } while (key != NULL); + } while (key); va_end(args); if (result != 0) { @@ -1404,7 +1400,7 @@ void lu_context_key_degister_many(struct lu_context_key *k, ...) do { lu_context_key_degister(k); k = va_arg(args, struct lu_context_key*); - } while (k != NULL); + } while (k); va_end(args); } EXPORT_SYMBOL(lu_context_key_degister_many); @@ -1420,7 +1416,7 @@ void lu_context_key_revive_many(struct lu_context_key *k, ...) do { lu_context_key_revive(k); k = va_arg(args, struct lu_context_key*); - } while (k != NULL); + } while (k); va_end(args); } EXPORT_SYMBOL(lu_context_key_revive_many); @@ -1436,7 +1432,7 @@ void lu_context_key_quiesce_many(struct lu_context_key *k, ...) do { lu_context_key_quiesce(k); k = va_arg(args, struct lu_context_key*); - } while (k != NULL); + } while (k); va_end(args); } EXPORT_SYMBOL(lu_context_key_quiesce_many); @@ -1477,8 +1473,7 @@ void lu_context_key_quiesce(struct lu_context_key *key) * XXX memory barrier has to go here. 
*/ spin_lock(&lu_keys_guard); - list_for_each_entry(ctx, &lu_context_remembered, - lc_remember) + list_for_each_entry(ctx, &lu_context_remembered, lc_remember) key_fini(ctx, key->lct_index); spin_unlock(&lu_keys_guard); ++key_set_version; @@ -1497,7 +1492,7 @@ static void keys_fini(struct lu_context *ctx) { int i; - if (ctx->lc_value == NULL) + if (!ctx->lc_value) return; for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) @@ -1511,12 +1506,12 @@ static int keys_fill(struct lu_context *ctx) { int i; - LINVRNT(ctx->lc_value != NULL); + LINVRNT(ctx->lc_value); for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { struct lu_context_key *key; key = lu_keys[i]; - if (ctx->lc_value[i] == NULL && key != NULL && + if (!ctx->lc_value[i] && key && (key->lct_tags & ctx->lc_tags) && /* * Don't create values for a LCT_QUIESCENT key, as this @@ -1525,7 +1520,7 @@ static int keys_fill(struct lu_context *ctx) !(key->lct_tags & LCT_QUIESCENT)) { void *value; - LINVRNT(key->lct_init != NULL); + LINVRNT(key->lct_init); LINVRNT(key->lct_index == i); value = key->lct_init(ctx, key); @@ -1542,7 +1537,7 @@ static int keys_fill(struct lu_context *ctx) * value. 
*/ ctx->lc_value[i] = value; - if (key->lct_exit != NULL) + if (key->lct_exit) ctx->lc_tags |= LCT_HAS_EXIT; } ctx->lc_version = key_set_version; @@ -1554,7 +1549,7 @@ static int keys_init(struct lu_context *ctx) { ctx->lc_value = kcalloc(ARRAY_SIZE(lu_keys), sizeof(ctx->lc_value[0]), GFP_NOFS); - if (likely(ctx->lc_value != NULL)) + if (likely(ctx->lc_value)) return keys_fill(ctx); return -ENOMEM; @@ -1626,14 +1621,13 @@ void lu_context_exit(struct lu_context *ctx) LINVRNT(ctx->lc_state == LCS_ENTERED); ctx->lc_state = LCS_LEFT; - if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) { + if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) { for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) { - if (ctx->lc_value[i] != NULL) { + if (ctx->lc_value[i]) { struct lu_context_key *key; key = lu_keys[i]; - LASSERT(key != NULL); - if (key->lct_exit != NULL) + if (key->lct_exit) key->lct_exit(ctx, key, ctx->lc_value[i]); } @@ -1688,7 +1682,7 @@ int lu_env_refill(struct lu_env *env) int result; result = lu_context_refill(&env->le_ctx); - if (result == 0 && env->le_ses != NULL) + if (result == 0 && env->le_ses) result = lu_context_refill(env->le_ses); return result; } @@ -1922,11 +1916,11 @@ int lu_kmem_init(struct lu_kmem_descr *caches) int result; struct lu_kmem_descr *iter = caches; - for (result = 0; iter->ckd_cache != NULL; ++iter) { + for (result = 0; iter->ckd_cache; ++iter) { *iter->ckd_cache = kmem_cache_create(iter->ckd_name, iter->ckd_size, 0, 0, NULL); - if (*iter->ckd_cache == NULL) { + if (!*iter->ckd_cache) { result = -ENOMEM; /* free all previously allocated caches */ lu_kmem_fini(caches); @@ -1943,7 +1937,7 @@ EXPORT_SYMBOL(lu_kmem_init); */ void lu_kmem_fini(struct lu_kmem_descr *caches) { - for (; caches->ckd_cache != NULL; ++caches) { + for (; caches->ckd_cache; ++caches) { kmem_cache_destroy(*caches->ckd_cache); *caches->ckd_cache = NULL; } diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c 
b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c index fb9147cc6..403ceea06 100644 --- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c +++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c @@ -65,7 +65,7 @@ void class_handle_hash(struct portals_handle *h, { struct handle_bucket *bucket; - LASSERT(h != NULL); + LASSERT(h); LASSERT(list_empty(&h->h_link)); /* @@ -140,10 +140,11 @@ void *class_handle2object(__u64 cookie) struct portals_handle *h; void *retval = NULL; - LASSERT(handle_hash != NULL); + LASSERT(handle_hash); /* Be careful when you want to change this code. See the - * rcu_read_lock() definition on top this file. - jxiong */ + * rcu_read_lock() definition on top this file. - jxiong + */ bucket = handle_hash + (cookie & HANDLE_HASH_MASK); rcu_read_lock(); @@ -170,7 +171,7 @@ void class_handle_free_cb(struct rcu_head *rcu) struct portals_handle *h = RCU2HANDLE(rcu); void *ptr = (void *)(unsigned long)h->h_cookie; - if (h->h_ops->hop_free != NULL) + if (h->h_ops->hop_free) h->h_ops->hop_free(ptr, h->h_size); else kfree(ptr); @@ -183,11 +184,11 @@ int class_handle_init(void) struct timespec64 ts; int seed[2]; - LASSERT(handle_hash == NULL); + LASSERT(!handle_hash); handle_hash = libcfs_kvzalloc(sizeof(*bucket) * HANDLE_HASH_SIZE, GFP_NOFS); - if (handle_hash == NULL) + if (!handle_hash) return -ENOMEM; spin_lock_init(&handle_base_lock); @@ -234,7 +235,7 @@ void class_handle_cleanup(void) { int count; - LASSERT(handle_hash != NULL); + LASSERT(handle_hash); count = cleanup_all_handles(); diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c index d6184f821..5f812460b 100644 --- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c +++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c @@ -93,7 +93,8 @@ int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index) EXPORT_SYMBOL(lustre_uuid_to_peer); /* Add a nid to a niduuid. 
Multiple nids can be added to a single uuid; - LNET will choose the best one. */ + * LNET will choose the best one. + */ int class_add_uuid(const char *uuid, __u64 nid) { struct uuid_nid_data *data, *entry; @@ -149,9 +150,10 @@ int class_del_uuid(const char *uuid) { LIST_HEAD(deathrow); struct uuid_nid_data *data; + struct uuid_nid_data *temp; spin_lock(&g_uuid_lock); - if (uuid != NULL) { + if (uuid) { struct obd_uuid tmp; obd_str2uuid(&tmp, uuid); @@ -165,14 +167,12 @@ int class_del_uuid(const char *uuid) list_splice_init(&g_uuid_list, &deathrow); spin_unlock(&g_uuid_lock); - if (uuid != NULL && list_empty(&deathrow)) { + if (uuid && list_empty(&deathrow)) { CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid); return -EINVAL; } - while (!list_empty(&deathrow)) { - data = list_entry(deathrow.next, struct uuid_nid_data, - un_list); + list_for_each_entry_safe(data, temp, &deathrow, un_list) { list_del(&data->un_list); CDEBUG(D_INFO, "del uuid %s %s/%d\n", diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c index 49cdc6479..5395e994d 100644 --- a/drivers/staging/lustre/lustre/obdclass/obd_config.c +++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c @@ -71,8 +71,9 @@ int class_find_param(char *buf, char *key, char **valp) EXPORT_SYMBOL(class_find_param); /* returns 0 if this is the first key in the buffer, else 1. - valp points to first char after key. */ -static int class_match_param(char *buf, char *key, char **valp) + * valp points to first char after key. 
+ */ +static int class_match_param(char *buf, const char *key, char **valp) { if (!buf) return 1; @@ -114,9 +115,10 @@ enum { }; /* 0 is good nid, - 1 not found - < 0 error - endh is set to next separator */ + * 1 not found + * < 0 error + * endh is set to next separator + */ static int class_parse_value(char *buf, int opc, void *value, char **endh, int quiet) { @@ -210,7 +212,7 @@ static int class_attach(struct lustre_cfg *lcfg) name, typename, rc); goto out; } - LASSERTF(obd != NULL, "Cannot get obd device %s of type %s\n", + LASSERTF(obd, "Cannot get obd device %s of type %s\n", name, typename); LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08X != %08X\n", @@ -230,7 +232,8 @@ static int class_attach(struct lustre_cfg *lcfg) mutex_init(&obd->obd_dev_mutex); spin_lock_init(&obd->obd_osfs_lock); /* obd->obd_osfs_age must be set to a value in the distant - * past to guarantee a fresh statfs is fetched on mount. */ + * past to guarantee a fresh statfs is fetched on mount. + */ obd->obd_osfs_age = cfs_time_shift_64(-1000); /* XXX belongs in setup not attach */ @@ -272,9 +275,9 @@ static int class_attach(struct lustre_cfg *lcfg) obd->obd_minor, typename, atomic_read(&obd->obd_refcount)); return 0; out: - if (obd != NULL) { + if (obd) class_release_dev(obd); - } + return rc; } @@ -286,7 +289,7 @@ static int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg) int err = 0; struct obd_export *exp; - LASSERT(obd != NULL); + LASSERT(obd); LASSERTF(obd == class_num2obd(obd->obd_minor), "obd %p != obd_devs[%d] %p\n", obd, obd->obd_minor, class_num2obd(obd->obd_minor)); @@ -315,7 +318,8 @@ static int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg) return -EEXIST; } /* just leave this on forever. I can't use obd_set_up here because - other fns check that status, and we're not actually set up yet. */ + * other fns check that status, and we're not actually set up yet. 
+ */ obd->obd_starting = 1; obd->obd_uuid_hash = NULL; spin_unlock(&obd->obd_dev_lock); @@ -503,7 +507,8 @@ void class_decref(struct obd_device *obd, const char *scope, const void *source) if ((refs == 1) && obd->obd_stopping) { /* All exports have been destroyed; there should - be no more in-progress ops by this point.*/ + * be no more in-progress ops by this point. + */ spin_lock(&obd->obd_self_export->exp_lock); obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd); @@ -723,7 +728,8 @@ static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg) } /* We can't call ll_process_config or lquota_process_config directly because - * it lives in a module that must be loaded after this one. */ + * it lives in a module that must be loaded after this one. + */ static int (*client_process_config)(struct lustre_cfg *lcfg); static int (*quota_process_config)(struct lustre_cfg *lcfg); @@ -812,7 +818,8 @@ int class_process_config(struct lustre_cfg *lcfg) lustre_cfg_string(lcfg, 2), lustre_cfg_string(lcfg, 3)); /* set these mount options somewhere, so ll_fill_super - * can find them. */ + * can find them. + */ err = class_add_profile(LUSTRE_CFG_BUFLEN(lcfg, 1), lustre_cfg_string(lcfg, 1), LUSTRE_CFG_BUFLEN(lcfg, 2), @@ -988,8 +995,9 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, fakefile.private_data = &fake_seqfile; fake_seqfile.private = data; /* e.g. 
tunefs.lustre --param mdt.group_upcall=foo /r/tmp/lustre-mdt - or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar - or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 */ + * or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar + * or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 + */ for (i = 1; i < lcfg->lcfg_bufcount; i++) { key = lustre_cfg_buf(lcfg, i); /* Strip off prefix */ @@ -1008,7 +1016,7 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, /* Search proc entries */ while (lvars[j].name) { var = &lvars[j]; - if (class_match_param(key, (char *)var->name, NULL) == 0 + if (!class_match_param(key, var->name, NULL) && keylen == strlen(var->name)) { matched++; rc = -EROFS; @@ -1027,9 +1035,10 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, } if (!matched) { /* If the prefix doesn't match, return error so we - can pass it down the stack */ + * can pass it down the stack + */ if (strnchr(key, keylen, '.')) - return -ENOSYS; + return -ENOSYS; CERROR("%s: unknown param %s\n", (char *)lustre_cfg_string(lcfg, 0), key); /* rc = -EINVAL; continue parsing other params */ @@ -1040,9 +1049,9 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars, rc = 0; } else { CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n", - lustre_cfg_string(lcfg, 0), - (int)strlen(prefix) - 1, prefix, - (int)(sval - key - 1), key, sval); + lustre_cfg_string(lcfg, 0), + (int)strlen(prefix) - 1, prefix, + (int)(sval - key - 1), key, sval); } } @@ -1116,7 +1125,8 @@ int class_config_llog_handler(const struct lu_env *env, } } /* A config command without a start marker before it is - illegal (post 146) */ + * illegal (post 146) + */ if (!(clli->cfg_flags & CFG_F_COMPAT146) && !(clli->cfg_flags & CFG_F_MARKER) && (lcfg->lcfg_command != LCFG_MARKER)) { @@ -1182,8 +1192,9 @@ int class_config_llog_handler(const struct lu_env *env, } /* we override the llog's uuid for clients, to insure they - are unique */ - if (clli && 
clli->cfg_instance != NULL && + * are unique + */ + if (clli && clli->cfg_instance && lcfg->lcfg_command == LCFG_ATTACH) { lustre_cfg_bufs_set_string(&bufs, 2, clli->cfg_uuid.uuid); @@ -1211,7 +1222,8 @@ int class_config_llog_handler(const struct lu_env *env, lcfg_new->lcfg_flags = lcfg->lcfg_flags; /* XXX Hack to try to remain binary compatible with - * pre-newconfig logs */ + * pre-newconfig logs + */ if (lcfg->lcfg_nal != 0 && /* pre-newconfig log? */ (lcfg->lcfg_nid >> 32) == 0) { __u32 addr = (__u32)(lcfg->lcfg_nid & 0xffffffff); @@ -1270,7 +1282,7 @@ int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt, if (cfg) { cd.lpcd_first_idx = cfg->cfg_last_idx; callback = cfg->cfg_callback; - LASSERT(callback != NULL); + LASSERT(callback); } else { callback = class_config_llog_handler; } diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c index b5aa8168d..d3e28a389 100644 --- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c +++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c @@ -72,7 +72,7 @@ static void (*kill_super_cb)(struct super_block *sb); * this log, and is added to the mgc's list of logs to follow. */ int lustre_process_log(struct super_block *sb, char *logname, - struct config_llog_instance *cfg) + struct config_llog_instance *cfg) { struct lustre_cfg *lcfg; struct lustre_cfg_bufs *bufs; @@ -114,7 +114,7 @@ EXPORT_SYMBOL(lustre_process_log); /* Stop watching this config log for updates */ int lustre_end_log(struct super_block *sb, char *logname, - struct config_llog_instance *cfg) + struct config_llog_instance *cfg) { struct lustre_cfg *lcfg; struct lustre_cfg_bufs bufs; @@ -283,9 +283,10 @@ int lustre_start_mgc(struct super_block *sb) recov_bk = 0; /* Try all connections, but only once (again). - We don't want to block another target from starting - (using its local copy of the log), but we do want to connect - if at all possible. 
*/ + * We don't want to block another target from starting + * (using its local copy of the log), but we do want to connect + * if at all possible. + */ recov_bk++; CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname, recov_bk); @@ -339,7 +340,7 @@ int lustre_start_mgc(struct super_block *sb) /* Add any failover MGS nids */ i = 1; while (ptr && ((*ptr == ':' || - class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) { + class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) { /* New failover node */ sprintf(niduuid, "%s_%x", mgcname, i); j = 0; @@ -375,7 +376,8 @@ int lustre_start_mgc(struct super_block *sb) goto out_free; /* Keep a refcount of servers/clients who started with "mount", - so we know when we can get rid of the mgc. */ + * so we know when we can get rid of the mgc. + */ atomic_set(&obd->u.cli.cl_mgc_refcount, 1); /* We connect to the MGS at setup, and don't disconnect until cleanup */ @@ -403,7 +405,8 @@ int lustre_start_mgc(struct super_block *sb) out: /* Keep the mgc info in the sb. Note that many lsi's can point - to the same mgc.*/ + * to the same mgc. + */ lsi->lsi_mgc = obd; out_free: mutex_unlock(&mgc_start_lock); @@ -432,7 +435,8 @@ static int lustre_stop_mgc(struct super_block *sb) LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0); if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) { /* This is not fatal, every client that stops - will call in here. */ + * will call in here. + */ CDEBUG(D_MOUNT, "mgc still has %d references.\n", atomic_read(&obd->u.cli.cl_mgc_refcount)); rc = -EBUSY; @@ -440,19 +444,20 @@ static int lustre_stop_mgc(struct super_block *sb) } /* The MGC has no recoverable data in any case. 
- * force shutdown set in umount_begin */ + * force shutdown set in umount_begin + */ obd->obd_no_recov = 1; if (obd->u.cli.cl_mgc_mgsexp) { /* An error is not fatal, if we are unable to send the - disconnect mgs ping evictor cleans up the export */ + * disconnect mgs ping evictor cleans up the export + */ rc = obd_disconnect(obd->u.cli.cl_mgc_mgsexp); if (rc) CDEBUG(D_MOUNT, "disconnect failed %d\n", rc); } - /* Save the obdname for cleaning the nid uuids, which are - obdname_XX */ + /* Save the obdname for cleaning the nid uuids, which are obdname_XX */ len = strlen(obd->obd_name) + 6; niduuid = kzalloc(len, GFP_NOFS); if (niduuid) { @@ -518,13 +523,12 @@ static int lustre_free_lsi(struct super_block *sb) { struct lustre_sb_info *lsi = s2lsi(sb); - LASSERT(lsi != NULL); CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi); /* someone didn't call server_put_mount. */ LASSERT(atomic_read(&lsi->lsi_mounts) == 0); - if (lsi->lsi_lmd != NULL) { + if (lsi->lsi_lmd) { kfree(lsi->lsi_lmd->lmd_dev); kfree(lsi->lsi_lmd->lmd_profile); kfree(lsi->lsi_lmd->lmd_mgssec); @@ -538,7 +542,7 @@ static int lustre_free_lsi(struct super_block *sb) kfree(lsi->lsi_lmd); } - LASSERT(lsi->lsi_llsbi == NULL); + LASSERT(!lsi->lsi_llsbi); kfree(lsi); s2lsi_nocast(sb) = NULL; @@ -546,13 +550,12 @@ static int lustre_free_lsi(struct super_block *sb) } /* The lsi has one reference for every server that is using the disk - - e.g. MDT, MGS, and potentially MGC */ + * e.g. 
MDT, MGS, and potentially MGC + */ static int lustre_put_lsi(struct super_block *sb) { struct lustre_sb_info *lsi = s2lsi(sb); - LASSERT(lsi != NULL); - CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts)); if (atomic_dec_and_test(&lsi->lsi_mounts)) { lustre_free_lsi(sb); @@ -588,21 +591,22 @@ static int server_name2fsname(const char *svname, char *fsname, if (dash == svname) return -EINVAL; - if (fsname != NULL) { + if (fsname) { strncpy(fsname, svname, dash - svname); fsname[dash - svname] = '\0'; } - if (endptr != NULL) + if (endptr) *endptr = dash; return 0; } /* Get the index from the obd name. - rc = server type, or - rc < 0 on error - if endptr isn't NULL it is set to end of name */ + * rc = server type, or + * rc < 0 on error + * if endptr isn't NULL it is set to end of name + */ static int server_name2index(const char *svname, __u32 *idx, const char **endptr) { @@ -627,18 +631,18 @@ static int server_name2index(const char *svname, __u32 *idx, dash += 3; if (strncmp(dash, "all", 3) == 0) { - if (endptr != NULL) + if (endptr) *endptr = dash + 3; return rc | LDD_F_SV_ALL; } index = simple_strtoul(dash, (char **)endptr, 16); - if (idx != NULL) + if (idx) *idx = index; /* Account for -mdc after index that is possible when specifying mdt */ - if (endptr != NULL && strncmp(LUSTRE_MDC_NAME, *endptr + 1, - sizeof(LUSTRE_MDC_NAME)-1) == 0) + if (endptr && strncmp(LUSTRE_MDC_NAME, *endptr + 1, + sizeof(LUSTRE_MDC_NAME) - 1) == 0) *endptr += sizeof(LUSTRE_MDC_NAME); return rc; @@ -661,7 +665,8 @@ int lustre_common_put_super(struct super_block *sb) return rc; } /* BUSY just means that there's some other obd that - needs the mgc. Let him clean it up. */ + * needs the mgc. Let him clean it up. + */ CDEBUG(D_MOUNT, "MGC still in use\n"); } /* Drop a ref to the mounted disk */ @@ -731,8 +736,9 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr) int rc = 0, devmax; /* The shortest an ost name can be is 8 chars: -OST0000. 
- We don't actually know the fsname at this time, so in fact - a user could specify any fsname. */ + * We don't actually know the fsname at this time, so in fact + * a user could specify any fsname. + */ devmax = strlen(ptr) / 8 + 1; /* temp storage until we figure out how many we have */ @@ -756,7 +762,8 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr) (uint)(s2-s1), s1, rc); s1 = s2; /* now we are pointing at ':' (next exclude) - or ',' (end of excludes) */ + * or ',' (end of excludes) + */ if (lmd->lmd_exclude_count >= devmax) break; } @@ -788,7 +795,7 @@ static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr) lmd->lmd_mgssec = NULL; tail = strchr(ptr, ','); - if (tail == NULL) + if (!tail) length = strlen(ptr); else length = tail - ptr; @@ -807,14 +814,14 @@ static int lmd_parse_string(char **handle, char *ptr) char *tail; int length; - if ((handle == NULL) || (ptr == NULL)) + if (!handle || !ptr) return -EINVAL; kfree(*handle); *handle = NULL; tail = strchr(ptr, ','); - if (tail == NULL) + if (!tail) length = strlen(ptr); else length = tail - ptr; @@ -847,14 +854,14 @@ static int lmd_parse_mgs(struct lustre_mount_data *lmd, char **ptr) return -EINVAL; } - if (lmd->lmd_mgs != NULL) + if (lmd->lmd_mgs) oldlen = strlen(lmd->lmd_mgs) + 1; mgsnid = kzalloc(oldlen + length + 1, GFP_NOFS); if (!mgsnid) return -ENOMEM; - if (lmd->lmd_mgs != NULL) { + if (lmd->lmd_mgs) { /* Multiple mgsnid= are taken to mean failover locations */ memcpy(mgsnid, lmd->lmd_mgs, oldlen); mgsnid[oldlen - 1] = ':'; @@ -909,10 +916,12 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) s1++; /* Client options are parsed in ll_options: eg. flock, - user_xattr, acl */ + * user_xattr, acl + */ /* Parse non-ldiskfs options here. 
Rather than modifying - ldiskfs, we just zero these out here */ + * ldiskfs, we just zero these out here + */ if (strncmp(s1, "abort_recov", 11) == 0) { lmd->lmd_flags |= LMD_FLG_ABORT_RECOV; clear++; @@ -940,7 +949,8 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) sizeof(PARAM_MGSNODE) - 1) == 0) { s2 = s1 + sizeof(PARAM_MGSNODE) - 1; /* Assume the next mount opt is the first - invalid nid we get to. */ + * invalid nid we get to. + */ rc = lmd_parse_mgs(lmd, &s2); if (rc) goto invalid; @@ -981,7 +991,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) size_t length, params_length; char *tail = strchr(s1 + 6, ','); - if (tail == NULL) + if (!tail) length = strlen(s1); else length = tail - s1; @@ -1000,18 +1010,20 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd) clear++; } /* Linux 2.4 doesn't pass the device, so we stuck it at the - end of the options. */ + * end of the options. + */ else if (strncmp(s1, "device=", 7) == 0) { devname = s1 + 7; /* terminate options right before device. device - must be the last one. */ + * must be the last one. + */ *s1 = '\0'; break; } /* Find next opt */ s2 = strchr(s1, ','); - if (s2 == NULL) { + if (!s2) { if (clear) *s1 = '\0'; break; @@ -1113,9 +1125,9 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent) if (lmd_is_client(lmd)) { CDEBUG(D_MOUNT, "Mounting client %s\n", lmd->lmd_profile); - if (client_fill_super == NULL) + if (!client_fill_super) request_module("lustre"); - if (client_fill_super == NULL) { + if (!client_fill_super) { LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n"); lustre_put_lsi(sb); rc = -ENODEV; @@ -1136,7 +1148,8 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent) } /* If error happens in fill_super() call, @lsi will be killed there. - * This is why we do not put it here. */ + * This is why we do not put it here. 
+ */ goto out; out: if (rc) { @@ -1151,7 +1164,8 @@ out: } /* We can't call ll_fill_super by name because it lives in a module that - must be loaded after this one. */ + * must be loaded after this one. + */ void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb, struct vfsmount *mnt)) { @@ -1166,8 +1180,8 @@ void lustre_register_kill_super_cb(void (*cfs)(struct super_block *sb)) EXPORT_SYMBOL(lustre_register_kill_super_cb); /***************** FS registration ******************/ -struct dentry *lustre_mount(struct file_system_type *fs_type, int flags, - const char *devname, void *data) +static struct dentry *lustre_mount(struct file_system_type *fs_type, int flags, + const char *devname, void *data) { struct lustre_mount_data2 lmd2 = { .lmd2_data = data, diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c index 75e1deadd..e6436cb4a 100644 --- a/drivers/staging/lustre/lustre/obdclass/obdo.c +++ b/drivers/staging/lustre/lustre/obdclass/obdo.c @@ -55,7 +55,8 @@ void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent) EXPORT_SYMBOL(obdo_set_parent_fid); /* WARNING: the file systems must take care not to tinker with - attributes they don't manage (such as blocks). */ + * attributes they don't manage (such as blocks). + */ void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid) { u32 newvalid = 0; @@ -122,7 +123,8 @@ void obdo_to_ioobj(struct obdo *oa, struct obd_ioobj *ioobj) ostid_set_seq_mdt0(&ioobj->ioo_oid); /* Since 2.4 this does not contain o_mode in the low 16 bits. 
- * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs */ + * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs + */ ioobj->ioo_max_brw = 0; } EXPORT_SYMBOL(obdo_to_ioobj); diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c index 7b53f7dd1..1e83669c2 100644 --- a/drivers/staging/lustre/lustre/obdecho/echo_client.c +++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c @@ -60,7 +60,6 @@ struct echo_device { struct cl_site ed_site_myself; struct cl_site *ed_site; struct lu_device *ed_next; - int ed_next_islov; }; struct echo_object { @@ -147,7 +146,7 @@ static inline struct echo_thread_info *echo_env_info(const struct lu_env *env) struct echo_thread_info *info; info = lu_context_key_get(&env->le_ctx, &echo_thread_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -162,9 +161,6 @@ struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c) static struct echo_object *cl_echo_object_find(struct echo_device *d, struct lov_stripe_md **lsm); static int cl_echo_object_put(struct echo_object *eco); -static int cl_echo_enqueue(struct echo_object *eco, u64 start, - u64 end, int mode, __u64 *cookie); -static int cl_echo_cancel(struct echo_device *d, __u64 cookie); static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, struct page **pages, int npages, int async); @@ -224,7 +220,7 @@ static struct lu_kmem_descr echo_caches[] = { * @{ */ static struct page *echo_page_vmpage(const struct lu_env *env, - const struct cl_page_slice *slice) + const struct cl_page_slice *slice) { return cl2echo_page(slice)->ep_vmpage; } @@ -271,7 +267,7 @@ static void echo_page_completion(const struct lu_env *env, const struct cl_page_slice *slice, int ioret) { - LASSERT(slice->cpl_page->cp_sync_io != NULL); + LASSERT(slice->cpl_page->cp_sync_io); } static void echo_page_fini(const struct lu_env *env, @@ -282,7 +278,7 @@ static void echo_page_fini(const struct lu_env 
*env, struct page *vmpage = ep->ep_vmpage; atomic_dec(&eco->eo_npages); - page_cache_release(vmpage); + put_page(vmpage); } static int echo_page_prep(const struct lu_env *env, @@ -371,13 +367,13 @@ static struct cl_lock_operations echo_lock_ops = { * @{ */ static int echo_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { struct echo_page *ep = cl_object_page_slice(obj, page); struct echo_object *eco = cl2echo_obj(obj); ep->ep_vmpage = vmpage; - page_cache_get(vmpage); + get_page(vmpage); mutex_init(&ep->ep_lock); cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops); atomic_inc(&eco->eo_npages); @@ -396,14 +392,14 @@ static int echo_lock_init(const struct lu_env *env, { struct echo_lock *el; - el = kmem_cache_alloc(echo_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (el != NULL) { + el = kmem_cache_zalloc(echo_lock_kmem, GFP_NOFS); + if (el) { cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops); el->el_object = cl2echo_obj(obj); INIT_LIST_HEAD(&el->el_chain); atomic_set(&el->el_refcount, 0); } - return el == NULL ? -ENOMEM : 0; + return !el ? 
-ENOMEM : 0; } static int echo_conf_set(const struct lu_env *env, struct cl_object *obj, @@ -443,7 +439,7 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj, under = ed->ed_next; below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under); - if (below == NULL) + if (!below) return -ENOMEM; lu_object_add(obj, below); } @@ -474,12 +470,12 @@ static int echo_alloc_memmd(struct echo_device *ed, int lsm_size; /* If export is lov/osc then use their obd method */ - if (ed->ed_next != NULL) + if (ed->ed_next) return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp); /* OFD has no unpackmd method, do everything here */ lsm_size = lov_stripe_md_size(1); - LASSERT(*lsmp == NULL); + LASSERT(!*lsmp); *lsmp = kzalloc(lsm_size, GFP_NOFS); if (!*lsmp) return -ENOMEM; @@ -502,12 +498,11 @@ static int echo_free_memmd(struct echo_device *ed, struct lov_stripe_md **lsmp) int lsm_size; /* If export is lov/osc then use their obd method */ - if (ed->ed_next != NULL) + if (ed->ed_next) return obd_free_memmd(ed->ed_ec->ec_exp, lsmp); /* OFD has no unpackmd method, do everything here */ lsm_size = lov_stripe_md_size(1); - LASSERT(*lsmp != NULL); kfree((*lsmp)->lsm_oinfo[0]); kfree(*lsmp); *lsmp = NULL; @@ -534,7 +529,7 @@ static void echo_object_free(const struct lu_env *env, struct lu_object *obj) } static int echo_object_print(const struct lu_env *env, void *cookie, - lu_printer_t p, const struct lu_object *o) + lu_printer_t p, const struct lu_object *o) { struct echo_object *obj = cl2echo_obj(lu2cl(o)); @@ -566,9 +561,9 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env, struct lu_object *obj = NULL; /* we're the top dev. 
*/ - LASSERT(hdr == NULL); - eco = kmem_cache_alloc(echo_object_kmem, GFP_NOFS | __GFP_ZERO); - if (eco != NULL) { + LASSERT(!hdr); + eco = kmem_cache_zalloc(echo_object_kmem, GFP_NOFS); + if (eco) { struct cl_object_header *hdr = &eco->eo_hdr; obj = &echo_obj2cl(eco)->co_lu; @@ -582,13 +577,13 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env, return obj; } -static struct lu_device_operations echo_device_lu_ops = { +static const struct lu_device_operations echo_device_lu_ops = { .ldo_object_alloc = echo_object_alloc, }; /** @} echo_lu_dev_ops */ -static struct cl_device_operations echo_device_cl_ops = { +static const struct cl_device_operations echo_device_cl_ops = { }; /** \defgroup echo_init Setup and teardown @@ -626,18 +621,18 @@ static void echo_site_fini(const struct lu_env *env, struct echo_device *ed) } static void *echo_thread_key_init(const struct lu_context *ctx, - struct lu_context_key *key) + struct lu_context_key *key) { struct echo_thread_info *info; - info = kmem_cache_alloc(echo_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(echo_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } static void echo_thread_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { struct echo_thread_info *info = data; @@ -645,7 +640,7 @@ static void echo_thread_key_fini(const struct lu_context *ctx, } static void echo_thread_key_exit(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { } @@ -657,18 +652,18 @@ static struct lu_context_key echo_thread_key = { }; static void *echo_session_key_init(const struct lu_context *ctx, - struct lu_context_key *key) + struct lu_context_key *key) { struct echo_session_info *session; - session = kmem_cache_alloc(echo_session_kmem, GFP_NOFS | __GFP_ZERO); - if (session == NULL) + session = 
kmem_cache_zalloc(echo_session_kmem, GFP_NOFS); + if (!session) session = ERR_PTR(-ENOMEM); return session; } static void echo_session_key_fini(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { struct echo_session_info *session = data; @@ -676,7 +671,7 @@ static void echo_session_key_fini(const struct lu_context *ctx, } static void echo_session_key_exit(const struct lu_context *ctx, - struct lu_context_key *key, void *data) + struct lu_context_key *key, void *data) { } @@ -719,13 +714,13 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env, cleanup = 2; obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd != NULL); - LASSERT(env != NULL); + LASSERT(obd); + LASSERT(env); tgt = class_name2obd(lustre_cfg_string(cfg, 1)); - if (tgt == NULL) { + if (!tgt) { CERROR("Can not find tgt device %s\n", - lustre_cfg_string(cfg, 1)); + lustre_cfg_string(cfg, 1)); rc = -ENODEV; goto out; } @@ -751,14 +746,14 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env, cleanup = 4; /* if echo client is to be stacked upon ost device, the next is - * NULL since ost is not a clio device so far */ - if (next != NULL && !lu_device_is_cl(next)) + * NULL since ost is not a clio device so far + */ + if (next && !lu_device_is_cl(next)) next = NULL; tgt_type_name = tgt->obd_type->typ_name; - if (next != NULL) { - LASSERT(next != NULL); - if (next->ld_site != NULL) { + if (next) { + if (next->ld_site) { rc = -EBUSY; goto out; } @@ -770,14 +765,6 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env, if (rc) goto out; - /* Tricky case, I have to determine the obd type since - * CLIO uses the different parameters to initialize - * objects for lov & osc. 
*/ - if (strcmp(tgt_type_name, LUSTRE_LOV_NAME) == 0) - ed->ed_next_islov = 1; - else - LASSERT(strcmp(tgt_type_name, - LUSTRE_OSC_NAME) == 0); } else { LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0); } @@ -809,7 +796,7 @@ out: } static int echo_device_init(const struct lu_env *env, struct lu_device *d, - const char *name, struct lu_device *next) + const char *name, struct lu_device *next) { LBUG(); return 0; @@ -963,20 +950,11 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d, info = echo_env_info(env); conf = &info->eti_conf; if (d->ed_next) { - if (!d->ed_next_islov) { - struct lov_oinfo *oinfo = lsm->lsm_oinfo[0]; - - LASSERT(oinfo != NULL); - oinfo->loi_oi = lsm->lsm_oi; - conf->eoc_cl.u.coc_oinfo = oinfo; - } else { - struct lustre_md *md; + struct lov_oinfo *oinfo = lsm->lsm_oinfo[0]; - md = &info->eti_md; - memset(md, 0, sizeof(*md)); - md->lsm = lsm; - conf->eoc_cl.u.coc_md = md; - } + LASSERT(oinfo); + oinfo->loi_oi = lsm->lsm_oi; + conf->eoc_cl.u.coc_oinfo = oinfo; } conf->eoc_md = lsmp; @@ -988,7 +966,8 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d, } /* In the function below, .hs_keycmp resolves to - * lu_obj_hop_keycmp() */ + * lu_obj_hop_keycmp() + */ /* coverity[overrun-buffer-val] */ obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl); if (IS_ERR(obj)) { @@ -1076,36 +1055,6 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco, return rc; } -static int cl_echo_enqueue(struct echo_object *eco, u64 start, u64 end, - int mode, __u64 *cookie) -{ - struct echo_thread_info *info; - struct lu_env *env; - struct cl_io *io; - int refcheck; - int result; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - info = echo_env_info(env); - io = &info->eti_io; - - io->ci_ignore_layout = 1; - result = cl_io_init(env, io, CIT_MISC, echo_obj2cl(eco)); - if (result < 0) - goto out; - LASSERT(result == 0); - - result = cl_echo_enqueue0(env, eco, start, end, 
mode, cookie, 0); - cl_io_fini(env, io); - -out: - cl_env_put(env, &refcheck); - return result; -} - static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed, __u64 cookie) { @@ -1114,7 +1063,6 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed, struct list_head *el; int found = 0, still_used = 0; - LASSERT(ec != NULL); spin_lock(&ec->ec_lock); list_for_each(el, &ec->ec_locks) { ecl = list_entry(el, struct echo_lock, el_chain); @@ -1137,22 +1085,6 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed, return 0; } -static int cl_echo_cancel(struct echo_device *ed, __u64 cookie) -{ - struct lu_env *env; - int refcheck; - int rc; - - env = cl_env_get(&refcheck); - if (IS_ERR(env)) - return PTR_ERR(env); - - rc = cl_echo_cancel0(env, ed, cookie); - - cl_env_put(env, &refcheck); - return rc; -} - static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io, enum cl_req_type unused, struct cl_2queue *queue) { @@ -1188,7 +1120,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, int i; LASSERT((offset & ~CFS_PAGE_MASK) == 0); - LASSERT(ed->ed_next != NULL); + LASSERT(ed->ed_next); env = cl_env_get(&refcheck); if (IS_ERR(env)) return PTR_ERR(env); @@ -1206,7 +1138,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, LASSERT(rc == 0); rc = cl_echo_enqueue0(env, eco, offset, - offset + npages * PAGE_CACHE_SIZE - 1, + offset + npages * PAGE_SIZE - 1, rw == READ ? LCK_PR : LCK_PW, &lh.cookie, CEF_NEVER); if (rc < 0) @@ -1234,7 +1166,8 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, cl_page_list_add(&queue->c2_qin, clp); /* drop the reference count for cl_page_find, so that the page - * will be freed in cl_2queue_fini. */ + * will be freed in cl_2queue_fini. 
+ */ cl_page_put(env, clp); cl_page_clip(env, clp, 0, page_size); @@ -1268,61 +1201,8 @@ out: static u64 last_object_id; -static int -echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob) -{ - struct lov_stripe_md *ulsm = _ulsm; - struct lov_oinfo **p; - int nob, i; - - nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]); - if (nob > ulsm_nob) - return -EINVAL; - - if (copy_to_user(ulsm, lsm, sizeof(*ulsm))) - return -EFAULT; - - for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) { - struct lov_oinfo __user *up; - if (get_user(up, ulsm->lsm_oinfo + i) || - copy_to_user(up, *p, sizeof(struct lov_oinfo))) - return -EFAULT; - } - return 0; -} - -static int -echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm, - struct lov_stripe_md __user *ulsm, int ulsm_nob) -{ - struct echo_client_obd *ec = ed->ed_ec; - struct lov_oinfo **p; - int i; - - if (ulsm_nob < sizeof(*lsm)) - return -EINVAL; - - if (copy_from_user(lsm, ulsm, sizeof(*lsm))) - return -EFAULT; - - if (lsm->lsm_stripe_count > ec->ec_nstripes || - lsm->lsm_magic != LOV_MAGIC || - (lsm->lsm_stripe_size & (~CFS_PAGE_MASK)) != 0 || - ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL)) - return -EINVAL; - - for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) { - struct lov_oinfo __user *up; - if (get_user(up, ulsm->lsm_oinfo + i) || - copy_from_user(*p, up, sizeof(struct lov_oinfo))) - return -EFAULT; - } - return 0; -} - static int echo_create_object(const struct lu_env *env, struct echo_device *ed, - int on_target, struct obdo *oa, void *ulsm, - int ulsm_nob, struct obd_trans_info *oti) + struct obdo *oa, struct obd_trans_info *oti) { struct echo_object *eco; struct echo_client_obd *ec = ed->ed_ec; @@ -1330,10 +1210,10 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed, int rc; int created = 0; - if ((oa->o_valid & OBD_MD_FLID) == 0 && /* no obj id */ - (on_target || /* set_stripe */ - 
ec->ec_nstripes != 0)) { /* LOV */ - CERROR("No valid oid\n"); + if (!(oa->o_valid & OBD_MD_FLID) || + !(oa->o_valid & OBD_MD_FLGROUP) || + !fid_seq_is_echo(ostid_seq(&oa->o_oi))) { + CERROR("invalid oid " DOSTID "\n", POSTID(&oa->o_oi)); return -EINVAL; } @@ -1343,52 +1223,18 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed, goto failed; } - if (ulsm != NULL) { - int i, idx; - - rc = echo_copyin_lsm(ed, lsm, ulsm, ulsm_nob); - if (rc != 0) - goto failed; - - if (lsm->lsm_stripe_count == 0) - lsm->lsm_stripe_count = ec->ec_nstripes; - - if (lsm->lsm_stripe_size == 0) - lsm->lsm_stripe_size = PAGE_CACHE_SIZE; - - idx = cfs_rand(); - - /* setup stripes: indices + default ids if required */ - for (i = 0; i < lsm->lsm_stripe_count; i++) { - if (ostid_id(&lsm->lsm_oinfo[i]->loi_oi) == 0) - lsm->lsm_oinfo[i]->loi_oi = lsm->lsm_oi; - - lsm->lsm_oinfo[i]->loi_ost_idx = - (idx + i) % ec->ec_nstripes; - } - } - - /* setup object ID here for !on_target and LOV hint */ - if (oa->o_valid & OBD_MD_FLID) { - LASSERT(oa->o_valid & OBD_MD_FLGROUP); - lsm->lsm_oi = oa->o_oi; - } + /* setup object ID here */ + lsm->lsm_oi = oa->o_oi; if (ostid_id(&lsm->lsm_oi) == 0) ostid_set_id(&lsm->lsm_oi, ++last_object_id); - rc = 0; - if (on_target) { - /* Only echo objects are allowed to be created */ - LASSERT((oa->o_valid & OBD_MD_FLGROUP) && - (ostid_seq(&oa->o_oi) == FID_SEQ_ECHO)); - rc = obd_create(env, ec->ec_exp, oa, &lsm, oti); - if (rc != 0) { - CERROR("Cannot create objects: rc = %d\n", rc); - goto failed; - } - created = 1; + rc = obd_create(env, ec->ec_exp, oa, &lsm, oti); + if (rc != 0) { + CERROR("Cannot create objects: rc = %d\n", rc); + goto failed; } + created = 1; /* See what object ID we were given */ oa->o_oi = lsm->lsm_oi; @@ -1447,42 +1293,16 @@ static int echo_get_object(struct echo_object **ecop, struct echo_device *ed, static void echo_put_object(struct echo_object *eco) { - if (cl_echo_object_put(eco)) - CERROR("echo client: drop an 
object failed"); -} - -static void -echo_get_stripe_off_id(struct lov_stripe_md *lsm, u64 *offp, u64 *idp) -{ - unsigned long stripe_count; - unsigned long stripe_size; - unsigned long width; - unsigned long woffset; - int stripe_index; - u64 offset; - - if (lsm->lsm_stripe_count <= 1) - return; - - offset = *offp; - stripe_size = lsm->lsm_stripe_size; - stripe_count = lsm->lsm_stripe_count; - - /* width = # bytes in all stripes */ - width = stripe_size * stripe_count; - - /* woffset = offset within a width; offset = whole number of widths */ - woffset = do_div(offset, width); - - stripe_index = woffset / stripe_size; + int rc; - *idp = ostid_id(&lsm->lsm_oinfo[stripe_index]->loi_oi); - *offp = offset * stripe_size + woffset % stripe_size; + rc = cl_echo_object_put(eco); + if (rc) + CERROR("%s: echo client drop an object failed: rc = %d\n", + eco->eo_dev->ed_ec->ec_exp->exp_obd->obd_name, rc); } static void -echo_client_page_debug_setup(struct lov_stripe_md *lsm, - struct page *page, int rw, u64 id, +echo_client_page_debug_setup(struct page *page, int rw, u64 id, u64 offset, u64 count) { char *addr; @@ -1491,15 +1311,14 @@ echo_client_page_debug_setup(struct lov_stripe_md *lsm, int delta; /* no partial pages on the client */ - LASSERT(count == PAGE_CACHE_SIZE); + LASSERT(count == PAGE_SIZE); addr = kmap(page); - for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { + for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { if (rw == OBD_BRW_WRITE) { stripe_off = offset + delta; stripe_id = id; - echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id); } else { stripe_off = 0xdeadbeef00c0ffeeULL; stripe_id = 0xdeadbeef00c0ffeeULL; @@ -1511,8 +1330,7 @@ echo_client_page_debug_setup(struct lov_stripe_md *lsm, kunmap(page); } -static int echo_client_page_debug_check(struct lov_stripe_md *lsm, - struct page *page, u64 id, +static int echo_client_page_debug_check(struct page *page, u64 id, u64 offset, u64 count) { u64 stripe_off; @@ -1523,14 
+1341,13 @@ static int echo_client_page_debug_check(struct lov_stripe_md *lsm, int rc2; /* no partial pages on the client */ - LASSERT(count == PAGE_CACHE_SIZE); + LASSERT(count == PAGE_SIZE); addr = kmap(page); - for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { + for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { stripe_off = offset + delta; stripe_id = id; - echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id); rc2 = block_debug_check("test_brw", addr + delta, OBD_ECHO_BLOCK_SIZE, @@ -1550,7 +1367,6 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, u64 count, int async, struct obd_trans_info *oti) { - struct lov_stripe_md *lsm = eco->eo_lsm; u32 npages; struct brw_page *pga; struct brw_page *pgp; @@ -1569,53 +1385,51 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_KERNEL : GFP_HIGHUSER; LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ); - LASSERT(lsm != NULL); - LASSERT(ostid_id(&lsm->lsm_oi) == ostid_id(&oa->o_oi)); if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0) return -EINVAL; /* XXX think again with misaligned I/O */ - npages = count >> PAGE_CACHE_SHIFT; + npages = count >> PAGE_SHIFT; if (rw == OBD_BRW_WRITE) brw_flags = OBD_BRW_ASYNC; pga = kcalloc(npages, sizeof(*pga), GFP_NOFS); - if (pga == NULL) + if (!pga) return -ENOMEM; pages = kcalloc(npages, sizeof(*pages), GFP_NOFS); - if (pages == NULL) { + if (!pages) { kfree(pga); return -ENOMEM; } for (i = 0, pgp = pga, off = offset; i < npages; - i++, pgp++, off += PAGE_CACHE_SIZE) { + i++, pgp++, off += PAGE_SIZE) { - LASSERT(pgp->pg == NULL); /* for cleanup */ + LASSERT(!pgp->pg); /* for cleanup */ rc = -ENOMEM; pgp->pg = alloc_page(gfp_mask); - if (pgp->pg == NULL) + if (!pgp->pg) goto out; pages[i] = pgp->pg; - pgp->count = PAGE_CACHE_SIZE; + pgp->count = PAGE_SIZE; pgp->off = off; pgp->flag = brw_flags; if (verify) - 
echo_client_page_debug_setup(lsm, pgp->pg, rw, + echo_client_page_debug_setup(pgp->pg, rw, ostid_id(&oa->o_oi), off, pgp->count); } /* brw mode can only be used at client */ - LASSERT(ed->ed_next != NULL); + LASSERT(ed->ed_next); rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async); out: @@ -1623,13 +1437,13 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, verify = 0; for (i = 0, pgp = pga; i < npages; i++, pgp++) { - if (pgp->pg == NULL) + if (!pgp->pg) continue; if (verify) { int vrc; - vrc = echo_client_page_debug_check(lsm, pgp->pg, + vrc = echo_client_page_debug_check(pgp->pg, ostid_id(&oa->o_oi), pgp->off, pgp->count); if (vrc != 0 && rc == 0) @@ -1649,7 +1463,6 @@ static int echo_client_prep_commit(const struct lu_env *env, u64 batch, struct obd_trans_info *oti, int async) { - struct lov_stripe_md *lsm = eco->eo_lsm; struct obd_ioobj ioo; struct niobuf_local *lnb; struct niobuf_remote *rnb; @@ -1657,17 +1470,16 @@ static int echo_client_prep_commit(const struct lu_env *env, u64 npages, tot_pages; int i, ret = 0, brw_flags = 0; - if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 || - (lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi))) + if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0) return -EINVAL; - npages = batch >> PAGE_CACHE_SHIFT; - tot_pages = count >> PAGE_CACHE_SHIFT; + npages = batch >> PAGE_SHIFT; + tot_pages = count >> PAGE_SHIFT; lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS); rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS); - if (lnb == NULL || rnb == NULL) { + if (!lnb || !rnb) { ret = -ENOMEM; goto out; } @@ -1685,9 +1497,9 @@ static int echo_client_prep_commit(const struct lu_env *env, if (tot_pages < npages) npages = tot_pages; - for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) { + for (i = 0; i < npages; i++, off += PAGE_SIZE) { rnb[i].offset = off; - rnb[i].len = PAGE_CACHE_SIZE; + rnb[i].len = PAGE_SIZE; rnb[i].flags = brw_flags; } @@ -1705,7 +1517,7 
@@ static int echo_client_prep_commit(const struct lu_env *env, struct page *page = lnb[i].page; /* read past eof? */ - if (page == NULL && lnb[i].rc == 0) + if (!page && lnb[i].rc == 0) continue; if (async) @@ -1717,12 +1529,12 @@ static int echo_client_prep_commit(const struct lu_env *env, continue; if (rw == OBD_BRW_WRITE) - echo_client_page_debug_setup(lsm, page, rw, + echo_client_page_debug_setup(page, rw, ostid_id(&oa->o_oi), rnb[i].offset, rnb[i].len); else - echo_client_page_debug_check(lsm, page, + echo_client_page_debug_check(page, ostid_id(&oa->o_oi), rnb[i].offset, rnb[i].len); @@ -1774,7 +1586,7 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw, if (test_mode == 1) async = 0; - if (ed->ed_next == NULL && test_mode != 3) { + if (!ed->ed_next && test_mode != 3) { test_mode = 3; data->ioc_plen1 = data->ioc_count; } @@ -1805,55 +1617,8 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw, } static int -echo_client_enqueue(struct obd_export *exp, struct obdo *oa, - int mode, u64 offset, u64 nob) -{ - struct echo_device *ed = obd2echo_dev(exp->exp_obd); - struct lustre_handle *ulh = &oa->o_handle; - struct echo_object *eco; - u64 end; - int rc; - - if (ed->ed_next == NULL) - return -EOPNOTSUPP; - - if (!(mode == LCK_PR || mode == LCK_PW)) - return -EINVAL; - - if ((offset & (~CFS_PAGE_MASK)) != 0 || - (nob & (~CFS_PAGE_MASK)) != 0) - return -EINVAL; - - rc = echo_get_object(&eco, ed, oa); - if (rc != 0) - return rc; - - end = (nob == 0) ? 
((u64) -1) : (offset + nob - 1); - rc = cl_echo_enqueue(eco, offset, end, mode, &ulh->cookie); - if (rc == 0) { - oa->o_valid |= OBD_MD_FLHANDLE; - CDEBUG(D_INFO, "Cookie is %#llx\n", ulh->cookie); - } - echo_put_object(eco); - return rc; -} - -static int -echo_client_cancel(struct obd_export *exp, struct obdo *oa) -{ - struct echo_device *ed = obd2echo_dev(exp->exp_obd); - __u64 cookie = oa->o_handle.cookie; - - if ((oa->o_valid & OBD_MD_FLHANDLE) == 0) - return -EINVAL; - - CDEBUG(D_INFO, "Cookie is %#llx\n", cookie); - return cl_echo_cancel(ed, cookie); -} - -static int echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, - void *karg, void *uarg) + void *karg, void __user *uarg) { struct obd_device *obd = exp->exp_obd; struct echo_device *ed = obd2echo_dev(obd); @@ -1899,8 +1664,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, goto out; } - rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1, - data->ioc_plen1, &dummy_oti); + rc = echo_create_object(env, ed, oa, &dummy_oti); goto out; case OBD_IOC_DESTROY: @@ -1911,7 +1675,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - rc = obd_destroy(env, ec->ec_exp, oa, eco->eo_lsm, + rc = obd_destroy(env, ec->ec_exp, oa, NULL, &dummy_oti, NULL); if (rc == 0) eco->eo_deleted = 1; @@ -1922,10 +1686,10 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, case OBD_IOC_GETATTR: rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - struct obd_info oinfo = { }; + struct obd_info oinfo = { + .oi_oa = oa, + }; - oinfo.oi_md = eco->eo_lsm; - oinfo.oi_oa = oa; rc = obd_getattr(env, ec->ec_exp, &oinfo); echo_put_object(eco); } @@ -1939,10 +1703,9 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, rc = echo_get_object(&eco, ed, oa); if (rc == 0) { - struct obd_info oinfo = { }; - - oinfo.oi_oa = oa; - oinfo.oi_md = eco->eo_lsm; + struct obd_info oinfo = { 
+ .oi_oa = oa, + }; rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL); echo_put_object(eco); @@ -1961,50 +1724,6 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti); goto out; - case ECHO_IOC_GET_STRIPE: - rc = echo_get_object(&eco, ed, oa); - if (rc == 0) { - rc = echo_copyout_lsm(eco->eo_lsm, data->ioc_pbuf1, - data->ioc_plen1); - echo_put_object(eco); - } - goto out; - - case ECHO_IOC_SET_STRIPE: - if (!capable(CFS_CAP_SYS_ADMIN)) { - rc = -EPERM; - goto out; - } - - if (data->ioc_pbuf1 == NULL) { /* unset */ - rc = echo_get_object(&eco, ed, oa); - if (rc == 0) { - eco->eo_deleted = 1; - echo_put_object(eco); - } - } else { - rc = echo_create_object(env, ed, 0, oa, - data->ioc_pbuf1, - data->ioc_plen1, &dummy_oti); - } - goto out; - - case ECHO_IOC_ENQUEUE: - if (!capable(CFS_CAP_SYS_ADMIN)) { - rc = -EPERM; - goto out; - } - - rc = echo_client_enqueue(exp, oa, - data->ioc_conn1, /* lock mode */ - data->ioc_offset, - data->ioc_count);/*extent*/ - goto out; - - case ECHO_IOC_CANCEL: - rc = echo_client_cancel(exp, oa); - goto out; - default: CERROR("echo_ioctl(): unrecognised ioctl %#x\n", cmd); rc = -ENOTTY; @@ -2051,14 +1770,10 @@ static int echo_client_setup(const struct lu_env *env, INIT_LIST_HEAD(&ec->ec_objects); INIT_LIST_HEAD(&ec->ec_locks); ec->ec_unique = 0; - ec->ec_nstripes = 0; ocd = kzalloc(sizeof(*ocd), GFP_NOFS); - if (!ocd) { - CERROR("Can't alloc ocd connecting to %s\n", - lustre_cfg_string(lcfg, 1)); + if (!ocd) return -ENOMEM; - } ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE | @@ -2120,7 +1835,7 @@ static int echo_client_disconnect(struct obd_export *exp) { int rc; - if (exp == NULL) { + if (!exp) { rc = -EINVAL; goto out; } @@ -2163,7 +1878,7 @@ static int __init obdecho_init(void) { LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n"); - LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); + LASSERT(PAGE_SIZE % 
OBD_ECHO_BLOCK_SIZE == 0); return echo_client_init(); } @@ -2175,9 +1890,9 @@ static void /*__exit*/ obdecho_exit(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Lustre Testing Echo OBD driver"); -MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Lustre Echo Client test driver"); MODULE_VERSION(LUSTRE_VERSION_STRING); +MODULE_LICENSE("GPL"); module_init(obdecho_init); module_exit(obdecho_exit); diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h index 69063fa65..f5034a253 100644 --- a/drivers/staging/lustre/lustre/obdecho/echo_internal.h +++ b/drivers/staging/lustre/lustre/obdecho/echo_internal.h @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c index 1091536fc..a3358c39b 100644 --- a/drivers/staging/lustre/lustre/osc/lproc_osc.c +++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c @@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, if (rc) return rc; - pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */ + pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */ if (pages_number <= 0 || - pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) || + pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) || pages_number > totalram_pages / 4) /* 1/4 of RAM */ return -ERANGE; client_obd_list_lock(&cli->cl_loi_list_lock); - cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT); + cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT); osc_wake_cache_waiters(cli); 
client_obd_list_unlock(&cli->cl_loi_list_lock); @@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v) { struct obd_device *dev = m->private; struct client_obd *cli = &dev->u.cli; - int shift = 20 - PAGE_CACHE_SHIFT; + int shift = 20 - PAGE_SHIFT; seq_printf(m, "used_mb: %d\n" @@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file, return -EFAULT; kernbuf[count] = 0; - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) - kernbuf; rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); @@ -381,7 +381,7 @@ static int osc_checksum_type_seq_show(struct seq_file *m, void *v) DECLARE_CKSUM_NAME; - if (obd == NULL) + if (!obd) return 0; for (i = 0; i < ARRAY_SIZE(cksum_name); i++) { @@ -397,8 +397,8 @@ static int osc_checksum_type_seq_show(struct seq_file *m, void *v) } static ssize_t osc_checksum_type_seq_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *off) + const char __user *buffer, + size_t count, loff_t *off) { struct obd_device *obd = ((struct seq_file *)file->private_data)->private; int i; @@ -406,7 +406,7 @@ static ssize_t osc_checksum_type_seq_write(struct file *file, DECLARE_CKSUM_NAME; char kernbuf[10]; - if (obd == NULL) + if (!obd) return 0; if (count > sizeof(kernbuf) - 1) @@ -422,8 +422,8 @@ static ssize_t osc_checksum_type_seq_write(struct file *file, if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0) continue; if (!strcmp(kernbuf, cksum_name[i])) { - obd->u.cli.cl_cksum_type = 1 << i; - return count; + obd->u.cli.cl_cksum_type = 1 << i; + return count; } } return -EINVAL; @@ -480,9 +480,19 @@ static ssize_t contention_seconds_store(struct kobject *kobj, struct obd_device *obd = container_of(kobj, struct obd_device, obd_kobj); struct osc_device *od = obd2osc_dev(obd); + int rc; + int val; + + rc = kstrtoint(buffer, 10, &val); + if (rc) + return rc; + + if (val < 0) + return 
-EINVAL; + + od->od_contention_time = val; - return lprocfs_write_helper(buffer, count, &od->od_contention_time) ?: - count; + return count; } LUSTRE_RW_ATTR(contention_seconds); @@ -505,9 +515,16 @@ static ssize_t lockless_truncate_store(struct kobject *kobj, struct obd_device *obd = container_of(kobj, struct obd_device, obd_kobj); struct osc_device *od = obd2osc_dev(obd); + int rc; + unsigned int val; - return lprocfs_write_helper(buffer, count, &od->od_lockless_truncate) ?: - count; + rc = kstrtouint(buffer, 10, &val); + if (rc) + return rc; + + od->od_lockless_truncate = val; + + return count; } LUSTRE_RW_ATTR(lockless_truncate); @@ -552,12 +569,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj, /* if the max_pages is specified in bytes, convert to pages */ if (val >= ONE_MB_BRW_SIZE) - val >>= PAGE_CACHE_SHIFT; + val >>= PAGE_SHIFT; - chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1); + chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1); /* max_pages_per_rpc must be chunk aligned */ val = (val + ~chunk_mask) & chunk_mask; - if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) { + if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) { return -ERANGE; } client_obd_list_lock(&cli->cl_loi_list_lock); @@ -635,10 +652,10 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v) read_cum += r; write_cum += w; seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n", - 1 << i, r, pct(r, read_tot), - pct(read_cum, read_tot), w, - pct(w, write_tot), - pct(write_cum, write_tot)); + 1 << i, r, pct(r, read_tot), + pct(read_cum, read_tot), w, + pct(w, write_tot), + pct(write_cum, write_tot)); if (read_cum == read_tot && write_cum == write_tot) break; } @@ -659,10 +676,10 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v) read_cum += r; write_cum += w; seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n", - i, r, pct(r, read_tot), - pct(read_cum, read_tot), w, - pct(w, write_tot), - 
pct(write_cum, write_tot)); + i, r, pct(r, read_tot), + pct(read_cum, read_tot), w, + pct(w, write_tot), + pct(write_cum, write_tot)); if (read_cum == read_tot && write_cum == write_tot) break; } diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c index 2229419b7..5f25bf83d 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c +++ b/drivers/staging/lustre/lustre/osc/osc_cache.c @@ -140,7 +140,7 @@ static const char *oes_strings[] = { static inline struct osc_extent *rb_extent(struct rb_node *n) { - if (n == NULL) + if (!n) return NULL; return container_of(n, struct osc_extent, oe_node); @@ -148,7 +148,7 @@ static inline struct osc_extent *rb_extent(struct rb_node *n) static inline struct osc_extent *next_extent(struct osc_extent *ext) { - if (ext == NULL) + if (!ext) return NULL; LASSERT(ext->oe_intree); @@ -157,7 +157,7 @@ static inline struct osc_extent *next_extent(struct osc_extent *ext) static inline struct osc_extent *prev_extent(struct osc_extent *ext) { - if (ext == NULL) + if (!ext) return NULL; LASSERT(ext->oe_intree); @@ -240,7 +240,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext, goto out; } - if (ext->oe_osclock == NULL && ext->oe_grants > 0) { + if (!ext->oe_osclock && ext->oe_grants > 0) { rc = 90; goto out; } @@ -262,7 +262,8 @@ static int osc_extent_sanity_check0(struct osc_extent *ext, } /* Do not verify page list if extent is in RPC. This is because an - * in-RPC extent is supposed to be exclusively accessible w/o lock. */ + * in-RPC extent is supposed to be exclusively accessible w/o lock. 
+ */ if (ext->oe_state > OES_CACHE) { rc = 0; goto out; @@ -319,7 +320,7 @@ static int osc_extent_is_overlapped(struct osc_object *obj, if (!extent_debug) return 0; - for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) { + for (tmp = first_extent(obj); tmp; tmp = next_extent(tmp)) { if (tmp == ext) continue; if (tmp->oe_end >= ext->oe_start && @@ -346,8 +347,8 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj) { struct osc_extent *ext; - ext = kmem_cache_alloc(osc_extent_kmem, GFP_NOFS | __GFP_ZERO); - if (ext == NULL) + ext = kmem_cache_zalloc(osc_extent_kmem, GFP_NOFS); + if (!ext) return NULL; RB_CLEAR_NODE(&ext->oe_node); @@ -415,7 +416,7 @@ static struct osc_extent *osc_extent_search(struct osc_object *obj, struct osc_extent *tmp, *p = NULL; LASSERT(osc_object_is_locked(obj)); - while (n != NULL) { + while (n) { tmp = rb_extent(n); if (index < tmp->oe_start) { n = n->rb_left; @@ -439,7 +440,7 @@ static struct osc_extent *osc_extent_lookup(struct osc_object *obj, struct osc_extent *ext; ext = osc_extent_search(obj, index); - if (ext != NULL && ext->oe_start <= index && index <= ext->oe_end) + if (ext && ext->oe_start <= index && index <= ext->oe_end) return osc_extent_get(ext); return NULL; } @@ -454,7 +455,7 @@ static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext) LASSERT(ext->oe_intree == 0); LASSERT(ext->oe_obj == obj); LASSERT(osc_object_is_locked(obj)); - while (*n != NULL) { + while (*n) { tmp = rb_extent(*n); parent = *n; @@ -463,7 +464,7 @@ static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext) else if (ext->oe_start > tmp->oe_end) n = &(*n)->rb_right; else - EASSERTF(0, tmp, EXTSTR, EXTPARA(ext)); + EASSERTF(0, tmp, EXTSTR"\n", EXTPARA(ext)); } rb_link_node(&ext->oe_node, parent, n); rb_insert_color(&ext->oe_node, &obj->oo_root); @@ -533,7 +534,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur, LASSERT(cur->oe_state == OES_CACHE); 
LASSERT(osc_object_is_locked(obj)); - if (victim == NULL) + if (!victim) return -EINVAL; if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait) @@ -543,7 +544,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur, return -ERANGE; LASSERT(cur->oe_osclock == victim->oe_osclock); - ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT; + ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT; chunk_start = cur->oe_start >> ppc_bits; chunk_end = cur->oe_end >> ppc_bits; if (chunk_start != (victim->oe_end >> ppc_bits) + 1 && @@ -587,7 +588,8 @@ void osc_extent_release(const struct lu_env *env, struct osc_extent *ext) if (ext->oe_trunc_pending) { /* a truncate process is waiting for this extent. * This may happen due to a race, check - * osc_cache_truncate_start(). */ + * osc_cache_truncate_start(). + */ osc_extent_state_set(ext, OES_TRUNC); ext->oe_trunc_pending = 0; } else { @@ -601,7 +603,7 @@ void osc_extent_release(const struct lu_env *env, struct osc_extent *ext) if (ext->oe_urgent) list_move_tail(&ext->oe_link, - &obj->oo_urgent_exts); + &obj->oo_urgent_exts); } osc_object_unlock(obj); @@ -639,15 +641,14 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env, int rc; cur = osc_extent_alloc(obj); - if (cur == NULL) + if (!cur) return ERR_PTR(-ENOMEM); lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0); - LASSERT(lock != NULL); LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); - LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT); - ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; + LASSERT(cli->cl_chunkbits >= PAGE_SHIFT); + ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; chunk_mask = ~((1 << ppc_bits) - 1); chunksize = 1 << cli->cl_chunkbits; chunk = index >> ppc_bits; @@ -673,14 +674,15 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env, /* grants has been allocated by caller */ LASSERTF(*grants >= chunksize + cli->cl_extent_tax, "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax); - 
LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur)); + LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR"\n", + EXTPARA(cur)); restart: osc_object_lock(obj); ext = osc_extent_search(obj, cur->oe_start); - if (ext == NULL) + if (!ext) ext = first_extent(obj); - while (ext != NULL) { + while (ext) { loff_t ext_chk_start = ext->oe_start >> ppc_bits; loff_t ext_chk_end = ext->oe_end >> ppc_bits; @@ -691,7 +693,7 @@ restart: /* if covering by different locks, no chance to match */ if (lock != ext->oe_osclock) { EASSERTF(!overlapped(ext, cur), ext, - EXTSTR, EXTPARA(cur)); + EXTSTR"\n", EXTPARA(cur)); ext = next_extent(ext); continue; @@ -705,18 +707,21 @@ restart: /* ok, from now on, ext and cur have these attrs: * 1. covered by the same lock - * 2. contiguous at chunk level or overlapping. */ + * 2. contiguous at chunk level or overlapping. + */ if (overlapped(ext, cur)) { /* cur is the minimum unit, so overlapping means - * full contain. */ + * full contain. + */ EASSERTF((ext->oe_start <= cur->oe_start && ext->oe_end >= cur->oe_end), - ext, EXTSTR, EXTPARA(cur)); + ext, EXTSTR"\n", EXTPARA(cur)); if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) { /* for simplicity, we wait for this extent to - * finish before going forward. */ + * finish before going forward. + */ conflict = osc_extent_get(ext); break; } @@ -729,17 +734,20 @@ restart: if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) { /* we can't do anything for a non OES_CACHE extent, or * if there is someone waiting for this extent to be - * flushed, try next one. */ + * flushed, try next one. + */ ext = next_extent(ext); continue; } /* check if they belong to the same rpc slot before trying to * merge. the extents are not overlapped and contiguous at - * chunk level to get here. */ + * chunk level to get here. + */ if (ext->oe_max_end != max_end) { /* if they don't belong to the same RPC slot or - * max_pages_per_rpc has ever changed, do not merge. 
*/ + * max_pages_per_rpc has ever changed, do not merge. + */ ext = next_extent(ext); continue; } @@ -748,7 +756,8 @@ restart: * level so that we know the whole extent is covered by grant * (the pages in the extent are NOT required to be contiguous). * Otherwise, it will be too much difficult to know which - * chunks have grants allocated. */ + * chunks have grants allocated. + */ /* try to do front merge - extend ext's start */ if (chunk + 1 == ext_chk_start) { @@ -768,28 +777,29 @@ restart: *grants -= chunksize; /* try to merge with the next one because we just fill - * in a gap */ + * in a gap + */ if (osc_extent_merge(env, ext, next_extent(ext)) == 0) /* we can save extent tax from next extent */ *grants += cli->cl_extent_tax; found = osc_extent_hold(ext); } - if (found != NULL) + if (found) break; ext = next_extent(ext); } osc_extent_tree_dump(D_CACHE, obj); - if (found != NULL) { - LASSERT(conflict == NULL); + if (found) { + LASSERT(!conflict); if (!IS_ERR(found)) { LASSERT(found->oe_osclock == cur->oe_osclock); OSC_EXTENT_DUMP(D_CACHE, found, "found caching ext for %lu.\n", index); } - } else if (conflict == NULL) { + } else if (!conflict) { /* create a new extent */ EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur); cur->oe_grants = chunksize + cli->cl_extent_tax; @@ -804,11 +814,12 @@ restart: } osc_object_unlock(obj); - if (conflict != NULL) { - LASSERT(found == NULL); + if (conflict) { + LASSERT(!found); /* waiting for IO to finish. Please notice that it's impossible - * to be an OES_TRUNC extent. */ + * to be an OES_TRUNC extent. 
+ */ rc = osc_extent_wait(env, conflict, OES_INV); osc_extent_put(env, conflict); conflict = NULL; @@ -845,8 +856,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, ext->oe_rc = rc ?: ext->oe_nr_pages; EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext); - list_for_each_entry_safe(oap, tmp, &ext->oe_pages, - oap_pending_item) { + list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { list_del_init(&oap->oap_rpc_item); list_del_init(&oap->oap_pending_item); if (last_off <= oap->oap_obj_off) { @@ -861,11 +871,12 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, if (!sent) { lost_grant = ext->oe_grants; - } else if (blocksize < PAGE_CACHE_SIZE && - last_count != PAGE_CACHE_SIZE) { + } else if (blocksize < PAGE_SIZE && + last_count != PAGE_SIZE) { /* For short writes we shouldn't count parts of pages that * span a whole chunk on the OST side, or our accounting goes - * wrong. Should match the code in filter_grant_check. */ + * wrong. Should match the code in filter_grant_check. + */ int offset = oap->oap_page_off & ~CFS_PAGE_MASK; int count = oap->oap_count + (offset & (blocksize - 1)); int end = (offset + oap->oap_count) & (blocksize - 1); @@ -873,7 +884,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, if (end) count += blocksize - end; - lost_grant = PAGE_CACHE_SIZE - count; + lost_grant = PAGE_SIZE - count; } if (ext->oe_grants > 0) osc_free_grant(cli, nr_pages, lost_grant); @@ -909,7 +920,8 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext, osc_object_lock(obj); LASSERT(sanity_check_nolock(ext) == 0); /* `Kick' this extent only if the caller is waiting for it to be - * written out. */ + * written out. 
+ */ if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp && !ext->oe_trunc_pending) { if (ext->oe_state == OES_ACTIVE) { @@ -955,7 +967,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, struct osc_async_page *oap; struct osc_async_page *tmp; int pages_in_chunk = 0; - int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; + int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; __u64 trunc_chunk = trunc_index >> ppc_bits; int grants = 0; int nr_pages = 0; @@ -967,7 +979,8 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, /* Request new lu_env. * We can't use that env from osc_cache_truncate_start() because - * it's from lov_io_sub and not fully initialized. */ + * it's from lov_io_sub and not fully initialized. + */ env = cl_env_nested_get(&nest); io = &osc_env_info(env)->oti_io; io->ci_obj = cl_object_top(osc2cl(obj)); @@ -976,15 +989,15 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, goto out; /* discard all pages with index greater then trunc_index */ - list_for_each_entry_safe(oap, tmp, &ext->oe_pages, - oap_pending_item) { + list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) { struct cl_page *sub = oap2cl_page(oap); struct cl_page *page = cl_page_top(sub); LASSERT(list_empty(&oap->oap_rpc_item)); /* only discard the pages with their index greater than - * trunc_index, and ... */ + * trunc_index, and ... + */ if (sub->cp_index < trunc_index || (sub->cp_index == trunc_index && partial)) { /* accounting how many pages remaining in the chunk @@ -1028,11 +1041,13 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, pgoff_t last_index; /* if there is no pages in this chunk, we can also free grants - * for the last chunk */ + * for the last chunk + */ if (pages_in_chunk == 0) { /* if this is the 1st chunk and no pages in this chunk, * ext->oe_nr_pages must be zero, so we should be in - * the other if-clause. */ + * the other if-clause. 
+ */ LASSERT(trunc_chunk > 0); --trunc_chunk; ++chunks; @@ -1074,13 +1089,13 @@ static int osc_extent_make_ready(const struct lu_env *env, LASSERT(sanity_check(ext) == 0); /* in locking state, any process should not touch this extent. */ EASSERT(ext->oe_state == OES_LOCKING, ext); - EASSERT(ext->oe_owner != NULL, ext); + EASSERT(ext->oe_owner, ext); OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n"); list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { ++page_count; - if (last == NULL || last->oap_obj_off < oap->oap_obj_off) + if (!last || last->oap_obj_off < oap->oap_obj_off) last = oap; /* checking ASYNC_READY is race safe */ @@ -1103,21 +1118,23 @@ static int osc_extent_make_ready(const struct lu_env *env, } LASSERT(page_count == ext->oe_nr_pages); - LASSERT(last != NULL); + LASSERT(last); /* the last page is the only one we need to refresh its count by - * the size of file. */ + * the size of file. + */ if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) { last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); LASSERT(last->oap_count > 0); - LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE); + LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE); last->oap_async_flags |= ASYNC_COUNT_STABLE; } /* for the rest of pages, we don't need to call osf_refresh_count() - * because it's known they are not the last page */ + * because it's known they are not the last page + */ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { - oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off; + oap->oap_count = PAGE_SIZE - oap->oap_page_off; oap->oap_async_flags |= ASYNC_COUNT_STABLE; } } @@ -1141,7 +1158,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants) struct osc_object *obj = ext->oe_obj; struct client_obd *cli = osc_cli(obj); struct osc_extent *next; - int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; + int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; 
pgoff_t chunk = index >> ppc_bits; pgoff_t end_chunk; pgoff_t end_index; @@ -1167,9 +1184,10 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants) end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1); next = next_extent(ext); - if (next != NULL && next->oe_start <= end_index) { + if (next && next->oe_start <= end_index) { /* complex mode - overlapped with the next extent, - * this case will be handled by osc_extent_find() */ + * this case will be handled by osc_extent_find() + */ rc = -EAGAIN; goto out; } @@ -1197,7 +1215,7 @@ static void osc_extent_tree_dump0(int level, struct osc_object *obj, /* osc_object_lock(obj); */ cnt = 1; - for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext)) + for (ext = first_extent(obj); ext; ext = next_extent(ext)) OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++); cnt = 1; @@ -1262,7 +1280,6 @@ static int osc_refresh_count(const struct lu_env *env, /* readpage queues with _COUNT_STABLE, shouldn't get here. */ LASSERT(!(cmd & OBD_BRW_READ)); - LASSERT(opg != NULL); obj = opg->ops_cl.cpl_obj; cl_object_attr_lock(obj); @@ -1276,9 +1293,9 @@ static int osc_refresh_count(const struct lu_env *env, return 0; else if (cl_offset(obj, page->cp_index + 1) > kms) /* catch sub-page write at end of file */ - return kms % PAGE_CACHE_SIZE; + return kms % PAGE_SIZE; else - return PAGE_CACHE_SIZE; + return PAGE_SIZE; } static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, @@ -1299,16 +1316,16 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, * page->cp_req can be NULL if io submission failed before * cl_req was allocated. */ - if (page->cp_req != NULL) + if (page->cp_req) cl_req_page_done(env, page); - LASSERT(page->cp_req == NULL); + LASSERT(!page->cp_req); crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE; /* Clear opg->ops_transfer_pinned before VM lock is released. 
*/ opg->ops_transfer_pinned = 0; spin_lock(&obj->oo_seatbelt); - LASSERT(opg->ops_submitter != NULL); + LASSERT(opg->ops_submitter); LASSERT(!list_empty(&opg->ops_inflight)); list_del_init(&opg->ops_inflight); opg->ops_submitter = NULL; @@ -1359,15 +1376,16 @@ static void osc_consume_write_grant(struct client_obd *cli, assert_spin_locked(&cli->cl_loi_list_lock.lock); LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT)); atomic_inc(&obd_dirty_pages); - cli->cl_dirty += PAGE_CACHE_SIZE; + cli->cl_dirty += PAGE_SIZE; pga->flag |= OBD_BRW_FROM_GRANT; CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n", - PAGE_CACHE_SIZE, pga, pga->pg); + PAGE_SIZE, pga, pga->pg); osc_update_next_shrink(cli); } /* the companion to osc_consume_write_grant, called when a brw has completed. - * must be called with the loi lock held. */ + * must be called with the loi lock held. + */ static void osc_release_write_grant(struct client_obd *cli, struct brw_page *pga) { @@ -1378,11 +1396,11 @@ static void osc_release_write_grant(struct client_obd *cli, pga->flag &= ~OBD_BRW_FROM_GRANT; atomic_dec(&obd_dirty_pages); - cli->cl_dirty -= PAGE_CACHE_SIZE; + cli->cl_dirty -= PAGE_SIZE; if (pga->flag & OBD_BRW_NOCACHE) { pga->flag &= ~OBD_BRW_NOCACHE; atomic_dec(&obd_dirty_transit_pages); - cli->cl_dirty_transit -= PAGE_CACHE_SIZE; + cli->cl_dirty_transit -= PAGE_SIZE; } } @@ -1410,7 +1428,8 @@ static void __osc_unreserve_grant(struct client_obd *cli, /* it's quite normal for us to get more grant than reserved. * Thinking about a case that two extents merged by adding a new * chunk, we can save one extent tax. If extent tax is greater than - * one chunk, we can save more grant by adding a new chunk */ + * one chunk, we can save more grant by adding a new chunk + */ cli->cl_reserved_grant -= reserved; if (unused > reserved) { cli->cl_avail_grant += reserved; @@ -1437,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli, * used, we should return these grants to OST. 
There're two cases where grants * can be lost: * 1. truncate; - * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was + * 2. blocksize at OST is less than PAGE_SIZE and a partial page was * written. In this case OST may use less chunks to serve this partial * write. OSTs don't actually know the page size on the client side. so * clients have to calculate lost grant by the blocksize on the OST. @@ -1450,11 +1469,12 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, client_obd_list_lock(&cli->cl_loi_list_lock); atomic_sub(nr_pages, &obd_dirty_pages); - cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT; + cli->cl_dirty -= nr_pages << PAGE_SHIFT; cli->cl_lost_grant += lost_grant; if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) { /* borrow some grant from truncate to avoid the case that - * truncate uses up all avail grant */ + * truncate uses up all avail grant + */ cli->cl_lost_grant -= grant; cli->cl_avail_grant += grant; } @@ -1492,11 +1512,11 @@ static int osc_enter_cache_try(struct client_obd *cli, if (rc < 0) return 0; - if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max && + if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max && atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) { osc_consume_write_grant(cli, &oap->oap_brw_page); if (transient) { - cli->cl_dirty_transit += PAGE_CACHE_SIZE; + cli->cl_dirty_transit += PAGE_SIZE; atomic_inc(&obd_dirty_transit_pages); oap->oap_brw_flags |= OBD_BRW_NOCACHE; } @@ -1539,9 +1559,10 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, client_obd_list_lock(&cli->cl_loi_list_lock); /* force the caller to try sync io. 
this can jump the list - * of queued writes and create a discontiguous rpc stream */ + * of queued writes and create a discontiguous rpc stream + */ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) || - cli->cl_dirty_max < PAGE_CACHE_SIZE || + cli->cl_dirty_max < PAGE_SIZE || cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) { rc = -EDQUOT; goto out; @@ -1558,7 +1579,8 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, * Adding a cache waiter will trigger urgent write-out no matter what * RPC size will be. * The exiting condition is no avail grants and no dirty pages caching, - * that really means there is no space on the OST. */ + * that really means there is no space on the OST. + */ init_waitqueue_head(&ocw.ocw_waitq); ocw.ocw_oap = oap; ocw.ocw_grant = bytes; @@ -1610,7 +1632,7 @@ void osc_wake_cache_waiters(struct client_obd *cli) ocw->ocw_rc = -EDQUOT; /* we can't dirty more */ - if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) || + if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) || (atomic_read(&obd_dirty_pages) + 1 > obd_max_dirty_pages)) { CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n", @@ -1640,7 +1662,8 @@ static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc) /* This maintains the lists of pending pages to read/write for a given object * (lop). This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint() - * to quickly find objects that are ready to send an RPC. */ + * to quickly find objects that are ready to send an RPC. + */ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, int cmd) { @@ -1649,8 +1672,9 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, /* if we have an invalid import we want to drain the queued pages * by forcing them through rpcs that immediately fail and complete * the pages. 
recovery relies on this to empty the queued pages - * before canceling the locks and evicting down the llite pages */ - if ((cli->cl_import == NULL || cli->cl_import->imp_invalid)) + * before canceling the locks and evicting down the llite pages + */ + if (!cli->cl_import || cli->cl_import->imp_invalid) invalid_import = 1; if (cmd & OBD_BRW_WRITE) { @@ -1670,7 +1694,8 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc, } /* trigger a write rpc stream as long as there are dirtiers * waiting for space. as they're waiting, they're not going to - * create more pages to coalesce with what's waiting.. */ + * create more pages to coalesce with what's waiting.. + */ if (!list_empty(&cli->cl_cache_waiters)) { CDEBUG(D_CACHE, "cache waiters forcing RPC\n"); return 1; @@ -1723,7 +1748,8 @@ static void on_list(struct list_head *item, struct list_head *list, int should_b } /* maintain the osc's cli list membership invariants so that osc_send_oap_rpc - * can find pages to build into rpcs quickly */ + * can find pages to build into rpcs quickly + */ static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc) { if (osc_makes_hprpc(osc)) { @@ -1761,7 +1787,8 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc) * application. As an async write fails we record the error code for later if * the app does an fsync. As long as errors persist we force future rpcs to be * sync so that the app can get a sync error and break the cycle of queueing - * pages for which writeback will fail. */ + * pages for which writeback will fail. 
+ */ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid, int rc) { @@ -1780,7 +1807,8 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid, } /* this must be called holding the loi list lock to give coverage to exit_cache, - * async_flag maintenance, and oap_request */ + * async_flag maintenance, and oap_request + */ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli, struct osc_async_page *oap, int sent, int rc) { @@ -1788,7 +1816,7 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli, struct lov_oinfo *loi = osc->oo_oinfo; __u64 xid = 0; - if (oap->oap_request != NULL) { + if (oap->oap_request) { xid = ptlrpc_req_xid(oap->oap_request); ptlrpc_req_finished(oap->oap_request); oap->oap_request = NULL; @@ -1877,13 +1905,12 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) { struct client_obd *cli = osc_cli(obj); struct osc_extent *ext; + struct osc_extent *temp; int page_count = 0; unsigned int max_pages = cli->cl_max_pages_per_rpc; LASSERT(osc_object_is_locked(obj)); - while (!list_empty(&obj->oo_hp_exts)) { - ext = list_entry(obj->oo_hp_exts.next, struct osc_extent, - oe_link); + list_for_each_entry_safe(ext, temp, &obj->oo_hp_exts, oe_link) { LASSERT(ext->oe_state == OES_CACHE); if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, &max_pages)) @@ -1895,7 +1922,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) while (!list_empty(&obj->oo_urgent_exts)) { ext = list_entry(obj->oo_urgent_exts.next, - struct osc_extent, oe_link); + struct osc_extent, oe_link); if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count, &max_pages)) return page_count; @@ -1906,7 +1933,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) while ((ext = next_extent(ext)) != NULL) { if ((ext->oe_state != OES_CACHE) || (!list_empty(&ext->oe_link) && - ext->oe_owner != NULL)) + ext->oe_owner)) continue; 
if (!try_to_add_extent_for_io(cli, ext, rpclist, @@ -1918,10 +1945,10 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) return page_count; ext = first_extent(obj); - while (ext != NULL) { + while (ext) { if ((ext->oe_state != OES_CACHE) || /* this extent may be already in current rpclist */ - (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) { + (!list_empty(&ext->oe_link) && ext->oe_owner)) { ext = next_extent(ext); continue; } @@ -1938,6 +1965,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist) static int osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, struct osc_object *osc) + __must_hold(osc) { LIST_HEAD(rpclist); struct osc_extent *ext; @@ -1967,7 +1995,8 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, } /* we're going to grab page lock, so release object lock because - * lock order is page lock -> object lock. */ + * lock order is page lock -> object lock. + */ osc_object_unlock(osc); list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) { @@ -1979,7 +2008,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, continue; } } - if (first == NULL) { + if (!first) { first = ext; srvlock = ext->oe_srvlock; } else { @@ -2010,6 +2039,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli, static int osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, struct osc_object *osc) + __must_hold(osc) { struct osc_extent *ext; struct osc_extent *next; @@ -2019,8 +2049,7 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli, int rc = 0; LASSERT(osc_object_is_locked(osc)); - list_for_each_entry_safe(ext, next, - &osc->oo_reading_exts, oe_link) { + list_for_each_entry_safe(ext, next, &osc->oo_reading_exts, oe_link) { EASSERT(ext->oe_state == OES_LOCK_DONE, ext); if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count, &max_pages)) @@ -2051,12 +2080,14 @@ osc_send_read_rpc(const struct 
lu_env *env, struct client_obd *cli, }) /* This is called by osc_check_rpcs() to find which objects have pages that - * we could be sending. These lists are maintained by osc_makes_rpc(). */ + * we could be sending. These lists are maintained by osc_makes_rpc(). + */ static struct osc_object *osc_next_obj(struct client_obd *cli) { /* First return objects that have blocked locks so that they * will be flushed quickly and other clients can get the lock, - * then objects which have pages ready to be stuffed into RPCs */ + * then objects which have pages ready to be stuffed into RPCs + */ if (!list_empty(&cli->cl_loi_hp_ready_list)) return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item); if (!list_empty(&cli->cl_loi_ready_list)) @@ -2065,14 +2096,16 @@ static struct osc_object *osc_next_obj(struct client_obd *cli) /* then if we have cache waiters, return all objects with queued * writes. This is especially important when many small files * have filled up the cache and not been fired into rpcs because - * they don't pass the nr_pending/object threshold */ + * they don't pass the nr_pending/object threshold + */ if (!list_empty(&cli->cl_cache_waiters) && !list_empty(&cli->cl_loi_write_list)) return list_to_obj(&cli->cl_loi_write_list, write_item); /* then return all queued objects when we have an invalid import - * so that they get flushed */ - if (cli->cl_import == NULL || cli->cl_import->imp_invalid) { + * so that they get flushed + */ + if (!cli->cl_import || cli->cl_import->imp_invalid) { if (!list_empty(&cli->cl_loi_write_list)) return list_to_obj(&cli->cl_loi_write_list, write_item); if (!list_empty(&cli->cl_loi_read_list)) @@ -2083,6 +2116,7 @@ static struct osc_object *osc_next_obj(struct client_obd *cli) /* called with the loi list lock held */ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) + __must_hold(&cli->cl_loi_list_lock) { struct osc_object *osc; int rc = 0; @@ -2108,7 +2142,8 @@ static void osc_check_rpcs(const struct 
lu_env *env, struct client_obd *cli) * would be redundant if we were getting read/write work items * instead of objects. we don't want send_oap_rpc to drain a * partial read pending queue when we're given this object to - * do io on writes while there are cache waiters */ + * do io on writes while there are cache waiters + */ osc_object_lock(osc); if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) { rc = osc_send_write_rpc(env, cli, osc); @@ -2130,7 +2165,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli) * because it might be blocked at grabbing * the page lock as we mentioned. * - * Anyway, continue to drain pages. */ + * Anyway, continue to drain pages. + */ /* break; */ } } @@ -2155,12 +2191,13 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli, { int rc = 0; - if (osc != NULL && osc_list_maint(cli, osc) == 0) + if (osc && osc_list_maint(cli, osc) == 0) return 0; if (!async) { /* disable osc_lru_shrink() temporarily to avoid - * potential stack overrun problem. LU-2859 */ + * potential stack overrun problem. LU-2859 + */ atomic_inc(&cli->cl_lru_shrinkers); client_obd_list_lock(&cli->cl_loi_list_lock); osc_check_rpcs(env, cli); @@ -2168,7 +2205,7 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli, atomic_dec(&cli->cl_lru_shrinkers); } else { CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli); - LASSERT(cli->cl_writeback_work != NULL); + LASSERT(cli->cl_writeback_work); rc = ptlrpcd_queue_work(cli->cl_writeback_work); } return rc; @@ -2233,7 +2270,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, if (oap->oap_magic != OAP_MAGIC) return -EINVAL; - if (cli->cl_import == NULL || cli->cl_import->imp_invalid) + if (!cli->cl_import || cli->cl_import->imp_invalid) return -EIO; if (!list_empty(&oap->oap_pending_item) || @@ -2284,12 +2321,14 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, * 1. 
if there exists an active extent for this IO, mostly this page * can be added to the active extent and sometimes we need to * expand extent to accommodate this page; - * 2. otherwise, a new extent will be allocated. */ + * 2. otherwise, a new extent will be allocated. + */ ext = oio->oi_active; - if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) { + if (ext && ext->oe_start <= index && ext->oe_max_end >= index) { /* one chunk plus extent overhead must be enough to write this - * page */ + * page + */ grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; if (ext->oe_end >= index) grants = 0; @@ -2316,7 +2355,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, } } rc = 0; - } else if (ext != NULL) { + } else if (ext) { /* index is located outside of active extent */ need_release = 1; } @@ -2326,13 +2365,14 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, ext = NULL; } - if (ext == NULL) { + if (!ext) { int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; /* try to find new extent to cover this page */ - LASSERT(oio->oi_active == NULL); + LASSERT(!oio->oi_active); /* we may have allocated grant for this page if we failed - * to expand the previous active extent. */ + * to expand the previous active extent. + */ LASSERT(ergo(grants > 0, grants >= tmp)); rc = 0; @@ -2359,8 +2399,8 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, osc_unreserve_grant(cli, grants, tmp); } - LASSERT(ergo(rc == 0, ext != NULL)); - if (ext != NULL) { + LASSERT(ergo(rc == 0, ext)); + if (ext) { EASSERTF(ext->oe_end >= index && ext->oe_start <= index, ext, "index = %lu.\n", index); LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0); @@ -2397,15 +2437,16 @@ int osc_teardown_async_page(const struct lu_env *env, ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index); /* only truncated pages are allowed to be taken out. * See osc_extent_truncate() and osc_cache_truncate_start() - * for details. 
*/ - if (ext != NULL && ext->oe_state != OES_TRUNC) { + * for details. + */ + if (ext && ext->oe_state != OES_TRUNC) { OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n", oap2cl_page(oap)->cp_index); rc = -EBUSY; } } osc_object_unlock(obj); - if (ext != NULL) + if (ext) osc_extent_put(env, ext); return rc; } @@ -2430,7 +2471,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, osc_object_lock(obj); ext = osc_extent_lookup(obj, index); - if (ext == NULL) { + if (!ext) { osc_extent_tree_dump(D_ERROR, obj); LASSERTF(0, "page index %lu is NOT covered.\n", index); } @@ -2448,7 +2489,8 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, * exists a deadlock problem because other process can wait for * page writeback bit holding page lock; and meanwhile in * vvp_page_make_ready(), we need to grab page lock before - * really sending the RPC. */ + * really sending the RPC. + */ case OES_TRUNC: /* race with truncate, page will be redirtied */ case OES_ACTIVE: @@ -2456,7 +2498,8 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, * re-dirty the page. If we continued on here, and we were the * one making the extent active, we could deadlock waiting for * the page writeback to clear but it won't because the extent - * is active and won't be written out. */ + * is active and won't be written out. + */ rc = -EAGAIN; goto out; default: @@ -2527,12 +2570,13 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops) if (ext->oe_start <= index && ext->oe_end >= index) { LASSERT(ext->oe_state == OES_LOCK_DONE); /* For OES_LOCK_DONE state extent, it has already held - * a refcount for RPC. */ + * a refcount for RPC. 
+ */ found = osc_extent_get(ext); break; } } - if (found != NULL) { + if (found) { list_del_init(&found->oe_link); osc_update_pending(obj, cmd, -found->oe_nr_pages); osc_object_unlock(obj); @@ -2543,8 +2587,9 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops) } else { osc_object_unlock(obj); /* ok, it's been put in an rpc. only one oap gets a request - * reference */ - if (oap->oap_request != NULL) { + * reference + */ + if (oap->oap_request) { ptlrpc_mark_interrupted(oap->oap_request); ptlrpcd_wake(oap->oap_request); ptlrpc_req_finished(oap->oap_request); @@ -2579,7 +2624,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj, } ext = osc_extent_alloc(obj); - if (ext == NULL) { + if (!ext) { list_for_each_entry_safe(oap, tmp, list, oap_pending_item) { list_del_init(&oap->oap_pending_item); osc_ap_completion(env, cli, oap, 0, -ENOMEM); @@ -2621,6 +2666,7 @@ int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio, { struct client_obd *cli = osc_cli(obj); struct osc_extent *ext; + struct osc_extent *temp; struct osc_extent *waiting = NULL; pgoff_t index; LIST_HEAD(list); @@ -2634,18 +2680,19 @@ int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio, again: osc_object_lock(obj); ext = osc_extent_search(obj, index); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < index) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { EASSERT(ext->oe_state != OES_TRUNC, ext); if (ext->oe_state > OES_CACHE || ext->oe_urgent) { /* if ext is in urgent state, it means there must exist * a page already having been flushed by write_page(). * We have to wait for this extent because we can't - * truncate that page. */ + * truncate that page. 
+ */ LASSERT(!ext->oe_hp); OSC_EXTENT_DUMP(D_CACHE, ext, "waiting for busy extent\n"); @@ -2660,7 +2707,8 @@ again: /* though we grab inode mutex for write path, but we * release it before releasing extent(in osc_io_end()), * so there is a race window that an extent is still - * in OES_ACTIVE when truncate starts. */ + * in OES_ACTIVE when truncate starts. + */ LASSERT(!ext->oe_trunc_pending); ext->oe_trunc_pending = 1; } else { @@ -2678,14 +2726,14 @@ again: osc_list_maint(cli, obj); - while (!list_empty(&list)) { + list_for_each_entry_safe(ext, temp, &list, oe_link) { int rc; - ext = list_entry(list.next, struct osc_extent, oe_link); list_del_init(&ext->oe_link); /* extent may be in OES_ACTIVE state because inode mutex - * is released before osc_io_end() in file write case */ + * is released before osc_io_end() in file write case + */ if (ext->oe_state != OES_TRUNC) osc_extent_wait(env, ext, OES_TRUNC); @@ -2710,19 +2758,21 @@ again: /* we need to hold this extent in OES_TRUNC state so * that no writeback will happen. This is to avoid - * BUG 17397. */ - LASSERT(oio->oi_trunc == NULL); + * BUG 17397. + */ + LASSERT(!oio->oi_trunc); oio->oi_trunc = osc_extent_get(ext); OSC_EXTENT_DUMP(D_CACHE, ext, "trunc at %llu\n", size); } osc_extent_put(env, ext); } - if (waiting != NULL) { + if (waiting) { int rc; /* ignore the result of osc_extent_wait the write initiator - * should take care of it. */ + * should take care of it. 
+ */ rc = osc_extent_wait(env, waiting, OES_INV); if (rc < 0) OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc); @@ -2743,7 +2793,7 @@ void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio, struct osc_extent *ext = oio->oi_trunc; oio->oi_trunc = NULL; - if (ext != NULL) { + if (ext) { bool unplug = false; EASSERT(ext->oe_nr_pages > 0, ext); @@ -2786,11 +2836,11 @@ int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj, again: osc_object_lock(obj); ext = osc_extent_search(obj, index); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < index) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { int rc; if (ext->oe_start > end) @@ -2841,11 +2891,11 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, osc_object_lock(obj); ext = osc_extent_search(obj, start); - if (ext == NULL) + if (!ext) ext = first_extent(obj); else if (ext->oe_end < start) ext = next_extent(ext); - while (ext != NULL) { + while (ext) { if (ext->oe_start > end) break; @@ -2864,18 +2914,18 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, ext->oe_urgent = 1; list = &obj->oo_urgent_exts; } - if (list != NULL) + if (list) list_move_tail(&ext->oe_link, list); unplug = true; } else { /* the only discarder is lock cancelling, so - * [start, end] must contain this extent */ + * [start, end] must contain this extent + */ EASSERT(ext->oe_start >= start && ext->oe_max_end <= end, ext); osc_extent_state_set(ext, OES_LOCKING); ext->oe_owner = current; - list_move_tail(&ext->oe_link, - &discard_list); + list_move_tail(&ext->oe_link, &discard_list); osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages); } @@ -2884,14 +2934,16 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, /* It's pretty bad to wait for ACTIVE extents, because * we don't know how long we will wait for it to be * flushed since it may be blocked at awaiting more - * 
grants. We do this for the correctness of fsync. */ + * grants. We do this for the correctness of fsync. + */ LASSERT(hp == 0 && discard == 0); ext->oe_urgent = 1; break; case OES_TRUNC: /* this extent is being truncated, can't do anything * for it now. it will be set to urgent after truncate - * is finished in osc_cache_truncate_end(). */ + * is finished in osc_cache_truncate_end(). + */ default: break; } @@ -2910,7 +2962,8 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj, EASSERT(ext->oe_state == OES_LOCKING, ext); /* Discard caching pages. We don't actually write this - * extent out but we complete it as if we did. */ + * extent out but we complete it as if we did. + */ rc = osc_extent_make_ready(env, ext); if (unlikely(rc < 0)) { OSC_EXTENT_DUMP(D_ERROR, ext, diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h index 415c27e4a..d55d04d04 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h +++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h @@ -69,10 +69,12 @@ struct osc_io { /** true if this io is lockless. */ int oi_lockless; /** active extents, we know how many bytes is going to be written, - * so having an active extent will prevent it from being fragmented */ + * so having an active extent will prevent it from being fragmented + */ struct osc_extent *oi_active; /** partially truncated extent, we need to hold this extent to prevent - * page writeback from happening. */ + * page writeback from happening. + */ struct osc_extent *oi_trunc; struct obd_info oi_info; @@ -154,7 +156,8 @@ struct osc_object { atomic_t oo_nr_writes; /** Protect extent tree. Will be used to protect - * oo_{read|write}_pages soon. */ + * oo_{read|write}_pages soon. 
+ */ spinlock_t oo_lock; }; @@ -472,7 +475,7 @@ static inline struct osc_thread_info *osc_env_info(const struct lu_env *env) struct osc_thread_info *info; info = lu_context_key_get(&env->le_ctx, &osc_key); - LASSERT(info != NULL); + LASSERT(info); return info; } @@ -481,7 +484,7 @@ static inline struct osc_session *osc_env_session(const struct lu_env *env) struct osc_session *ses; ses = lu_context_key_get(env->le_ses, &osc_session_key); - LASSERT(ses != NULL); + LASSERT(ses); return ses; } @@ -522,7 +525,7 @@ static inline struct cl_object *osc2cl(const struct osc_object *obj) return (struct cl_object *)&obj->oo_cl; } -static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode) +static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode) { LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP); if (mode == CLM_READ) @@ -533,7 +536,7 @@ static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode) return LCK_GROUP; } -static inline enum cl_lock_mode osc_ldlm2cl_lock(ldlm_mode_t mode) +static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode) { LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP); if (mode == LCK_PR) @@ -627,22 +630,26 @@ struct osc_extent { oe_srvlock:1, oe_memalloc:1, /** an ACTIVE extent is going to be truncated, so when this extent - * is released, it will turn into TRUNC state instead of CACHE. */ + * is released, it will turn into TRUNC state instead of CACHE. + */ oe_trunc_pending:1, /** this extent should be written asap and someone may wait for the * write to finish. This bit is usually set along with urgent if * the extent was CACHE state. * fsync_wait extent can't be merged because new extent region may - * exceed fsync range. */ + * exceed fsync range. + */ oe_fsync_wait:1, /** covering lock is being canceled */ oe_hp:1, /** this extent should be written back asap. set if one of pages is - * called by page WB daemon, or sync write or reading requests. 
*/ + * called by page WB daemon, or sync write or reading requests. + */ oe_urgent:1; /** how many grants allocated for this extent. * Grant allocated for this extent. There is no grant allocated - * for reading extents and sync write extents. */ + * for reading extents and sync write extents. + */ unsigned int oe_grants; /** # of dirty pages in this extent */ unsigned int oe_nr_pages; @@ -655,21 +662,25 @@ struct osc_extent { struct osc_page *oe_next_page; /** start and end index of this extent, include start and end * themselves. Page offset here is the page index of osc_pages. - * oe_start is used as keyword for red-black tree. */ + * oe_start is used as keyword for red-black tree. + */ pgoff_t oe_start; pgoff_t oe_end; /** maximum ending index of this extent, this is limited by - * max_pages_per_rpc, lock extent and chunk size. */ + * max_pages_per_rpc, lock extent and chunk size. + */ pgoff_t oe_max_end; /** waitqueue - for those who want to be notified if this extent's - * state has changed. */ + * state has changed. + */ wait_queue_head_t oe_waitq; /** lock covering this extent */ struct cl_lock *oe_osclock; /** terminator of this extent. Must be true if this extent is in IO. */ struct task_struct *oe_owner; /** return value of writeback. If somebody is waiting for this extent, - * this value can be known by outside world. */ + * this value can be known by outside world. 
+ */ int oe_rc; /** max pages per rpc when this extent was created */ unsigned int oe_mppr; diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c index 7078cc57d..d4fe507f1 100644 --- a/drivers/staging/lustre/lustre/osc/osc_dev.c +++ b/drivers/staging/lustre/lustre/osc/osc_dev.c @@ -122,8 +122,8 @@ static void *osc_key_init(const struct lu_context *ctx, { struct osc_thread_info *info; - info = kmem_cache_alloc(osc_thread_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(osc_thread_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -147,8 +147,8 @@ static void *osc_session_init(const struct lu_context *ctx, { struct osc_session *info; - info = kmem_cache_alloc(osc_session_kmem, GFP_NOFS | __GFP_ZERO); - if (info == NULL) + info = kmem_cache_zalloc(osc_session_kmem, GFP_NOFS); + if (!info) info = ERR_PTR(-ENOMEM); return info; } @@ -228,7 +228,7 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env, /* Setup OSC OBD */ obd = class_name2obd(lustre_cfg_string(cfg, 0)); - LASSERT(obd != NULL); + LASSERT(obd); rc = osc_setup(obd, cfg); if (rc) { osc_device_free(env, d); diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h index a4c61463b..ea695c209 100644 --- a/drivers/staging/lustre/lustre/osc/osc_internal.h +++ b/drivers/staging/lustre/lustre/osc/osc_internal.h @@ -47,11 +47,13 @@ struct lu_env; enum async_flags { ASYNC_READY = 0x1, /* ap_make_ready will not be called before this - page is added to an rpc */ + * page is added to an rpc + */ ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */ ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called - to give the caller a chance to update - or cancel the size of the io */ + * to give the caller a chance to update + * or cancel the size of the io + */ ASYNC_HP = 0x10, }; diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c 
b/drivers/staging/lustre/lustre/osc/osc_io.c index abd0beb48..6bd0a45d8 100644 --- a/drivers/staging/lustre/lustre/osc/osc_io.c +++ b/drivers/staging/lustre/lustre/osc/osc_io.c @@ -73,7 +73,7 @@ static struct osc_page *osc_cl_page_osc(struct cl_page *page) const struct cl_page_slice *slice; slice = cl_page_at(page, &osc_device_type); - LASSERT(slice != NULL); + LASSERT(slice); return cl2osc_page(slice); } @@ -135,7 +135,7 @@ static int osc_io_submit(const struct lu_env *env, /* Top level IO. */ io = page->cp_owner; - LASSERT(io != NULL); + LASSERT(io); opg = osc_cl_page_osc(page); oap = &opg->ops_oap; @@ -266,13 +266,14 @@ static int osc_io_prepare_write(const struct lu_env *env, * This implements OBD_BRW_CHECK logic from old client. */ - if (imp == NULL || imp->imp_invalid) + if (!imp || imp->imp_invalid) result = -EIO; if (result == 0 && oio->oi_lockless) /* this page contains `invalid' data, but who cares? * nobody can access the invalid data. * in osc_io_commit_write(), we're going to write exact - * [from, to) bytes of this page to OST. -jay */ + * [from, to) bytes of this page to OST. 
-jay + */ cl_page_export(env, slice->cpl_page, 1); return result; @@ -349,14 +350,14 @@ static int trunc_check_cb(const struct lu_env *env, struct cl_io *io, __u64 start = *(__u64 *)cbdata; slice = cl_page_at(page, &osc_device_type); - LASSERT(slice != NULL); + LASSERT(slice); ops = cl2osc_page(slice); oap = &ops->ops_oap; if (oap->oap_cmd & OBD_BRW_WRITE && !list_empty(&oap->oap_pending_item)) CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n", - start, current->comm); + start, current->comm); { struct page *vmpage = cl_page_vmpage(env, page); @@ -500,7 +501,7 @@ static void osc_io_setattr_end(const struct lu_env *env, __u64 size = io->u.ci_setattr.sa_attr.lvb_size; osc_trunc_check(env, io, oio, size); - if (oio->oi_trunc != NULL) { + if (oio->oi_trunc) { osc_cache_truncate_end(env, oio, cl2osc(obj)); oio->oi_trunc = NULL; } @@ -596,7 +597,8 @@ static int osc_io_fsync_start(const struct lu_env *env, * send OST_SYNC RPC. This is bad because it causes extents * to be written osc by osc. However, we usually start * writeback before CL_FSYNC_ALL so this won't have any real - * problem. */ + * problem. 
+ */ rc = osc_cache_wait_range(env, osc, start, end); if (result == 0) result = rc; @@ -754,13 +756,12 @@ static void osc_req_attr_set(const struct lu_env *env, opg = osc_cl_page_osc(apage); apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */ lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1); - if (lock == NULL) { + if (!lock) { struct cl_object_header *head; struct cl_lock *scan; head = cl_object_header(apage->cp_obj); - list_for_each_entry(scan, &head->coh_locks, - cll_linkage) + list_for_each_entry(scan, &head->coh_locks, cll_linkage) CL_LOCK_DEBUG(D_ERROR, env, scan, "no cover page!\n"); CL_PAGE_DEBUG(D_ERROR, env, apage, @@ -770,10 +771,9 @@ static void osc_req_attr_set(const struct lu_env *env, } olck = osc_lock_at(lock); - LASSERT(olck != NULL); - LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL)); + LASSERT(ergo(opg->ops_srvlock, !olck->ols_lock)); /* check for lockless io. */ - if (olck->ols_lock != NULL) { + if (olck->ols_lock) { oa->o_handle = olck->ols_lock->l_remote_handle; oa->o_valid |= OBD_MD_FLHANDLE; } @@ -803,8 +803,8 @@ int osc_req_init(const struct lu_env *env, struct cl_device *dev, struct osc_req *or; int result; - or = kmem_cache_alloc(osc_req_kmem, GFP_NOFS | __GFP_ZERO); - if (or != NULL) { + or = kmem_cache_zalloc(osc_req_kmem, GFP_NOFS); + if (or) { cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops); result = 0; } else diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c index 71f2810d1..013df9787 100644 --- a/drivers/staging/lustre/lustre/osc/osc_lock.c +++ b/drivers/staging/lustre/lustre/osc/osc_lock.c @@ -79,7 +79,7 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle) struct ldlm_lock *lock; lock = ldlm_handle2lock(handle); - if (lock != NULL) + if (lock) LDLM_LOCK_PUT(lock); return lock; } @@ -94,42 +94,40 @@ static int osc_lock_invariant(struct osc_lock *ols) int handle_used = lustre_handle_is_used(&ols->ols_handle); if 
(ergo(osc_lock_is_lockless(ols), - ols->ols_locklessable && ols->ols_lock == NULL)) + ols->ols_locklessable && !ols->ols_lock)) return 1; /* * If all the following "ergo"s are true, return 1, otherwise 0 */ - if (!ergo(olock != NULL, handle_used)) + if (!ergo(olock, handle_used)) return 0; - if (!ergo(olock != NULL, - olock->l_handle.h_cookie == ols->ols_handle.cookie)) + if (!ergo(olock, olock->l_handle.h_cookie == ols->ols_handle.cookie)) return 0; if (!ergo(handle_used, - ergo(lock != NULL && olock != NULL, lock == olock) && - ergo(lock == NULL, olock == NULL))) + ergo(lock && olock, lock == olock) && + ergo(!lock, !olock))) return 0; /* * Check that ->ols_handle and ->ols_lock are consistent, but * take into account that they are set at the different time. */ if (!ergo(ols->ols_state == OLS_CANCELLED, - olock == NULL && !handle_used)) + !olock && !handle_used)) return 0; /* * DLM lock is destroyed only after we have seen cancellation * ast. */ - if (!ergo(olock != NULL && ols->ols_state < OLS_CANCELLED, - ((olock->l_flags & LDLM_FL_DESTROYED) == 0))) + if (!ergo(olock && ols->ols_state < OLS_CANCELLED, + ((olock->l_flags & LDLM_FL_DESTROYED) == 0))) return 0; if (!ergo(ols->ols_state == OLS_GRANTED, - olock != NULL && - olock->l_req_mode == olock->l_granted_mode && - ols->ols_hold)) + olock && olock->l_req_mode == olock->l_granted_mode && + ols->ols_hold)) return 0; return 1; } @@ -149,14 +147,15 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) spin_lock(&osc_ast_guard); dlmlock = olck->ols_lock; - if (dlmlock == NULL) { + if (!dlmlock) { spin_unlock(&osc_ast_guard); return; } olck->ols_lock = NULL; /* wb(); --- for all who checks (ols->ols_lock != NULL) before - * call to osc_lock_detach() */ + * call to osc_lock_detach() + */ dlmlock->l_ast_data = NULL; olck->ols_handle.cookie = 0ULL; spin_unlock(&osc_ast_guard); @@ -171,7 +170,8 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck) /* Must get the 
value under the lock to avoid possible races. */ old_kms = cl2osc(obj)->oo_oinfo->loi_kms; /* Update the kms. Need to loop all granted locks. - * Not a problem for the client */ + * Not a problem for the client + */ attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms); cl_object_attr_set(env, obj, attr, CAT_KMS); @@ -223,8 +223,7 @@ static int osc_lock_unuse(const struct lu_env *env, /* * Move lock into OLS_RELEASED state before calling * osc_cancel_base() so that possible synchronous cancellation - * (that always happens e.g., for liblustre) sees that lock is - * released. + * sees that lock is released. */ ols->ols_state = OLS_RELEASED; return osc_lock_unhold(ols); @@ -247,7 +246,7 @@ static void osc_lock_fini(const struct lu_env *env, * lock is destroyed immediately after upcall. */ osc_lock_unhold(ols); - LASSERT(ols->ols_lock == NULL); + LASSERT(!ols->ols_lock); LASSERT(atomic_read(&ols->ols_pageref) == 0 || atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC); @@ -292,7 +291,7 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock) lock_res_and_lock(dlm_lock); spin_lock(&osc_ast_guard); olck = dlm_lock->l_ast_data; - if (olck != NULL) { + if (olck) { struct cl_lock *lock = olck->ols_cl.cls_lock; /* * If osc_lock holds a reference on ldlm lock, return it even @@ -359,13 +358,13 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck, __u64 size; dlmlock = olck->ols_lock; - LASSERT(dlmlock != NULL); /* re-grab LVB from a dlm lock under DLM spin-locks. */ *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; size = lvb->lvb_size; /* Extend KMS up to the end of this lock and no further - * A lock on [x,y] means a KMS of up to y + 1 bytes! */ + * A lock on [x,y] means a KMS of up to y + 1 bytes! 
+ */ if (size > dlmlock->l_policy_data.l_extent.end) size = dlmlock->l_policy_data.l_extent.end + 1; if (size >= oinfo->loi_kms) { @@ -429,7 +428,8 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck, * to take a semaphore on a parent lock. This is safe, because * spin-locks are needed to protect consistency of * dlmlock->l_*_mode and LVB, and we have finished processing - * them. */ + * them. + */ unlock_res_and_lock(dlmlock); cl_lock_modify(env, lock, descr); cl_lock_signal(env, lock); @@ -444,12 +444,12 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) struct ldlm_lock *dlmlock; dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0); - LASSERT(dlmlock != NULL); + LASSERT(dlmlock); lock_res_and_lock(dlmlock); spin_lock(&osc_ast_guard); LASSERT(dlmlock->l_ast_data == olck); - LASSERT(olck->ols_lock == NULL); + LASSERT(!olck->ols_lock); olck->ols_lock = dlmlock; spin_unlock(&osc_ast_guard); @@ -470,7 +470,8 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck) olck->ols_hold = 1; /* lock reference taken by ldlm_handle2lock_long() is owned by - * osc_lock and released in osc_lock_detach() */ + * osc_lock and released in osc_lock_detach() + */ lu_ref_add(&dlmlock->l_reference, "osc_lock", olck); olck->ols_has_ref = 1; } @@ -508,10 +509,10 @@ static int osc_lock_upcall(void *cookie, int errcode) struct ldlm_lock *dlmlock; dlmlock = ldlm_handle2lock(&olck->ols_handle); - if (dlmlock != NULL) { + if (dlmlock) { lock_res_and_lock(dlmlock); spin_lock(&osc_ast_guard); - LASSERT(olck->ols_lock == NULL); + LASSERT(!olck->ols_lock); dlmlock->l_ast_data = NULL; olck->ols_handle.cookie = 0ULL; spin_unlock(&osc_ast_guard); @@ -548,7 +549,8 @@ static int osc_lock_upcall(void *cookie, int errcode) /* For AGL case, the RPC sponsor may exits the cl_lock * processing without wait() called before related OSC * lock upcall(). 
So update the lock status according - * to the enqueue result inside AGL upcall(). */ + * to the enqueue result inside AGL upcall(). + */ if (olck->ols_agl) { lock->cll_flags |= CLF_FROM_UPCALL; cl_wait_try(env, lock); @@ -571,7 +573,8 @@ static int osc_lock_upcall(void *cookie, int errcode) lu_ref_del(&lock->cll_reference, "upcall", lock); /* This maybe the last reference, so must be called after - * cl_lock_mutex_put(). */ + * cl_lock_mutex_put(). + */ cl_lock_put(env, lock); cl_env_nested_put(&nest, env); @@ -634,7 +637,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env, cancel = 0; olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; cl_lock_mutex_get(env, lock); LINVRNT(osc_lock_invariant(olck)); @@ -786,17 +789,17 @@ static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock, env = cl_env_nested_get(&nest); if (!IS_ERR(env)) { olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; cl_lock_mutex_get(env, lock); /* * ldlm_handle_cp_callback() copied LVB from request * to lock->l_lvb_data, store it in osc_lock. */ - LASSERT(dlmlock->l_lvb_data != NULL); + LASSERT(dlmlock->l_lvb_data); lock_res_and_lock(dlmlock); olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data; - if (olck->ols_lock == NULL) { + if (!olck->ols_lock) { /* * upcall (osc_lock_upcall()) hasn't yet been * called. Do nothing now, upcall will bind @@ -850,14 +853,15 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data) * environment. */ olck = osc_ast_data_get(dlmlock); - if (olck != NULL) { + if (olck) { lock = olck->ols_cl.cls_lock; /* Do not grab the mutex of cl_lock for glimpse. * See LU-1274 for details. * BTW, it's okay for cl_lock to be cancelled during * this period because server can handle this race. * See ldlm_server_glimpse_ast() for details. 
- * cl_lock_mutex_get(env, lock); */ + * cl_lock_mutex_get(env, lock); + */ cap = &req->rq_pill; req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK); req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER, @@ -1017,7 +1021,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, LASSERT(cl_lock_is_mutexed(lock)); /* make it enqueue anyway for glimpse lock, because we actually - * don't need to cancel any conflicting locks. */ + * don't need to cancel any conflicting locks. + */ if (olck->ols_glimpse) return 0; @@ -1051,7 +1056,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, * imagine that client has PR lock on [0, 1000], and thread T0 * is doing lockless IO in [500, 1500] region. Concurrent * thread T1 can see lockless data in [500, 1000], which is - * wrong, because these data are possibly stale. */ + * wrong, because these data are possibly stale. + */ if (!lockless && osc_lock_compatible(olck, scan_ols)) continue; @@ -1074,7 +1080,7 @@ static int osc_lock_enqueue_wait(const struct lu_env *env, } else { CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n", lock, conflict); - LASSERT(lock->cll_conflict == NULL); + LASSERT(!lock->cll_conflict); lu_ref_add(&conflict->cll_reference, "cancel-wait", lock); lock->cll_conflict = conflict; @@ -1111,7 +1117,7 @@ static int osc_lock_enqueue(const struct lu_env *env, "Impossible state: %d\n", ols->ols_state); LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ), - "lock = %p, ols = %p\n", lock, ols); + "lock = %p, ols = %p\n", lock, ols); result = osc_lock_enqueue_wait(env, ols); if (result == 0) { @@ -1123,7 +1129,8 @@ static int osc_lock_enqueue(const struct lu_env *env, struct ldlm_enqueue_info *einfo = &ols->ols_einfo; /* lock will be passed as upcall cookie, - * hold ref to prevent to be released. */ + * hold ref to prevent to be released. 
+ */ cl_lock_hold_add(env, lock, "upcall", lock); /* a user for lock also */ cl_lock_user_add(env, lock); @@ -1137,12 +1144,12 @@ static int osc_lock_enqueue(const struct lu_env *env, ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname); osc_lock_build_policy(env, lock, policy); result = osc_enqueue_base(osc_export(obj), resname, - &ols->ols_flags, policy, - &ols->ols_lvb, - obj->oo_oinfo->loi_kms_valid, - osc_lock_upcall, - ols, einfo, &ols->ols_handle, - PTLRPCD_SET, 1, ols->ols_agl); + &ols->ols_flags, policy, + &ols->ols_lvb, + obj->oo_oinfo->loi_kms_valid, + osc_lock_upcall, + ols, einfo, &ols->ols_handle, + PTLRPCD_SET, 1, ols->ols_agl); if (result != 0) { cl_lock_user_del(env, lock); cl_lock_unhold(env, lock, "upcall", lock); @@ -1174,7 +1181,8 @@ static int osc_lock_wait(const struct lu_env *env, } else if (olck->ols_agl) { if (lock->cll_flags & CLF_FROM_UPCALL) /* It is from enqueue RPC reply upcall for - * updating state. Do not re-enqueue. */ + * updating state. Do not re-enqueue. + */ return -ENAVAIL; olck->ols_state = OLS_NEW; } else { @@ -1197,7 +1205,7 @@ static int osc_lock_wait(const struct lu_env *env, } LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED && - lock->cll_error == 0, olck->ols_lock != NULL)); + lock->cll_error == 0, olck->ols_lock)); return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT; } @@ -1235,7 +1243,8 @@ static int osc_lock_use(const struct lu_env *env, LASSERT(lock->cll_state == CLS_INTRANSIT); LASSERT(lock->cll_users > 0); /* set a flag for osc_dlm_blocking_ast0() to signal the - * lock.*/ + * lock. + */ olck->ols_ast_wait = 1; rc = CLO_WAIT; } @@ -1257,11 +1266,12 @@ static int osc_lock_flush(struct osc_lock *ols, int discard) if (descr->cld_mode >= CLM_WRITE) { result = osc_cache_writeback_range(env, obj, - descr->cld_start, descr->cld_end, - 1, discard); + descr->cld_start, + descr->cld_end, + 1, discard); LDLM_DEBUG(ols->ols_lock, - "lock %p: %d pages were %s.\n", lock, result, - discard ? 
"discarded" : "written"); + "lock %p: %d pages were %s.\n", lock, result, + discard ? "discarded" : "written"); if (result > 0) result = 0; } @@ -1306,7 +1316,7 @@ static void osc_lock_cancel(const struct lu_env *env, LASSERT(cl_lock_is_mutexed(lock)); LINVRNT(osc_lock_invariant(olck)); - if (dlmlock != NULL) { + if (dlmlock) { int do_cancel; discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA); @@ -1318,7 +1328,8 @@ static void osc_lock_cancel(const struct lu_env *env, /* Now that we're the only user of dlm read/write reference, * mostly the ->l_readers + ->l_writers should be zero. * However, there is a corner case. - * See bug 18829 for details.*/ + * See bug 18829 for details. + */ do_cancel = (dlmlock->l_readers == 0 && dlmlock->l_writers == 0); dlmlock->l_flags |= LDLM_FL_CBPENDING; @@ -1382,7 +1393,7 @@ static void osc_lock_state(const struct lu_env *env, if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) { struct osc_io *oio = osc_env_io(env); - LASSERT(lock->ols_owner == NULL); + LASSERT(!lock->ols_owner); lock->ols_owner = oio; } else if (state != CLS_HELD) lock->ols_owner = NULL; @@ -1517,7 +1528,8 @@ static void osc_lock_lockless_state(const struct lu_env *env, lock->ols_owner = oio; /* set the io to be lockless if this lock is for io's - * host object */ + * host object + */ if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj)) oio->oi_lockless = 1; } @@ -1555,8 +1567,8 @@ int osc_lock_init(const struct lu_env *env, struct osc_lock *clk; int result; - clk = kmem_cache_alloc(osc_lock_kmem, GFP_NOFS | __GFP_ZERO); - if (clk != NULL) { + clk = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS); + if (clk) { __u32 enqflags = lock->cll_descr.cld_enq_flags; osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo); @@ -1578,8 +1590,8 @@ int osc_lock_init(const struct lu_env *env, if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA)) clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION; - LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n", - lock, 
clk, clk->ols_flags); + LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx", + lock, clk, clk->ols_flags); result = 0; } else @@ -1599,9 +1611,9 @@ int osc_dlm_lock_pageref(struct ldlm_lock *dlm) * doesn't matter because in the worst case we don't cancel a lock * which we actually can, that's no harm. */ - if (olock != NULL && + if (olock && atomic_add_return(_PAGEREF_MAGIC, - &olock->ols_pageref) != _PAGEREF_MAGIC) { + &olock->ols_pageref) != _PAGEREF_MAGIC) { atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref); rc = 1; } diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c index fdd6219aa..9d474fcdd 100644 --- a/drivers/staging/lustre/lustre/osc/osc_object.c +++ b/drivers/staging/lustre/lustre/osc/osc_object.c @@ -113,7 +113,7 @@ static void osc_object_free(const struct lu_env *env, struct lu_object *obj) LASSERT(list_empty(&osc->oo_write_item)); LASSERT(list_empty(&osc->oo_read_item)); - LASSERT(osc->oo_root.rb_node == NULL); + LASSERT(!osc->oo_root.rb_node); LASSERT(list_empty(&osc->oo_hp_exts)); LASSERT(list_empty(&osc->oo_urgent_exts)); LASSERT(list_empty(&osc->oo_rpc_exts)); @@ -255,8 +255,8 @@ struct lu_object *osc_object_alloc(const struct lu_env *env, struct osc_object *osc; struct lu_object *obj; - osc = kmem_cache_alloc(osc_object_kmem, GFP_NOFS | __GFP_ZERO); - if (osc != NULL) { + osc = kmem_cache_zalloc(osc_object_kmem, GFP_NOFS); + if (osc) { obj = osc2lu(osc); lu_object_init(obj, NULL, dev); osc->oo_cl.co_ops = &osc_ops; diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c index 2439d804f..ce9ddd515 100644 --- a/drivers/staging/lustre/lustre/osc/osc_page.c +++ b/drivers/staging/lustre/lustre/osc/osc_page.c @@ -51,111 +51,12 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, * @{ */ -/* - * Comment out osc_page_protected because it may sleep inside the - * the client_obd_list_lock. 
- * client_obd_list_lock -> osc_ap_completion -> osc_completion -> - * -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base - * -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep. - */ -#if 0 -static int osc_page_is_dlocked(const struct lu_env *env, - const struct osc_page *opg, - enum cl_lock_mode mode, int pending, int unref) -{ - struct cl_page *page; - struct osc_object *obj; - struct osc_thread_info *info; - struct ldlm_res_id *resname; - struct lustre_handle *lockh; - ldlm_policy_data_t *policy; - ldlm_mode_t dlmmode; - __u64 flags; - - might_sleep(); - - info = osc_env_info(env); - resname = &info->oti_resname; - policy = &info->oti_policy; - lockh = &info->oti_handle; - page = opg->ops_cl.cpl_page; - obj = cl2osc(opg->ops_cl.cpl_obj); - - flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED; - if (pending) - flags |= LDLM_FL_CBPENDING; - - dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW; - osc_lock_build_res(env, obj, resname); - osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index); - return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy, - dlmmode, &flags, NULL, lockh, unref); -} - -/** - * Checks an invariant that a page in the cache is covered by a lock, as - * needed. - */ -static int osc_page_protected(const struct lu_env *env, - const struct osc_page *opg, - enum cl_lock_mode mode, int unref) -{ - struct cl_object_header *hdr; - struct cl_lock *scan; - struct cl_page *page; - struct cl_lock_descr *descr; - int result; - - LINVRNT(!opg->ops_temp); - - page = opg->ops_cl.cpl_page; - if (page->cp_owner != NULL && - cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER) - /* - * If IO is done without locks (liblustre, or lloop), lock is - * not required. - */ - result = 1; - else - /* otherwise check for a DLM lock */ - result = osc_page_is_dlocked(env, opg, mode, 1, unref); - if (result == 0) { - /* maybe this page is a part of a lockless io? 
*/ - hdr = cl_object_header(opg->ops_cl.cpl_obj); - descr = &osc_env_info(env)->oti_descr; - descr->cld_mode = mode; - descr->cld_start = page->cp_index; - descr->cld_end = page->cp_index; - spin_lock(&hdr->coh_lock_guard); - list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) { - /* - * Lock-less sub-lock has to be either in HELD state - * (when io is actively going on), or in CACHED state, - * when top-lock is being unlocked: - * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse(). - */ - if ((scan->cll_state == CLS_HELD || - scan->cll_state == CLS_CACHED) && - cl_lock_ext_match(&scan->cll_descr, descr)) { - struct osc_lock *olck; - - olck = osc_lock_at(scan); - result = osc_lock_is_lockless(olck); - break; - } - } - spin_unlock(&hdr->coh_lock_guard); - } - return result; -} -#else static int osc_page_protected(const struct lu_env *env, const struct osc_page *opg, enum cl_lock_mode mode, int unref) { return 1; } -#endif /***************************************************************************** * @@ -168,7 +69,7 @@ static void osc_page_fini(const struct lu_env *env, struct osc_page *opg = cl2osc_page(slice); CDEBUG(D_TRACE, "%p\n", opg); - LASSERT(opg->ops_lock == NULL); + LASSERT(!opg->ops_lock); } static void osc_page_transfer_get(struct osc_page *opg, const char *label) @@ -204,7 +105,8 @@ static void osc_page_transfer_add(const struct lu_env *env, struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj); /* ops_lru and ops_inflight share the same field, so take it from LRU - * first and then use it as inflight. */ + * first and then use it as inflight. + */ osc_lru_del(osc_cli(obj), opg, false); spin_lock(&obj->oo_seatbelt); @@ -232,9 +134,10 @@ static int osc_page_cache_add(const struct lu_env *env, /* for sync write, kernel will wait for this page to be flushed before * osc_io_end() is called, so release it earlier. - * for mkwrite(), it's known there is no further pages. */ + * for mkwrite(), it's known there is no further pages. 
+ */ if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) { - if (oio->oi_active != NULL) { + if (oio->oi_active) { osc_extent_release(env, oio->oi_active); oio->oi_active = NULL; } @@ -258,7 +161,7 @@ static int osc_page_addref_lock(const struct lu_env *env, struct osc_lock *olock; int rc; - LASSERT(opg->ops_lock == NULL); + LASSERT(!opg->ops_lock); olock = osc_lock_at(lock); if (atomic_inc_return(&olock->ols_pageref) <= 0) { @@ -278,7 +181,7 @@ static void osc_page_putref_lock(const struct lu_env *env, struct cl_lock *lock = opg->ops_lock; struct osc_lock *olock; - LASSERT(lock != NULL); + LASSERT(lock); olock = osc_lock_at(lock); atomic_dec(&olock->ols_pageref); @@ -296,7 +199,7 @@ static int osc_page_is_under_lock(const struct lu_env *env, lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page, NULL, 1, 0); - if (lock != NULL) { + if (lock) { if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0) result = -EBUSY; cl_lock_put(env, lock); @@ -424,7 +327,7 @@ static void osc_page_delete(const struct lu_env *env, } spin_lock(&obj->oo_seatbelt); - if (opg->ops_submitter != NULL) { + if (opg->ops_submitter) { LASSERT(!list_empty(&opg->ops_inflight)); list_del_init(&opg->ops_inflight); opg->ops_submitter = NULL; @@ -434,8 +337,8 @@ static void osc_page_delete(const struct lu_env *env, osc_lru_del(osc_cli(obj), opg, true); } -void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice, - int from, int to) +static void osc_page_clip(const struct lu_env *env, + const struct cl_page_slice *slice, int from, int to) { struct osc_page *opg = cl2osc_page(slice); struct osc_async_page *oap = &opg->ops_oap; @@ -458,7 +361,8 @@ static int osc_page_cancel(const struct lu_env *env, LINVRNT(osc_page_protected(env, opg, CLM_READ, 0)); /* Check if the transferring against this page - * is completed, or not even queued. */ + * is completed, or not even queued. + */ if (opg->ops_transfer_pinned) /* FIXME: may not be interrupted.. 
*/ rc = osc_cancel_async_page(env, opg); @@ -499,30 +403,30 @@ static const struct cl_page_operations osc_page_ops = { }; int osc_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, struct page *vmpage) + struct cl_page *page, struct page *vmpage) { struct osc_object *osc = cl2osc(obj); struct osc_page *opg = cl_object_page_slice(obj, page); int result; opg->ops_from = 0; - opg->ops_to = PAGE_CACHE_SIZE; + opg->ops_to = PAGE_SIZE; result = osc_prep_async_page(osc, opg, vmpage, - cl_offset(obj, page->cp_index)); + cl_offset(obj, page->cp_index)); if (result == 0) { struct osc_io *oio = osc_env_io(env); opg->ops_srvlock = osc_io_srvlock(oio); - cl_page_slice_add(page, &opg->ops_cl, obj, - &osc_page_ops); + cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops); } /* * Cannot assert osc_page_protected() here as read-ahead * creates temporary pages outside of a lock. */ /* ops_inflight and ops_lru are the same field, but it doesn't - * hurt to initialize it twice :-) */ + * hurt to initialize it twice :-) + */ INIT_LIST_HEAD(&opg->ops_inflight); INIT_LIST_HEAD(&opg->ops_lru); @@ -557,7 +461,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg, oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC; if (!client_is_remote(osc_export(obj)) && - capable(CFS_CAP_SYS_RESOURCE)) { + capable(CFS_CAP_SYS_RESOURCE)) { oap->oap_brw_flags |= OBD_BRW_NOQUOTA; oap->oap_cmd |= OBD_BRW_NOQUOTA; } @@ -581,16 +485,18 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg, static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq); static atomic_t osc_lru_waiters = ATOMIC_INIT(0); /* LRU pages are freed in batch mode. OSC should at least free this - * number of pages to avoid running out of LRU budget, and.. */ -static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */ + * number of pages to avoid running out of LRU budget, and.. 
+ */ +static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */ /* free this number at most otherwise it will take too long time to finish. */ -static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */ +static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */ /* Check if we can free LRU slots from this OSC. If there exists LRU waiters, * we should free slots aggressively. In this way, slots are freed in a steady * step to maintain fairness among OSCs. * - * Return how many LRU pages should be freed. */ + * Return how many LRU pages should be freed. + */ static int osc_cache_too_much(struct client_obd *cli) { struct cl_client_cache *cache = cli->cl_cache; @@ -602,7 +508,8 @@ static int osc_cache_too_much(struct client_obd *cli) return min(pages, lru_shrink_max); /* if it's going to run out LRU slots, we should free some, but not - * too much to maintain fairness among OSCs. */ + * too much to maintain fairness among OSCs. + */ if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) { unsigned long tmp; @@ -630,7 +537,8 @@ static int discard_pagevec(const struct lu_env *env, struct cl_io *io, /* free LRU page only if nobody is using it. * This check is necessary to avoid freeing the pages * having already been removed from LRU and pinned - * for IO. */ + * for IO. 
+ */ if (!cl_page_in_use(page)) { cl_page_unmap(env, io, page); cl_page_discard(env, io, page); @@ -655,6 +563,7 @@ int osc_lru_shrink(struct client_obd *cli, int target) struct cl_object *clobj = NULL; struct cl_page **pvec; struct osc_page *opg; + struct osc_page *temp; int maxscan = 0; int count = 0; int index = 0; @@ -674,28 +583,26 @@ int osc_lru_shrink(struct client_obd *cli, int target) client_obd_list_lock(&cli->cl_lru_list_lock); atomic_inc(&cli->cl_lru_shrinkers); maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list)); - while (!list_empty(&cli->cl_lru_list)) { + list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) { struct cl_page *page; if (--maxscan < 0) break; - opg = list_entry(cli->cl_lru_list.next, struct osc_page, - ops_lru); page = cl_page_top(opg->ops_cl.cpl_page); if (cl_page_in_use_noref(page)) { list_move_tail(&opg->ops_lru, &cli->cl_lru_list); continue; } - LASSERT(page->cp_obj != NULL); + LASSERT(page->cp_obj); if (clobj != page->cp_obj) { struct cl_object *tmp = page->cp_obj; cl_object_get(tmp); client_obd_list_unlock(&cli->cl_lru_list_lock); - if (clobj != NULL) { + if (clobj) { count -= discard_pagevec(env, io, pvec, index); index = 0; @@ -720,11 +627,13 @@ int osc_lru_shrink(struct client_obd *cli, int target) /* move this page to the end of list as it will be discarded * soon. The page will be finally removed from LRU list in - * osc_page_delete(). */ + * osc_page_delete(). + */ list_move_tail(&opg->ops_lru, &cli->cl_lru_list); /* it's okay to grab a refcount here w/o holding lock because - * it has to grab cl_lru_list_lock to delete the page. */ + * it has to grab cl_lru_list_lock to delete the page. 
+ */ cl_page_get(page); pvec[index++] = page; if (++count >= target) @@ -740,7 +649,7 @@ int osc_lru_shrink(struct client_obd *cli, int target) } client_obd_list_unlock(&cli->cl_lru_list_lock); - if (clobj != NULL) { + if (clobj) { count -= discard_pagevec(env, io, pvec, index); cl_io_fini(env, io); @@ -775,7 +684,8 @@ static void osc_lru_add(struct client_obd *cli, struct osc_page *opg) } /* delete page from LRUlist. The page can be deleted from LRUlist for two - * reasons: redirtied or deleted from page cache. */ + * reasons: redirtied or deleted from page cache. + */ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del) { if (opg->ops_in_lru) { @@ -797,7 +707,8 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del) * this osc occupies too many LRU pages and kernel is * stealing one of them. * cl_lru_shrinkers is to avoid recursive call in case - * we're already in the context of osc_lru_shrink(). */ + * we're already in the context of osc_lru_shrink(). + */ if (atomic_read(&cli->cl_lru_shrinkers) == 0 && !memory_pressure_get()) osc_lru_shrink(cli, osc_cache_too_much(cli)); @@ -819,22 +730,23 @@ static int osc_lru_reclaim(struct client_obd *cli) int max_scans; int rc; - LASSERT(cache != NULL); + LASSERT(cache); rc = osc_lru_shrink(cli, lru_shrink_min); if (rc != 0) { CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n", - cli->cl_import->imp_obd->obd_name, rc, cli); + cli->cl_import->imp_obd->obd_name, rc, cli); return rc; } CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n", - cli->cl_import->imp_obd->obd_name, cli, - atomic_read(&cli->cl_lru_in_list), - atomic_read(&cli->cl_lru_busy)); + cli->cl_import->imp_obd->obd_name, cli, + atomic_read(&cli->cl_lru_in_list), + atomic_read(&cli->cl_lru_busy)); /* Reclaim LRU slots from other client_obd as it can't free enough - * from its own. This should rarely happen. */ + * from its own. This should rarely happen. 
+ */ spin_lock(&cache->ccc_lru_lock); LASSERT(!list_empty(&cache->ccc_lru)); @@ -844,12 +756,12 @@ static int osc_lru_reclaim(struct client_obd *cli) max_scans = atomic_read(&cache->ccc_users); while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) { cli = list_entry(cache->ccc_lru.next, struct client_obd, - cl_lru_osc); + cl_lru_osc); CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n", - cli->cl_import->imp_obd->obd_name, cli, - atomic_read(&cli->cl_lru_in_list), - atomic_read(&cli->cl_lru_busy)); + cli->cl_import->imp_obd->obd_name, cli, + atomic_read(&cli->cl_lru_in_list), + atomic_read(&cli->cl_lru_busy)); list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru); if (atomic_read(&cli->cl_lru_in_list) > 0) { @@ -864,7 +776,7 @@ static int osc_lru_reclaim(struct client_obd *cli) spin_unlock(&cache->ccc_lru_lock); CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n", - cli->cl_import->imp_obd->obd_name, cli, rc); + cli->cl_import->imp_obd->obd_name, cli, rc); return rc; } @@ -875,7 +787,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, struct client_obd *cli = osc_cli(obj); int rc = 0; - if (cli->cl_cache == NULL) /* shall not be in LRU */ + if (!cli->cl_cache) /* shall not be in LRU */ return 0; LASSERT(atomic_read(cli->cl_lru_left) >= 0); @@ -892,15 +804,16 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj, cond_resched(); /* slowest case, all of caching pages are busy, notifying - * other OSCs that we're lack of LRU slots. */ + * other OSCs that we're lack of LRU slots. 
+ */ atomic_inc(&osc_lru_waiters); gen = atomic_read(&cli->cl_lru_in_list); rc = l_wait_event(osc_lru_waitq, - atomic_read(cli->cl_lru_left) > 0 || - (atomic_read(&cli->cl_lru_in_list) > 0 && - gen != atomic_read(&cli->cl_lru_in_list)), - &lwi); + atomic_read(cli->cl_lru_left) > 0 || + (atomic_read(&cli->cl_lru_in_list) > 0 && + gen != atomic_read(&cli->cl_lru_in_list)), + &lwi); atomic_dec(&osc_lru_waiters); if (rc < 0) diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c index e70e7961d..194d8ede4 100644 --- a/drivers/staging/lustre/lustre/osc/osc_quota.c +++ b/drivers/staging/lustre/lustre/osc/osc_quota.c @@ -13,11 +13,6 @@ * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA - * * GPL HEADER END */ /* @@ -35,8 +30,8 @@ static inline struct osc_quota_info *osc_oqi_alloc(u32 id) { struct osc_quota_info *oqi; - oqi = kmem_cache_alloc(osc_quota_kmem, GFP_NOFS | __GFP_ZERO); - if (oqi != NULL) + oqi = kmem_cache_zalloc(osc_quota_kmem, GFP_NOFS); + if (oqi) oqi->oqi_id = id; return oqi; @@ -52,10 +47,12 @@ int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]) oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]); if (oqi) { /* do not try to access oqi here, it could have been - * freed by osc_quota_setdq() */ + * freed by osc_quota_setdq() + */ /* the slot is busy, the user is about to run out of - * quota space on this OST */ + * quota space on this OST + */ CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n", type == USRQUOTA ? 
"user" : "grout", qid[type]); return NO_QUOTA; @@ -89,12 +86,13 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]); if ((flags & FL_QUOTA_FLAG(type)) != 0) { /* This ID is getting close to its quota limit, let's - * switch to sync I/O */ - if (oqi != NULL) + * switch to sync I/O + */ + if (oqi) continue; oqi = osc_oqi_alloc(qid[type]); - if (oqi == NULL) { + if (!oqi) { rc = -ENOMEM; break; } @@ -113,8 +111,9 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[], qid[type], rc); } else { /* This ID is now off the hook, let's remove it from - * the hash table */ - if (oqi == NULL) + * the hash table + */ + if (!oqi) continue; oqi = cfs_hash_del_key(cli->cl_quota_hash[type], @@ -147,7 +146,7 @@ oqi_keycmp(const void *key, struct hlist_node *hnode) struct osc_quota_info *oqi; u32 uid; - LASSERT(key != NULL); + LASSERT(key); uid = *((u32 *)key); oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash); @@ -218,7 +217,7 @@ int osc_quota_setup(struct obd_device *obd) CFS_HASH_MAX_THETA, "a_hash_ops, CFS_HASH_DEFAULT); - if (cli->cl_quota_hash[type] == NULL) + if (!cli->cl_quota_hash[type]) break; } @@ -252,7 +251,7 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_OST_QUOTACTL, LUSTRE_OST_VERSION, OST_QUOTACTL); - if (req == NULL) + if (!req) return -ENOMEM; oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -294,7 +293,7 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp, req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION, OST_QUOTACHECK); - if (req == NULL) + if (!req) return -ENOMEM; body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); @@ -302,8 +301,8 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp, ptlrpc_request_set_replen(req); - /* the next poll will find -ENODATA, that 
means quotacheck is - * going on */ + /* the next poll will find -ENODATA, that means quotacheck is going on + */ cli->cl_qchk_stat = -ENODATA; rc = ptlrpc_queue_wait(req); if (rc) diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c index 7034f0a94..30526ebca 100644 --- a/drivers/staging/lustre/lustre/osc/osc_request.c +++ b/drivers/staging/lustre/lustre/osc/osc_request.c @@ -104,7 +104,6 @@ struct osc_enqueue_args { static void osc_release_ppga(struct brw_page **ppga, u32 count); static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *data, int rc); -static int osc_cleanup(struct obd_device *obd); /* Pack OSC object metadata for disk storage (LE byte order). */ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, @@ -113,18 +112,18 @@ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, int lmm_size; lmm_size = sizeof(**lmmp); - if (lmmp == NULL) + if (!lmmp) return lmm_size; - if (*lmmp != NULL && lsm == NULL) { + if (*lmmp && !lsm) { kfree(*lmmp); *lmmp = NULL; return 0; - } else if (unlikely(lsm != NULL && ostid_id(&lsm->lsm_oi) == 0)) { + } else if (unlikely(lsm && ostid_id(&lsm->lsm_oi) == 0)) { return -EBADF; } - if (*lmmp == NULL) { + if (!*lmmp) { *lmmp = kzalloc(lmm_size, GFP_NOFS); if (!*lmmp) return -ENOMEM; @@ -143,7 +142,7 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, int lsm_size; struct obd_import *imp = class_exp2cliimp(exp); - if (lmm != NULL) { + if (lmm) { if (lmm_bytes < sizeof(*lmm)) { CERROR("%s: lov_mds_md too small: %d, need %d\n", exp->exp_obd->obd_name, lmm_bytes, @@ -160,23 +159,23 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, } lsm_size = lov_stripe_md_size(1); - if (lsmp == NULL) + if (!lsmp) return lsm_size; - if (*lsmp != NULL && lmm == NULL) { + if (*lsmp && !lmm) { kfree((*lsmp)->lsm_oinfo[0]); kfree(*lsmp); *lsmp = NULL; return 0; } - if 
(*lsmp == NULL) { + if (!*lsmp) { *lsmp = kzalloc(lsm_size, GFP_NOFS); - if (unlikely(*lsmp == NULL)) + if (unlikely(!*lsmp)) return -ENOMEM; (*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo), GFP_NOFS); - if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) { + if (unlikely(!(*lsmp)->lsm_oinfo[0])) { kfree(*lsmp); return -ENOMEM; } @@ -185,11 +184,11 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, return -EBADF; } - if (lmm != NULL) + if (lmm) /* XXX zero *lsmp? */ ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi); - if (imp != NULL && + if (imp && (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES)) (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes; else @@ -246,7 +245,7 @@ static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); @@ -276,7 +275,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); @@ -294,7 +293,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -321,7 +320,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp, LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); @@ -339,7 +338,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if 
(body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -362,7 +361,7 @@ static int osc_setattr_interpret(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out; } @@ -384,7 +383,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); @@ -451,7 +450,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, } req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -482,7 +481,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, goto out_req; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { rc = -EPROTO; goto out_req; } @@ -500,7 +499,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa, lsm->lsm_oi = oa->o_oi; *ea = lsm; - if (oti != NULL) { + if (oti) { oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg); if (oa->o_valid & OBD_MD_FLCOOKIE) { @@ -530,7 +529,7 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH); @@ -573,7 +572,7 @@ static int osc_sync_interpret(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { CERROR("can't unpack ost_body\n"); rc = -EPROTO; goto out; @@ -595,7 +594,7 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, int rc; req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC); - if (req == NULL) + if (!req) return -ENOMEM; rc = 
ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC); @@ -629,10 +628,11 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo, /* Find and cancel locally locks matched by @mode in the resource found by * @objid. Found locks are added into @cancel list. Returns the amount of - * locks added to @cancels list. */ + * locks added to @cancels list. + */ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa, struct list_head *cancels, - ldlm_mode_t mode, __u64 lock_flags) + enum ldlm_mode mode, __u64 lock_flags) { struct ldlm_namespace *ns = exp->exp_obd->obd_namespace; struct ldlm_res_id res_id; @@ -644,13 +644,14 @@ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa, * * This distinguishes from a case when ELC is not supported originally, * when we still want to cancel locks in advance and just cancel them - * locally, without sending any RPC. */ + * locally, without sending any RPC. + */ if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns)) return 0; ostid_build_res_name(&oa->o_oi, &res_id); res = ldlm_resource_get(ns, NULL, &res_id, 0, 0); - if (res == NULL) + if (!res) return 0; LDLM_RESOURCE_ADDREF(res); @@ -723,7 +724,8 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp, * If the client dies, or the OST is down when the object should be destroyed, * the records are not cancelled, and when the OST reconnects to the MDS next, * it will retrieve the llog unlink logs and then sends the log cancellation - * cookies to the MDS after committing destroy transactions. */ + * cookies to the MDS after committing destroy transactions. 
+ */ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, struct obdo *oa, struct lov_stripe_md *ea, struct obd_trans_info *oti, struct obd_export *md_export) @@ -743,7 +745,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, LDLM_FL_DISCARD_DATA); req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY); - if (req == NULL) { + if (!req) { ldlm_lock_list_put(&cancels, l_bl_ast, count); return -ENOMEM; } @@ -758,7 +760,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ ptlrpc_at_set_req_timeout(req); - if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE) + if (oti && oa->o_valid & OBD_MD_FLCOOKIE) oa->o_lcookie = *oti->oti_logcookies; body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); LASSERT(body); @@ -769,7 +771,8 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp, /* If osc_destroy is for destroying the unlink orphan, * sent from MDT to OST, which should not be blocked here, * because the process might be triggered by ptlrpcd, and - * it is not good to block ptlrpcd thread (b=16006)*/ + * it is not good to block ptlrpcd thread (b=16006) + */ if (!(oa->o_flags & OBD_FL_DELORPHAN)) { req->rq_interpret_reply = osc_destroy_interpret; if (!osc_can_send_destroy(cli)) { @@ -810,7 +813,8 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, (long)(obd_max_dirty_pages + 1))) { /* The atomic_read() allowing the atomic_inc() are * not covered by a lock thus they may safely race and trip - * this CERROR() unless we add in a small fudge factor (+1). 
+ */ CERROR("dirty %d - %d > system dirty_max %d\n", atomic_read(&obd_dirty_pages), atomic_read(&obd_dirty_transit_pages), @@ -822,7 +826,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, oa->o_undirty = 0; } else { long max_in_flight = (cli->cl_max_pages_per_rpc << - PAGE_CACHE_SHIFT)* + PAGE_SHIFT)* (cli->cl_max_rpcs_in_flight + 1); oa->o_undirty = max(cli->cl_dirty_max, max_in_flight); } @@ -839,7 +843,7 @@ void osc_update_next_shrink(struct client_obd *cli) { cli->cl_next_shrink_grant = cfs_time_shift(cli->cl_grant_shrink_interval); - CDEBUG(D_CACHE, "next time %ld to shrink grant \n", + CDEBUG(D_CACHE, "next time %ld to shrink grant\n", cli->cl_next_shrink_grant); } @@ -900,15 +904,16 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa) /* Shrink the current grant, either from some large amount to enough for a * full set of in-flight RPCs, or if we have already shrunk to that limit * then to enough for a single RPC. This avoids keeping more grant than - * needed, and avoids shrinking the grant piecemeal. */ + * needed, and avoids shrinking the grant piecemeal. + */ static int osc_shrink_grant(struct client_obd *cli) { __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * - (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT); + (cli->cl_max_pages_per_rpc << PAGE_SHIFT); client_obd_list_lock(&cli->cl_loi_list_lock); if (cli->cl_avail_grant <= target_bytes) - target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; client_obd_list_unlock(&cli->cl_loi_list_lock); return osc_shrink_grant_to_target(cli, target_bytes); @@ -922,9 +927,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes) client_obd_list_lock(&cli->cl_loi_list_lock); /* Don't shrink if we are already above or below the desired limit * We don't want to shrink below a single RPC, as that will negatively - * impact block allocation and long-term performance. 
*/ - if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT) - target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + * impact block allocation and long-term performance. + */ + if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT) + target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; if (target_bytes >= cli->cl_avail_grant) { client_obd_list_unlock(&cli->cl_loi_list_lock); @@ -970,8 +976,9 @@ static int osc_should_shrink_grant(struct client_obd *client) if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) { /* Get the current RPC size directly, instead of going via: * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export) - * Keep comment here so that it can be found by searching. */ - int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + * Keep comment here so that it can be found by searching. + */ + int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT; if (client->cl_import->imp_state == LUSTRE_IMP_FULL && client->cl_avail_grant > brw_size) @@ -986,8 +993,7 @@ static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data) { struct client_obd *client; - list_for_each_entry(client, &item->ti_obd_list, - cl_grant_shrink_list) { + list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) { if (osc_should_shrink_grant(client)) osc_shrink_grant(client); } @@ -1004,10 +1010,10 @@ static int osc_add_shrink_grant(struct client_obd *client) &client->cl_grant_shrink_list); if (rc) { CERROR("add grant client %s error %d\n", - client->cl_import->imp_obd->obd_name, rc); + client->cl_import->imp_obd->obd_name, rc); return rc; } - CDEBUG(D_CACHE, "add grant client %s \n", + CDEBUG(D_CACHE, "add grant client %s\n", client->cl_import->imp_obd->obd_name); osc_update_next_shrink(client); return 0; @@ -1040,12 +1046,13 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant, ocd->ocd_grant, cli->cl_dirty); /* workaround 
for servers which do not have the patch from - * LU-2679 */ + * LU-2679 + */ cli->cl_avail_grant = ocd->ocd_grant; } /* determine the appropriate chunk size used by osc_extent. */ - cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize); + cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize); client_obd_list_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n", @@ -1060,7 +1067,8 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) /* We assume that the reason this OSC got a short read is because it read * beyond the end of a stripe file; i.e. lustre is reading a sparse file * via the LOV, and it _knows_ it's reading inside the file, it's just that - * this stripe never got written at or beyond this stripe offset yet. */ + * this stripe never got written at or beyond this stripe offset yet. + */ static void handle_short_read(int nob_read, u32 page_count, struct brw_page **pga) { @@ -1106,7 +1114,7 @@ static int check_write_rcs(struct ptlrpc_request *req, remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS, sizeof(*remote_rcs) * niocount); - if (remote_rcs == NULL) { + if (!remote_rcs) { CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n"); return -EPROTO; } @@ -1118,7 +1126,7 @@ static int check_write_rcs(struct ptlrpc_request *req, if (remote_rcs[i] != 0) { CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n", - i, remote_rcs[i], req); + i, remote_rcs[i], req); return -EPROTO; } } @@ -1139,7 +1147,8 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) OBD_BRW_SYNC | OBD_BRW_ASYNC|OBD_BRW_NOQUOTA); /* warn if we try to combine flags that we don't know to be - * safe to combine */ + * safe to combine + */ if (unlikely((p1->flag & mask) != (p2->flag & mask))) { CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n", p1->flag, p2->flag); @@ -1152,7 +1161,7 @@ 
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2) static u32 osc_checksum_bulk(int nob, u32 pg_count, struct brw_page **pga, int opc, - cksum_type_t cksum_type) + enum cksum_type cksum_type) { __u32 cksum; int i = 0; @@ -1174,7 +1183,8 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, int count = pga[i]->count > nob ? nob : pga[i]->count; /* corrupt the data before we compute the checksum, to - * simulate an OST->client data error */ + * simulate an OST->client data error + */ if (i == 0 && opc == OST_READ && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) { unsigned char *ptr = kmap(pga[i]->pg); @@ -1184,7 +1194,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, kunmap(pga[i]->pg); } cfs_crypto_hash_update_page(hdesc, pga[i]->pg, - pga[i]->off & ~CFS_PAGE_MASK, + pga[i]->off & ~CFS_PAGE_MASK, count); CDEBUG(D_PAGE, "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n", @@ -1205,7 +1215,8 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count, cfs_crypto_hash_final(hdesc, NULL, NULL); /* For sending we only compute the wrong checksum instead - * of corrupting the data so it is still correct on a redo */ + * of corrupting the data so it is still correct on a redo + */ if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND)) cksum++; @@ -1244,7 +1255,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, opc = OST_READ; req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ); } - if (req == NULL) + if (!req) return -ENOMEM; for (niocount = i = 1; i < page_count; i++) { @@ -1266,7 +1277,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ ptlrpc_at_set_req_timeout(req); /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own - * retry logic */ + * retry logic + */ req->rq_no_retry_einprogress = 1; desc = ptlrpc_prep_bulk_imp(req, page_count, @@ -1274,7 +1286,7 @@ static int osc_brw_prep_request(int cmd, 
struct client_obd *cli, opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK, OST_BULK_PORTAL); - if (desc == NULL) { + if (!desc) { rc = -ENOMEM; goto out; } @@ -1283,7 +1295,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, body = req_capsule_client_get(pill, &RMF_OST_BODY); ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ); niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE); - LASSERT(body != NULL && ioobj != NULL && niobuf != NULL); + LASSERT(body && ioobj && niobuf); lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); @@ -1293,7 +1305,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, * that might be send for this request. The actual number is decided * when the RPC is finally sent in ptlrpc_register_bulk(). It sends * "max - 1" for old client compatibility sending "0", and also so the - * the actual maximum is a power-of-two number, not one less. LU-1431 */ + * the actual maximum is a power-of-two number, not one less. 
LU-1431 + */ ioobj_max_brw_set(ioobj, desc->bd_md_max_brw); LASSERT(page_count > 0); pg_prev = pga[0]; @@ -1304,9 +1317,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, LASSERT(pg->count > 0); /* make sure there is no gap in the middle of page array */ LASSERTF(page_count == 1 || - (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) && + (ergo(i == 0, poff + pg->count == PAGE_SIZE) && ergo(i > 0 && i < page_count - 1, - poff == 0 && pg->count == PAGE_CACHE_SIZE) && + poff == 0 && pg->count == PAGE_SIZE) && ergo(i == page_count - 1, poff == 0)), "i: %d/%d pg: %p off: %llu, count: %u\n", i, page_count, pg, pg->off, pg->count); @@ -1355,8 +1368,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, if (cli->cl_checksum && !sptlrpc_flavor_has_bulk(&req->rq_flvr)) { /* store cl_cksum_type in a local variable since - * it can be changed via lprocfs */ - cksum_type_t cksum_type = cli->cl_cksum_type; + * it can be changed via lprocfs + */ + enum cksum_type cksum_type = cli->cl_cksum_type; if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) { oa->o_flags &= OBD_FL_LOCAL_MASK; @@ -1375,7 +1389,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, oa->o_flags |= cksum_type_pack(cksum_type); } else { /* clear out the checksum flag, in case this is a - * resend but cl_checksum is no longer set. b=11238 */ + * resend but cl_checksum is no longer set. 
b=11238 + */ oa->o_valid &= ~OBD_MD_FLCKSUM; } oa->o_cksum = body->oa.o_cksum; @@ -1415,11 +1430,11 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer, __u32 client_cksum, __u32 server_cksum, int nob, u32 page_count, struct brw_page **pga, - cksum_type_t client_cksum_type) + enum cksum_type client_cksum_type) { __u32 new_cksum; char *msg; - cksum_type_t cksum_type; + enum cksum_type cksum_type; if (server_cksum == client_cksum) { CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum); @@ -1472,9 +1487,9 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) return rc; } - LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc); + LASSERTF(req->rq_repmsg, "rc = %d\n", rc); body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); - if (body == NULL) { + if (!body) { DEBUG_REQ(D_INFO, req, "Can't unpack body\n"); return -EPROTO; } @@ -1538,7 +1553,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) if (rc != req->rq_bulk->bd_nob_transferred) { CERROR("Unexpected rc %d (%d transferred)\n", - rc, req->rq_bulk->bd_nob_transferred); + rc, req->rq_bulk->bd_nob_transferred); return -EPROTO; } @@ -1550,7 +1565,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) __u32 server_cksum = body->oa.o_cksum; char *via = ""; char *router = ""; - cksum_type_t cksum_type; + enum cksum_type cksum_type; cksum_type = cksum_type_unpack(body->oa.o_valid&OBD_MD_FLFLAGS ? 
body->oa.o_flags : 0); @@ -1627,7 +1642,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, return rc; list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { - if (oap->oap_request != NULL) { + if (oap->oap_request) { LASSERTF(request == oap->oap_request, "request %p != oap_request %p\n", request, oap->oap_request); @@ -1638,12 +1653,14 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, } } /* New request takes over pga and oaps from old request. - * Note that copying a list_head doesn't work, need to move it... */ + * Note that copying a list_head doesn't work, need to move it... + */ aa->aa_resends++; new_req->rq_interpret_reply = request->rq_interpret_reply; new_req->rq_async_args = request->rq_async_args; /* cap resend delay to the current request timeout, this is similar to - * what ptlrpc does (see after_reply()) */ + * what ptlrpc does (see after_reply()) + */ if (aa->aa_resends > new_req->rq_timeout) new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout; else @@ -1669,7 +1686,8 @@ static int osc_brw_redo_request(struct ptlrpc_request *request, /* XXX: This code will run into problem if we're going to support * to add a series of BRW RPCs into a self-defined ptlrpc_request_set * and wait for all of them to be finished. We should inherit request - * set from old request. */ + * set from old request. + */ ptlrpcd_add_req(new_req); DEBUG_REQ(D_INFO, new_req, "new request"); @@ -1709,7 +1727,7 @@ static void sort_brw_pages(struct brw_page **array, int num) static void osc_release_ppga(struct brw_page **ppga, u32 count) { - LASSERT(ppga != NULL); + LASSERT(ppga); kfree(ppga); } @@ -1725,7 +1743,8 @@ static int brw_interpret(const struct lu_env *env, rc = osc_brw_fini_request(req, rc); CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc); /* When server return -EINPROGRESS, client should always retry - * regardless of the number of times the bulk was resent already. 
*/ + * regardless of the number of times the bulk was resent already. + */ if (osc_recoverable_error(rc)) { if (req->rq_import_generation != req->rq_import->imp_generation) { @@ -1748,7 +1767,7 @@ static int brw_interpret(const struct lu_env *env, } list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) { - if (obj == NULL && rc == 0) { + if (!obj && rc == 0) { obj = osc2cl(ext->oe_obj); cl_object_get(obj); } @@ -1759,7 +1778,7 @@ static int brw_interpret(const struct lu_env *env, LASSERT(list_empty(&aa->aa_exts)); LASSERT(list_empty(&aa->aa_oaps)); - if (obj != NULL) { + if (obj) { struct obdo *oa = aa->aa_oa; struct cl_attr *attr = &osc_env_info(env)->oti_attr; unsigned long valid = 0; @@ -1798,7 +1817,8 @@ static int brw_interpret(const struct lu_env *env, client_obd_list_lock(&cli->cl_loi_list_lock); /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters * is called so we know whether to go to sync BRWs or wait for more - * RPCs to complete */ + * RPCs to complete + */ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) cli->cl_w_in_flight--; else @@ -1857,7 +1877,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, oap->oap_count; else LASSERT(oap->oap_page_off + oap->oap_count == - PAGE_CACHE_SIZE); + PAGE_SIZE); } } @@ -1871,13 +1891,13 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, } pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS); - if (pga == NULL) { + if (!pga) { rc = -ENOMEM; goto out; } - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); - if (oa == NULL) { + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); + if (!oa) { rc = -ENOMEM; goto out; } @@ -1886,7 +1906,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, list_for_each_entry(oap, &rpc_list, oap_rpc_item) { struct cl_page *page = oap2cl_page(oap); - if (clerq == NULL) { + if (!clerq) { clerq = cl_req_alloc(env, page, crt, 1 /* only 1-object rpcs for now */); if (IS_ERR(clerq)) { @@ -1907,7 +1927,7 @@ 
int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, } /* always get the data for the obdo for the rpc */ - LASSERT(clerq != NULL); + LASSERT(clerq); crattr->cra_oa = oa; cl_req_attr_set(env, clerq, crattr, ~0ULL); if (lock) { @@ -1923,7 +1943,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, sort_brw_pages(pga, page_count); rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count, - pga, &req, 1, 0); + pga, &req, 1, 0); if (rc != 0) { CERROR("prep_req failed: %d\n", rc); goto out; @@ -1938,7 +1958,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, * we race with setattr (locally or in queue at OST). If OST gets * later setattr before earlier BRW (as determined by the request xid), * the OST will not use BRW timestamps. Sadly, there is no obvious - * way to do this in a single call. bug 10150 */ + * way to do this in a single call. bug 10150 + */ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); crattr->cra_oa = &body->oa; cl_req_attr_set(env, clerq, crattr, @@ -1955,23 +1976,24 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, aa->aa_clerq = clerq; /* queued sync pages can be torn down while the pages - * were between the pending list and the rpc */ + * were between the pending list and the rpc + */ tmp = NULL; list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) { /* only one oap gets a request reference */ - if (tmp == NULL) + if (!tmp) tmp = oap; if (oap->oap_interrupted && !req->rq_intr) { CDEBUG(D_INODE, "oap %p in req %p interrupted\n", - oap, req); + oap, req); ptlrpc_mark_interrupted(req); } } - if (tmp != NULL) + if (tmp) tmp->oap_request = ptlrpc_request_addref(req); client_obd_list_lock(&cli->cl_loi_list_lock); - starting_offset >>= PAGE_CACHE_SHIFT; + starting_offset >>= PAGE_SHIFT; if (cmd == OBD_BRW_READ) { cli->cl_r_in_flight++; lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count); @@ -2001,16 +2023,17 @@ out: kfree(crattr); if (rc != 0) { - LASSERT(req 
== NULL); + LASSERT(!req); if (oa) kmem_cache_free(obdo_cachep, oa); kfree(pga); /* this should happen rarely and is pretty bad, it makes the - * pending list not follow the dirty order */ + * pending list not follow the dirty order + */ while (!list_empty(ext_list)) { ext = list_entry(ext_list->next, struct osc_extent, - oe_link); + oe_link); list_del_init(&ext->oe_link); osc_extent_finish(env, ext, 0, rc); } @@ -2026,7 +2049,6 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock, void *data = einfo->ei_cbdata; int set = 0; - LASSERT(lock != NULL); LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl); LASSERT(lock->l_resource->lr_type == einfo->ei_type); LASSERT(lock->l_completion_ast == einfo->ei_cb_cp); @@ -2035,7 +2057,7 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock, lock_res_and_lock(lock); spin_lock(&osc_ast_guard); - if (lock->l_ast_data == NULL) + if (!lock->l_ast_data) lock->l_ast_data = data; if (lock->l_ast_data == data) set = 1; @@ -2052,7 +2074,7 @@ static int osc_set_data_with_check(struct lustre_handle *lockh, struct ldlm_lock *lock = ldlm_handle2lock(lockh); int set = 0; - if (lock != NULL) { + if (lock) { set = osc_set_lock_data_with_check(lock, einfo); LDLM_LOCK_PUT(lock); } else @@ -2064,7 +2086,8 @@ static int osc_set_data_with_check(struct lustre_handle *lockh, /* find any ldlm lock of the inode in osc * return 0 not find * 1 find one - * < 0 error */ + * < 0 error + */ static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm, ldlm_iterator_t replace, void *data) { @@ -2095,7 +2118,6 @@ static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb, rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); - LASSERT(rep != NULL); rep->lock_policy_res1 = ptlrpc_status_ntoh(rep->lock_policy_res1); if (rep->lock_policy_res1) @@ -2127,18 +2149,21 @@ static int osc_enqueue_interpret(const struct lu_env *env, __u64 *flags = aa->oa_flags; /* Make a local copy of a lock handle and a mode, 
because aa->oa_* - * might be freed anytime after lock upcall has been called. */ + * might be freed anytime after lock upcall has been called. + */ lustre_handle_copy(&handle, aa->oa_lockh); mode = aa->oa_ei->ei_mode; /* ldlm_cli_enqueue is holding a reference on the lock, so it must - * be valid. */ + * be valid. + */ lock = ldlm_handle2lock(&handle); /* Take an additional reference so that a blocking AST that * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed * to arrive after an upcall has been executed by - * osc_enqueue_fini(). */ + * osc_enqueue_fini(). + */ ldlm_lock_addref(&handle, mode); /* Let CP AST to grant the lock first. */ @@ -2170,7 +2195,7 @@ static int osc_enqueue_interpret(const struct lu_env *env, */ ldlm_lock_decref(&handle, mode); - LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n", + LASSERTF(lock, "lockh %p, req %p, aa %p - client evicted?\n", aa->oa_lockh, req, aa); ldlm_lock_decref(&handle, mode); LDLM_LOCK_PUT(lock); @@ -2185,7 +2210,8 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1; * others may take a considerable amount of time in a case of ost failure; and * when other sync requests do not get released lock from a client, the client * is excluded from the cluster -- such scenarious make the life difficult, so - * release locks just after they are obtained. */ + * release locks just after they are obtained. + */ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, __u64 *flags, ldlm_policy_data_t *policy, struct ost_lvb *lvb, int kms_valid, @@ -2198,11 +2224,12 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, struct ptlrpc_request *req = NULL; int intent = *flags & LDLM_FL_HAS_INTENT; __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY); - ldlm_mode_t mode; + enum ldlm_mode mode; int rc; /* Filesystem lock extents are extended to page boundaries so that - * dealing with the page cache is a little smoother. 
*/ + * dealing with the page cache is a little smoother. + */ policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK; policy->l_extent.end |= ~CFS_PAGE_MASK; @@ -2226,7 +2253,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, * * At some point we should cancel the read lock instead of making them * send us a blocking callback, but there are problems with canceling - * locks out from other users right now, too. */ + * locks out from other users right now, too. + */ mode = einfo->ei_mode; if (einfo->ei_mode == LCK_PR) mode |= LCK_PW; @@ -2238,7 +2266,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) { /* For AGL, if enqueue RPC is sent but the lock is not * granted, then skip to process this strpe. - * Return -ECANCELED to tell the caller. */ + * Return -ECANCELED to tell the caller. + */ ldlm_lock_decref(lockh, mode); LDLM_LOCK_PUT(matched); return -ECANCELED; @@ -2247,19 +2276,22 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, if (osc_set_lock_data_with_check(matched, einfo)) { *flags |= LDLM_FL_LVB_READY; /* addref the lock only if not async requests and PW - * lock is matched whereas we asked for PR. */ + * lock is matched whereas we asked for PR. + */ if (!rqset && einfo->ei_mode != mode) ldlm_lock_addref(lockh, LCK_PR); if (intent) { /* I would like to be able to ASSERT here that * rss <= kms, but I can't, for reasons which - * are explained in lov_enqueue() */ + * are explained in lov_enqueue() + */ } /* We already have a lock, and it's referenced. * * At this point, the cl_lock::cll_state is CLS_QUEUING, - * AGL upcall may change it to CLS_HELD directly. */ + * AGL upcall may change it to CLS_HELD directly. 
+ */ (*upcall)(cookie, ELDLM_OK); if (einfo->ei_mode != mode) @@ -2281,7 +2313,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE_LVB); - if (req == NULL) + if (!req) return -ENOMEM; rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0); @@ -2341,27 +2373,29 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id, { struct obd_device *obd = exp->exp_obd; __u64 lflags = *flags; - ldlm_mode_t rc; + enum ldlm_mode rc; if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH)) return -EIO; /* Filesystem lock extents are extended to page boundaries so that - * dealing with the page cache is a little smoother */ + * dealing with the page cache is a little smoother + */ policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK; policy->l_extent.end |= ~CFS_PAGE_MASK; /* Next, search for already existing extent locks that will cover us */ /* If we're trying to read, we also search for an existing PW lock. The * VFS and page cache already protect us locally, so lots of readers/ - * writers can share a single PW lock. */ + * writers can share a single PW lock. + */ rc = mode; if (mode == LCK_PR) rc |= LCK_PW; rc = ldlm_lock_match(obd->obd_namespace, lflags, res_id, type, policy, rc, lockh, unref); if (rc) { - if (data != NULL) { + if (data) { if (!osc_set_data_with_check(lockh, data)) { if (!(lflags & LDLM_FL_TEST_LOCK)) ldlm_lock_decref(lockh, rc); @@ -2398,8 +2432,9 @@ static int osc_statfs_interpret(const struct lu_env *env, * due to issues at a higher level (LOV). 
* Exit immediately since the caller is * aware of the problem and takes care - * of the clean up */ - return rc; + * of the clean up + */ + return rc; if ((rc == -ENOTCONN || rc == -EAGAIN) && (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) { @@ -2411,7 +2446,7 @@ static int osc_statfs_interpret(const struct lu_env *env, goto out; msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (msfs == NULL) { + if (!msfs) { rc = -EPROTO; goto out; } @@ -2436,9 +2471,10 @@ static int osc_statfs_async(struct obd_export *exp, * extra calls into the filesystem if that isn't necessary (e.g. * during mount that would help a bit). Having relative timestamps * is not so great if request processing is slow, while absolute - * timestamps are not ideal because they need time synchronization. */ + * timestamps are not ideal because they need time synchronization. + */ req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS); @@ -2474,8 +2510,9 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, struct obd_import *imp = NULL; int rc; - /*Since the request might also come from lprocfs, so we need - *sync this with client_disconnect_export Bug15684*/ + /* Since the request might also come from lprocfs, so we need + * sync this with client_disconnect_export Bug15684 + */ down_read(&obd->u.cli.cl_sem); if (obd->u.cli.cl_import) imp = class_import_get(obd->u.cli.cl_import); @@ -2488,12 +2525,13 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, * extra calls into the filesystem if that isn't necessary (e.g. * during mount that would help a bit). Having relative timestamps * is not so great if request processing is slow, while absolute - * timestamps are not ideal because they need time synchronization. */ + * timestamps are not ideal because they need time synchronization. 
+ */ req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS); class_import_put(imp); - if (req == NULL) + if (!req) return -ENOMEM; rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS); @@ -2516,7 +2554,7 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, goto out; msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); - if (msfs == NULL) { + if (!msfs) { rc = -EPROTO; goto out; } @@ -2534,7 +2572,8 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp, * the maximum number of OST indices which will fit in the user buffer. * lmm_magic must be LOV_MAGIC (we only use 1 slot here). */ -static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) +static int osc_getstripe(struct lov_stripe_md *lsm, + struct lov_user_md __user *lump) { /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */ struct lov_user_md_v3 lum, *lumk; @@ -2545,7 +2584,8 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) return -ENODATA; /* we only need the header part from user space to get lmm_magic and - * lmm_stripe_count, (the header part is common to v1 and v3) */ + * lmm_stripe_count, (the header part is common to v1 and v3) + */ lum_size = sizeof(struct lov_user_md_v1); if (copy_from_user(&lum, lump, lum_size)) return -EFAULT; @@ -2560,7 +2600,8 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0])); /* we can use lov_mds_md_size() to compute lum_size - * because lov_user_md_vX and lov_mds_md_vX have the same size */ + * because lov_user_md_vX and lov_mds_md_vX have the same size + */ if (lum.lmm_stripe_count > 0) { lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic); lumk = kzalloc(lum_size, GFP_NOFS); @@ -2591,14 +2632,15 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) } static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int 
len, - void *karg, void *uarg) + void *karg, void __user *uarg) { struct obd_device *obd = exp->exp_obd; struct obd_ioctl_data *data = karg; int err = 0; if (!try_module_get(THIS_MODULE)) { - CERROR("Can't get module. Is it alive?"); + CERROR("%s: cannot get module '%s'\n", obd->obd_name, + module_name(THIS_MODULE)); return -EINVAL; } switch (cmd) { @@ -2700,7 +2742,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp, req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GET_INFO_LAST_ID); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, @@ -2721,7 +2763,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp, goto out; reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto out; } @@ -2735,7 +2777,7 @@ out: struct ldlm_res_id res_id; ldlm_policy_data_t policy; struct lustre_handle lockh; - ldlm_mode_t mode = 0; + enum ldlm_mode mode = 0; struct ptlrpc_request *req; struct ll_user_fiemap *reply; char *tmp; @@ -2748,12 +2790,12 @@ out: CFS_PAGE_MASK; if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <= - fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1) + fm_key->fiemap.fm_start + PAGE_SIZE - 1) policy.l_extent.end = OBD_OBJECT_EOF; else policy.l_extent.end = (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length + - PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK; + PAGE_SIZE - 1) & CFS_PAGE_MASK; ostid_build_res_name(&fm_key->oa.o_oi, &res_id); mode = ldlm_lock_match(exp->exp_obd->obd_namespace, @@ -2774,7 +2816,7 @@ out: skip_locking: req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GET_INFO_FIEMAP); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto drop_lock; } @@ -2803,7 +2845,7 @@ skip_locking: goto fini_req; reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL); - if (reply == NULL) { + if (!reply) { rc = -EPROTO; goto fini_req; } @@ -2852,7 +2894,7 @@ static int osc_set_info_async(const 
struct lu_env *env, struct obd_export *exp, if (KEY_IS(KEY_CACHE_SET)) { struct client_obd *cli = &obd->u.cli; - LASSERT(cli->cl_cache == NULL); /* only once */ + LASSERT(!cli->cl_cache); /* only once */ cli->cl_cache = val; atomic_inc(&cli->cl_cache->ccc_users); cli->cl_lru_left = &cli->cl_cache->ccc_lru_left; @@ -2880,16 +2922,17 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, return -EINVAL; /* We pass all other commands directly to OST. Since nobody calls osc - methods directly and everybody is supposed to go through LOV, we - assume lov checked invalid values for us. - The only recognised values so far are evict_by_nid and mds_conn. - Even if something bad goes through, we'd get a -EINVAL from OST - anyway. */ + * methods directly and everybody is supposed to go through LOV, we + * assume lov checked invalid values for us. + * The only recognised values so far are evict_by_nid and mds_conn. + * Even if something bad goes through, we'd get a -EINVAL from OST + * anyway. + */ req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ? 
&RQF_OST_SET_GRANT_INFO : &RQF_OBD_SET_INFO); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, @@ -2916,7 +2959,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); aa = ptlrpc_req_async_args(req); - oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO); + oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS); if (!oa) { ptlrpc_req_finished(req); return -ENOMEM; @@ -2928,7 +2971,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, ptlrpc_request_set_replen(req); if (!KEY_IS(KEY_GRANT_SHRINK)) { - LASSERT(set != NULL); + LASSERT(set); ptlrpc_set_add_req(set, req); ptlrpc_check_set(NULL, set); } else { @@ -2946,7 +2989,7 @@ static int osc_reconnect(const struct lu_env *env, { struct client_obd *cli = &obd->u.cli; - if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) { + if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) { long lost_grant; client_obd_list_lock(&cli->cl_loi_list_lock); @@ -2987,7 +3030,7 @@ static int osc_disconnect(struct obd_export *exp) * So the osc should be disconnected from the shrink list, after we * are sure the import has been destroyed. 
BUG18662 */ - if (obd->u.cli.cl_import == NULL) + if (!obd->u.cli.cl_import) osc_del_shrink_grant(&obd->u.cli); return rc; } @@ -3024,7 +3067,8 @@ static int osc_import_event(struct obd_device *obd, /* Reset grants */ cli = &obd->u.cli; /* all pages go to failing rpcs due to the invalid - * import */ + * import + */ osc_io_unplug(env, cli, NULL); ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY); @@ -3206,13 +3250,13 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) return 0; } -int osc_cleanup(struct obd_device *obd) +static int osc_cleanup(struct obd_device *obd) { struct client_obd *cli = &obd->u.cli; int rc; /* lru cleanup */ - if (cli->cl_cache != NULL) { + if (cli->cl_cache) { LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0); spin_lock(&cli->cl_cache->ccc_lru_lock); list_del_init(&cli->cl_lru_osc); @@ -3255,7 +3299,7 @@ static int osc_process_config(struct obd_device *obd, u32 len, void *buf) return osc_process_config_base(obd, buf); } -struct obd_ops osc_obd_ops = { +static struct obd_ops osc_obd_ops = { .owner = THIS_MODULE, .setup = osc_setup, .precleanup = osc_precleanup, @@ -3298,7 +3342,8 @@ static int __init osc_init(void) /* print an address of _any_ initialized kernel symbol from this * module, to allow debugging with gdb that doesn't support data - * symbols from modules.*/ + * symbols from modules. 
+ */ CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches); rc = lu_kmem_init(osc_caches); diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c index efdda0950..cf3ac8eee 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/client.c +++ b/drivers/staging/lustre/lustre/ptlrpc/client.c @@ -145,7 +145,7 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE); desc = ptlrpc_new_bulk(npages, max_brw, type, portal); - if (desc == NULL) + if (!desc) return NULL; desc->bd_import_generation = req->rq_import_generation; @@ -171,15 +171,15 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page, int pageoffset, int len, int pin) { LASSERT(desc->bd_iov_count < desc->bd_max_iov); - LASSERT(page != NULL); + LASSERT(page); LASSERT(pageoffset >= 0); LASSERT(len > 0); - LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); + LASSERT(pageoffset + len <= PAGE_SIZE); desc->bd_nob += len; if (pin) - page_cache_get(page); + get_page(page); ptlrpc_add_bulk_page(desc, page, pageoffset, len); } @@ -193,7 +193,6 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin) { int i; - LASSERT(desc != NULL); LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */ LASSERT(desc->bd_md_count == 0); /* network hands off */ LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL)); @@ -207,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin) if (unpin) { for (i = 0; i < desc->bd_iov_count; i++) - page_cache_release(desc->bd_iov[i].kiov_page); + put_page(desc->bd_iov[i].kiov_page); } kfree(desc); @@ -353,6 +352,7 @@ static int unpack_reply(struct ptlrpc_request *req) * If anything goes wrong just ignore it - same as if it never happened */ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) + __must_hold(&req->rq_lock) { struct ptlrpc_request *early_req; time64_t olddl; @@ -411,7 +411,7 @@ 
int ptlrpc_request_cache_init(void) request_cache = kmem_cache_create("ptlrpc_cache", sizeof(struct ptlrpc_request), 0, SLAB_HWCACHE_ALIGN, NULL); - return request_cache == NULL ? -ENOMEM : 0; + return !request_cache ? -ENOMEM : 0; } void ptlrpc_request_cache_fini(void) @@ -423,7 +423,7 @@ struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags) { struct ptlrpc_request *req; - req = kmem_cache_alloc(request_cache, flags | __GFP_ZERO); + req = kmem_cache_zalloc(request_cache, flags); return req; } @@ -441,8 +441,6 @@ void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool) struct list_head *l, *tmp; struct ptlrpc_request *req; - LASSERT(pool != NULL); - spin_lock(&pool->prp_lock); list_for_each_safe(l, tmp, &pool->prp_req_list) { req = list_entry(l, struct ptlrpc_request, rq_list); @@ -559,7 +557,7 @@ ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool) } request = list_entry(pool->prp_req_list.next, struct ptlrpc_request, - rq_list); + rq_list); list_del_init(&request->rq_list); spin_unlock(&pool->prp_lock); @@ -724,10 +722,10 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp, request = ptlrpc_prep_req_from_pool(pool); if (request) { - LASSERTF((unsigned long)imp > 0x1000, "%p", imp); + LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp); LASSERT(imp != LP_POISON); - LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p", - imp->imp_client); + LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n", + imp->imp_client); LASSERT(imp->imp_client != LP_POISON); request->rq_import = class_import_get(imp); @@ -752,7 +750,7 @@ ptlrpc_request_alloc_internal(struct obd_import *imp, struct ptlrpc_request *request; request = __ptlrpc_request_alloc(imp, pool); - if (request == NULL) + if (!request) return NULL; req_capsule_init(&request->rq_pill, request, RCL_CLIENT); @@ -898,8 +896,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) RQ_PHASE_COMPLETE : RQ_PHASE_NEW; list_for_each(tmp, &set->set_requests) { struct ptlrpc_request 
*req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); LASSERT(req->rq_phase == expected_phase); n++; @@ -911,8 +908,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) list_for_each_safe(tmp, next, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); list_del_init(&req->rq_set_chain); LASSERT(req->rq_phase == expected_phase); @@ -951,10 +947,10 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set, atomic_inc(&set->set_remaining); req->rq_queued_time = cfs_time_current(); - if (req->rq_reqmsg != NULL) + if (req->rq_reqmsg) lustre_msg_set_jobid(req->rq_reqmsg, NULL); - if (set->set_producer != NULL) + if (set->set_producer) /* * If the request set has a producer callback, the RPC must be * sent straight away @@ -974,7 +970,7 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, struct ptlrpc_request_set *set = pc->pc_set; int count, i; - LASSERT(req->rq_set == NULL); + LASSERT(!req->rq_set); LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0); spin_lock(&set->set_new_req_lock); @@ -1015,7 +1011,6 @@ static int ptlrpc_import_delay_req(struct obd_import *imp, { int delay = 0; - LASSERT(status != NULL); *status = 0; if (req->rq_ctx_init || req->rq_ctx_fini) { @@ -1078,7 +1073,7 @@ static int ptlrpc_console_allow(struct ptlrpc_request *req) __u32 opc; int err; - LASSERT(req->rq_reqmsg != NULL); + LASSERT(req->rq_reqmsg); opc = lustre_msg_get_opc(req->rq_reqmsg); /* @@ -1167,7 +1162,7 @@ static int after_reply(struct ptlrpc_request *req) struct timespec64 work_start; long timediff; - LASSERT(obd != NULL); + LASSERT(obd); /* repbuf must be unlinked */ LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink); @@ -1247,7 +1242,7 @@ static int after_reply(struct ptlrpc_request *req) ktime_get_real_ts64(&work_start); timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * 
USEC_PER_SEC + (work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC; - if (obd->obd_svc_stats != NULL) { + if (obd->obd_svc_stats) { lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, timediff); ptlrpc_lprocfs_rpc_sent(req, timediff); @@ -1310,7 +1305,7 @@ static int after_reply(struct ptlrpc_request *req) /* version recovery */ ptlrpc_save_versions(req); ptlrpc_retain_replayable_request(req, imp); - } else if (req->rq_commit_cb != NULL && + } else if (req->rq_commit_cb && list_empty(&req->rq_replay_list)) { /* * NB: don't call rq_commit_cb if it's already on @@ -1334,8 +1329,8 @@ static int after_reply(struct ptlrpc_request *req) struct ptlrpc_request *last; last = list_entry(imp->imp_replay_list.prev, - struct ptlrpc_request, - rq_replay_list); + struct ptlrpc_request, + rq_replay_list); /* * Requests with rq_replay stay on the list even if no * commit is expected. @@ -1437,7 +1432,7 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set) { int remaining, rc; - LASSERT(set->set_producer != NULL); + LASSERT(set->set_producer); remaining = atomic_read(&set->set_remaining); @@ -1478,8 +1473,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) INIT_LIST_HEAD(&comp_reqs); list_for_each_safe(tmp, next, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); struct obd_import *imp = req->rq_import; int unregistered = 0; int rc = 0; @@ -1621,8 +1615,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) */ list_del_init(&req->rq_list); list_add_tail(&req->rq_list, - &imp-> - imp_delayed_list); + &imp->imp_delayed_list); spin_unlock(&imp->imp_lock); continue; } @@ -1630,7 +1623,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) if (status != 0) { req->rq_status = status; ptlrpc_rqphase_move(req, - RQ_PHASE_INTERPRET); + 
RQ_PHASE_INTERPRET); spin_unlock(&imp->imp_lock); goto interpret; } @@ -1645,7 +1638,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) list_del_init(&req->rq_list); list_add_tail(&req->rq_list, - &imp->imp_sending_list); + &imp->imp_sending_list); spin_unlock(&imp->imp_lock); @@ -1750,7 +1743,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) * process the reply. Similarly if the RPC returned * an error, and therefore the bulk will never arrive. */ - if (req->rq_bulk == NULL || req->rq_status < 0) { + if (!req->rq_bulk || req->rq_status < 0) { ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); goto interpret; } @@ -1802,7 +1795,7 @@ interpret: } ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE); - CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0, + CDEBUG(req->rq_reqmsg ? D_RPCTRACE : 0, "Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n", current_comm(), imp->imp_obd->obd_uuid.uuid, lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, @@ -1882,8 +1875,8 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) "timed out for sent delay" : "timed out for slow reply"), (s64)req->rq_sent, (s64)req->rq_real_sent); - if (imp != NULL && obd_debug_peer_on_timeout) - LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer); + if (imp && obd_debug_peer_on_timeout) + LNetDebugPeer(imp->imp_connection->c_peer); ptlrpc_unregister_reply(req, async_unlink); ptlrpc_unregister_bulk(req, async_unlink); @@ -1891,7 +1884,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) if (obd_dump_on_timeout) libcfs_debug_dumplog(); - if (imp == NULL) { + if (!imp) { DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?"); return 1; } @@ -1944,13 +1937,10 @@ int ptlrpc_expired_set(void *data) struct list_head *tmp; time64_t now = ktime_get_real_seconds(); - LASSERT(set != NULL); - /* A timeout expired. See which reqs it applies to... 
*/ list_for_each(tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); /* don't expire request waiting for context */ if (req->rq_wait_ctx) @@ -2002,13 +1992,11 @@ void ptlrpc_interrupted_set(void *data) struct ptlrpc_request_set *set = data; struct list_head *tmp; - LASSERT(set != NULL); CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set); list_for_each(tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_entry(tmp, struct ptlrpc_request, rq_set_chain); if (req->rq_phase != RQ_PHASE_RPC && req->rq_phase != RQ_PHASE_UNREGISTERING) @@ -2081,7 +2069,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) else list_for_each(tmp, &set->set_requests) { req = list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + rq_set_chain); if (req->rq_phase == RQ_PHASE_NEW) (void)ptlrpc_send_new_req(req); } @@ -2155,7 +2143,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) if (rc == 0 && atomic_read(&set->set_remaining) == 0) { list_for_each(tmp, &set->set_requests) { req = list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + rq_set_chain); spin_lock(&req->rq_lock); req->rq_invalid_rqset = 1; spin_unlock(&req->rq_lock); @@ -2174,7 +2162,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set) rc = req->rq_status; } - if (set->set_interpret != NULL) { + if (set->set_interpret) { int (*interpreter)(struct ptlrpc_request_set *set, void *, int) = set->set_interpret; rc = interpreter(set, set->set_arg, rc); @@ -2206,10 +2194,10 @@ EXPORT_SYMBOL(ptlrpc_set_wait); */ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) { - if (request == NULL) + if (!request) return; LASSERTF(!request->rq_receiving_reply, "req %p\n", request); - LASSERTF(request->rq_rqbd == NULL, "req %p\n", request);/* client-side */ + LASSERTF(!request->rq_rqbd, "req %p\n", request);/* client-side */ 
LASSERTF(list_empty(&request->rq_list), "req %p\n", request); LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request); LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request); @@ -2221,7 +2209,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) * We must take it off the imp_replay_list first. Otherwise, we'll set * request->rq_reqmsg to NULL while osc_close is dereferencing it. */ - if (request->rq_import != NULL) { + if (request->rq_import) { if (!locked) spin_lock(&request->rq_import->imp_lock); list_del_init(&request->rq_replay_list); @@ -2236,20 +2224,20 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) LBUG(); } - if (request->rq_repbuf != NULL) + if (request->rq_repbuf) sptlrpc_cli_free_repbuf(request); - if (request->rq_export != NULL) { + if (request->rq_export) { class_export_put(request->rq_export); request->rq_export = NULL; } - if (request->rq_import != NULL) { + if (request->rq_import) { class_import_put(request->rq_import); request->rq_import = NULL; } - if (request->rq_bulk != NULL) + if (request->rq_bulk) ptlrpc_free_bulk_pin(request->rq_bulk); - if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL) + if (request->rq_reqbuf || request->rq_clrbuf) sptlrpc_cli_free_reqbuf(request); if (request->rq_cli_ctx) @@ -2269,7 +2257,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked) */ static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked) { - if (request == NULL) + if (!request) return 1; if (request == LP_POISON || @@ -2351,7 +2339,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async) * a chance to run reply_in_callback(), and to make sure we've * unlinked before returning a req to the pool. 
*/ - if (request->rq_set != NULL) + if (request->rq_set) wq = &request->rq_set->set_waitq; else wq = &request->rq_reply_waitq; @@ -2386,7 +2374,7 @@ static void ptlrpc_free_request(struct ptlrpc_request *req) req->rq_replay = 0; spin_unlock(&req->rq_lock); - if (req->rq_commit_cb != NULL) + if (req->rq_commit_cb) req->rq_commit_cb(req); list_del_init(&req->rq_replay_list); @@ -2427,7 +2415,6 @@ void ptlrpc_free_committed(struct obd_import *imp) struct ptlrpc_request *last_req = NULL; /* temporary fire escape */ bool skip_committed_list = true; - LASSERT(imp != NULL); assert_spin_locked(&imp->imp_lock); if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked && @@ -2575,8 +2562,7 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, ptlrpc_request_addref(req); list_for_each_prev(tmp, &imp->imp_replay_list) { struct ptlrpc_request *iter = - list_entry(tmp, struct ptlrpc_request, - rq_replay_list); + list_entry(tmp, struct ptlrpc_request, rq_replay_list); /* * We may have duplicate transnos if we create and then @@ -2611,12 +2597,12 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req) struct ptlrpc_request_set *set; int rc; - LASSERT(req->rq_set == NULL); + LASSERT(!req->rq_set); LASSERT(!req->rq_receiving_reply); set = ptlrpc_prep_set(); - if (set == NULL) { - CERROR("Unable to allocate ptlrpc set."); + if (!set) { + CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM); return -ENOMEM; } @@ -2847,12 +2833,9 @@ void ptlrpc_abort_set(struct ptlrpc_request_set *set) { struct list_head *tmp, *pos; - LASSERT(set != NULL); - list_for_each_safe(pos, tmp, &set->set_requests) { struct ptlrpc_request *req = - list_entry(pos, struct ptlrpc_request, - rq_set_chain); + list_entry(pos, struct ptlrpc_request, rq_set_chain); spin_lock(&req->rq_lock); if (req->rq_phase != RQ_PHASE_RPC) { @@ -2994,7 +2977,6 @@ static int work_interpreter(const struct lu_env *env, struct ptlrpc_work_async_args *arg = data; LASSERT(ptlrpcd_check_work(req)); - 
LASSERT(arg->cb != NULL); rc = arg->cb(env, arg->cbdata); @@ -3026,12 +3008,12 @@ void *ptlrpcd_alloc_work(struct obd_import *imp, might_sleep(); - if (cb == NULL) + if (!cb) return ERR_PTR(-EINVAL); /* copy some code from deprecated fakereq. */ req = ptlrpc_request_cache_alloc(GFP_NOFS); - if (req == NULL) { + if (!req) { CERROR("ptlrpc: run out of memory!\n"); return ERR_PTR(-ENOMEM); } diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c index da1f0b1ac..a14daff3f 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/connection.c +++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c @@ -72,7 +72,8 @@ ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self, * returned and may be compared against out object. */ /* In the function below, .hs_keycmp resolves to - * conn_keycmp() */ + * conn_keycmp() + */ /* coverity[overrun-buffer-val] */ conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash); if (conn != conn2) { @@ -172,7 +173,7 @@ conn_keycmp(const void *key, struct hlist_node *hnode) struct ptlrpc_connection *conn; const lnet_process_id_t *conn_key; - LASSERT(key != NULL); + LASSERT(key); conn_key = key; conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash); diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c index 990156986..47be21ac9 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/events.c +++ b/drivers/staging/lustre/lustre/ptlrpc/events.c @@ -71,7 +71,8 @@ void request_out_callback(lnet_event_t *ev) if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) { /* Failed send: make it seem like the reply timed out, just - * like failing sends in client.c does currently... */ + * like failing sends in client.c does currently... 
+ */ req->rq_net_err = 1; ptlrpc_client_wake_req(req); @@ -95,7 +96,8 @@ void reply_in_callback(lnet_event_t *ev) LASSERT(ev->md.start == req->rq_repbuf); LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len); /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests - for adaptive timeouts' early reply. */ + * for adaptive timeouts' early reply. + */ LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0); spin_lock(&req->rq_lock); @@ -151,7 +153,8 @@ void reply_in_callback(lnet_event_t *ev) req->rq_reply_off = ev->offset; req->rq_nob_received = ev->mlength; /* LNetMDUnlink can't be called under the LNET_LOCK, - so we must unlink in ptlrpc_unregister_reply */ + * so we must unlink in ptlrpc_unregister_reply + */ DEBUG_REQ(D_INFO, req, "reply in flags=%x mlen=%u offset=%d replen=%d", lustre_msg_get_flags(req->rq_reqmsg), @@ -162,7 +165,8 @@ void reply_in_callback(lnet_event_t *ev) out_wake: /* NB don't unlock till after wakeup; req can disappear under us - * since we don't have our own ref */ + * since we don't have our own ref + */ ptlrpc_client_wake_req(req); spin_unlock(&req->rq_lock); } @@ -213,7 +217,8 @@ void client_bulk_callback(lnet_event_t *ev) desc->bd_failure = 1; /* NB don't unlock till after wakeup; desc can disappear under us - * otherwise */ + * otherwise + */ if (desc->bd_md_count == 0) ptlrpc_client_wake_req(desc->bd_req); @@ -250,7 +255,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, __u64 new_seq; /* set sequence ID for request and add it to history list, - * it must be called with hold svcpt::scp_lock */ + * it must be called with hold svcpt::scp_lock + */ new_seq = (sec << REQS_SEC_SHIFT) | (usec << REQS_USEC_SHIFT) | @@ -258,7 +264,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, if (new_seq > svcpt->scp_hist_seq) { /* This handles the initial case of scp_hist_seq == 0 or - * we just jumped into a new time window */ + * we just jumped into a new time window + */ svcpt->scp_hist_seq = 
new_seq; } else { LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT); @@ -266,7 +273,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, * however, it's possible that we used up all bits for * sequence and jumped into the next usec bucket (future time), * then we hope there will be less RPCs per bucket at some - * point, and sequence will catch up again */ + * point, and sequence will catch up again + */ svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt)); new_seq = svcpt->scp_hist_seq; } @@ -302,7 +310,8 @@ void request_in_callback(lnet_event_t *ev) * request buffer we can use the request object embedded in * rqbd. Note that if we failed to allocate a request, * we'd have to re-post the rqbd, which we can't do in this - * context. */ + * context. + */ req = &rqbd->rqbd_req; memset(req, 0, sizeof(*req)); } else { @@ -312,7 +321,7 @@ void request_in_callback(lnet_event_t *ev) return; } req = ptlrpc_request_cache_alloc(GFP_ATOMIC); - if (req == NULL) { + if (!req) { CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n", service->srv_name, libcfs_id2str(ev->initiator)); @@ -322,7 +331,8 @@ void request_in_callback(lnet_event_t *ev) /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL, * flags are reset and scalars are zero. We only set the message - * size to non-zero if this was a successful receive. */ + * size to non-zero if this was a successful receive. 
+ */ req->rq_xid = ev->match_bits; req->rq_reqbuf = ev->md.start + ev->offset; if (ev->type == LNET_EVENT_PUT && ev->status == 0) @@ -352,7 +362,8 @@ void request_in_callback(lnet_event_t *ev) svcpt->scp_nrqbds_posted); /* Normally, don't complain about 0 buffers posted; LNET won't - * drop incoming reqs since we set the portal lazy */ + * drop incoming reqs since we set the portal lazy + */ if (test_req_buffer_pressure && ev->type != LNET_EVENT_UNLINK && svcpt->scp_nrqbds_posted == 0) @@ -369,7 +380,8 @@ void request_in_callback(lnet_event_t *ev) svcpt->scp_nreqs_incoming++; /* NB everything can disappear under us once the request - * has been queued and we unlock, so do the wake now... */ + * has been queued and we unlock, so do the wake now... + */ wake_up(&svcpt->scp_waitq); spin_unlock(&svcpt->scp_lock); @@ -390,7 +402,8 @@ void reply_out_callback(lnet_event_t *ev) if (!rs->rs_difficult) { /* 'Easy' replies have no further processing so I drop the - * net's ref on 'rs' */ + * net's ref on 'rs' + */ LASSERT(ev->unlinked); ptlrpc_rs_decref(rs); return; @@ -400,7 +413,8 @@ void reply_out_callback(lnet_event_t *ev) if (ev->unlinked) { /* Last network callback. 
The net's ref on 'rs' stays put - * until ptlrpc_handle_rs() is done with it */ + * until ptlrpc_handle_rs() is done with it + */ spin_lock(&svcpt->scp_rep_lock); spin_lock(&rs->rs_lock); @@ -438,15 +452,12 @@ int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, __u32 best_order = 0; int count = 0; int rc = -ENOENT; - int portals_compatibility; int dist; __u32 order; lnet_nid_t dst_nid; lnet_nid_t src_nid; - portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL); - - peer->pid = LUSTRE_SRV_LNET_PID; + peer->pid = LNET_PID_LUSTRE; /* Choose the matching UUID that's closest */ while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) { @@ -466,14 +477,6 @@ int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, best_dist = dist; best_order = order; - if (portals_compatibility > 1) { - /* Strong portals compatibility: Zero the nid's - * NET, so if I'm reading new config logs, or - * getting configured by (new) lconf I can - * still talk to old servers. */ - dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid)); - src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid)); - } peer->nid = dst_nid; *self = src_nid; rc = 0; @@ -494,7 +497,8 @@ static void ptlrpc_ni_fini(void) /* Wait for the event queue to become idle since there may still be * messages in flight with pending events (i.e. the fire-and-forget * messages == client requests and "non-difficult" server - * replies */ + * replies + */ for (retries = 0;; retries++) { rc = LNetEQFree(ptlrpc_eq_h); @@ -524,7 +528,7 @@ static lnet_pid_t ptl_get_pid(void) { lnet_pid_t pid; - pid = LUSTRE_SRV_LNET_PID; + pid = LNET_PID_LUSTRE; return pid; } @@ -544,11 +548,13 @@ static int ptlrpc_ni_init(void) } /* CAVEAT EMPTOR: how we process portals events is _radically_ - * different depending on... */ + * different depending on... 
+ */ /* kernel LNet calls our master callback when there are new event, * because we are guaranteed to get every event via callback, * so we just set EQ size to 0 to avoid overhead of serializing - * enqueue/dequeue operations in LNet. */ + * enqueue/dequeue operations in LNet. + */ rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h); if (rc == 0) return 0; diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c index f752c789b..cd94fed0f 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/import.c +++ b/drivers/staging/lustre/lustre/ptlrpc/import.c @@ -112,7 +112,8 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp); * CLOSED. I would rather refcount the import and free it after * disconnection like we do with exports. To do that, the client_obd * will need to save the peer info somewhere other than in the import, - * though. */ + * though. + */ int ptlrpc_init_import(struct obd_import *imp) { spin_lock(&imp->imp_lock); @@ -139,7 +140,7 @@ static void deuuidify(char *uuid, const char *prefix, char **uuid_start, return; if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR), - UUID_STR, strlen(UUID_STR))) + UUID_STR, strlen(UUID_STR))) *uuid_len -= strlen(UUID_STR); } @@ -282,11 +283,13 @@ void ptlrpc_invalidate_import(struct obd_import *imp) /* Wait forever until inflight == 0. We really can't do it another * way because in some cases we need to wait for very long reply * unlink. We can't do anything before that because there is really - * no guarantee that some rdma transfer is not in progress right now. */ + * no guarantee that some rdma transfer is not in progress right now. + */ do { /* Calculate max timeout for waiting on rpcs to error * out. Use obd_timeout if calculated value is smaller - * than it. */ + * than it. 
+ */ if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) { timeout = ptlrpc_inflight_timeout(imp); timeout += timeout / 3; @@ -304,7 +307,8 @@ void ptlrpc_invalidate_import(struct obd_import *imp) /* Wait for all requests to error out and call completion * callbacks. Cap it at obd_timeout -- these should all - * have been locally cancelled by ptlrpc_abort_inflight. */ + * have been locally cancelled by ptlrpc_abort_inflight. + */ lwi = LWI_TIMEOUT_INTERVAL( cfs_timeout_cap(cfs_time_seconds(timeout)), (timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2, @@ -328,28 +332,30 @@ void ptlrpc_invalidate_import(struct obd_import *imp) * maybe waiting for long reply unlink in * sluggish nets). Let's check this. If there * is no inflight and unregistering != 0, this - * is bug. */ + * is bug. + */ LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n", count); /* Let's save one loop as soon as inflight have * dropped to zero. No new inflights possible at - * this point. */ + * this point. 
+ */ rc = 0; } else { list_for_each_safe(tmp, n, - &imp->imp_sending_list) { + &imp->imp_sending_list) { req = list_entry(tmp, - struct ptlrpc_request, - rq_list); + struct ptlrpc_request, + rq_list); DEBUG_REQ(D_ERROR, req, "still on sending list"); } list_for_each_safe(tmp, n, - &imp->imp_delayed_list) { + &imp->imp_delayed_list) { req = list_entry(tmp, - struct ptlrpc_request, - rq_list); + struct ptlrpc_request, + rq_list); DEBUG_REQ(D_ERROR, req, "still on delayed list"); } @@ -427,7 +433,6 @@ EXPORT_SYMBOL(ptlrpc_fail_import); int ptlrpc_reconnect_import(struct obd_import *imp) { -#ifdef ENABLE_PINGER struct l_wait_info lwi; int secs = cfs_time_seconds(obd_timeout); int rc; @@ -443,33 +448,6 @@ int ptlrpc_reconnect_import(struct obd_import *imp) CDEBUG(D_HA, "%s: recovery finished s:%s\n", obd2cli_tgt(imp->imp_obd), ptlrpc_import_state_name(imp->imp_state)); return rc; -#else - ptlrpc_set_import_discon(imp, 0); - /* Force a new connect attempt */ - ptlrpc_invalidate_import(imp); - /* Do a fresh connect next time by zeroing the handle */ - ptlrpc_disconnect_import(imp, 1); - /* Wait for all invalidate calls to finish */ - if (atomic_read(&imp->imp_inval_count) > 0) { - int rc; - struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); - - rc = l_wait_event(imp->imp_recovery_waitq, - (atomic_read(&imp->imp_inval_count) == 0), - &lwi); - if (rc) - CERROR("Interrupted, inval=%d\n", - atomic_read(&imp->imp_inval_count)); - } - - /* Allow reconnect attempts */ - imp->imp_obd->obd_no_recov = 0; - /* Remove 'invalid' flag */ - ptlrpc_activate_import(imp); - /* Attempt a new connect */ - ptlrpc_recover_import(imp, NULL, 0); - return 0; -#endif } EXPORT_SYMBOL(ptlrpc_reconnect_import); @@ -501,18 +479,20 @@ static int import_select_connection(struct obd_import *imp) conn->oic_last_attempt); /* If we have not tried this connection since - the last successful attempt, go with this one */ + * the last successful attempt, go with this one + */ if 
((conn->oic_last_attempt == 0) || cfs_time_beforeq_64(conn->oic_last_attempt, - imp->imp_last_success_conn)) { + imp->imp_last_success_conn)) { imp_conn = conn; tried_all = 0; break; } /* If all of the connections have already been tried - since the last successful connection; just choose the - least recently used */ + * since the last successful connection; just choose the + * least recently used + */ if (!imp_conn) imp_conn = conn; else if (cfs_time_before_64(conn->oic_last_attempt, @@ -529,10 +509,11 @@ static int import_select_connection(struct obd_import *imp) LASSERT(imp_conn->oic_conn); /* If we've tried everything, and we're back to the beginning of the - list, increase our timeout and try again. It will be reset when - we do finally connect. (FIXME: really we should wait for all network - state associated with the last connection attempt to drain before - trying to reconnect on it.) */ + * list, increase our timeout and try again. It will be reset when + * we do finally connect. (FIXME: really we should wait for all network + * state associated with the last connection attempt to drain before + * trying to reconnect on it.) 
+ */ if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) { struct adaptive_timeout *at = &imp->imp_at.iat_net_latency; @@ -553,7 +534,6 @@ static int import_select_connection(struct obd_import *imp) imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn); dlmexp = class_conn2export(&imp->imp_dlm_handle); - LASSERT(dlmexp != NULL); ptlrpc_connection_put(dlmexp->exp_connection); dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn); class_export_put(dlmexp); @@ -590,7 +570,8 @@ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno) struct list_head *tmp; /* The requests in committed_list always have smaller transnos than - * the requests in replay_list */ + * the requests in replay_list + */ if (!list_empty(&imp->imp_committed_list)) { tmp = imp->imp_committed_list.next; req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); @@ -674,7 +655,8 @@ int ptlrpc_connect_import(struct obd_import *imp) goto out; /* Reset connect flags to the originally requested flags, in case - * the server is updated on-the-fly we will get the new features. */ + * the server is updated on-the-fly we will get the new features. 
+ */ imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig; /* Reset ocd_version each time so the server knows the exact versions */ imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE; @@ -687,7 +669,7 @@ int ptlrpc_connect_import(struct obd_import *imp) goto out; request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT); - if (request == NULL) { + if (!request) { rc = -ENOMEM; goto out; } @@ -700,7 +682,8 @@ int ptlrpc_connect_import(struct obd_import *imp) } /* Report the rpc service time to the server so that it knows how long - * to wait for clients to join recovery */ + * to wait for clients to join recovery + */ lustre_msg_set_service_time(request->rq_reqmsg, at_timeout2est(request->rq_timeout)); @@ -708,7 +691,8 @@ int ptlrpc_connect_import(struct obd_import *imp) * import_select_connection will increase the net latency on * repeated reconnect attempts to cover slow networks. * We override/ignore the server rpc completion estimate here, - * which may be large if this is a reconnect attempt */ + * which may be large if this is a reconnect attempt + */ request->rq_timeout = INITIAL_CONNECT_TIMEOUT; lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout); @@ -799,7 +783,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, if (rc) { /* if this reconnect to busy export - not need select new target - * for connecting*/ + * for connecting + */ imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc); spin_unlock(&imp->imp_lock); ptlrpc_maybe_ping_import_soon(imp); @@ -817,7 +802,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, ocd = req_capsule_server_sized_get(&request->rq_pill, &RMF_CONNECT_DATA, ret); - if (ocd == NULL) { + if (!ocd) { CERROR("%s: no connect data from server\n", imp->imp_obd->obd_name); rc = -EPROTO; @@ -851,7 +836,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, if (!exp) { /* This could happen if export is cleaned during the - connect attempt */ + * connect attempt + */ 
CERROR("%s: missing export after connect\n", imp->imp_obd->obd_name); rc = -ENODEV; @@ -877,14 +863,16 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, } /* if applies, adjust the imp->imp_msg_magic here - * according to reply flags */ + * according to reply flags + */ imp->imp_remote_handle = *lustre_msg_get_handle(request->rq_repmsg); /* Initial connects are allowed for clients with non-random * uuids when servers are in recovery. Simply signal the - * servers replay is complete and wait in REPLAY_WAIT. */ + * servers replay is complete and wait in REPLAY_WAIT. + */ if (msg_flags & MSG_CONNECT_RECOVERING) { CDEBUG(D_HA, "connect to %s during recovery\n", obd2cli_tgt(imp->imp_obd)); @@ -923,7 +911,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env, * already erased all of our state because of previous * eviction. If it is in recovery - we are safe to * participate since we can reestablish all of our state - * with server again */ + * with server again + */ if ((msg_flags & MSG_CONNECT_RECOVERING)) { CDEBUG(level, "%s@%s changed server handle from %#llx to %#llx but is still in recovery\n", obd2cli_tgt(imp->imp_obd), @@ -1015,8 +1004,7 @@ finish: spin_lock(&imp->imp_lock); list_del(&imp->imp_conn_current->oic_item); - list_add(&imp->imp_conn_current->oic_item, - &imp->imp_conn_list); + list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list); imp->imp_last_success_conn = imp->imp_conn_current->oic_last_attempt; @@ -1039,7 +1027,8 @@ finish: ocd->ocd_version < LUSTRE_VERSION_CODE - LUSTRE_VERSION_OFFSET_WARN)) { /* Sigh, some compilers do not like #ifdef in the middle - of macro arguments */ + * of macro arguments + */ const char *older = "older. Consider upgrading server or downgrading client" ; const char *newer = "newer than client version. 
Consider upgrading client" @@ -1061,7 +1050,8 @@ finish: * fixup is version-limited, because we don't want to carry the * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we * need interop with unpatched 2.2 servers. For newer servers, - * the client will do MNE swabbing only as needed. LU-1644 */ + * the client will do MNE swabbing only as needed. LU-1644 + */ if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) && !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) && OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 && @@ -1079,7 +1069,8 @@ finish: if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) { /* We sent to the server ocd_cksum_types with bits set * for algorithms we understand. The server masked off - * the checksum types it doesn't support */ + * the checksum types it doesn't support + */ if ((ocd->ocd_cksum_types & cksum_types_supported_client()) == 0) { LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n", @@ -1093,14 +1084,15 @@ finish: } } else { /* The server does not support OBD_CONNECT_CKSUM. - * Enforce ADLER for backward compatibility*/ + * Enforce ADLER for backward compatibility + */ cli->cl_supp_cksum_types = OBD_CKSUM_ADLER; } cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types); if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) cli->cl_max_pages_per_rpc = - min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT, + min(ocd->ocd_brw_size >> PAGE_SHIFT, cli->cl_max_pages_per_rpc); else if (imp->imp_connect_op == MDS_CONNECT || imp->imp_connect_op == MGS_CONNECT) @@ -1109,7 +1101,8 @@ finish: /* Reset ns_connect_flags only for initial connect. It might be * changed in while using FS and if we reset it in reconnect * this leads to losing user settings done before such as - * disable lru_resize, etc. */ + * disable lru_resize, etc. 
+ */ if (old_connect_flags != exp_connect_flags(exp) || aa->pcaa_initial_connect) { CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n", @@ -1123,13 +1116,14 @@ finish: if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) && (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2)) /* We need a per-message support flag, because - a. we don't know if the incoming connect reply - supports AT or not (in reply_in_callback) - until we unpack it. - b. failovered server means export and flags are gone - (in ptlrpc_send_reply). - Can only be set when we know AT is supported at - both ends */ + * a. we don't know if the incoming connect reply + * supports AT or not (in reply_in_callback) + * until we unpack it. + * b. failovered server means export and flags are gone + * (in ptlrpc_send_reply). + * Can only be set when we know AT is supported at + * both ends + */ imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT; else imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT; @@ -1162,7 +1156,7 @@ out: struct obd_connect_data *ocd; /* reply message might not be ready */ - if (request->rq_repmsg == NULL) + if (!request->rq_repmsg) return -EPROTO; ocd = req_capsule_server_get(&request->rq_pill, @@ -1243,7 +1237,7 @@ static int signal_completed_replay(struct obd_import *imp) req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION, OBD_PING); - if (req == NULL) { + if (!req) { atomic_dec(&imp->imp_replay_inflight); return -ENOMEM; } @@ -1337,12 +1331,13 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp) { struct task_struct *task; /* bug 17802: XXX client_disconnect_export vs connect request - * race. if client will evicted at this time, we start + * race. if client is evicted at this time, we start * invalidate thread without reference to import and import can - * be freed at same time. */ + * be freed at same time. 
+ */ class_import_get(imp); task = kthread_run(ptlrpc_invalidate_import_thread, imp, - "ll_imp_inval"); + "ll_imp_inval"); if (IS_ERR(task)) { class_import_put(imp); CERROR("error starting invalidate thread: %d\n", rc); @@ -1471,11 +1466,13 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose) if (req) { /* We are disconnecting, do not retry a failed DISCONNECT rpc if * it fails. We can get through the above with a down server - * if the client doesn't know the server is gone yet. */ + * if the client doesn't know the server is gone yet. + */ req->rq_no_resend = 1; /* We want client umounts to happen quickly, no matter the - server state... */ + * server state... + */ req->rq_timeout = min_t(int, req->rq_timeout, INITIAL_CONNECT_TIMEOUT); @@ -1507,9 +1504,10 @@ EXPORT_SYMBOL(ptlrpc_disconnect_import); extern unsigned int at_min, at_max, at_history; /* Bin into timeslices using AT_BINS bins. - This gives us a max of the last binlimit*AT_BINS secs without the storage, - but still smoothing out a return to normalcy from a slow response. - (E.g. remember the maximum latency in each minute of the last 4 minutes.) */ + * This gives us a max of the last binlimit*AT_BINS secs without the storage, + * but still smoothing out a return to normalcy from a slow response. + * (E.g. remember the maximum latency in each minute of the last 4 minutes.) 
+ */ int at_measured(struct adaptive_timeout *at, unsigned int val) { unsigned int old = at->at_current; @@ -1523,7 +1521,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val) if (val == 0) /* 0's don't count, because we never want our timeout to - drop to 0, and because 0 could mean an error */ + * drop to 0, and because 0 could mean an error + */ return 0; spin_lock(&at->at_lock); @@ -1565,7 +1564,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val) if (at->at_flags & AT_FLG_NOHIST) /* Only keep last reported val; keeping the rest of the history - for proc only */ + * for debugfs only + */ at->at_current = val; if (at_max > 0) diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c index c0e613c23..5b06901e5 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/layout.c +++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c @@ -118,25 +118,6 @@ static const struct req_msg_field *quotactl_only[] = { &RMF_OBD_QUOTACTL }; -static const struct req_msg_field *quota_body_only[] = { - &RMF_PTLRPC_BODY, - &RMF_QUOTA_BODY -}; - -static const struct req_msg_field *ldlm_intent_quota_client[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REQ, - &RMF_LDLM_INTENT, - &RMF_QUOTA_BODY -}; - -static const struct req_msg_field *ldlm_intent_quota_server[] = { - &RMF_PTLRPC_BODY, - &RMF_DLM_REP, - &RMF_DLM_LVB, - &RMF_QUOTA_BODY -}; - static const struct req_msg_field *mdt_close_client[] = { &RMF_PTLRPC_BODY, &RMF_MDT_EPOCH, @@ -514,16 +495,6 @@ static const struct req_msg_field *mds_setattr_server[] = { &RMF_CAPA2 }; -static const struct req_msg_field *mds_update_client[] = { - &RMF_PTLRPC_BODY, - &RMF_UPDATE, -}; - -static const struct req_msg_field *mds_update_server[] = { - &RMF_PTLRPC_BODY, - &RMF_UPDATE_REPLY, -}; - static const struct req_msg_field *llog_origin_handle_create_client[] = { &RMF_PTLRPC_BODY, &RMF_LLOGD_BODY, @@ -551,16 +522,6 @@ static const struct req_msg_field *llog_origin_handle_next_block_server[] 
= { &RMF_EADATA }; -static const struct req_msg_field *obd_idx_read_client[] = { - &RMF_PTLRPC_BODY, - &RMF_IDX_INFO -}; - -static const struct req_msg_field *obd_idx_read_server[] = { - &RMF_PTLRPC_BODY, - &RMF_IDX_INFO -}; - static const struct req_msg_field *ost_body_only[] = { &RMF_PTLRPC_BODY, &RMF_OST_BODY @@ -676,7 +637,6 @@ static const struct req_msg_field *mdt_hsm_request[] = { static struct req_format *req_formats[] = { &RQF_OBD_PING, &RQF_OBD_SET_INFO, - &RQF_OBD_IDX_READ, &RQF_SEC_CTX, &RQF_MGS_TARGET_REG, &RQF_MGS_SET_INFO, @@ -721,7 +681,6 @@ static struct req_format *req_formats[] = { &RQF_MDS_HSM_ACTION, &RQF_MDS_HSM_REQUEST, &RQF_MDS_SWAP_LAYOUTS, - &RQF_UPDATE_OBJ, &RQF_QC_CALLBACK, &RQF_OST_CONNECT, &RQF_OST_DISCONNECT, @@ -759,8 +718,6 @@ static struct req_format *req_formats[] = { &RQF_LDLM_INTENT_CREATE, &RQF_LDLM_INTENT_UNLINK, &RQF_LDLM_INTENT_GETXATTR, - &RQF_LDLM_INTENT_QUOTA, - &RQF_QUOTA_DQACQ, &RQF_LOG_CANCEL, &RQF_LLOG_ORIGIN_HANDLE_CREATE, &RQF_LLOG_ORIGIN_HANDLE_DESTROY, @@ -899,11 +856,6 @@ struct req_msg_field RMF_OBD_QUOTACTL = lustre_swab_obd_quotactl, NULL); EXPORT_SYMBOL(RMF_OBD_QUOTACTL); -struct req_msg_field RMF_QUOTA_BODY = - DEFINE_MSGF("quota_body", 0, - sizeof(struct quota_body), lustre_swab_quota_body, NULL); -EXPORT_SYMBOL(RMF_QUOTA_BODY); - struct req_msg_field RMF_MDT_EPOCH = DEFINE_MSGF("mdt_ioepoch", 0, sizeof(struct mdt_ioepoch), lustre_swab_mdt_ioepoch, NULL); @@ -938,12 +890,12 @@ EXPORT_SYMBOL(RMF_SYMTGT); struct req_msg_field RMF_TGTUUID = DEFINE_MSGF("tgtuuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL, - NULL); + NULL); EXPORT_SYMBOL(RMF_TGTUUID); struct req_msg_field RMF_CLUUID = DEFINE_MSGF("cluuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL, - NULL); + NULL); EXPORT_SYMBOL(RMF_CLUUID); struct req_msg_field RMF_STRING = @@ -1078,7 +1030,7 @@ EXPORT_SYMBOL(RMF_RCS); struct req_msg_field RMF_EAVALS_LENS = DEFINE_MSGF("eavals_lens", RMF_F_STRUCT_ARRAY, sizeof(__u32), - lustre_swab_generic_32s, 
NULL); + lustre_swab_generic_32s, NULL); EXPORT_SYMBOL(RMF_EAVALS_LENS); struct req_msg_field RMF_OBD_ID = @@ -1105,10 +1057,6 @@ struct req_msg_field RMF_FIEMAP_VAL = DEFINE_MSGF("fiemap", 0, -1, lustre_swab_fiemap, NULL); EXPORT_SYMBOL(RMF_FIEMAP_VAL); -struct req_msg_field RMF_IDX_INFO = - DEFINE_MSGF("idx_info", 0, sizeof(struct idx_info), - lustre_swab_idx_info, NULL); -EXPORT_SYMBOL(RMF_IDX_INFO); struct req_msg_field RMF_HSM_USER_STATE = DEFINE_MSGF("hsm_user_state", 0, sizeof(struct hsm_user_state), lustre_swab_hsm_user_state, NULL); @@ -1145,15 +1093,6 @@ struct req_msg_field RMF_MDS_HSM_REQUEST = lustre_swab_hsm_request, NULL); EXPORT_SYMBOL(RMF_MDS_HSM_REQUEST); -struct req_msg_field RMF_UPDATE = DEFINE_MSGF("update", 0, -1, - lustre_swab_update_buf, NULL); -EXPORT_SYMBOL(RMF_UPDATE); - -struct req_msg_field RMF_UPDATE_REPLY = DEFINE_MSGF("update_reply", 0, -1, - lustre_swab_update_reply_buf, - NULL); -EXPORT_SYMBOL(RMF_UPDATE_REPLY); - struct req_msg_field RMF_SWAP_LAYOUTS = DEFINE_MSGF("swap_layouts", 0, sizeof(struct mdc_swap_layouts), lustre_swab_swap_layouts, NULL); @@ -1196,29 +1135,23 @@ struct req_format RQF_OBD_SET_INFO = DEFINE_REQ_FMT0("OBD_SET_INFO", obd_set_info_client, empty); EXPORT_SYMBOL(RQF_OBD_SET_INFO); -/* Read index file through the network */ -struct req_format RQF_OBD_IDX_READ = - DEFINE_REQ_FMT0("OBD_IDX_READ", - obd_idx_read_client, obd_idx_read_server); -EXPORT_SYMBOL(RQF_OBD_IDX_READ); - struct req_format RQF_SEC_CTX = DEFINE_REQ_FMT0("SEC_CTX", empty, empty); EXPORT_SYMBOL(RQF_SEC_CTX); struct req_format RQF_MGS_TARGET_REG = DEFINE_REQ_FMT0("MGS_TARGET_REG", mgs_target_info_only, - mgs_target_info_only); + mgs_target_info_only); EXPORT_SYMBOL(RQF_MGS_TARGET_REG); struct req_format RQF_MGS_SET_INFO = DEFINE_REQ_FMT0("MGS_SET_INFO", mgs_set_info, - mgs_set_info); + mgs_set_info); EXPORT_SYMBOL(RQF_MGS_SET_INFO); struct req_format RQF_MGS_CONFIG_READ = DEFINE_REQ_FMT0("MGS_CONFIG_READ", mgs_config_read_client, - 
mgs_config_read_server); + mgs_config_read_server); EXPORT_SYMBOL(RQF_MGS_CONFIG_READ); struct req_format RQF_SEQ_QUERY = @@ -1253,16 +1186,6 @@ struct req_format RQF_QC_CALLBACK = DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty); EXPORT_SYMBOL(RQF_QC_CALLBACK); -struct req_format RQF_QUOTA_DQACQ = - DEFINE_REQ_FMT0("QUOTA_DQACQ", quota_body_only, quota_body_only); -EXPORT_SYMBOL(RQF_QUOTA_DQACQ); - -struct req_format RQF_LDLM_INTENT_QUOTA = - DEFINE_REQ_FMT0("LDLM_INTENT_QUOTA", - ldlm_intent_quota_client, - ldlm_intent_quota_server); -EXPORT_SYMBOL(RQF_LDLM_INTENT_QUOTA); - struct req_format RQF_MDS_GETSTATUS = DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa); EXPORT_SYMBOL(RQF_MDS_GETSTATUS); @@ -1357,11 +1280,6 @@ struct req_format RQF_MDS_GET_INFO = mds_getinfo_server); EXPORT_SYMBOL(RQF_MDS_GET_INFO); -struct req_format RQF_UPDATE_OBJ = - DEFINE_REQ_FMT0("OBJECT_UPDATE_OBJ", mds_update_client, - mds_update_server); -EXPORT_SYMBOL(RQF_UPDATE_OBJ); - struct req_format RQF_LDLM_ENQUEUE = DEFINE_REQ_FMT0("LDLM_ENQUEUE", ldlm_enqueue_client, ldlm_enqueue_lvb_server); @@ -1598,32 +1516,32 @@ EXPORT_SYMBOL(RQF_OST_STATFS); struct req_format RQF_OST_SET_GRANT_INFO = DEFINE_REQ_FMT0("OST_SET_GRANT_INFO", ost_grant_shrink_client, - ost_body_only); + ost_body_only); EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO); struct req_format RQF_OST_GET_INFO_GENERIC = DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client, - ost_get_info_generic_server); + ost_get_info_generic_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_GENERIC); struct req_format RQF_OST_GET_INFO_LAST_ID = DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client, - ost_get_last_id_server); + ost_get_last_id_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID); struct req_format RQF_OST_GET_INFO_LAST_FID = DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", obd_set_info_client, - ost_get_last_fid_server); + ost_get_last_fid_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID); struct req_format 
RQF_OST_SET_INFO_LAST_FID = DEFINE_REQ_FMT0("OST_SET_INFO_LAST_FID", obd_set_info_client, - empty); + empty); EXPORT_SYMBOL(RQF_OST_SET_INFO_LAST_FID); struct req_format RQF_OST_GET_INFO_FIEMAP = DEFINE_REQ_FMT0("OST_GET_INFO_FIEMAP", ost_get_fiemap_client, - ost_get_fiemap_server); + ost_get_fiemap_server); EXPORT_SYMBOL(RQF_OST_GET_INFO_FIEMAP); #if !defined(__REQ_LAYOUT_USER__) @@ -1712,7 +1630,7 @@ void req_capsule_init(struct req_capsule *pill, * high-priority RPC queue getting peeked at before ost_handle() * handles an OST RPC. */ - if (req != NULL && pill == &req->rq_pill && req->rq_pill_init) + if (req && pill == &req->rq_pill && req->rq_pill_init) return; memset(pill, 0, sizeof(*pill)); @@ -1720,7 +1638,7 @@ void req_capsule_init(struct req_capsule *pill, pill->rc_loc = location; req_capsule_init_area(pill); - if (req != NULL && pill == &req->rq_pill) + if (req && pill == &req->rq_pill) req->rq_pill_init = 1; } EXPORT_SYMBOL(req_capsule_init); @@ -1752,7 +1670,7 @@ static struct lustre_msg *__req_msg(const struct req_capsule *pill, */ void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt) { - LASSERT(pill->rc_fmt == NULL || pill->rc_fmt == fmt); + LASSERT(!pill->rc_fmt || pill->rc_fmt == fmt); LASSERT(__req_format_is_sane(fmt)); pill->rc_fmt = fmt; @@ -1773,8 +1691,6 @@ int req_capsule_filled_sizes(struct req_capsule *pill, const struct req_format *fmt = pill->rc_fmt; int i; - LASSERT(fmt != NULL); - for (i = 0; i < fmt->rf_fields[loc].nr; ++i) { if (pill->rc_area[loc][i] == -1) { pill->rc_area[loc][i] = @@ -1810,15 +1726,15 @@ int req_capsule_server_pack(struct req_capsule *pill) LASSERT(pill->rc_loc == RCL_SERVER); fmt = pill->rc_fmt; - LASSERT(fmt != NULL); + LASSERT(fmt); count = req_capsule_filled_sizes(pill, RCL_SERVER); rc = lustre_pack_reply(pill->rc_req, count, pill->rc_area[RCL_SERVER], NULL); if (rc != 0) { DEBUG_REQ(D_ERROR, pill->rc_req, - "Cannot pack %d fields in format `%s': ", - count, fmt->rf_name); + "Cannot pack %d 
fields in format `%s': ", + count, fmt->rf_name); } return rc; } @@ -1835,9 +1751,8 @@ static int __req_capsule_offset(const struct req_capsule *pill, int offset; offset = field->rmf_offset[pill->rc_fmt->rf_idx][loc]; - LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n", - pill->rc_fmt->rf_name, - field->rmf_name, offset, loc); + LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n", pill->rc_fmt->rf_name, + field->rmf_name, offset, loc); offset--; LASSERT(0 <= offset && offset < REQ_MAX_FIELD_NR); @@ -1865,7 +1780,7 @@ swabber_dumper_helper(struct req_capsule *pill, swabber = swabber ?: field->rmf_swabber; if (ptlrpc_buf_need_swab(pill->rc_req, inout, offset) && - swabber != NULL && value != NULL) + swabber && value) do_swab = 1; else do_swab = 0; @@ -1883,7 +1798,7 @@ swabber_dumper_helper(struct req_capsule *pill, return; swabber(value); ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset); - if (dump) { + if (dump && field->rmf_dumper) { CDEBUG(D_RPCTRACE, "Dump of swabbed field %s follows\n", field->rmf_name); field->rmf_dumper(value); @@ -1947,17 +1862,15 @@ static void *__req_capsule_get(struct req_capsule *pill, [RCL_SERVER] = "server" }; - LASSERT(pill != NULL); - LASSERT(pill != LP_POISON); fmt = pill->rc_fmt; - LASSERT(fmt != NULL); + LASSERT(fmt); LASSERT(fmt != LP_POISON); LASSERT(__req_format_is_sane(fmt)); offset = __req_capsule_offset(pill, field, loc); msg = __req_msg(pill, loc); - LASSERT(msg != NULL); + LASSERT(msg); getter = (field->rmf_flags & RMF_F_STRING) ? (typeof(getter))lustre_msg_string : lustre_msg_buf; @@ -1980,7 +1893,7 @@ static void *__req_capsule_get(struct req_capsule *pill, } value = getter(msg, offset, len); - if (value == NULL) { + if (!value) { DEBUG_REQ(D_ERROR, pill->rc_req, "Wrong buffer for field `%s' (%d of %d) in format `%s': %d vs. 
%d (%s)\n", field->rmf_name, offset, lustre_msg_bufcount(msg), @@ -2209,7 +2122,7 @@ void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt) const struct req_format *old; - LASSERT(pill->rc_fmt != NULL); + LASSERT(pill->rc_fmt); LASSERT(__req_format_is_sane(fmt)); old = pill->rc_fmt; @@ -2222,7 +2135,7 @@ void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt) const struct req_msg_field *ofield = FMT_FIELD(old, i, j); /* "opaque" fields can be transmogrified */ - if (ofield->rmf_swabber == NULL && + if (!ofield->rmf_swabber && (ofield->rmf_flags & ~RMF_F_NO_SIZE_CHECK) == 0 && (ofield->rmf_size == -1 || ofield->rmf_flags == RMF_F_NO_SIZE_CHECK)) @@ -2289,7 +2202,7 @@ void req_capsule_shrink(struct req_capsule *pill, int offset; fmt = pill->rc_fmt; - LASSERT(fmt != NULL); + LASSERT(fmt); LASSERT(__req_format_is_sane(fmt)); LASSERT(req_capsule_has_field(pill, field, loc)); LASSERT(req_capsule_field_present(pill, field, loc)); @@ -2299,7 +2212,7 @@ void req_capsule_shrink(struct req_capsule *pill, msg = __req_msg(pill, loc); len = lustre_msg_buflen(msg, offset); LASSERTF(newlen <= len, "%s:%s, oldlen=%d, newlen=%d\n", - fmt->rf_name, field->rmf_name, len, newlen); + fmt->rf_name, field->rmf_name, len, newlen); if (loc == RCL_CLIENT) pill->rc_req->rq_reqlen = lustre_shrink_msg(msg, offset, newlen, diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c index e87702073..a23ac5f9a 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c +++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c @@ -75,7 +75,8 @@ } while (0) /* This is a callback from the llog_* functions. - * Assumes caller has already pushed us into the kernel context. */ + * Assumes caller has already pushed us into the kernel context. 
+ */ static int llog_client_open(const struct lu_env *env, struct llog_handle *lgh, struct llog_logid *logid, char *name, enum llog_open_param open_param) @@ -93,7 +94,7 @@ static int llog_client_open(const struct lu_env *env, LASSERT(lgh); req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto out; } @@ -130,7 +131,7 @@ static int llog_client_open(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } @@ -158,7 +159,7 @@ static int llog_client_next_block(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK, LUSTRE_LOG_VERSION, LLOG_ORIGIN_HANDLE_NEXT_BLOCK); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto err_exit; } @@ -179,14 +180,14 @@ static int llog_client_next_block(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } /* The log records are swabbed as they are processed */ ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA); - if (ptr == NULL) { + if (!ptr) { rc = -EFAULT; goto out; } @@ -216,7 +217,7 @@ static int llog_client_prev_block(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK, LUSTRE_LOG_VERSION, LLOG_ORIGIN_HANDLE_PREV_BLOCK); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto err_exit; } @@ -236,13 +237,13 @@ static int llog_client_prev_block(const struct lu_env *env, goto out; body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); - if (body == NULL) { + if (!body) { rc = -EFAULT; goto out; } ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA); - if (ptr == NULL) { + if (!ptr) { rc = -EFAULT; goto out; } @@ -269,7 +270,7 @@ static int llog_client_read_header(const struct lu_env *env, req = ptlrpc_request_alloc_pack(imp, 
&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER, LUSTRE_LOG_VERSION, LLOG_ORIGIN_HANDLE_READ_HEADER); - if (req == NULL) { + if (!req) { rc = -ENOMEM; goto err_exit; } @@ -285,7 +286,7 @@ static int llog_client_read_header(const struct lu_env *env, goto out; hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR); - if (hdr == NULL) { + if (!hdr) { rc = -EFAULT; goto out; } @@ -316,8 +317,9 @@ static int llog_client_close(const struct lu_env *env, struct llog_handle *handle) { /* this doesn't call LLOG_ORIGIN_HANDLE_CLOSE because - the servers all close the file at the end of every - other LLOG_ RPC. */ + * the servers all close the file at the end of every + * other LLOG_ RPC. + */ return 0; } diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c index dac66f5b3..fbccb6221 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c +++ b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c @@ -58,7 +58,7 @@ int llog_initiator_connect(struct llog_ctxt *ctxt) LASSERT(ctxt); new_imp = ctxt->loc_obd->u.cli.cl_import; - LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp, + LASSERTF(!ctxt->loc_imp || ctxt->loc_imp == new_imp, "%p - %p\n", ctxt->loc_imp, new_imp); mutex_lock(&ctxt->loc_mutex); if (ctxt->loc_imp != new_imp) { diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c index cc55b7973..c95a91ce2 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c @@ -131,7 +131,6 @@ static struct ll_rpc_opcode { { SEC_CTX_INIT_CONT, "sec_ctx_init_cont" }, { SEC_CTX_FINI, "sec_ctx_fini" }, { FLD_QUERY, "fld_query" }, - { UPDATE_OBJ, "update_obj" }, }; static struct ll_eopcode { @@ -192,15 +191,15 @@ ptlrpc_ldebugfs_register(struct dentry *root, char *dir, unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV; - LASSERT(*debugfs_root_ret == NULL); - LASSERT(*stats_ret 
== NULL); + LASSERT(!*debugfs_root_ret); + LASSERT(!*stats_ret); svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES, 0); - if (svc_stats == NULL) + if (!svc_stats) return; - if (dir != NULL) { + if (dir) { svc_debugfs_entry = ldebugfs_register(dir, root, NULL, NULL); if (IS_ERR(svc_debugfs_entry)) { lprocfs_free_stats(&svc_stats); @@ -246,11 +245,11 @@ ptlrpc_ldebugfs_register(struct dentry *root, char *dir, rc = ldebugfs_register_stats(svc_debugfs_entry, name, svc_stats); if (rc < 0) { - if (dir != NULL) + if (dir) ldebugfs_remove(&svc_debugfs_entry); lprocfs_free_stats(&svc_stats); } else { - if (dir != NULL) + if (dir) *debugfs_root_ret = svc_debugfs_entry; *stats_ret = svc_stats; } @@ -307,8 +306,9 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file, /* This sanity check is more of an insanity check; we can still * hose a kernel by allowing the request history to grow too - * far. */ - bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + * far. 
+ */ + bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT; if (val > totalram_pages / (2 * bufpages)) return -ERANGE; @@ -454,10 +454,8 @@ static const char *nrs_state2str(enum ptlrpc_nrs_pol_state state) * \param[out] info Holds returned status information */ static void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy, - struct ptlrpc_nrs_pol_info *info) + struct ptlrpc_nrs_pol_info *info) { - LASSERT(policy != NULL); - LASSERT(info != NULL); assert_spin_locked(&policy->pol_nrs->nrs_lock); memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX); @@ -508,7 +506,7 @@ static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n) spin_unlock(&nrs->nrs_lock); infos = kcalloc(num_pols, sizeof(*infos), GFP_NOFS); - if (infos == NULL) { + if (!infos) { rc = -ENOMEM; goto unlock; } @@ -520,8 +518,7 @@ again: pol_idx = 0; - list_for_each_entry(policy, &nrs->nrs_policy_list, - pol_list) { + list_for_each_entry(policy, &nrs->nrs_policy_list, pol_list) { LASSERT(pol_idx < num_pols); nrs_policy_get_info_locked(policy, &tmp); @@ -592,7 +589,7 @@ again: * active: 0 */ seq_printf(m, "%s\n", - !hp ? "\nregular_requests:" : "high_priority_requests:"); + !hp ? "\nregular_requests:" : "high_priority_requests:"); for (pol_idx = 0; pol_idx < num_pols; pol_idx++) { seq_printf(m, " - name: %s\n" @@ -676,7 +673,7 @@ static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file, /** * No [reg|hp] token has been specified */ - if (cmd == NULL) + if (!cmd) goto default_queue; /** @@ -733,15 +730,15 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt, struct list_head *e; struct ptlrpc_request *req; - if (srhi->srhi_req != NULL && - srhi->srhi_seq > svcpt->scp_hist_seq_culled && + if (srhi->srhi_req && srhi->srhi_seq > svcpt->scp_hist_seq_culled && srhi->srhi_seq <= seq) { /* If srhi_req was set previously, hasn't been culled and * we're searching for a seq on or after it (i.e. more * recent), search from it onwards. 
* Since the service history is LRU (i.e. culled reqs will * be near the head), we shouldn't have to do long - * re-scans */ + * re-scans + */ LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq, "%s:%d: seek seq %llu, request seq %llu\n", svcpt->scp_service->srv_name, svcpt->scp_cpt, @@ -919,7 +916,8 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter) * here. The request could contain any old crap, so you * must be just as careful as the service's request * parser. Currently I only print stuff here I know is OK - * to look at coz it was set up in request_in_callback()!!! */ + * to look at coz it was set up in request_in_callback()!!! + */ seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ", req->rq_history_seq, nidstr, libcfs_id2str(req->rq_peer), req->rq_xid, @@ -927,7 +925,7 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter) (s64)req->rq_arrival_time.tv_sec, (long)(req->rq_sent - req->rq_arrival_time.tv_sec), (long)(req->rq_sent - req->rq_deadline)); - if (svc->srv_ops.so_req_printer == NULL) + if (!svc->srv_ops.so_req_printer) seq_putc(s, '\n'); else svc->srv_ops.so_req_printer(s, srhi->srhi_req); @@ -971,7 +969,7 @@ static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n) if (AT_OFF) { seq_printf(m, "adaptive timeouts off, using obd_timeout %u\n", - obd_timeout); + obd_timeout); return 0; } @@ -982,8 +980,8 @@ static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n) s2dhms(&ts, ktime_get_real_seconds() - worstt); seq_printf(m, "%10s : cur %3u worst %3u (at %lld, " - DHMS_FMT" ago) ", "service", - cur, worst, (s64)worstt, DHMS_VARS(&ts)); + DHMS_FMT " ago) ", "service", + cur, worst, (s64)worstt, DHMS_VARS(&ts)); lprocfs_at_hist_helper(m, &svcpt->scp_at_estimate); } @@ -1103,7 +1101,7 @@ void ptlrpc_ldebugfs_register_service(struct dentry *entry, "stats", &svc->srv_debugfs_entry, &svc->srv_stats); - if (svc->srv_debugfs_entry == NULL) + if 
(IS_ERR_OR_NULL(svc->srv_debugfs_entry)) return; ldebugfs_add_vars(svc->srv_debugfs_entry, lproc_vars, NULL); @@ -1129,7 +1127,7 @@ void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount) int opc = opcode_offset(op); svc_stats = req->rq_import->imp_obd->obd_svc_stats; - if (svc_stats == NULL || opc <= 0) + if (!svc_stats || opc <= 0) return; LASSERT(opc < LUSTRE_MAX_OPCODES); if (!(op == LDLM_ENQUEUE || op == MDS_REINT)) @@ -1166,7 +1164,7 @@ EXPORT_SYMBOL(ptlrpc_lprocfs_brw); void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc) { - if (svc->srv_debugfs_entry != NULL) + if (!IS_ERR_OR_NULL(svc->srv_debugfs_entry)) ldebugfs_remove(&svc->srv_debugfs_entry); if (svc->srv_stats) @@ -1198,7 +1196,7 @@ int lprocfs_wr_ping(struct file *file, const char __user *buffer, req = ptlrpc_prep_ping(obd->u.cli.cl_import); up_read(&obd->u.cli.cl_sem); - if (req == NULL) + if (!req) return -ENOMEM; req->rq_send_state = LUSTRE_IMP_FULL; @@ -1228,7 +1226,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer, const char prefix[] = "connection="; const int prefix_len = sizeof(prefix) - 1; - if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len) + if (count > PAGE_SIZE - 1 || count <= prefix_len) return -EINVAL; kbuf = kzalloc(count + 1, GFP_NOFS); @@ -1298,7 +1296,7 @@ int lprocfs_rd_pinger_recov(struct seq_file *m, void *n) EXPORT_SYMBOL(lprocfs_rd_pinger_recov); int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer, - size_t count, loff_t *off) + size_t count, loff_t *off) { struct obd_device *obd = ((struct seq_file *)file->private_data)->private; struct client_obd *cli = &obd->u.cli; diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c index c5d7ff5cb..10b8fe82a 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c +++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c @@ -56,7 +56,6 @@ static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len, lnet_md_t 
md; LASSERT(portal != 0); - LASSERT(conn != NULL); CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer)); md.start = base; md.length = len; @@ -88,7 +87,8 @@ static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len, int rc2; /* We're going to get an UNLINK event when I unlink below, * which will complete just like any other failed send, so - * I fall through and return success here! */ + * I fall through and return success here! + */ CERROR("LNetPut(%s, %d, %lld) failed: %d\n", libcfs_id2str(conn->c_peer), portal, xid, rc); rc2 = LNetMDUnlink(*mdh); @@ -130,7 +130,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) LASSERT(desc->bd_md_count == 0); LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT); LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES); - LASSERT(desc->bd_req != NULL); + LASSERT(desc->bd_req); LASSERT(desc->bd_type == BULK_PUT_SINK || desc->bd_type == BULK_GET_SOURCE); @@ -153,7 +153,8 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) * using the same RDMA match bits after an error. * * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The - * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */ + * first bulk XID is power-of-two aligned before rq_xid. LU-1431 + */ xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1); LASSERTF(!(desc->bd_registered && req->rq_send_state != LUSTRE_IMP_REPLAY) || @@ -209,7 +210,8 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req) } /* Set rq_xid to matchbits of the final bulk so that server can - * infer the number of bulks that were prepared */ + * infer the number of bulks that were prepared + */ req->rq_xid = --xid; LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK), "bd_last_xid = x%llu, rq_xid = x%llu\n", @@ -260,7 +262,8 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async) /* the unlink ensures the callback happens ASAP and is the last * one. 
If it fails, it must be because completion just happened, * but we must still l_wait_event() in this case to give liblustre - * a chance to run client_bulk_callback() */ + * a chance to run client_bulk_callback() + */ mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw); if (ptlrpc_client_bulk_active(req) == 0) /* completed or */ @@ -273,14 +276,15 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async) if (async) return 0; - if (req->rq_set != NULL) + if (req->rq_set) wq = &req->rq_set->set_waitq; else wq = &req->rq_reply_waitq; for (;;) { /* Network access will complete in finite time but the HUGE - * timeout lets us CWARN for visibility of sluggish NALs */ + * timeout lets us CWARN for visibility of sluggish LNDs + */ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK), cfs_time_seconds(1), NULL, NULL); rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi); @@ -305,13 +309,13 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags) req->rq_arrival_time.tv_sec, 1); if (!(flags & PTLRPC_REPLY_EARLY) && - (req->rq_type != PTL_RPC_MSG_ERR) && - (req->rq_reqmsg != NULL) && + (req->rq_type != PTL_RPC_MSG_ERR) && req->rq_reqmsg && !(lustre_msg_get_flags(req->rq_reqmsg) & (MSG_RESENT | MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) { /* early replies, errors and recovery requests don't count - * toward our service time estimate */ + * toward our service time estimate + */ int oldse = at_measured(&svcpt->scp_at_estimate, service_time); if (oldse != 0) { @@ -325,7 +329,8 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags) lustre_msg_set_service_time(req->rq_repmsg, service_time); /* Report service time estimate for future client reqs, but report 0 * (to be ignored by client) if it's a error reply during recovery. 
- * (bz15815) */ + * (bz15815) + */ if (req->rq_type == PTL_RPC_MSG_ERR && !req->rq_export) lustre_msg_set_timeout(req->rq_repmsg, 0); else @@ -360,10 +365,10 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags) * target_queue_final_reply(). */ LASSERT(req->rq_no_reply == 0); - LASSERT(req->rq_reqbuf != NULL); - LASSERT(rs != NULL); + LASSERT(req->rq_reqbuf); + LASSERT(rs); LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult); - LASSERT(req->rq_repmsg != NULL); + LASSERT(req->rq_repmsg); LASSERT(req->rq_repmsg == rs->rs_msg); LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback); LASSERT(rs->rs_cb_id.cbid_arg == rs); @@ -403,12 +408,12 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags) ptlrpc_at_set_reply(req, flags); - if (req->rq_export == NULL || req->rq_export->exp_connection == NULL) + if (!req->rq_export || !req->rq_export->exp_connection) conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL); else conn = ptlrpc_connection_addref(req->rq_export->exp_connection); - if (unlikely(conn == NULL)) { + if (unlikely(!conn)) { CERROR("not replying on NULL connection\n"); /* bug 9635 */ return -ENOTCONN; } @@ -498,14 +503,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) LASSERT(request->rq_wait_ctx == 0); /* If this is a re-transmit, we're required to have disengaged - * cleanly from the previous attempt */ + * cleanly from the previous attempt + */ LASSERT(!request->rq_receiving_reply); LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) && - (request->rq_import->imp_state == LUSTRE_IMP_FULL))); + (request->rq_import->imp_state == LUSTRE_IMP_FULL))); - if (unlikely(obd != NULL && obd->obd_fail)) { + if (unlikely(obd && obd->obd_fail)) { CDEBUG(D_HA, "muting rpc for failed imp obd %s\n", - obd->obd_name); + obd->obd_name); /* this prevents us from waiting in ptlrpc_queue_wait */ spin_lock(&request->rq_lock); request->rq_err = 1; @@ -535,7 +541,7 @@ int ptl_send_rpc(struct ptlrpc_request 
*request, int noreply) goto out; /* bulk register should be done after wrap_request() */ - if (request->rq_bulk != NULL) { + if (request->rq_bulk) { rc = ptlrpc_register_bulk(request); if (rc != 0) goto out; @@ -543,14 +549,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) if (!noreply) { LASSERT(request->rq_replen != 0); - if (request->rq_repbuf == NULL) { - LASSERT(request->rq_repdata == NULL); - LASSERT(request->rq_repmsg == NULL); + if (!request->rq_repbuf) { + LASSERT(!request->rq_repdata); + LASSERT(!request->rq_repmsg); rc = sptlrpc_cli_alloc_repbuf(request, request->rq_replen); if (rc) { /* this prevents us from looping in - * ptlrpc_queue_wait */ + * ptlrpc_queue_wait + */ spin_lock(&request->rq_lock); request->rq_err = 1; spin_unlock(&request->rq_lock); @@ -602,7 +609,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) reply_md.eq_handle = ptlrpc_eq_h; /* We must see the unlink callback to unset rq_reply_unlink, - so we can't auto-unlink */ + * so we can't auto-unlink + */ rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN, &request->rq_reply_md_h); if (rc != 0) { @@ -623,7 +631,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) /* add references on request for request_out_callback */ ptlrpc_request_addref(request); - if (obd != NULL && obd->obd_svc_stats != NULL) + if (obd && obd->obd_svc_stats) lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR, atomic_read(&request->rq_import->imp_inflight)); @@ -632,7 +640,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) ktime_get_real_ts64(&request->rq_arrival_time); request->rq_sent = ktime_get_real_seconds(); /* We give the server rq_timeout secs to process the req, and - add the network latency for our local timeout. */ + * add the network latency for our local timeout. 
+ */ request->rq_deadline = request->rq_sent + request->rq_timeout + ptlrpc_at_get_net_latency(request); @@ -656,7 +665,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) cleanup_me: /* MEUnlink is safe; the PUT didn't even get off the ground, and * nobody apart from the PUT's target has the right nid+XID to - * access the reply buffer. */ + * access the reply buffer. + */ rc2 = LNetMEUnlink(reply_me_h); LASSERT(rc2 == 0); /* UNLINKED callback called synchronously */ @@ -664,7 +674,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply) cleanup_bulk: /* We do sync unlink here as there was no real transfer here so - * the chance to have long unlink to sluggish net is smaller here. */ + * the chance to have long unlink to sluggish net is smaller here. + */ ptlrpc_unregister_bulk(request, 0); out: if (request->rq_memalloc) @@ -692,7 +703,8 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd) /* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL, * which means buffer can only be attached on local CPT, and LND - * threads can find it by grabbing a local lock */ + * threads can find it by grabbing a local lock + */ rc = LNetMEAttach(service->srv_req_portal, match_id, 0, ~0, LNET_UNLINK, rqbd->rqbd_svcpt->scp_cpt >= 0 ? diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c index 7044e1ff6..710fb806f 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c +++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c @@ -13,10 +13,6 @@ * GNU General Public License version 2 for more details. A copy is * included in the COPYING file that accompanied this code. 
- * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * GPL HEADER END */ /* @@ -47,9 +43,6 @@ #include "../../include/linux/libcfs/libcfs.h" #include "ptlrpc_internal.h" -/* XXX: This is just for liblustre. Remove the #if defined directive when the - * "cfs_" prefix is dropped from cfs_list_head. */ - /** * NRS core object. */ @@ -57,7 +50,7 @@ struct nrs_core nrs_core; static int nrs_policy_init(struct ptlrpc_nrs_policy *policy) { - return policy->pol_desc->pd_ops->op_policy_init != NULL ? + return policy->pol_desc->pd_ops->op_policy_init ? policy->pol_desc->pd_ops->op_policy_init(policy) : 0; } @@ -66,7 +59,7 @@ static void nrs_policy_fini(struct ptlrpc_nrs_policy *policy) LASSERT(policy->pol_ref == 0); LASSERT(policy->pol_req_queued == 0); - if (policy->pol_desc->pd_ops->op_policy_fini != NULL) + if (policy->pol_desc->pd_ops->op_policy_fini) policy->pol_desc->pd_ops->op_policy_fini(policy); } @@ -82,7 +75,7 @@ static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy, if (policy->pol_state == NRS_POL_STATE_STOPPED) return -ENODEV; - return policy->pol_desc->pd_ops->op_policy_ctl != NULL ? + return policy->pol_desc->pd_ops->op_policy_ctl ? 
policy->pol_desc->pd_ops->op_policy_ctl(policy, opc, arg) : -ENOSYS; } @@ -91,7 +84,7 @@ static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy) { struct ptlrpc_nrs *nrs = policy->pol_nrs; - if (policy->pol_desc->pd_ops->op_policy_stop != NULL) { + if (policy->pol_desc->pd_ops->op_policy_stop) { spin_unlock(&nrs->nrs_lock); policy->pol_desc->pd_ops->op_policy_stop(policy); @@ -154,7 +147,7 @@ static void nrs_policy_stop_primary(struct ptlrpc_nrs *nrs) { struct ptlrpc_nrs_policy *tmp = nrs->nrs_policy_primary; - if (tmp == NULL) + if (!tmp) return; nrs->nrs_policy_primary = NULL; @@ -220,12 +213,12 @@ static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy) * nrs_policy_flags::PTLRPC_NRS_FL_FALLBACK flag set can * register with NRS core. */ - LASSERT(nrs->nrs_policy_fallback == NULL); + LASSERT(!nrs->nrs_policy_fallback); } else { /** * Shouldn't start primary policy if w/o fallback policy. */ - if (nrs->nrs_policy_fallback == NULL) + if (!nrs->nrs_policy_fallback) return -EPERM; if (policy->pol_state == NRS_POL_STATE_STARTED) @@ -311,7 +304,7 @@ static void nrs_policy_put_locked(struct ptlrpc_nrs_policy *policy) policy->pol_ref--; if (unlikely(policy->pol_ref == 0 && - policy->pol_state == NRS_POL_STATE_STOPPING)) + policy->pol_state == NRS_POL_STATE_STOPPING)) nrs_policy_stop0(policy); } @@ -326,7 +319,7 @@ static void nrs_policy_put(struct ptlrpc_nrs_policy *policy) * Find and return a policy by name. 
*/ static struct ptlrpc_nrs_policy *nrs_policy_find_locked(struct ptlrpc_nrs *nrs, - char *name) + char *name) { struct ptlrpc_nrs_policy *tmp; @@ -348,10 +341,10 @@ static void nrs_resource_put(struct ptlrpc_nrs_resource *res) { struct ptlrpc_nrs_policy *policy = res->res_policy; - if (policy->pol_desc->pd_ops->op_res_put != NULL) { + if (policy->pol_desc->pd_ops->op_res_put) { struct ptlrpc_nrs_resource *parent; - for (; res != NULL; res = parent) { + for (; res; res = parent) { parent = res->res_parent; policy->pol_desc->pd_ops->op_res_put(policy, res); } @@ -390,12 +383,11 @@ struct ptlrpc_nrs_resource *nrs_resource_get(struct ptlrpc_nrs_policy *policy, rc = policy->pol_desc->pd_ops->op_res_get(policy, nrq, res, &tmp, moving_req); if (rc < 0) { - if (res != NULL) + if (res) nrs_resource_put(res); return NULL; } - LASSERT(tmp != NULL); tmp->res_parent = res; tmp->res_policy = policy; res = tmp; @@ -445,7 +437,7 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, nrs_policy_get_locked(fallback); primary = nrs->nrs_policy_primary; - if (primary != NULL) + if (primary) nrs_policy_get_locked(primary); spin_unlock(&nrs->nrs_lock); @@ -454,9 +446,9 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, * Obtain resource hierarchy references. */ resp[NRS_RES_FALLBACK] = nrs_resource_get(fallback, nrq, moving_req); - LASSERT(resp[NRS_RES_FALLBACK] != NULL); + LASSERT(resp[NRS_RES_FALLBACK]); - if (primary != NULL) { + if (primary) { resp[NRS_RES_PRIMARY] = nrs_resource_get(primary, nrq, moving_req); /** @@ -465,7 +457,7 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, * reference on the policy as it will not be used for this * request. 
*/ - if (resp[NRS_RES_PRIMARY] == NULL) + if (!resp[NRS_RES_PRIMARY]) nrs_policy_put(primary); } } @@ -482,11 +474,10 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs, static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp) { struct ptlrpc_nrs_policy *pols[NRS_RES_MAX]; - struct ptlrpc_nrs *nrs = NULL; int i; for (i = 0; i < NRS_RES_MAX; i++) { - if (resp[i] != NULL) { + if (resp[i]) { pols[i] = resp[i]->res_policy; nrs_resource_put(resp[i]); resp[i] = NULL; @@ -496,18 +487,9 @@ static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp) } for (i = 0; i < NRS_RES_MAX; i++) { - if (pols[i] == NULL) - continue; - - if (nrs == NULL) { - nrs = pols[i]->pol_nrs; - spin_lock(&nrs->nrs_lock); - } - nrs_policy_put_locked(pols[i]); + if (pols[i]) + nrs_policy_put(pols[i]); } - - if (nrs != NULL) - spin_unlock(&nrs->nrs_lock); } /** @@ -536,7 +518,7 @@ struct ptlrpc_nrs_request *nrs_request_get(struct ptlrpc_nrs_policy *policy, nrq = policy->pol_desc->pd_ops->op_req_get(policy, peek, force); - LASSERT(ergo(nrq != NULL, nrs_request_policy(nrq) == policy)); + LASSERT(ergo(nrq, nrs_request_policy(nrq) == policy)); return nrq; } @@ -562,7 +544,7 @@ static inline void nrs_request_enqueue(struct ptlrpc_nrs_request *nrq) * the preferred choice. 
*/ for (i = NRS_RES_MAX - 1; i >= 0; i--) { - if (nrq->nr_res_ptrs[i] == NULL) + if (!nrq->nr_res_ptrs[i]) continue; nrq->nr_res_idx = i; @@ -632,7 +614,7 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name, spin_lock(&nrs->nrs_lock); policy = nrs_policy_find_locked(nrs, name); - if (policy == NULL) { + if (!policy) { rc = -ENOENT; goto out; } @@ -654,7 +636,7 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name, break; } out: - if (policy != NULL) + if (policy) nrs_policy_put_locked(policy); spin_unlock(&nrs->nrs_lock); @@ -679,7 +661,7 @@ static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name) spin_lock(&nrs->nrs_lock); policy = nrs_policy_find_locked(nrs, name); - if (policy == NULL) { + if (!policy) { spin_unlock(&nrs->nrs_lock); CERROR("Can't find NRS policy %s\n", name); @@ -712,7 +694,7 @@ static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name) nrs_policy_fini(policy); - LASSERT(policy->pol_private == NULL); + LASSERT(!policy->pol_private); kfree(policy); return 0; @@ -736,18 +718,16 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs, struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt; int rc; - LASSERT(svcpt != NULL); - LASSERT(desc->pd_ops != NULL); - LASSERT(desc->pd_ops->op_res_get != NULL); - LASSERT(desc->pd_ops->op_req_get != NULL); - LASSERT(desc->pd_ops->op_req_enqueue != NULL); - LASSERT(desc->pd_ops->op_req_dequeue != NULL); - LASSERT(desc->pd_compat != NULL); + LASSERT(desc->pd_ops->op_res_get); + LASSERT(desc->pd_ops->op_req_get); + LASSERT(desc->pd_ops->op_req_enqueue); + LASSERT(desc->pd_ops->op_req_dequeue); + LASSERT(desc->pd_compat); policy = kzalloc_node(sizeof(*policy), GFP_NOFS, cfs_cpt_spread_node(svcpt->scp_service->srv_cptable, svcpt->scp_cpt)); - if (policy == NULL) + if (!policy) return -ENOMEM; policy->pol_nrs = nrs; @@ -767,7 +747,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs, spin_lock(&nrs->nrs_lock); tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name); 
- if (tmp != NULL) { + if (tmp) { CERROR("NRS policy %s has been registered, can't register it for %s\n", policy->pol_desc->pd_name, svcpt->scp_service->srv_name); @@ -817,7 +797,7 @@ static void ptlrpc_nrs_req_add_nolock(struct ptlrpc_request *req) */ if (unlikely(list_empty(&policy->pol_list_queued))) list_add_tail(&policy->pol_list_queued, - &policy->pol_nrs->nrs_policy_queued); + &policy->pol_nrs->nrs_policy_queued); } /** @@ -957,14 +937,14 @@ static int nrs_svcpt_setup_locked(struct ptlrpc_service_part *svcpt) /** * Optionally allocate a high-priority NRS head. */ - if (svcpt->scp_service->srv_ops.so_hpreq_handler == NULL) + if (!svcpt->scp_service->srv_ops.so_hpreq_handler) goto out; svcpt->scp_nrs_hp = kzalloc_node(sizeof(*svcpt->scp_nrs_hp), GFP_NOFS, cfs_cpt_spread_node(svcpt->scp_service->srv_cptable, svcpt->scp_cpt)); - if (svcpt->scp_nrs_hp == NULL) { + if (!svcpt->scp_nrs_hp) { rc = -ENOMEM; goto out; } @@ -998,8 +978,7 @@ again: nrs = nrs_svcpt2nrs(svcpt, hp); nrs->nrs_stopping = 1; - list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, - pol_list) { + list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) { rc = nrs_policy_unregister(nrs, policy->pol_desc->pd_name); LASSERT(rc == 0); } @@ -1089,7 +1068,7 @@ again: } } - if (desc->pd_ops->op_lprocfs_fini != NULL) + if (desc->pd_ops->op_lprocfs_fini) desc->pd_ops->op_lprocfs_fini(svc); } @@ -1115,15 +1094,15 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf) { struct ptlrpc_service *svc; struct ptlrpc_nrs_pol_desc *desc; + size_t len; int rc = 0; - LASSERT(conf != NULL); - LASSERT(conf->nc_ops != NULL); - LASSERT(conf->nc_compat != NULL); + LASSERT(conf->nc_ops); + LASSERT(conf->nc_compat); LASSERT(ergo(conf->nc_compat == nrs_policy_compat_one, - conf->nc_compat_svc_name != NULL)); + conf->nc_compat_svc_name)); LASSERT(ergo((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0, - conf->nc_owner != NULL)); + conf->nc_owner)); conf->nc_name[NRS_POL_NAME_MAX - 
1] = '\0'; @@ -1146,7 +1125,7 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf) mutex_lock(&nrs_core.nrs_mutex); - if (nrs_policy_find_desc_locked(conf->nc_name) != NULL) { + if (nrs_policy_find_desc_locked(conf->nc_name)) { CERROR("NRS: failing to register policy %s which has already been registered with NRS core!\n", conf->nc_name); rc = -EEXIST; @@ -1159,7 +1138,12 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf) goto fail; } - strncpy(desc->pd_name, conf->nc_name, NRS_POL_NAME_MAX); + len = strlcpy(desc->pd_name, conf->nc_name, sizeof(desc->pd_name)); + if (len >= sizeof(desc->pd_name)) { + kfree(desc); + rc = -E2BIG; + goto fail; + } desc->pd_ops = conf->nc_ops; desc->pd_compat = conf->nc_compat; desc->pd_compat_svc_name = conf->nc_compat_svc_name; @@ -1224,7 +1208,7 @@ again: * No need to take a reference to other modules here, as we * will be calling from the module's init() function. */ - if (desc->pd_ops->op_lprocfs_init != NULL) { + if (desc->pd_ops->op_lprocfs_init) { rc = desc->pd_ops->op_lprocfs_init(svc); if (rc != 0) { rc2 = nrs_policy_unregister_locked(desc); @@ -1288,7 +1272,7 @@ int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc) if (!nrs_policy_compatible(svc, desc)) continue; - if (desc->pd_ops->op_lprocfs_init != NULL) { + if (desc->pd_ops->op_lprocfs_init) { rc = desc->pd_ops->op_lprocfs_init(svc); if (rc != 0) goto failed; @@ -1329,7 +1313,7 @@ void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc) if (!nrs_policy_compatible(svc, desc)) continue; - if (desc->pd_ops->op_lprocfs_fini != NULL) + if (desc->pd_ops->op_lprocfs_fini) desc->pd_ops->op_lprocfs_fini(svc); } @@ -1376,7 +1360,8 @@ void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req) if (req->rq_nrq.nr_initialized) { nrs_resource_put_safe(req->rq_nrq.nr_res_ptrs); /* no protection on bit nr_initialized because no - * contention at this late stage */ + * contention at this late stage + */ req->rq_nrq.nr_finalized = 1; } } @@ 
-1434,7 +1419,7 @@ static void nrs_request_removed(struct ptlrpc_nrs_policy *policy) policy->pol_nrs->nrs_req_queued); list_move_tail(&policy->pol_list_queued, - &policy->pol_nrs->nrs_policy_queued); + &policy->pol_nrs->nrs_policy_queued); } } @@ -1466,10 +1451,9 @@ ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp, * Always try to drain requests from all NRS polices even if they are * inactive, because the user can change policy status at runtime. */ - list_for_each_entry(policy, &nrs->nrs_policy_queued, - pol_list_queued) { + list_for_each_entry(policy, &nrs->nrs_policy_queued, pol_list_queued) { nrq = nrs_request_get(policy, peek, force); - if (nrq != NULL) { + if (nrq) { if (likely(!peek)) { nrq->nr_started = 1; @@ -1619,8 +1603,7 @@ void ptlrpc_nrs_fini(void) struct ptlrpc_nrs_pol_desc *desc; struct ptlrpc_nrs_pol_desc *tmp; - list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies, - pd_list) { + list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies, pd_list) { list_del_init(&desc->pd_list); kfree(desc); } diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c index 8e21f0cdc..b123a9324 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c +++ b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c @@ -13,10 +13,6 @@ * GNU General Public License version 2 for more details. A copy is * included in the COPYING file that accompanied this code. 
- * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * GPL HEADER END */ /* @@ -83,7 +79,7 @@ static int nrs_fifo_start(struct ptlrpc_nrs_policy *policy) head = kzalloc_node(sizeof(*head), GFP_NOFS, cfs_cpt_spread_node(nrs_pol2cptab(policy), nrs_pol2cptid(policy))); - if (head == NULL) + if (!head) return -ENOMEM; INIT_LIST_HEAD(&head->fh_list); @@ -104,7 +100,7 @@ static void nrs_fifo_stop(struct ptlrpc_nrs_policy *policy) { struct nrs_fifo_head *head = policy->pol_private; - LASSERT(head != NULL); + LASSERT(head); LASSERT(list_empty(&head->fh_list)); kfree(head); @@ -167,9 +163,9 @@ struct ptlrpc_nrs_request *nrs_fifo_req_get(struct ptlrpc_nrs_policy *policy, nrq = unlikely(list_empty(&head->fh_list)) ? NULL : list_entry(head->fh_list.next, struct ptlrpc_nrs_request, - nr_u.fifo.fr_list); + nr_u.fifo.fr_list); - if (likely(!peek && nrq != NULL)) { + if (likely(!peek && nrq)) { struct ptlrpc_request *req = container_of(nrq, struct ptlrpc_request, rq_nrq); diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c index f3cb5184f..492d63fad 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c +++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c @@ -133,7 +133,8 @@ EXPORT_SYMBOL(lustre_msg_size_v2); * NOTE: this should only be used for NEW requests, and should always be * in the form of a v2 request. If this is a connection to a v1 * target then the first buffer will be stripped because the ptlrpc - * data is part of the lustre_msg_v1 header. b=14043 */ + * data is part of the lustre_msg_v1 header. 
b=14043 + */ int lustre_msg_size(__u32 magic, int count, __u32 *lens) { __u32 size[] = { sizeof(struct ptlrpc_body) }; @@ -157,7 +158,8 @@ int lustre_msg_size(__u32 magic, int count, __u32 *lens) EXPORT_SYMBOL(lustre_msg_size); /* This is used to determine the size of a buffer that was already packed - * and will correctly handle the different message formats. */ + * and will correctly handle the different message formats. + */ int lustre_packed_msg_size(struct lustre_msg *msg) { switch (msg->lm_magic) { @@ -183,7 +185,7 @@ void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens, for (i = 0; i < count; i++) msg->lm_buflens[i] = lens[i]; - if (bufs == NULL) + if (!bufs) return; ptr = (char *)msg + lustre_msg_hdr_size_v2(count); @@ -267,7 +269,8 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt) spin_unlock(&svcpt->scp_rep_lock); /* If we cannot get anything for some long time, we better - * bail out instead of waiting infinitely */ + * bail out instead of waiting infinitely + */ lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL); rc = l_wait_event(svcpt->scp_rep_waitq, !list_empty(&svcpt->scp_rep_idle), &lwi); @@ -277,7 +280,7 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt) } rs = list_entry(svcpt->scp_rep_idle.next, - struct ptlrpc_reply_state, rs_list); + struct ptlrpc_reply_state, rs_list); list_del(&rs->rs_list); spin_unlock(&svcpt->scp_rep_lock); @@ -306,7 +309,7 @@ int lustre_pack_reply_v2(struct ptlrpc_request *req, int count, struct ptlrpc_reply_state *rs; int msg_len, rc; - LASSERT(req->rq_reply_state == NULL); + LASSERT(!req->rq_reply_state); if ((flags & LPRFL_EARLY_REPLY) == 0) { spin_lock(&req->rq_lock); @@ -383,7 +386,6 @@ void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size) { int i, offset, buflen, bufcount; - LASSERT(m != NULL); LASSERT(n >= 0); bufcount = m->lm_bufcount; @@ -488,7 +490,7 @@ void lustre_free_reply_state(struct ptlrpc_reply_state *rs) LASSERT(!rs->rs_difficult || rs->rs_handled); 
LASSERT(!rs->rs_on_net); LASSERT(!rs->rs_scheduled); - LASSERT(rs->rs_export == NULL); + LASSERT(!rs->rs_export); LASSERT(rs->rs_nlocks == 0); LASSERT(list_empty(&rs->rs_exp_list)); LASSERT(list_empty(&rs->rs_obd_list)); @@ -677,7 +679,8 @@ int lustre_msg_buflen(struct lustre_msg *m, int n) EXPORT_SYMBOL(lustre_msg_buflen); /* NB return the bufcount for lustre_msg_v2 format, so if message is packed - * in V1 format, the result is one bigger. (add struct ptlrpc_body). */ + * in V1 format, the result is one bigger. (add struct ptlrpc_body). + */ int lustre_msg_bufcount(struct lustre_msg *m) { switch (m->lm_magic) { @@ -705,7 +708,7 @@ char *lustre_msg_string(struct lustre_msg *m, int index, int max_len) LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic); } - if (str == NULL) { + if (!str) { CERROR("can't unpack string in msg %p buffer[%d]\n", m, index); return NULL; } @@ -740,7 +743,6 @@ static inline void *__lustre_swab_buf(struct lustre_msg *msg, int index, { void *ptr = NULL; - LASSERT(msg != NULL); switch (msg->lm_magic) { case LUSTRE_MSG_MAGIC_V2: ptr = lustre_msg_buf_v2(msg, index, min_size); @@ -799,7 +801,8 @@ __u32 lustre_msg_get_flags(struct lustre_msg *msg) /* no break */ default: /* flags might be printed in debug code while message - * uninitialized */ + * uninitialized + */ return 0; } } @@ -1032,7 +1035,8 @@ int lustre_msg_get_status(struct lustre_msg *msg) /* no break */ default: /* status might be printed in debug code while message - * uninitialized */ + * uninitialized + */ return -EINVAL; } } @@ -1368,7 +1372,8 @@ void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid) struct ptlrpc_body *pb; /* Don't set jobid for ldlm ast RPCs, they've been shrunk. - * See the comment in ptlrpc_request_pack(). */ + * See the comment in ptlrpc_request_pack(). 
+ */ if (!opc || opc == LDLM_BL_CALLBACK || opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK) return; @@ -1377,7 +1382,7 @@ void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid) sizeof(struct ptlrpc_body)); LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg); - if (jobid != NULL) + if (jobid) memcpy(pb->pb_jobid, jobid, JOBSTATS_JOBID_SIZE); else if (pb->pb_jobid[0] == '\0') lustre_get_jobid(pb->pb_jobid); @@ -1427,7 +1432,7 @@ int do_set_info_async(struct obd_import *imp, int rc; req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO); - if (req == NULL) + if (!req) return -ENOMEM; req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, @@ -1488,7 +1493,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b) * clients and servers without ptlrpc_body_v2 (< 2.3) * do not swab any fields beyond pb_jobid, as we are * using this swab function for both ptlrpc_body - * and ptlrpc_body_v2. */ + * and ptlrpc_body_v2. + */ CLASSERT(offsetof(typeof(*b), pb_jobid) != 0); } EXPORT_SYMBOL(lustre_swab_ptlrpc_body); @@ -1502,7 +1508,8 @@ void lustre_swab_connect(struct obd_connect_data *ocd) __swab32s(&ocd->ocd_index); __swab32s(&ocd->ocd_brw_size); /* ocd_blocksize and ocd_inodespace don't need to be swabbed because - * they are 8-byte values */ + * they are 8-byte values + */ __swab16s(&ocd->ocd_grant_extent); __swab32s(&ocd->ocd_unused); __swab64s(&ocd->ocd_transno); @@ -1512,7 +1519,8 @@ void lustre_swab_connect(struct obd_connect_data *ocd) /* Fields after ocd_cksum_types are only accessible by the receiver * if the corresponding flag in ocd_connect_flags is set. Accessing * any field after ocd_maxbytes on the receiver without a valid flag - * may result in out-of-bound memory access and kernel oops. */ + * may result in out-of-bound memory access and kernel oops. 
+ */ if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE) __swab32s(&ocd->ocd_max_easize); if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES) @@ -1848,20 +1856,6 @@ void lustre_swab_fiemap(struct ll_user_fiemap *fiemap) } EXPORT_SYMBOL(lustre_swab_fiemap); -void lustre_swab_idx_info(struct idx_info *ii) -{ - __swab32s(&ii->ii_magic); - __swab32s(&ii->ii_flags); - __swab16s(&ii->ii_count); - __swab32s(&ii->ii_attrs); - lustre_swab_lu_fid(&ii->ii_fid); - __swab64s(&ii->ii_version); - __swab64s(&ii->ii_hash_start); - __swab64s(&ii->ii_hash_end); - __swab16s(&ii->ii_keysize); - __swab16s(&ii->ii_recsize); -} - void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr) { __swab32s(&rr->rr_opcode); @@ -1914,7 +1908,7 @@ static void print_lum(struct lov_user_md *lum) CDEBUG(D_OTHER, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size); CDEBUG(D_OTHER, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count); CDEBUG(D_OTHER, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n", - lum->lmm_stripe_offset); + lum->lmm_stripe_offset); } static void lustre_swab_lmm_oi(struct ost_id *oi) @@ -1986,7 +1980,8 @@ static void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d) { /* the lock data is a union and the first two fields are always an * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock - * data the same way. */ + * data the same way. 
+ */ __swab64s(&d->l_extent.start); __swab64s(&d->l_extent.end); __swab64s(&d->l_extent.gid); @@ -2035,16 +2030,6 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r) } EXPORT_SYMBOL(lustre_swab_ldlm_reply); -void lustre_swab_quota_body(struct quota_body *b) -{ - lustre_swab_lu_fid(&b->qb_fid); - lustre_swab_lu_fid((struct lu_fid *)&b->qb_id); - __swab32s(&b->qb_flags); - __swab64s(&b->qb_count); - __swab64s(&b->qb_usage); - __swab64s(&b->qb_slv_ver); -} - /* Dump functions */ void dump_ioo(struct obd_ioobj *ioo) { @@ -2288,24 +2273,6 @@ void lustre_swab_hsm_request(struct hsm_request *hr) } EXPORT_SYMBOL(lustre_swab_hsm_request); -void lustre_swab_update_buf(struct update_buf *ub) -{ - __swab32s(&ub->ub_magic); - __swab32s(&ub->ub_count); -} -EXPORT_SYMBOL(lustre_swab_update_buf); - -void lustre_swab_update_reply_buf(struct update_reply *ur) -{ - int i; - - __swab32s(&ur->ur_version); - __swab32s(&ur->ur_count); - for (i = 0; i < ur->ur_count; i++) - __swab32s(&ur->ur_lens[i]); -} -EXPORT_SYMBOL(lustre_swab_update_reply_buf); - void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl) { __swab64s(&msl->msl_flags); diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c index fb2d5236a..8a869315c 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c +++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c @@ -68,7 +68,7 @@ int ptlrpc_obd_ping(struct obd_device *obd) struct ptlrpc_request *req; req = ptlrpc_prep_ping(obd->u.cli.cl_import); - if (req == NULL) + if (!req) return -ENOMEM; req->rq_send_state = LUSTRE_IMP_FULL; @@ -86,7 +86,7 @@ static int ptlrpc_ping(struct obd_import *imp) struct ptlrpc_request *req; req = ptlrpc_prep_ping(imp); - if (req == NULL) { + if (!req) { CERROR("OOM trying to ping %s->%s\n", imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd)); @@ -242,7 +242,7 @@ static int ptlrpc_pinger_main(void *arg) list_for_each(iter, &pinger_imports) { struct obd_import *imp = list_entry(iter, 
struct obd_import, - imp_pinger_chain); + imp_pinger_chain); ptlrpc_pinger_process_import(imp, this_ping); /* obd_timeout might have changed */ @@ -257,11 +257,12 @@ static int ptlrpc_pinger_main(void *arg) /* Wait until the next ping time, or until we're stopped. */ time_to_next_wake = pinger_check_timeout(this_ping); /* The ping sent by ptlrpc_send_rpc may get sent out - say .01 second after this. - ptlrpc_pinger_sending_on_import will then set the - next ping time to next_ping + .01 sec, which means - we will SKIP the next ping at next_ping, and the - ping will get sent 2 timeouts from now! Beware. */ + * say .01 second after this. + * ptlrpc_pinger_sending_on_import will then set the + * next ping time to next_ping + .01 sec, which means + * we will SKIP the next ping at next_ping, and the + * ping will get sent 2 timeouts from now! Beware. + */ CDEBUG(D_INFO, "next wakeup in " CFS_DURATION_T " (%ld)\n", time_to_next_wake, cfs_time_add(this_ping, @@ -293,6 +294,7 @@ static struct ptlrpc_thread pinger_thread; int ptlrpc_start_pinger(void) { struct l_wait_info lwi = { 0 }; + struct task_struct *task; int rc; if (!thread_is_init(&pinger_thread) && @@ -303,10 +305,11 @@ int ptlrpc_start_pinger(void) strcpy(pinger_thread.t_name, "ll_ping"); - rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread, - "%s", pinger_thread.t_name)); - if (IS_ERR_VALUE(rc)) { - CERROR("cannot start thread: %d\n", rc); + task = kthread_run(ptlrpc_pinger_main, &pinger_thread, + pinger_thread.t_name); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("cannot start pinger thread: rc = %d\n", rc); return rc; } l_wait_event(pinger_thread.t_ctl_waitq, @@ -401,7 +404,8 @@ EXPORT_SYMBOL(ptlrpc_pinger_del_import); * be called when timeout happens. 
*/ static struct timeout_item *ptlrpc_new_timeout(int time, - enum timeout_event event, timeout_cb_t cb, void *data) + enum timeout_event event, + timeout_cb_t cb, void *data) { struct timeout_item *ti; @@ -489,7 +493,6 @@ int ptlrpc_del_timeout_client(struct list_head *obd_list, break; } } - LASSERTF(ti != NULL, "ti is NULL !\n"); if (list_empty(&ti->ti_obd_list)) { list_del(&ti->ti_chain); kfree(ti); diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h index 8f67e0562..6ca26c98d 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h @@ -101,8 +101,6 @@ struct nrs_core { * registration/unregistration, and NRS core lprocfs operations. */ struct mutex nrs_mutex; - /* XXX: This is just for liblustre. Remove the #if defined directive - * when the * "cfs_" prefix is dropped from cfs_list_head. */ /** * List of all policy descriptors registered with NRS core; protected * by nrs_core::nrs_mutex. diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c index c4f1d0f5d..a8ec0e9d7 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c @@ -162,8 +162,8 @@ static void __exit ptlrpc_exit(void) MODULE_AUTHOR("OpenSFS, Inc. 
<http://www.lustre.org/>"); MODULE_DESCRIPTION("Lustre Request Processor and Lock Management"); +MODULE_VERSION(LUSTRE_VERSION_STRING); MODULE_LICENSE("GPL"); -MODULE_VERSION("1.0.0"); module_init(ptlrpc_init); module_exit(ptlrpc_exit); diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c index 60fb0ced7..db003f5da 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c +++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c @@ -163,8 +163,6 @@ void ptlrpcd_wake(struct ptlrpc_request *req) { struct ptlrpc_request_set *rq_set = req->rq_set; - LASSERT(rq_set != NULL); - wake_up(&rq_set->set_waitq); } EXPORT_SYMBOL(ptlrpcd_wake); @@ -176,7 +174,7 @@ ptlrpcd_select_pc(struct ptlrpc_request *req) int cpt; int idx; - if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL) + if (req && req->rq_send_state != LUSTRE_IMP_FULL) return &ptlrpcd_rcv; cpt = cfs_cpt_current(cfs_cpt_table, 1); @@ -209,11 +207,10 @@ static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des, if (likely(!list_empty(&src->set_new_requests))) { list_for_each_safe(pos, tmp, &src->set_new_requests) { req = list_entry(pos, struct ptlrpc_request, - rq_set_chain); + rq_set_chain); req->rq_set = des; } - list_splice_init(&src->set_new_requests, - &des->set_requests); + list_splice_init(&src->set_new_requests, &des->set_requests); rc = atomic_read(&src->set_new_count); atomic_add(rc, &des->set_remaining); atomic_set(&src->set_new_count, 0); @@ -240,10 +237,11 @@ void ptlrpcd_add_req(struct ptlrpc_request *req) req->rq_invalid_rqset = 0; spin_unlock(&req->rq_lock); - l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi); + l_wait_event(req->rq_set_waitq, !req->rq_set, &lwi); } else if (req->rq_set) { /* If we have a valid "rq_set", just reuse it to avoid double - * linked. */ + * linked. 
+ */ LASSERT(req->rq_phase == RQ_PHASE_NEW); LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY); @@ -286,9 +284,9 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) spin_lock(&set->set_new_req_lock); if (likely(!list_empty(&set->set_new_requests))) { list_splice_init(&set->set_new_requests, - &set->set_requests); + &set->set_requests); atomic_add(atomic_read(&set->set_new_count), - &set->set_remaining); + &set->set_remaining); atomic_set(&set->set_new_count, 0); /* * Need to calculate its timeout. @@ -321,7 +319,8 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) rc |= ptlrpc_check_set(env, set); /* NB: ptlrpc_check_set has already moved completed request at the - * head of seq::set_requests */ + * head of seq::set_requests + */ list_for_each_safe(pos, tmp, &set->set_requests) { req = list_entry(pos, struct ptlrpc_request, rq_set_chain); if (req->rq_phase != RQ_PHASE_COMPLETE) @@ -339,7 +338,8 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) rc = atomic_read(&set->set_new_count); /* If we have nothing to do, check whether we can take some - * work from our partner threads. */ + * work from our partner threads. + */ if (rc == 0 && pc->pc_npartners > 0) { struct ptlrpcd_ctl *partner; struct ptlrpc_request_set *ps; @@ -349,12 +349,12 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) partner = pc->pc_partners[pc->pc_cursor++]; if (pc->pc_cursor >= pc->pc_npartners) pc->pc_cursor = 0; - if (partner == NULL) + if (!partner) continue; spin_lock(&partner->pc_lock); ps = partner->pc_set; - if (ps == NULL) { + if (!ps) { spin_unlock(&partner->pc_lock); continue; } @@ -422,7 +422,6 @@ static int ptlrpcd(void *arg) complete(&pc->pc_starting); /* - * This mainloop strongly resembles ptlrpc_set_wait() except that our * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when * there are requests in the set. 
New requests come in on the set's @@ -580,7 +579,7 @@ int ptlrpcd_start(struct ptlrpcd_ctl *pc) return 0; out_set: - if (pc->pc_set != NULL) { + if (pc->pc_set) { struct ptlrpc_request_set *set = pc->pc_set; spin_lock(&pc->pc_lock); @@ -631,7 +630,7 @@ void ptlrpcd_free(struct ptlrpcd_ctl *pc) out: if (pc->pc_npartners > 0) { - LASSERT(pc->pc_partners != NULL); + LASSERT(pc->pc_partners); kfree(pc->pc_partners); pc->pc_partners = NULL; @@ -645,7 +644,7 @@ static void ptlrpcd_fini(void) int i; int j; - if (ptlrpcds != NULL) { + if (ptlrpcds) { for (i = 0; i < ptlrpcds_num; i++) { if (!ptlrpcds[i]) break; diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c index db6626cab..30d9a164e 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/recover.c +++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c @@ -107,14 +107,14 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) /* Replay all the committed open requests on committed_list first */ if (!list_empty(&imp->imp_committed_list)) { tmp = imp->imp_committed_list.prev; - req = list_entry(tmp, struct ptlrpc_request, - rq_replay_list); + req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); /* The last request on committed_list hasn't been replayed */ if (req->rq_transno > last_transno) { /* Since the imp_committed_list is immutable before * all of it's requests being replayed, it's safe to - * use a cursor to accelerate the search */ + * use a cursor to accelerate the search + */ imp->imp_replay_cursor = imp->imp_replay_cursor->next; while (imp->imp_replay_cursor != @@ -137,8 +137,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) } /* All the requests in committed list have been replayed, let's replay - * the imp_replay_list */ - if (req == NULL) { + * the imp_replay_list + */ + if (!req) { list_for_each_safe(tmp, pos, &imp->imp_replay_list) { req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); @@ -152,15 +153,16 @@ int 
ptlrpc_replay_next(struct obd_import *imp, int *inflight) /* If need to resend the last sent transno (because a reconnect * has occurred), then stop on the matching req and send it again. * If, however, the last sent transno has been committed then we - * continue replay from the next request. */ - if (req != NULL && imp->imp_resend_replay) + * continue replay from the next request. + */ + if (req && imp->imp_resend_replay) lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT); spin_lock(&imp->imp_lock); imp->imp_resend_replay = 0; spin_unlock(&imp->imp_lock); - if (req != NULL) { + if (req) { rc = ptlrpc_replay_req(req); if (rc) { CERROR("recovery replay error %d for req %llu\n", @@ -192,9 +194,8 @@ int ptlrpc_resend(struct obd_import *imp) return -1; } - list_for_each_entry_safe(req, next, &imp->imp_sending_list, - rq_list) { - LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON, + list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) { + LASSERTF((long)req > PAGE_SIZE && req != LP_POISON, "req %p bad\n", req); LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); if (!ptlrpc_no_resend(req)) @@ -249,7 +250,8 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req) } /* Wait for recovery to complete and resend. If evicted, then - this request will be errored out later.*/ + * this request will be errored out later. + */ spin_lock(&failed_req->rq_lock); if (!failed_req->rq_no_resend) failed_req->rq_resend = 1; @@ -260,7 +262,7 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req) * Administratively active/deactive a client. 
* This should only be called by the ioctl interface, currently * - the lctl deactivate and activate commands - * - echo 0/1 >> /proc/osc/XXX/active + * - echo 0/1 >> /sys/fs/lustre/osc/XXX/active * - client umount -f (ll_umount_begin) */ int ptlrpc_set_import_active(struct obd_import *imp, int active) @@ -271,13 +273,15 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active) LASSERT(obd); /* When deactivating, mark import invalid, and abort in-flight - * requests. */ + * requests. + */ if (!active) { LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n", obd2cli_tgt(imp->imp_obd)); /* set before invalidate to avoid messages about imp_inval - * set without imp_deactive in ptlrpc_import_delay_req */ + * set without imp_deactive in ptlrpc_import_delay_req + */ spin_lock(&imp->imp_lock); imp->imp_deactive = 1; spin_unlock(&imp->imp_lock); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c index 39f5261c9..187fd1d68 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c @@ -94,7 +94,7 @@ int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy) LASSERT(number < SPTLRPC_POLICY_MAX); write_lock(&policy_lock); - if (unlikely(policies[number] == NULL)) { + if (unlikely(!policies[number])) { write_unlock(&policy_lock); CERROR("%s: already unregistered\n", policy->sp_name); return -EINVAL; @@ -126,11 +126,11 @@ struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor) policy = policies[number]; if (policy && !try_module_get(policy->sp_owner)) policy = NULL; - if (policy == NULL) + if (!policy) flag = atomic_read(&loaded); read_unlock(&policy_lock); - if (policy != NULL || flag != 0 || + if (policy || flag != 0 || number != SPTLRPC_POLICY_GSS) break; @@ -327,7 +327,7 @@ static int import_sec_validate_get(struct obd_import *imp, } *sec = sptlrpc_import_sec_ref(imp); - if (*sec == NULL) { + if (!*sec) { CERROR("import %p (%s) with no 
sec\n", imp, ptlrpc_import_state_name(imp->imp_state)); return -EACCES; @@ -429,7 +429,7 @@ int sptlrpc_req_ctx_switch(struct ptlrpc_request *req, reqmsg_size = req->rq_reqlen; if (reqmsg_size != 0) { reqmsg = libcfs_kvzalloc(reqmsg_size, GFP_NOFS); - if (reqmsg == NULL) + if (!reqmsg) return -ENOMEM; memcpy(reqmsg, req->rq_reqmsg, reqmsg_size); } @@ -445,7 +445,8 @@ int sptlrpc_req_ctx_switch(struct ptlrpc_request *req, /* alloc new request buffer * we don't need to alloc reply buffer here, leave it to the - * rest procedure of ptlrpc */ + * rest procedure of ptlrpc + */ if (reqmsg_size != 0) { rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size); if (!rc) { @@ -609,7 +610,7 @@ again: if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) { CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n", - req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc); + req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc); req_off_ctx_list(req, ctx); sptlrpc_req_replace_dead_ctx(req); ctx = req->rq_cli_ctx; @@ -798,7 +799,8 @@ void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode) spin_unlock(&sec->ps_lock); /* force SVC_NULL for context initiation rpc, SVC_INTG for context - * destruction rpc */ + * destruction rpc + */ if (unlikely(req->rq_ctx_init)) flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL); else if (unlikely(req->rq_ctx_fini)) @@ -938,7 +940,7 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req) LASSERT(ctx->cc_sec); LASSERT(req->rq_repbuf); LASSERT(req->rq_repdata); - LASSERT(req->rq_repmsg == NULL); + LASSERT(!req->rq_repmsg); req->rq_rep_swab_mask = 0; @@ -1000,8 +1002,8 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req) int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req) { LASSERT(req->rq_repbuf); - LASSERT(req->rq_repdata == NULL); - LASSERT(req->rq_repmsg == NULL); + LASSERT(!req->rq_repdata); + LASSERT(!req->rq_repmsg); LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len); if (req->rq_reply_off == 0 && @@ -1046,13 +1048,13 @@ 
int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req, int rc; early_req = ptlrpc_request_cache_alloc(GFP_NOFS); - if (early_req == NULL) + if (!early_req) return -ENOMEM; early_size = req->rq_nob_received; early_bufsz = size_roundup_power2(early_size); early_buf = libcfs_kvzalloc(early_bufsz, GFP_NOFS); - if (early_buf == NULL) { + if (!early_buf) { rc = -ENOMEM; goto err_req; } @@ -1067,8 +1069,8 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req, } LASSERT(req->rq_repbuf); - LASSERT(req->rq_repdata == NULL); - LASSERT(req->rq_repmsg == NULL); + LASSERT(!req->rq_repdata); + LASSERT(!req->rq_repmsg); if (req->rq_reply_off != 0) { CERROR("early reply with offset %u\n", req->rq_reply_off); @@ -1354,12 +1356,12 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp, might_sleep(); - if (imp == NULL) + if (!imp) return 0; conn = imp->imp_connection; - if (svc_ctx == NULL) { + if (!svc_ctx) { struct client_obd *cliobd = &imp->imp_obd->u.cli; /* * normal import, determine flavor from rule set, except @@ -1447,11 +1449,11 @@ static void import_flush_ctx_common(struct obd_import *imp, { struct ptlrpc_sec *sec; - if (imp == NULL) + if (!imp) return; sec = sptlrpc_import_sec_ref(imp); - if (sec == NULL) + if (!sec) return; sec_cop_flush_ctx_cache(sec, uid, grace, force); @@ -1484,7 +1486,7 @@ int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize) LASSERT(ctx); LASSERT(ctx->cc_sec); LASSERT(ctx->cc_sec->ps_policy); - LASSERT(req->rq_reqmsg == NULL); + LASSERT(!req->rq_reqmsg); LASSERT_ATOMIC_POS(&ctx->cc_refcount); policy = ctx->cc_sec->ps_policy; @@ -1515,7 +1517,7 @@ void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req) LASSERT(ctx->cc_sec->ps_policy); LASSERT_ATOMIC_POS(&ctx->cc_refcount); - if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL) + if (!req->rq_reqbuf && !req->rq_clrbuf) return; policy = ctx->cc_sec->ps_policy; @@ -1632,7 +1634,7 @@ void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req) 
LASSERT(ctx->cc_sec->ps_policy); LASSERT_ATOMIC_POS(&ctx->cc_refcount); - if (req->rq_repbuf == NULL) + if (!req->rq_repbuf) return; LASSERT(req->rq_repbuf_len); @@ -1684,12 +1686,13 @@ int sptlrpc_target_export_check(struct obd_export *exp, { struct sptlrpc_flavor flavor; - if (exp == NULL) + if (!exp) return 0; /* client side export has no imp_reverse, skip - * FIXME maybe we should check flavor this as well??? */ - if (exp->exp_imp_reverse == NULL) + * FIXME maybe we should check flavor this as well??? + */ + if (!exp->exp_imp_reverse) return 0; /* don't care about ctx fini rpc */ @@ -1702,11 +1705,13 @@ int sptlrpc_target_export_check(struct obd_export *exp, * the first req with the new flavor, then treat it as current flavor, * adapt reverse sec according to it. * note the first rpc with new flavor might not be with root ctx, in - * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */ + * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. + */ if (unlikely(exp->exp_flvr_changed) && flavor_allowed(&exp->exp_flvr_old[1], req)) { /* make the new flavor as "current", and old ones as - * about-to-expire */ + * about-to-expire + */ CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc); flavor = exp->exp_flvr_old[1]; @@ -1742,10 +1747,12 @@ int sptlrpc_target_export_check(struct obd_export *exp, } /* if it equals to the current flavor, we accept it, but need to - * dealing with reverse sec/ctx */ + * dealing with reverse sec/ctx + */ if (likely(flavor_allowed(&exp->exp_flvr, req))) { /* most cases should return here, we only interested in - * gss root ctx init */ + * gss root ctx init + */ if (!req->rq_auth_gss || !req->rq_ctx_init || (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt && !req->rq_auth_usr_ost)) { @@ -1755,7 +1762,8 @@ int sptlrpc_target_export_check(struct obd_export *exp, /* if flavor just changed, we should not proceed, just leave * it and current flavor will be discovered and 
replaced - * shortly, and let _this_ rpc pass through */ + * shortly, and let _this_ rpc pass through + */ if (exp->exp_flvr_changed) { LASSERT(exp->exp_flvr_adapt); spin_unlock(&exp->exp_lock); @@ -1809,7 +1817,8 @@ int sptlrpc_target_export_check(struct obd_export *exp, } /* now it doesn't match the current flavor, the only chance we can - * accept it is match the old flavors which is not expired. */ + * accept it is match the old flavors which is not expired. + */ if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) { if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) { if (flavor_allowed(&exp->exp_flvr_old[1], req)) { @@ -1915,9 +1924,9 @@ int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req) int rc; LASSERT(msg); - LASSERT(req->rq_reqmsg == NULL); - LASSERT(req->rq_repmsg == NULL); - LASSERT(req->rq_svc_ctx == NULL); + LASSERT(!req->rq_reqmsg); + LASSERT(!req->rq_repmsg); + LASSERT(!req->rq_svc_ctx); req->rq_req_swab_mask = 0; @@ -1986,15 +1995,15 @@ int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen) if (svcpt->scp_service->srv_max_reply_size < msglen + sizeof(struct ptlrpc_reply_state)) { /* Just return failure if the size is too big */ - CERROR("size of message is too big (%zd), %d allowed", - msglen + sizeof(struct ptlrpc_reply_state), - svcpt->scp_service->srv_max_reply_size); + CERROR("size of message is too big (%zd), %d allowed\n", + msglen + sizeof(struct ptlrpc_reply_state), + svcpt->scp_service->srv_max_reply_size); return -ENOMEM; } /* failed alloc, try emergency pool */ rs = lustre_get_emerg_rs(svcpt); - if (rs == NULL) + if (!rs) return -ENOMEM; req->rq_reply_state = rs; @@ -2059,7 +2068,7 @@ void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req) { struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx; - if (ctx != NULL) + if (ctx) atomic_inc(&ctx->sc_refcount); } @@ -2067,7 +2076,7 @@ void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req) { struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx; - if (ctx == NULL) + if (!ctx) 
return; LASSERT_ATOMIC_POS(&ctx->sc_refcount); @@ -2156,7 +2165,7 @@ int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req, * in case of privacy mode, nob_transferred needs to be adjusted. */ if (desc->bd_nob != desc->bd_nob_transferred) { - CERROR("nob %d doesn't match transferred nob %d", + CERROR("nob %d doesn't match transferred nob %d\n", desc->bd_nob, desc->bd_nob_transferred); return -EPROTO; } diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c index 6152c1b76..d3872b8c9 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c @@ -58,7 +58,7 @@ * bulk encryption page pools * ****************************************/ -#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *)) +#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *)) #define PAGES_PER_POOL (POINTERS_PER_PAGE) #define IDLE_IDX_MAX (100) @@ -120,7 +120,7 @@ static struct ptlrpc_enc_page_pool { } page_pools; /* - * /proc/fs/lustre/sptlrpc/encrypt_page_pools + * /sys/kernel/debug/lustre/sptlrpc/encrypt_page_pools */ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v) { @@ -195,7 +195,7 @@ static void enc_pools_release_free_pages(long npages) while (npages--) { LASSERT(page_pools.epp_pools[p_idx]); - LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL); + LASSERT(page_pools.epp_pools[p_idx][g_idx]); __free_page(page_pools.epp_pools[p_idx][g_idx]); page_pools.epp_pools[p_idx][g_idx] = NULL; @@ -304,7 +304,6 @@ static unsigned long enc_pools_cleanup(struct page ***pools, int npools) static inline void enc_pools_wakeup(void) { assert_spin_locked(&page_pools.epp_lock); - LASSERT(page_pools.epp_waitqlen >= 0); if (unlikely(page_pools.epp_waitqlen)) { LASSERT(waitqueue_active(&page_pools.epp_waitq)); @@ -317,7 +316,7 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc) int p_idx, g_idx; int i; - if (desc->bd_enc_iov == NULL) + if (!desc->bd_enc_iov) return; 
LASSERT(desc->bd_iov_count > 0); @@ -332,9 +331,9 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc) LASSERT(page_pools.epp_pools[p_idx]); for (i = 0; i < desc->bd_iov_count; i++) { - LASSERT(desc->bd_enc_iov[i].kiov_page != NULL); + LASSERT(desc->bd_enc_iov[i].kiov_page); LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]); - LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL); + LASSERT(!page_pools.epp_pools[p_idx][g_idx]); page_pools.epp_pools[p_idx][g_idx] = desc->bd_enc_iov[i].kiov_page; @@ -413,7 +412,7 @@ int sptlrpc_enc_pool_init(void) page_pools.epp_st_max_wait = 0; enc_pools_alloc(); - if (page_pools.epp_pools == NULL) + if (!page_pools.epp_pools) return -ENOMEM; register_shrinker(&pools_shrinker); @@ -476,7 +475,7 @@ int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed) int size = msg->lm_buflens[offset]; bsd = lustre_msg_buf(msg, offset, sizeof(*bsd)); - if (bsd == NULL) { + if (!bsd) { CERROR("Invalid bulk sec desc: size %d\n", size); return -EINVAL; } diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c index 4b0b81c11..a51b18bbf 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c @@ -78,7 +78,7 @@ int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr) memset(flvr, 0, sizeof(*flvr)); - if (str == NULL || str[0] == '\0') { + if (!str || str[0] == '\0') { flvr->sf_rpc = SPTLRPC_FLVR_INVALID; return 0; } @@ -103,7 +103,7 @@ int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr) * format: plain-hash:<hash_alg> */ alg = strchr(bulk, ':'); - if (alg == NULL) + if (!alg) goto err_out; *alg++ = '\0'; @@ -166,7 +166,7 @@ static int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule) sptlrpc_rule_init(rule); flavor = strchr(param, '='); - if (flavor == NULL) { + if (!flavor) { CERROR("invalid param, no '='\n"); return -EINVAL; } @@ -216,7 +216,7 @@ static int 
sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule) static void sptlrpc_rule_set_free(struct sptlrpc_rule_set *rset) { LASSERT(rset->srs_nslot || - (rset->srs_nrule == 0 && rset->srs_rules == NULL)); + (rset->srs_nrule == 0 && !rset->srs_rules)); if (rset->srs_nslot) { kfree(rset->srs_rules); @@ -241,7 +241,7 @@ static int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset) /* better use realloc() if available */ rules = kcalloc(nslot, sizeof(*rset->srs_rules), GFP_NOFS); - if (rules == NULL) + if (!rules) return -ENOMEM; if (rset->srs_nrule) { @@ -450,7 +450,7 @@ static void target2fsname(const char *tgt, char *fsname, int buflen) } /* if we didn't find the pattern, treat the whole string as fsname */ - if (ptr == NULL) + if (!ptr) len = strlen(tgt); else len = ptr - tgt; @@ -467,7 +467,7 @@ static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf) sptlrpc_rule_set_free(&conf->sc_rset); list_for_each_entry_safe(conf_tgt, conf_tgt_next, - &conf->sc_tgts, sct_list) { + &conf->sc_tgts, sct_list) { sptlrpc_rule_set_free(&conf_tgt->sct_rset); list_del(&conf_tgt->sct_list); kfree(conf_tgt); @@ -517,6 +517,7 @@ struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname, int create) { struct sptlrpc_conf *conf; + size_t len; list_for_each_entry(conf, &sptlrpc_confs, sc_list) { if (strcmp(conf->sc_fsname, fsname) == 0) @@ -530,7 +531,11 @@ struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname, if (!conf) return NULL; - strcpy(conf->sc_fsname, fsname); + len = strlcpy(conf->sc_fsname, fsname, sizeof(conf->sc_fsname)); + if (len >= sizeof(conf->sc_fsname)) { + kfree(conf); + return NULL; + } sptlrpc_rule_set_init(&conf->sc_rset); INIT_LIST_HEAD(&conf->sc_tgts); list_add(&conf->sc_list, &sptlrpc_confs); @@ -579,13 +584,13 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg, int rc; target = lustre_cfg_string(lcfg, 1); - if (target == NULL) { + if (!target) { CERROR("missing target name\n"); return -EINVAL; } param = lustre_cfg_string(lcfg, 2); - if 
(param == NULL) { + if (!param) { CERROR("missing parameter\n"); return -EINVAL; } @@ -603,12 +608,12 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg, if (rc) return -EINVAL; - if (conf == NULL) { + if (!conf) { target2fsname(target, fsname, sizeof(fsname)); mutex_lock(&sptlrpc_conf_lock); conf = sptlrpc_conf_get(fsname, 0); - if (conf == NULL) { + if (!conf) { CERROR("can't find conf\n"); rc = -ENOMEM; } else { @@ -638,7 +643,7 @@ static int logname2fsname(const char *logname, char *buf, int buflen) int len; ptr = strrchr(logname, '-'); - if (ptr == NULL || strcmp(ptr, "-sptlrpc")) { + if (!ptr || strcmp(ptr, "-sptlrpc")) { CERROR("%s is not a sptlrpc config log\n", logname); return -EINVAL; } @@ -772,7 +777,7 @@ void sptlrpc_conf_choose_flavor(enum lustre_sec_part from, mutex_lock(&sptlrpc_conf_lock); conf = sptlrpc_conf_get(name, 0); - if (conf == NULL) + if (!conf) goto out; /* convert uuid name (supposed end with _UUID) to target name */ diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c index 6e58d5f95..9082da06b 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c @@ -109,7 +109,7 @@ static void sec_process_ctx_list(void) while (!list_empty(&sec_gc_ctx_list)) { ctx = list_entry(sec_gc_ctx_list.next, - struct ptlrpc_cli_ctx, cc_gc_chain); + struct ptlrpc_cli_ctx, cc_gc_chain); list_del_init(&ctx->cc_gc_chain); spin_unlock(&sec_gc_ctx_list_lock); @@ -131,7 +131,7 @@ static void sec_do_gc(struct ptlrpc_sec *sec) if (unlikely(sec->ps_gc_next == 0)) { CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n", - sec, sec->ps_policy->sp_name); + sec, sec->ps_policy->sp_name); return; } @@ -166,11 +166,13 @@ again: * is not optimal. we perhaps want to use balanced binary tree * to trace each sec as order of expiry time. 
* another issue here is we wakeup as fixed interval instead of - * according to each sec's expiry time */ + * according to each sec's expiry time + */ mutex_lock(&sec_gc_mutex); list_for_each_entry(sec, &sec_gc_list, ps_gc_list) { /* if someone is waiting to be deleted, let it - * proceed as soon as possible. */ + * proceed as soon as possible. + */ if (atomic_read(&sec_gc_wait_del)) { CDEBUG(D_SEC, "deletion pending, start over\n"); mutex_unlock(&sec_gc_mutex); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c index bda9a77af..e610a8ddd 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c @@ -82,7 +82,7 @@ static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v) if (cli->cl_import) sec = sptlrpc_import_sec_ref(cli->cl_import); - if (sec == NULL) + if (!sec) goto out; sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)); @@ -121,7 +121,7 @@ static int sptlrpc_ctxs_lprocfs_seq_show(struct seq_file *seq, void *v) if (cli->cl_import) sec = sptlrpc_import_sec_ref(cli->cl_import); - if (sec == NULL) + if (!sec) goto out; if (sec->ps_policy->sp_cops->display) @@ -178,7 +178,7 @@ int sptlrpc_lproc_init(void) { int rc; - LASSERT(sptlrpc_debugfs_dir == NULL); + LASSERT(!sptlrpc_debugfs_dir); sptlrpc_debugfs_dir = ldebugfs_register("sptlrpc", debugfs_lustre_root, sptlrpc_lprocfs_vars, NULL); diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c index ebfa6092b..40e5349de 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c @@ -250,7 +250,7 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec, alloc_size = size_roundup_power2(newmsg_size); newbuf = libcfs_kvzalloc(alloc_size, GFP_NOFS); - if (newbuf == NULL) + if (!newbuf) return -ENOMEM; /* Must lock this, so that otherwise unprotected change of @@ -258,7 +258,8 @@ int 
null_enlarge_reqbuf(struct ptlrpc_sec *sec, * imp_replay_list traversing threads. See LU-3333 * This is a bandaid at best, we really need to deal with this * in request enlarging code before unpacking that's already - * there */ + * there + */ if (req->rq_import) spin_lock(&req->rq_import->imp_lock); memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen); @@ -319,7 +320,7 @@ int null_alloc_rs(struct ptlrpc_request *req, int msgsize) LASSERT(rs->rs_size >= rs_size); } else { rs = libcfs_kvzalloc(rs_size, GFP_NOFS); - if (rs == NULL) + if (!rs) return -ENOMEM; rs->rs_size = rs_size; diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c index 905a41451..6276bf59c 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c @@ -104,7 +104,7 @@ static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed) return -EPROTO; bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE); - if (bsd == NULL) { + if (!bsd) { CERROR("bulk sec desc has short size %d\n", lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF)); return -EPROTO; @@ -227,7 +227,7 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) swabbed = ptlrpc_rep_need_swab(req); phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr)); - if (phdr == NULL) { + if (!phdr) { CERROR("missing plain header\n"); return -EPROTO; } @@ -264,7 +264,8 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) } } else { /* whether we sent with bulk or not, we expect the same - * in reply, except for early reply */ + * in reply, except for early reply + */ if (!req->rq_early && !equi(req->rq_pack_bulk == 1, phdr->ph_flags & PLAIN_FL_BULK)) { @@ -419,7 +420,7 @@ void plain_destroy_sec(struct ptlrpc_sec *sec) LASSERT(sec->ps_import); LASSERT(atomic_read(&sec->ps_refcount) == 0); LASSERT(atomic_read(&sec->ps_nctx) == 0); - LASSERT(plsec->pls_ctx == NULL); + 
LASSERT(!plsec->pls_ctx); class_import_put(sec->ps_import); @@ -468,7 +469,7 @@ struct ptlrpc_sec *plain_create_sec(struct obd_import *imp, /* install ctx immediately if this is a reverse sec */ if (svc_ctx) { ctx = plain_sec_install_ctx(plsec); - if (ctx == NULL) { + if (!ctx) { plain_destroy_sec(sec); return NULL; } @@ -492,7 +493,7 @@ struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec, atomic_inc(&ctx->cc_refcount); read_unlock(&plsec->pls_lock); - if (unlikely(ctx == NULL)) + if (unlikely(!ctx)) ctx = plain_sec_install_ctx(plsec); return ctx; @@ -665,7 +666,7 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec, newbuf_size = size_roundup_power2(newbuf_size); newbuf = libcfs_kvzalloc(newbuf_size, GFP_NOFS); - if (newbuf == NULL) + if (!newbuf) return -ENOMEM; /* Must lock this, so that otherwise unprotected change of @@ -673,7 +674,8 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec, * imp_replay_list traversing threads. See LU-3333 * This is a bandaid at best, we really need to deal with this * in request enlarging code before unpacking that's already - * there */ + * there + */ if (req->rq_import) spin_lock(&req->rq_import->imp_lock); @@ -732,7 +734,7 @@ int plain_accept(struct ptlrpc_request *req) swabbed = ptlrpc_req_need_swab(req); phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr)); - if (phdr == NULL) { + if (!phdr) { CERROR("missing plain header\n"); return -EPROTO; } @@ -801,7 +803,7 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize) LASSERT(rs->rs_size >= rs_size); } else { rs = libcfs_kvzalloc(rs_size, GFP_NOFS); - if (rs == NULL) + if (!rs) return -ENOMEM; rs->rs_size = rs_size; diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c index 8598300a6..1bbd1d39c 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/service.c +++ b/drivers/staging/lustre/lustre/ptlrpc/service.c @@ -77,7 +77,7 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt) rqbd = 
kzalloc_node(sizeof(*rqbd), GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, svcpt->scp_cpt)); - if (rqbd == NULL) + if (!rqbd) return NULL; rqbd->rqbd_svcpt = svcpt; @@ -89,7 +89,7 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt) svcpt->scp_cpt, svc->srv_buf_size, GFP_KERNEL); - if (rqbd->rqbd_buffer == NULL) { + if (!rqbd->rqbd_buffer) { kfree(rqbd); return NULL; } @@ -144,13 +144,14 @@ ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post) for (i = 0; i < svc->srv_nbuf_per_group; i++) { /* NB: another thread might have recycled enough rqbds, we - * need to make sure it wouldn't over-allocate, see LU-1212. */ + * need to make sure it wouldn't over-allocate, see LU-1212. + */ if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group) break; rqbd = ptlrpc_alloc_rqbd(svcpt); - if (rqbd == NULL) { + if (!rqbd) { CERROR("%s: Can't allocate request buffer\n", svc->srv_name); rc = -ENOMEM; @@ -298,8 +299,8 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt) } rqbd = list_entry(svcpt->scp_rqbd_idle.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); + struct ptlrpc_request_buffer_desc, + rqbd_list); list_del(&rqbd->rqbd_list); /* assume we will post successfully */ @@ -322,7 +323,8 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt) list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); /* Don't complain if no request buffers are posted right now; LNET - * won't drop requests because we set the portal lazy! */ + * won't drop requests because we set the portal lazy! 
+ */ spin_unlock(&svcpt->scp_lock); @@ -363,13 +365,15 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, init = max_t(int, init, tc->tc_nthrs_init); /* NB: please see comments in lustre_lnet.h for definition - * details of these members */ + * details of these members + */ LASSERT(tc->tc_nthrs_max != 0); if (tc->tc_nthrs_user != 0) { /* In case there is a reason to test a service with many * threads, we give a less strict check here, it can - * be up to 8 * nthrs_max */ + * be up to 8 * nthrs_max + */ total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user); nthrs = total / svc->srv_ncpts; init = max(init, nthrs); @@ -379,7 +383,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, total = tc->tc_nthrs_max; if (tc->tc_nthrs_base == 0) { /* don't care about base threads number per partition, - * this is most for non-affinity service */ + * this is most for non-affinity service + */ nthrs = total / svc->srv_ncpts; goto out; } @@ -390,7 +395,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, /* NB: Increase the base number if it's single partition * and total number of cores/HTs is larger or equal to 4. 
- * result will always < 2 * nthrs_base */ + * result will always < 2 * nthrs_base + */ weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY); for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */ (tc->tc_nthrs_base >> i) != 0; i++) @@ -490,7 +496,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc, array->paa_reqs_array = kzalloc_node(sizeof(struct list_head) * size, GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, cpt)); - if (array->paa_reqs_array == NULL) + if (!array->paa_reqs_array) return -ENOMEM; for (index = 0; index < size; index++) @@ -499,14 +505,15 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc, array->paa_reqs_count = kzalloc_node(sizeof(__u32) * size, GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, cpt)); - if (array->paa_reqs_count == NULL) + if (!array->paa_reqs_count) goto free_reqs_array; setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer, (unsigned long)svcpt); /* At SOW, service time should be quick; 10s seems generous. If client - * timeout is less than this, we'll be sending an early reply. */ + * timeout is less than this, we'll be sending an early reply. + */ at_init(&svcpt->scp_at_estimate, 10, 0); /* assign this before call ptlrpc_grow_req_bufs */ @@ -514,7 +521,8 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc, /* Now allocate the request buffers, but don't post them now */ rc = ptlrpc_grow_req_bufs(svcpt, 0); /* We shouldn't be under memory pressure at startup, so - * fail if we can't allocate all our buffers at this time. */ + * fail if we can't allocate all our buffers at this time. 
+ */ if (rc != 0) goto free_reqs_count; @@ -556,14 +564,14 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf, LASSERT(conf->psc_thr.tc_ctx_tags != 0); cptable = cconf->cc_cptable; - if (cptable == NULL) + if (!cptable) cptable = cfs_cpt_table; if (!conf->psc_thr.tc_cpu_affinity) { ncpts = 1; } else { ncpts = cfs_cpt_number(cptable); - if (cconf->cc_pattern != NULL) { + if (cconf->cc_pattern) { struct cfs_expr_list *el; rc = cfs_expr_list_parse(cconf->cc_pattern, @@ -632,11 +640,11 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf, if (!conf->psc_thr.tc_cpu_affinity) cpt = CFS_CPT_ANY; else - cpt = cpts != NULL ? cpts[i] : i; + cpt = cpts ? cpts[i] : i; svcpt = kzalloc_node(sizeof(*svcpt), GFP_NOFS, cfs_cpt_spread_node(cptable, cpt)); - if (svcpt == NULL) { + if (!svcpt) { rc = -ENOMEM; goto failed; } @@ -696,7 +704,8 @@ static void ptlrpc_server_free_request(struct ptlrpc_request *req) LASSERT(list_empty(&req->rq_timed_list)); /* DEBUG_REQ() assumes the reply state of a request with a valid - * ref will not be destroyed until that reference is dropped. */ + * ref will not be destroyed until that reference is dropped. + */ ptlrpc_req_drop_rs(req); sptlrpc_svc_ctx_decref(req); @@ -704,7 +713,8 @@ static void ptlrpc_server_free_request(struct ptlrpc_request *req) if (req != &req->rq_rqbd->rqbd_req) { /* NB request buffers use an embedded * req if the incoming req unlinked the - * MD; this isn't one of them! */ + * MD; this isn't one of them! 
+ */ ptlrpc_request_cache_free(req); } } @@ -728,7 +738,8 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) if (req->rq_at_linked) { spin_lock(&svcpt->scp_at_lock); /* recheck with lock, in case it's unlinked by - * ptlrpc_at_check_timed() */ + * ptlrpc_at_check_timed() + */ if (likely(req->rq_at_linked)) ptlrpc_at_remove_timed(req); spin_unlock(&svcpt->scp_at_lock); @@ -755,20 +766,22 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) svcpt->scp_hist_nrqbds++; /* cull some history? - * I expect only about 1 or 2 rqbds need to be recycled here */ + * I expect only about 1 or 2 rqbds need to be recycled here + */ while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) { rqbd = list_entry(svcpt->scp_hist_rqbds.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); + struct ptlrpc_request_buffer_desc, + rqbd_list); list_del(&rqbd->rqbd_list); svcpt->scp_hist_nrqbds--; /* remove rqbd's reqs from svc's req history while - * I've got the service lock */ + * I've got the service lock + */ list_for_each(tmp, &rqbd->rqbd_reqs) { req = list_entry(tmp, struct ptlrpc_request, - rq_list); + rq_list); /* Track the highest culled req seq */ if (req->rq_history_seq > svcpt->scp_hist_seq_culled) { @@ -782,8 +795,8 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) { req = list_entry(rqbd->rqbd_reqs.next, - struct ptlrpc_request, - rq_list); + struct ptlrpc_request, + rq_list); list_del(&req->rq_list); ptlrpc_server_free_request(req); } @@ -795,8 +808,7 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req) */ LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0); - list_add_tail(&rqbd->rqbd_list, - &svcpt->scp_rqbd_idle); + list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); } spin_unlock(&svcpt->scp_lock); @@ -846,7 +858,7 @@ static void ptlrpc_server_finish_active_request( ptlrpc_nrs_req_finalize(req); - if (req->rq_export != NULL) + if 
(req->rq_export) class_export_rpc_dec(req->rq_export); ptlrpc_server_finish_request(svcpt, req); @@ -869,13 +881,13 @@ static int ptlrpc_check_req(struct ptlrpc_request *req) req->rq_export->exp_conn_cnt); return -EEXIST; } - if (unlikely(obd == NULL || obd->obd_fail)) { + if (unlikely(!obd || obd->obd_fail)) { /* * Failing over, don't handle any more reqs, send * error response instead. */ CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n", - req, (obd != NULL) ? obd->obd_name : "unknown"); + req, obd ? obd->obd_name : "unknown"); rc = -ENODEV; } else if (lustre_msg_get_flags(req->rq_reqmsg) & (MSG_REPLAY | MSG_REQ_REPLAY_DONE)) { @@ -942,13 +954,13 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) div_u64_rem(req->rq_deadline, array->paa_size, &index); if (array->paa_reqs_count[index] > 0) { /* latest rpcs will have the latest deadlines in the list, - * so search backward. */ - list_for_each_entry_reverse(rq, - &array->paa_reqs_array[index], - rq_timed_list) { + * so search backward. + */ + list_for_each_entry_reverse(rq, &array->paa_reqs_array[index], + rq_timed_list) { if (req->rq_deadline >= rq->rq_deadline) { list_add(&req->rq_timed_list, - &rq->rq_timed_list); + &rq->rq_timed_list); break; } } @@ -956,8 +968,7 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) /* Add the request at the head of the list */ if (list_empty(&req->rq_timed_list)) - list_add(&req->rq_timed_list, - &array->paa_reqs_array[index]); + list_add(&req->rq_timed_list, &array->paa_reqs_array[index]); spin_lock(&req->rq_lock); req->rq_at_linked = 1; @@ -1003,7 +1014,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) int rc; /* deadline is when the client expects us to reply, margin is the - difference between clients' and servers' expectations */ + * difference between clients' and servers' expectations + */ DEBUG_REQ(D_ADAPTTO, req, "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d", AT_OFF ? 
"AT off - not " : "", @@ -1027,12 +1039,14 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) } /* Fake our processing time into the future to ask the clients - * for some extra amount of time */ + * for some extra amount of time + */ at_measured(&svcpt->scp_at_estimate, at_extra + ktime_get_real_seconds() - req->rq_arrival_time.tv_sec); /* Check to see if we've actually increased the deadline - - * we may be past adaptive_max */ + * we may be past adaptive_max + */ if (req->rq_deadline >= req->rq_arrival_time.tv_sec + at_get(&svcpt->scp_at_estimate)) { DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n", @@ -1044,7 +1058,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) newdl = ktime_get_real_seconds() + at_get(&svcpt->scp_at_estimate); reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS); - if (reqcopy == NULL) + if (!reqcopy) return -ENOMEM; reqmsg = libcfs_kvzalloc(req->rq_reqlen, GFP_NOFS); if (!reqmsg) { @@ -1074,7 +1088,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) /* Connection ref */ reqcopy->rq_export = class_conn2export( lustre_msg_get_handle(reqcopy->rq_reqmsg)); - if (reqcopy->rq_export == NULL) { + if (!reqcopy->rq_export) { rc = -ENODEV; goto out; } @@ -1102,7 +1116,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) } /* Free the (early) reply state from lustre_pack_reply. 
- (ptlrpc_send_reply takes it's own rs ref, so this is safe here) */ + * (ptlrpc_send_reply takes it's own rs ref, so this is safe here) + */ ptlrpc_req_drop_rs(reqcopy); out_put: @@ -1117,8 +1132,9 @@ out_free: } /* Send early replies to everybody expiring within at_early_margin - asking for at_extra time */ -static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) + * asking for at_extra time + */ +static void ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) { struct ptlrpc_at_array *array = &svcpt->scp_at_array; struct ptlrpc_request *rq, *n; @@ -1132,14 +1148,14 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) spin_lock(&svcpt->scp_at_lock); if (svcpt->scp_at_check == 0) { spin_unlock(&svcpt->scp_at_lock); - return 0; + return; } delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime); svcpt->scp_at_check = 0; if (array->paa_count == 0) { spin_unlock(&svcpt->scp_at_lock); - return 0; + return; } /* The timer went off, but maybe the nearest rpc already completed. */ @@ -1148,20 +1164,20 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) /* We've still got plenty of time. Reset the timer. */ ptlrpc_at_set_timer(svcpt); spin_unlock(&svcpt->scp_at_lock); - return 0; + return; } /* We're close to a timeout, and we don't know how much longer the - server will take. Send early replies to everyone expiring soon. */ + * server will take. Send early replies to everyone expiring soon. 
+ */ INIT_LIST_HEAD(&work_list); deadline = -1; div_u64_rem(array->paa_deadline, array->paa_size, &index); count = array->paa_count; while (count > 0) { count -= array->paa_reqs_count[index]; - list_for_each_entry_safe(rq, n, - &array->paa_reqs_array[index], - rq_timed_list) { + list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index], + rq_timed_list) { if (rq->rq_deadline > now + at_early_margin) { /* update the earliest deadline */ if (deadline == -1 || @@ -1194,7 +1210,8 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) first, at_extra, counter); if (first < 0) { /* We're already past request deadlines before we even get a - chance to send early replies */ + * chance to send early replies + */ LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n", svcpt->scp_service->srv_name); CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n", @@ -1204,10 +1221,11 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) } /* we took additional refcount so entries can't be deleted from list, no - * locking is needed */ + * locking is needed + */ while (!list_empty(&work_list)) { rq = list_entry(work_list.next, struct ptlrpc_request, - rq_timed_list); + rq_timed_list); list_del_init(&rq->rq_timed_list); if (ptlrpc_at_send_early_reply(rq) == 0) @@ -1215,8 +1233,6 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt) ptlrpc_server_drop_request(rq); } - - return 1; /* return "did_something" for liblustre */ } /** @@ -1237,7 +1253,8 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt, if (req->rq_export && req->rq_ops) { /* Perform request specific check. We should do this check * before the request is added into exp_hp_rpcs list otherwise - * it may hit swab race at LU-1044. */ + * it may hit swab race at LU-1044. 
+ */ if (req->rq_ops->hpreq_check) { rc = req->rq_ops->hpreq_check(req); /** @@ -1257,8 +1274,7 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt, } spin_lock_bh(&req->rq_export->exp_rpc_lock); - list_add(&req->rq_exp_list, - &req->rq_export->exp_hp_rpcs); + list_add(&req->rq_exp_list, &req->rq_export->exp_hp_rpcs); spin_unlock_bh(&req->rq_export->exp_rpc_lock); } @@ -1272,7 +1288,8 @@ static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req) { if (req->rq_export && req->rq_ops) { /* refresh lock timeout again so that client has more - * room to send lock cancel RPC. */ + * room to send lock cancel RPC. + */ if (req->rq_ops->hpreq_fini) req->rq_ops->hpreq_fini(req); @@ -1316,7 +1333,7 @@ static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt, CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) { /* leave just 1 thread for normal RPCs */ running = PTLRPC_NTHRS_INIT; - if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL) + if (svcpt->scp_service->srv_ops.so_hpreq_handler) running += 1; } @@ -1355,7 +1372,7 @@ static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt, CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) { /* leave just 1 thread for normal RPCs */ running = PTLRPC_NTHRS_INIT; - if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL) + if (svcpt->scp_service->srv_ops.so_hpreq_handler) running += 1; } @@ -1405,7 +1422,7 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force) if (ptlrpc_server_high_pending(svcpt, force)) { req = ptlrpc_nrs_req_get_nolock(svcpt, true, force); - if (req != NULL) { + if (req) { svcpt->scp_hreq_count++; goto got_request; } @@ -1413,7 +1430,7 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force) if (ptlrpc_server_normal_pending(svcpt, force)) { req = ptlrpc_nrs_req_get_nolock(svcpt, false, force); - if (req != NULL) { + if (req) { svcpt->scp_hreq_count = 0; goto got_request; } @@ -1457,11 +1474,12 @@ 
ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt, } req = list_entry(svcpt->scp_req_incoming.next, - struct ptlrpc_request, rq_list); + struct ptlrpc_request, rq_list); list_del_init(&req->rq_list); svcpt->scp_nreqs_incoming--; /* Consider this still a "queued" request as far as stats are - * concerned */ + * concerned + */ spin_unlock(&svcpt->scp_lock); /* go through security check/transform */ @@ -1598,7 +1616,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, int fail_opc = 0; request = ptlrpc_server_request_get(svcpt, false); - if (request == NULL) + if (!request) return 0; if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT)) @@ -1620,7 +1638,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, timediff = timespec64_sub(work_start, request->rq_arrival_time); timediff_usecs = timediff.tv_sec * USEC_PER_SEC + timediff.tv_nsec / NSEC_PER_USEC; - if (likely(svc->srv_stats != NULL)) { + if (likely(svc->srv_stats)) { lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR, timediff_usecs); lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR, @@ -1652,7 +1670,8 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, } /* Discard requests queued for longer than the deadline. - The deadline is increased if we send an early reply. */ + * The deadline is increased if we send an early reply. + */ if (ktime_get_real_seconds() > request->rq_deadline) { DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline " CFS_DURATION_T ":" CFS_DURATION_T "s ago\n", libcfs_id2str(request->rq_peer), @@ -1718,7 +1737,7 @@ put_conn: request->rq_status, (request->rq_repmsg ? 
lustre_msg_get_status(request->rq_repmsg) : -999)); - if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) { + if (likely(svc->srv_stats && request->rq_reqmsg)) { __u32 op = lustre_msg_get_opc(request->rq_reqmsg); int opc = opcode_offset(op); @@ -1804,7 +1823,8 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs) if (nlocks == 0 && !been_handled) { /* If we see this, we should already have seen the warning - * in mds_steal_ack_locks() */ + * in mds_steal_ack_locks() + */ CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld o%d NID %s\n", rs, rs->rs_xid, rs->rs_transno, rs->rs_opc, @@ -1858,7 +1878,8 @@ ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt) /* CAVEAT EMPTOR: We might be allocating buffers here because we've * allowed the request history to grow out of control. We could put a * sanity check on that here and cull some history if we need the - * space. */ + * space. + */ if (avail <= low_water) ptlrpc_grow_req_bufs(svcpt, 1); @@ -1992,7 +2013,8 @@ static int ptlrpc_main(void *arg) /* NB: we will call cfs_cpt_bind() for all threads, because we * might want to run lustre server only on a subset of system CPUs, - * in that case ->scp_cpt is CFS_CPT_ANY */ + * in that case ->scp_cpt is CFS_CPT_ANY + */ rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt); if (rc != 0) { CWARN("%s: failed to bind %s on CPT %d\n", @@ -2008,7 +2030,7 @@ static int ptlrpc_main(void *arg) set_current_groups(ginfo); put_group_info(ginfo); - if (svc->srv_ops.so_thr_init != NULL) { + if (svc->srv_ops.so_thr_init) { rc = svc->srv_ops.so_thr_init(thread); if (rc) goto out; @@ -2035,7 +2057,7 @@ static int ptlrpc_main(void *arg) continue; CERROR("Failed to post rqbd for %s on CPT %d: %d\n", - svc->srv_name, svcpt->scp_cpt, rc); + svc->srv_name, svcpt->scp_cpt, rc); goto out_srv_fini; } @@ -2057,7 +2079,8 @@ static int ptlrpc_main(void *arg) /* SVC_STOPPING may already be set here if someone else is trying * to stop the service while this new thread has been dynamically 
* forked. We still set SVC_RUNNING to let our creator know that - * we are now running, however we will exit as soon as possible */ + * we are now running, however we will exit as soon as possible + */ thread_add_flags(thread, SVC_RUNNING); svcpt->scp_nthrs_running++; spin_unlock(&svcpt->scp_lock); @@ -2116,7 +2139,8 @@ static int ptlrpc_main(void *arg) ptlrpc_server_post_idle_rqbds(svcpt) < 0) { /* I just failed to repost request buffers. * Wait for a timeout (unless something else - * happens) before I try again */ + * happens) before I try again + */ svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10; CDEBUG(D_RPCTRACE, "Posted buffers: %d\n", svcpt->scp_nrqbds_posted); @@ -2132,10 +2156,10 @@ out_srv_fini: /* * deconstruct service specific state created by ptlrpc_start_thread() */ - if (svc->srv_ops.so_thr_done != NULL) + if (svc->srv_ops.so_thr_done) svc->srv_ops.so_thr_done(thread); - if (env != NULL) { + if (env) { lu_context_fini(&env->le_ctx); kfree(env); } @@ -2183,7 +2207,7 @@ static int ptlrpc_hr_main(void *arg) { struct ptlrpc_hr_thread *hrt = arg; struct ptlrpc_hr_partition *hrp = hrt->hrt_partition; - LIST_HEAD (replies); + LIST_HEAD(replies); char threadname[20]; int rc; @@ -2206,9 +2230,8 @@ static int ptlrpc_hr_main(void *arg) while (!list_empty(&replies)) { struct ptlrpc_reply_state *rs; - rs = list_entry(replies.prev, - struct ptlrpc_reply_state, - rs_list); + rs = list_entry(replies.prev, struct ptlrpc_reply_state, + rs_list); list_del_init(&rs->rs_list); ptlrpc_handle_rs(rs); } @@ -2229,18 +2252,18 @@ static void ptlrpc_stop_hr_threads(void) ptlrpc_hr.hr_stopping = 1; cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - if (hrp->hrp_thrs == NULL) + if (!hrp->hrp_thrs) continue; /* uninitialized */ for (j = 0; j < hrp->hrp_nthrs; j++) wake_up_all(&hrp->hrp_thrs[j].hrt_waitq); } cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - if (hrp->hrp_thrs == NULL) + if (!hrp->hrp_thrs) continue; /* uninitialized */ 
wait_event(ptlrpc_hr.hr_waitq, - atomic_read(&hrp->hrp_nstopped) == - atomic_read(&hrp->hrp_nstarted)); + atomic_read(&hrp->hrp_nstopped) == + atomic_read(&hrp->hrp_nstarted)); } } @@ -2255,24 +2278,26 @@ static int ptlrpc_start_hr_threads(void) for (j = 0; j < hrp->hrp_nthrs; j++) { struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j]; - - rc = PTR_ERR(kthread_run(ptlrpc_hr_main, - &hrp->hrp_thrs[j], - "ptlrpc_hr%02d_%03d", - hrp->hrp_cpt, - hrt->hrt_id)); - if (IS_ERR_VALUE(rc)) + struct task_struct *task; + + task = kthread_run(ptlrpc_hr_main, + &hrp->hrp_thrs[j], + "ptlrpc_hr%02d_%03d", + hrp->hrp_cpt, hrt->hrt_id); + if (IS_ERR(task)) { + rc = PTR_ERR(task); break; + } } wait_event(ptlrpc_hr.hr_waitq, - atomic_read(&hrp->hrp_nstarted) == j); - if (!IS_ERR_VALUE(rc)) - continue; + atomic_read(&hrp->hrp_nstarted) == j); - CERROR("Reply handling thread %d:%d Failed on starting: rc = %d\n", - i, j, rc); - ptlrpc_stop_hr_threads(); - return rc; + if (rc < 0) { + CERROR("cannot start reply handler thread %d:%d: rc = %d\n", + i, j, rc); + ptlrpc_stop_hr_threads(); + return rc; + } } return 0; } @@ -2281,7 +2306,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) { struct l_wait_info lwi = { 0 }; struct ptlrpc_thread *thread; - LIST_HEAD (zombie); + LIST_HEAD(zombie); CDEBUG(D_INFO, "Stopping threads for service %s\n", svcpt->scp_service->srv_name); @@ -2298,7 +2323,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) while (!list_empty(&svcpt->scp_threads)) { thread = list_entry(svcpt->scp_threads.next, - struct ptlrpc_thread, t_link); + struct ptlrpc_thread, t_link); if (thread_is_stopped(thread)) { list_del(&thread->t_link); list_add(&thread->t_link, &zombie); @@ -2333,7 +2358,7 @@ static void ptlrpc_stop_all_threads(struct ptlrpc_service *svc) int i; ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service != NULL) + if (svcpt->scp_service) ptlrpc_svcpt_stop_threads(svcpt); } } @@ -2374,10 +2399,9 @@ int 
ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) struct l_wait_info lwi = { 0 }; struct ptlrpc_thread *thread; struct ptlrpc_service *svc; + struct task_struct *task; int rc; - LASSERT(svcpt != NULL); - svc = svcpt->scp_service; CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n", @@ -2396,7 +2420,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) thread = kzalloc_node(sizeof(*thread), GFP_NOFS, cfs_cpt_spread_node(svc->srv_cptable, svcpt->scp_cpt)); - if (thread == NULL) + if (!thread) return -ENOMEM; init_waitqueue_head(&thread->t_ctl_waitq); @@ -2409,7 +2433,8 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) if (svcpt->scp_nthrs_starting != 0) { /* serialize starting because some modules (obdfilter) - * might require unique and contiguous t_id */ + * might require unique and contiguous t_id + */ LASSERT(svcpt->scp_nthrs_starting == 1); spin_unlock(&svcpt->scp_lock); kfree(thread); @@ -2442,9 +2467,10 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) } CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name); - rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name)); - if (IS_ERR_VALUE(rc)) { - CERROR("cannot start thread '%s': rc %d\n", + task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("cannot start thread '%s': rc = %d\n", thread->t_name, rc); spin_lock(&svcpt->scp_lock); --svcpt->scp_nthrs_starting; @@ -2488,7 +2514,7 @@ int ptlrpc_hr_init(void) ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table, sizeof(*hrp)); - if (ptlrpc_hr.hr_partitions == NULL) + if (!ptlrpc_hr.hr_partitions) return -ENOMEM; init_waitqueue_head(&ptlrpc_hr.hr_waitq); @@ -2509,7 +2535,7 @@ int ptlrpc_hr_init(void) kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS, cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table, i)); - if (hrp->hrp_thrs == NULL) { + if (!hrp->hrp_thrs) { rc = -ENOMEM; goto out; } @@ -2537,7 
+2563,7 @@ void ptlrpc_hr_fini(void) struct ptlrpc_hr_partition *hrp; int i; - if (ptlrpc_hr.hr_partitions == NULL) + if (!ptlrpc_hr.hr_partitions) return; ptlrpc_stop_hr_threads(); @@ -2577,7 +2603,7 @@ ptlrpc_service_del_atimer(struct ptlrpc_service *svc) /* early disarm AT timer... */ ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service != NULL) + if (svcpt->scp_service) del_timer(&svcpt->scp_at_timer); } } @@ -2592,18 +2618,20 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc) int i; /* All history will be culled when the next request buffer is - * freed in ptlrpc_service_purge_all() */ + * freed in ptlrpc_service_purge_all() + */ svc->srv_hist_nrqbds_cpt_max = 0; rc = LNetClearLazyPortal(svc->srv_req_portal); LASSERT(rc == 0); ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) break; /* Unlink all the request buffers. This forces a 'final' - * event with its 'unlink' flag set for each posted rqbd */ + * event with its 'unlink' flag set for each posted rqbd + */ list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted, rqbd_list) { rc = LNetMDUnlink(rqbd->rqbd_md_h); @@ -2612,17 +2640,19 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc) } ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) break; /* Wait for the network to release any buffers - * it's currently filling */ + * it's currently filling + */ spin_lock(&svcpt->scp_lock); while (svcpt->scp_nrqbds_posted != 0) { spin_unlock(&svcpt->scp_lock); /* Network access will complete in finite time but * the HUGE timeout lets us CWARN for visibility - * of sluggish NALs */ + * of sluggish LNDs + */ lwi = LWI_TIMEOUT_INTERVAL( cfs_time_seconds(LONG_UNLINK), cfs_time_seconds(1), NULL, NULL); @@ -2648,13 +2678,13 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) int i; ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) 
break; spin_lock(&svcpt->scp_rep_lock); while (!list_empty(&svcpt->scp_rep_active)) { rs = list_entry(svcpt->scp_rep_active.next, - struct ptlrpc_reply_state, rs_list); + struct ptlrpc_reply_state, rs_list); spin_lock(&rs->rs_lock); ptlrpc_schedule_difficult_reply(rs); spin_unlock(&rs->rs_lock); @@ -2663,10 +2693,11 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) /* purge the request queue. NB No new replies (rqbds * all unlinked) and no service threads, so I'm the only - * thread noodling the request queue now */ + * thread noodling the request queue now + */ while (!list_empty(&svcpt->scp_req_incoming)) { req = list_entry(svcpt->scp_req_incoming.next, - struct ptlrpc_request, rq_list); + struct ptlrpc_request, rq_list); list_del(&req->rq_list); svcpt->scp_nreqs_incoming--; @@ -2682,24 +2713,26 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) LASSERT(svcpt->scp_nreqs_incoming == 0); LASSERT(svcpt->scp_nreqs_active == 0); /* history should have been culled by - * ptlrpc_server_finish_request */ + * ptlrpc_server_finish_request + */ LASSERT(svcpt->scp_hist_nrqbds == 0); /* Now free all the request buffers since nothing - * references them any more... */ + * references them any more... 
+ */ while (!list_empty(&svcpt->scp_rqbd_idle)) { rqbd = list_entry(svcpt->scp_rqbd_idle.next, - struct ptlrpc_request_buffer_desc, - rqbd_list); + struct ptlrpc_request_buffer_desc, + rqbd_list); ptlrpc_free_rqbd(rqbd); } ptlrpc_wait_replies(svcpt); while (!list_empty(&svcpt->scp_rep_idle)) { rs = list_entry(svcpt->scp_rep_idle.next, - struct ptlrpc_reply_state, - rs_list); + struct ptlrpc_reply_state, + rs_list); list_del(&rs->rs_list); kvfree(rs); } @@ -2714,7 +2747,7 @@ ptlrpc_service_free(struct ptlrpc_service *svc) int i; ptlrpc_service_for_each_part(svcpt, i, svc) { - if (svcpt->scp_service == NULL) + if (!svcpt->scp_service) break; /* In case somebody rearmed this in the meantime */ @@ -2730,7 +2763,7 @@ ptlrpc_service_free(struct ptlrpc_service *svc) ptlrpc_service_for_each_part(svcpt, i, svc) kfree(svcpt); - if (svc->srv_cpts != NULL) + if (svc->srv_cpts) cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts); kfree(svc); diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c index 61d9ca93c..3ffd2d91f 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c +++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c @@ -333,17 +333,9 @@ void lustre_assert_wire_constants(void) CLASSERT(LDLM_MAX_TYPE == 14); CLASSERT(LUSTRE_RES_ID_SEQ_OFF == 0); CLASSERT(LUSTRE_RES_ID_VER_OID_OFF == 1); - LASSERTF(UPDATE_OBJ == 1000, "found %lld\n", - (long long)UPDATE_OBJ); - LASSERTF(UPDATE_LAST_OPC == 1001, "found %lld\n", - (long long)UPDATE_LAST_OPC); CLASSERT(LUSTRE_RES_ID_QUOTA_SEQ_OFF == 2); CLASSERT(LUSTRE_RES_ID_QUOTA_VER_OID_OFF == 3); CLASSERT(LUSTRE_RES_ID_HSH_OFF == 3); - CLASSERT(LQUOTA_TYPE_USR == 0); - CLASSERT(LQUOTA_TYPE_GRP == 1); - CLASSERT(LQUOTA_RES_MD == 1); - CLASSERT(LQUOTA_RES_DT == 2); LASSERTF(OBD_PING == 400, "found %lld\n", (long long)OBD_PING); LASSERTF(OBD_LOG_CANCEL == 401, "found %lld\n", @@ -437,30 +429,6 @@ void lustre_assert_wire_constants(void) (unsigned)LMAC_NOT_IN_OI); 
LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n", (unsigned)LMAC_FID_ON_OST); - LASSERTF(OBJ_CREATE == 1, "found %lld\n", - (long long)OBJ_CREATE); - LASSERTF(OBJ_DESTROY == 2, "found %lld\n", - (long long)OBJ_DESTROY); - LASSERTF(OBJ_REF_ADD == 3, "found %lld\n", - (long long)OBJ_REF_ADD); - LASSERTF(OBJ_REF_DEL == 4, "found %lld\n", - (long long)OBJ_REF_DEL); - LASSERTF(OBJ_ATTR_SET == 5, "found %lld\n", - (long long)OBJ_ATTR_SET); - LASSERTF(OBJ_ATTR_GET == 6, "found %lld\n", - (long long)OBJ_ATTR_GET); - LASSERTF(OBJ_XATTR_SET == 7, "found %lld\n", - (long long)OBJ_XATTR_SET); - LASSERTF(OBJ_XATTR_GET == 8, "found %lld\n", - (long long)OBJ_XATTR_GET); - LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n", - (long long)OBJ_INDEX_LOOKUP); - LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n", - (long long)OBJ_INDEX_LOOKUP); - LASSERTF(OBJ_INDEX_INSERT == 10, "found %lld\n", - (long long)OBJ_INDEX_INSERT); - LASSERTF(OBJ_INDEX_DELETE == 11, "found %lld\n", - (long long)OBJ_INDEX_DELETE); /* Checks for struct ost_id */ LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n", @@ -587,9 +555,6 @@ void lustre_assert_wire_constants(void) (long long)LDF_COLLIDE); LASSERTF(LU_PAGE_SIZE == 4096, "found %lld\n", (long long)LU_PAGE_SIZE); - /* Checks for union lu_page */ - LASSERTF((int)sizeof(union lu_page) == 4096, "found %lld\n", - (long long)(int)sizeof(union lu_page)); /* Checks for struct lustre_handle */ LASSERTF((int)sizeof(struct lustre_handle) == 8, "found %lld\n", @@ -1535,11 +1500,6 @@ void lustre_assert_wire_constants(void) LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n", (long long)(int)sizeof(union lquota_id)); - LASSERTF(QUOTABLOCK_BITS == 10, "found %lld\n", - (long long)QUOTABLOCK_BITS); - LASSERTF(QUOTABLOCK_SIZE == 1024, "found %lld\n", - (long long)QUOTABLOCK_SIZE); - /* Checks for struct obd_quotactl */ LASSERTF((int)sizeof(struct obd_quotactl) == 112, "found %lld\n", (long long)(int)sizeof(struct obd_quotactl)); @@ -1642,138 +1602,6 @@ 
void lustre_assert_wire_constants(void) LASSERTF(Q_FINVALIDATE == 0x800104, "found 0x%.8x\n", Q_FINVALIDATE); - /* Checks for struct lquota_acct_rec */ - LASSERTF((int)sizeof(struct lquota_acct_rec) == 16, "found %lld\n", - (long long)(int)sizeof(struct lquota_acct_rec)); - LASSERTF((int)offsetof(struct lquota_acct_rec, bspace) == 0, "found %lld\n", - (long long)(int)offsetof(struct lquota_acct_rec, bspace)); - LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->bspace) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_acct_rec *)0)->bspace)); - LASSERTF((int)offsetof(struct lquota_acct_rec, ispace) == 8, "found %lld\n", - (long long)(int)offsetof(struct lquota_acct_rec, ispace)); - LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->ispace) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_acct_rec *)0)->ispace)); - - /* Checks for struct lquota_glb_rec */ - LASSERTF((int)sizeof(struct lquota_glb_rec) == 32, "found %lld\n", - (long long)(int)sizeof(struct lquota_glb_rec)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_hardlimit) == 0, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_hardlimit)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_softlimit) == 8, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_softlimit)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_time) == 16, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_time)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_time) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_time)); - LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_granted) 
== 24, "found %lld\n", - (long long)(int)offsetof(struct lquota_glb_rec, qbr_granted)); - LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted)); - - /* Checks for struct lquota_slv_rec */ - LASSERTF((int)sizeof(struct lquota_slv_rec) == 8, "found %lld\n", - (long long)(int)sizeof(struct lquota_slv_rec)); - LASSERTF((int)offsetof(struct lquota_slv_rec, qsr_granted) == 0, "found %lld\n", - (long long)(int)offsetof(struct lquota_slv_rec, qsr_granted)); - LASSERTF((int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted)); - - /* Checks for struct idx_info */ - LASSERTF((int)sizeof(struct idx_info) == 80, "found %lld\n", - (long long)(int)sizeof(struct idx_info)); - LASSERTF((int)offsetof(struct idx_info, ii_magic) == 0, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_magic)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_magic) == 4, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_magic)); - LASSERTF((int)offsetof(struct idx_info, ii_flags) == 4, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_flags)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_flags)); - LASSERTF((int)offsetof(struct idx_info, ii_count) == 8, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_count)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_count) == 2, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_count)); - LASSERTF((int)offsetof(struct idx_info, ii_pad0) == 10, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_pad0)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad0) == 2, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_pad0)); - LASSERTF((int)offsetof(struct idx_info, ii_attrs) == 12, "found 
%lld\n", - (long long)(int)offsetof(struct idx_info, ii_attrs)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_attrs) == 4, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_attrs)); - LASSERTF((int)offsetof(struct idx_info, ii_fid) == 16, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_fid)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_fid)); - LASSERTF((int)offsetof(struct idx_info, ii_version) == 32, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_version)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_version) == 8, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_version)); - LASSERTF((int)offsetof(struct idx_info, ii_hash_start) == 40, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_hash_start)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_start) == 8, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_start)); - LASSERTF((int)offsetof(struct idx_info, ii_hash_end) == 48, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_hash_end)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_end) == 8, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_end)); - LASSERTF((int)offsetof(struct idx_info, ii_keysize) == 56, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_keysize)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_keysize) == 2, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_keysize)); - LASSERTF((int)offsetof(struct idx_info, ii_recsize) == 58, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_recsize)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_recsize) == 2, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_recsize)); - LASSERTF((int)offsetof(struct idx_info, ii_pad1) == 60, "found %lld\n", - (long long)(int)offsetof(struct idx_info, 
ii_pad1)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad1) == 4, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_pad1)); - LASSERTF((int)offsetof(struct idx_info, ii_pad2) == 64, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_pad2)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad2) == 8, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_pad2)); - LASSERTF((int)offsetof(struct idx_info, ii_pad3) == 72, "found %lld\n", - (long long)(int)offsetof(struct idx_info, ii_pad3)); - LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad3) == 8, "found %lld\n", - (long long)(int)sizeof(((struct idx_info *)0)->ii_pad3)); - CLASSERT(IDX_INFO_MAGIC == 0x3D37CC37); - - /* Checks for struct lu_idxpage */ - LASSERTF((int)sizeof(struct lu_idxpage) == 16, "found %lld\n", - (long long)(int)sizeof(struct lu_idxpage)); - LASSERTF((int)offsetof(struct lu_idxpage, lip_magic) == 0, "found %lld\n", - (long long)(int)offsetof(struct lu_idxpage, lip_magic)); - LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_magic) == 4, "found %lld\n", - (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_magic)); - LASSERTF((int)offsetof(struct lu_idxpage, lip_flags) == 4, "found %lld\n", - (long long)(int)offsetof(struct lu_idxpage, lip_flags)); - LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_flags) == 2, "found %lld\n", - (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_flags)); - LASSERTF((int)offsetof(struct lu_idxpage, lip_nr) == 6, "found %lld\n", - (long long)(int)offsetof(struct lu_idxpage, lip_nr)); - LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_nr) == 2, "found %lld\n", - (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_nr)); - LASSERTF((int)offsetof(struct lu_idxpage, lip_pad0) == 8, "found %lld\n", - (long long)(int)offsetof(struct lu_idxpage, lip_pad0)); - LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_pad0) == 8, "found %lld\n", - (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_pad0)); - 
CLASSERT(LIP_MAGIC == 0x8A6D6B6C); - LASSERTF(LIP_HDR_SIZE == 16, "found %lld\n", - (long long)LIP_HDR_SIZE); - LASSERTF(II_FL_NOHASH == 1, "found %lld\n", - (long long)II_FL_NOHASH); - LASSERTF(II_FL_VARKEY == 2, "found %lld\n", - (long long)II_FL_VARKEY); - LASSERTF(II_FL_VARREC == 4, "found %lld\n", - (long long)II_FL_VARREC); - LASSERTF(II_FL_NONUNQ == 8, "found %lld\n", - (long long)II_FL_NONUNQ); - /* Checks for struct niobuf_remote */ LASSERTF((int)sizeof(struct niobuf_remote) == 16, "found %lld\n", (long long)(int)sizeof(struct niobuf_remote)); @@ -3753,50 +3581,6 @@ void lustre_assert_wire_constants(void) LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n", (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap)); - /* Checks for struct quota_body */ - LASSERTF((int)sizeof(struct quota_body) == 112, "found %lld\n", - (long long)(int)sizeof(struct quota_body)); - LASSERTF((int)offsetof(struct quota_body, qb_fid) == 0, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_fid)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_fid)); - LASSERTF((int)offsetof(struct quota_body, qb_id) == 16, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_id)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_id) == 16, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_id)); - LASSERTF((int)offsetof(struct quota_body, qb_flags) == 32, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_flags)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_flags) == 4, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_flags)); - LASSERTF((int)offsetof(struct quota_body, qb_padding) == 36, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_padding)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding) == 4, "found %lld\n", - (long long)(int)sizeof(((struct 
quota_body *)0)->qb_padding)); - LASSERTF((int)offsetof(struct quota_body, qb_count) == 40, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_count)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_count) == 8, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_count)); - LASSERTF((int)offsetof(struct quota_body, qb_usage) == 48, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_usage)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_usage) == 8, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_usage)); - LASSERTF((int)offsetof(struct quota_body, qb_slv_ver) == 56, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_slv_ver)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_slv_ver) == 8, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_slv_ver)); - LASSERTF((int)offsetof(struct quota_body, qb_lockh) == 64, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_lockh)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_lockh) == 8, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_lockh)); - LASSERTF((int)offsetof(struct quota_body, qb_glb_lockh) == 72, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_glb_lockh)); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_glb_lockh) == 8, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_glb_lockh)); - LASSERTF((int)offsetof(struct quota_body, qb_padding1[4]) == 112, "found %lld\n", - (long long)(int)offsetof(struct quota_body, qb_padding1[4])); - LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding1[4]) == 8, "found %lld\n", - (long long)(int)sizeof(((struct quota_body *)0)->qb_padding1[4])); - /* Checks for struct mgs_target_info */ LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n", (long long)(int)sizeof(struct mgs_target_info)); @@ -4431,60 +4215,4 @@ void lustre_assert_wire_constants(void) 
LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_archive_id) == 4, "found %lld\n", (long long)sizeof(((struct hsm_user_import *)0)->hui_archive_id)); - - /* Checks for struct update_buf */ - LASSERTF((int)sizeof(struct update_buf) == 8, "found %lld\n", - (long long)(int)sizeof(struct update_buf)); - LASSERTF((int)offsetof(struct update_buf, ub_magic) == 0, "found %lld\n", - (long long)(int)offsetof(struct update_buf, ub_magic)); - LASSERTF((int)sizeof(((struct update_buf *)0)->ub_magic) == 4, "found %lld\n", - (long long)(int)sizeof(((struct update_buf *)0)->ub_magic)); - LASSERTF((int)offsetof(struct update_buf, ub_count) == 4, "found %lld\n", - (long long)(int)offsetof(struct update_buf, ub_count)); - LASSERTF((int)sizeof(((struct update_buf *)0)->ub_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct update_buf *)0)->ub_count)); - LASSERTF((int)offsetof(struct update_buf, ub_bufs) == 8, "found %lld\n", - (long long)(int)offsetof(struct update_buf, ub_bufs)); - LASSERTF((int)sizeof(((struct update_buf *)0)->ub_bufs) == 0, "found %lld\n", - (long long)(int)sizeof(((struct update_buf *)0)->ub_bufs)); - - /* Checks for struct update_reply */ - LASSERTF((int)sizeof(struct update_reply) == 8, "found %lld\n", - (long long)(int)sizeof(struct update_reply)); - LASSERTF((int)offsetof(struct update_reply, ur_version) == 0, "found %lld\n", - (long long)(int)offsetof(struct update_reply, ur_version)); - LASSERTF((int)sizeof(((struct update_reply *)0)->ur_version) == 4, "found %lld\n", - (long long)(int)sizeof(((struct update_reply *)0)->ur_version)); - LASSERTF((int)offsetof(struct update_reply, ur_count) == 4, "found %lld\n", - (long long)(int)offsetof(struct update_reply, ur_count)); - LASSERTF((int)sizeof(((struct update_reply *)0)->ur_count) == 4, "found %lld\n", - (long long)(int)sizeof(((struct update_reply *)0)->ur_count)); - LASSERTF((int)offsetof(struct update_reply, ur_lens) == 8, "found %lld\n", - (long long)(int)offsetof(struct update_reply, 
ur_lens)); - LASSERTF((int)sizeof(((struct update_reply *)0)->ur_lens) == 0, "found %lld\n", - (long long)(int)sizeof(((struct update_reply *)0)->ur_lens)); - - /* Checks for struct update */ - LASSERTF((int)sizeof(struct update) == 56, "found %lld\n", - (long long)(int)sizeof(struct update)); - LASSERTF((int)offsetof(struct update, u_type) == 0, "found %lld\n", - (long long)(int)offsetof(struct update, u_type)); - LASSERTF((int)sizeof(((struct update *)0)->u_type) == 4, "found %lld\n", - (long long)(int)sizeof(((struct update *)0)->u_type)); - LASSERTF((int)offsetof(struct update, u_batchid) == 4, "found %lld\n", - (long long)(int)offsetof(struct update, u_batchid)); - LASSERTF((int)sizeof(((struct update *)0)->u_batchid) == 4, "found %lld\n", - (long long)(int)sizeof(((struct update *)0)->u_batchid)); - LASSERTF((int)offsetof(struct update, u_fid) == 8, "found %lld\n", - (long long)(int)offsetof(struct update, u_fid)); - LASSERTF((int)sizeof(((struct update *)0)->u_fid) == 16, "found %lld\n", - (long long)(int)sizeof(((struct update *)0)->u_fid)); - LASSERTF((int)offsetof(struct update, u_lens) == 24, "found %lld\n", - (long long)(int)offsetof(struct update, u_lens)); - LASSERTF((int)sizeof(((struct update *)0)->u_lens) == 32, "found %lld\n", - (long long)(int)sizeof(((struct update *)0)->u_lens)); - LASSERTF((int)offsetof(struct update, u_bufs) == 56, "found %lld\n", - (long long)(int)offsetof(struct update, u_bufs)); - LASSERTF((int)sizeof(((struct update *)0)->u_bufs) == 0, "found %lld\n", - (long long)(int)sizeof(((struct update *)0)->u_bufs)); } |